Bug 1428952 - P4: Vendor rust crates. r=rillian
authorDan Glastonbury <dan.glastonbury@gmail.com>
Mon, 06 Nov 2017 16:26:30 +1000
changeset 455287 33873f5e18b985b326cfd61ec605856b59df88f2
parent 455286 5f79f72ac9fed56b8fdc81100dd672ad4f2fe585
child 455288 76d337a7168f75dae16403f1697f080527b02e1a
push id1683
push usersfraser@mozilla.com
push dateThu, 26 Apr 2018 16:43:40 +0000
treeherdermozilla-release@5af6cb21869d [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersrillian
bugs1428952
milestone60.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1428952 - P4: Vendor rust crates. r=rillian MozReview-Commit-ID: 7UQPozxpmC1
third_party/rust/futures-cpupool/.cargo-checksum.json
third_party/rust/futures-cpupool/Cargo.toml
third_party/rust/futures-cpupool/README.md
third_party/rust/futures-cpupool/src/lib.rs
third_party/rust/futures-cpupool/tests/smoke.rs
third_party/rust/log/.cargo-checksum.json
third_party/rust/log/Cargo.lock
third_party/rust/mio-uds/.cargo-checksum.json
third_party/rust/mio-uds/Cargo.lock
third_party/rust/scoped-tls/.cargo-checksum.json
third_party/rust/scoped-tls/.travis.yml
third_party/rust/scoped-tls/Cargo.toml
third_party/rust/scoped-tls/LICENSE-APACHE
third_party/rust/scoped-tls/LICENSE-MIT
third_party/rust/scoped-tls/README.md
third_party/rust/scoped-tls/appveyor.yml
third_party/rust/scoped-tls/src/lib.rs
third_party/rust/slab/.cargo-checksum.json
third_party/rust/slab/Cargo.lock
third_party/rust/tokio-core/.cargo-checksum.json
third_party/rust/tokio-core/.travis.yml
third_party/rust/tokio-core/Cargo.toml
third_party/rust/tokio-core/LICENSE-APACHE
third_party/rust/tokio-core/LICENSE-MIT
third_party/rust/tokio-core/README.md
third_party/rust/tokio-core/appveyor.yml
third_party/rust/tokio-core/benches/latency.rs
third_party/rust/tokio-core/benches/mio-ops.rs
third_party/rust/tokio-core/examples/chat.rs
third_party/rust/tokio-core/examples/connect.rs
third_party/rust/tokio-core/examples/echo-udp.rs
third_party/rust/tokio-core/examples/echo.rs
third_party/rust/tokio-core/examples/hello.rs
third_party/rust/tokio-core/examples/proxy.rs
third_party/rust/tokio-core/examples/sink.rs
third_party/rust/tokio-core/examples/udp-codec.rs
third_party/rust/tokio-core/src/channel.rs
third_party/rust/tokio-core/src/heap.rs
third_party/rust/tokio-core/src/io/copy.rs
third_party/rust/tokio-core/src/io/flush.rs
third_party/rust/tokio-core/src/io/frame.rs
third_party/rust/tokio-core/src/io/mod.rs
third_party/rust/tokio-core/src/io/read.rs
third_party/rust/tokio-core/src/io/read_exact.rs
third_party/rust/tokio-core/src/io/read_to_end.rs
third_party/rust/tokio-core/src/io/read_until.rs
third_party/rust/tokio-core/src/io/split.rs
third_party/rust/tokio-core/src/io/window.rs
third_party/rust/tokio-core/src/io/write_all.rs
third_party/rust/tokio-core/src/lib.rs
third_party/rust/tokio-core/src/net/mod.rs
third_party/rust/tokio-core/src/net/tcp.rs
third_party/rust/tokio-core/src/net/udp/frame.rs
third_party/rust/tokio-core/src/net/udp/mod.rs
third_party/rust/tokio-core/src/reactor/interval.rs
third_party/rust/tokio-core/src/reactor/io_token.rs
third_party/rust/tokio-core/src/reactor/mod.rs
third_party/rust/tokio-core/src/reactor/poll_evented.rs
third_party/rust/tokio-core/src/reactor/timeout.rs
third_party/rust/tokio-core/src/reactor/timeout_token.rs
third_party/rust/tokio-core/tests/buffered.rs
third_party/rust/tokio-core/tests/chain.rs
third_party/rust/tokio-core/tests/echo.rs
third_party/rust/tokio-core/tests/interval.rs
third_party/rust/tokio-core/tests/limit.rs
third_party/rust/tokio-core/tests/line-frames.rs
third_party/rust/tokio-core/tests/pipe-hup.rs
third_party/rust/tokio-core/tests/spawn.rs
third_party/rust/tokio-core/tests/stream-buffered.rs
third_party/rust/tokio-core/tests/tcp.rs
third_party/rust/tokio-core/tests/timeout.rs
third_party/rust/tokio-core/tests/udp.rs
third_party/rust/tokio-io/.cargo-checksum.json
third_party/rust/tokio-io/.travis.yml
third_party/rust/tokio-io/Cargo.toml
third_party/rust/tokio-io/LICENSE-APACHE
third_party/rust/tokio-io/LICENSE-MIT
third_party/rust/tokio-io/README.md
third_party/rust/tokio-io/src/codec.rs
third_party/rust/tokio-io/src/copy.rs
third_party/rust/tokio-io/src/flush.rs
third_party/rust/tokio-io/src/framed.rs
third_party/rust/tokio-io/src/framed_read.rs
third_party/rust/tokio-io/src/framed_write.rs
third_party/rust/tokio-io/src/io.rs
third_party/rust/tokio-io/src/length_delimited.rs
third_party/rust/tokio-io/src/lib.rs
third_party/rust/tokio-io/src/lines.rs
third_party/rust/tokio-io/src/read.rs
third_party/rust/tokio-io/src/read_exact.rs
third_party/rust/tokio-io/src/read_to_end.rs
third_party/rust/tokio-io/src/read_until.rs
third_party/rust/tokio-io/src/shutdown.rs
third_party/rust/tokio-io/src/split.rs
third_party/rust/tokio-io/src/window.rs
third_party/rust/tokio-io/src/write_all.rs
third_party/rust/tokio-io/tests/async_read.rs
third_party/rust/tokio-io/tests/framed.rs
third_party/rust/tokio-io/tests/framed_read.rs
third_party/rust/tokio-io/tests/framed_write.rs
third_party/rust/tokio-io/tests/length_delimited.rs
third_party/rust/tokio-uds/.cargo-checksum.json
third_party/rust/tokio-uds/.travis.yml
third_party/rust/tokio-uds/Cargo.lock
third_party/rust/tokio-uds/Cargo.toml
third_party/rust/tokio-uds/LICENSE-APACHE
third_party/rust/tokio-uds/LICENSE-MIT
third_party/rust/tokio-uds/README.md
third_party/rust/tokio-uds/src/frame.rs
third_party/rust/tokio-uds/src/lib.rs
third_party/rust/tokio-uds/src/ucred.rs
toolkit/library/gtest/rust/Cargo.lock
toolkit/library/rust/Cargo.lock
toolkit/library/rust/shared/Cargo.toml
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures-cpupool/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{"Cargo.toml":"07c97c2816b3cc41857a0cbbb5109f2a7ef2bd81131a3f4f3621f438a1eb7561","README.md":"09c5f4bacff34b3f7e1969f5b9590c062a8aabac7c2442944eab1d2fc1301373","src/lib.rs":"a368e87ed6f93552ba12391cd765d0b0b34b9fe42617a2c1f6a5ce81a0c5de11","tests/smoke.rs":"3e237fc14d19775026f6cff45d73de6bb6b4db6699ce8ab4972ed85165200ec2"},"package":"a283c84501e92cade5ea673a2a7ca44f71f209ccdd302a3e0896f50083d2c5ff"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures-cpupool/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "futures-cpupool"
+version = "0.1.5"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/alexcrichton/futures-rs"
+homepage = "https://github.com/alexcrichton/futures-rs"
+documentation = "https://docs.rs/futures-cpupool"
+description = """
+An implementation of thread pools which hand out futures to the results of the
+computation on the threads themselves.
+"""
+
+[dependencies]
+num_cpus = "1.0"
+
+[dependencies.futures]
+path = ".."
+version = "0.1"
+default-features = false
+features = ["use_std"]
+
+[features]
+default = ["with-deprecated"]
+with-deprecated = ["futures/with-deprecated"]
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures-cpupool/README.md
@@ -0,0 +1,36 @@
+# futures-cpupool
+
+A library for creating futures representing work happening concurrently on a
+dedicated thread pool.
+
+[![Build Status](https://travis-ci.org/alexcrichton/futures-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/futures-rs)
+[![Build status](https://ci.appveyor.com/api/projects/status/yl5w3ittk4kggfsh?svg=true)](https://ci.appveyor.com/project/alexcrichton/futures-rs)
+
+[Documentation](https://docs.rs/futures-cpupool)
+
+## Usage
+
+First, add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+futures = "0.1"
+futures-cpupool = "0.1"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate futures;
+extern crate futures_cpupool;
+
+use futures_cpupool::CpuPool;
+```
+
+# License
+
+`futures-cpupool` is primarily distributed under the terms of both the MIT
+license and the Apache License (Version 2.0), with portions covered by various
+BSD-like licenses.
+
+See LICENSE-APACHE, and LICENSE-MIT for details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures-cpupool/src/lib.rs
@@ -0,0 +1,384 @@
+//! A simple crate for executing work on a thread pool, and getting back a
+//! future.
+//!
+//! This crate provides a simple thread pool abstraction for running work
+//! externally from the current thread that's running. An instance of `Future`
+//! is handed back to represent that the work may be done later, and further
+//! computations can be chained along with it as well.
+//!
+//! ```rust
+//! extern crate futures;
+//! extern crate futures_cpupool;
+//!
+//! use futures::Future;
+//! use futures_cpupool::CpuPool;
+//!
+//! # fn long_running_future(a: u32) -> futures::future::BoxFuture<u32, ()> {
+//! #     futures::future::result(Ok(a)).boxed()
+//! # }
+//! # fn main() {
+//!
+//! // Create a worker thread pool with four threads
+//! let pool = CpuPool::new(4);
+//!
+//! // Execute some work on the thread pool, optionally closing over data.
+//! let a = pool.spawn(long_running_future(2));
+//! let b = pool.spawn(long_running_future(100));
+//!
+//! // Express some further computation once the work is completed on the thread
+//! // pool.
+//! let c = a.join(b).map(|(a, b)| a + b).wait().unwrap();
+//!
+//! // Print out the result
+//! println!("{:?}", c);
+//! # }
+//! ```
+
+#![deny(missing_docs)]
+
+extern crate futures;
+extern crate num_cpus;
+
+use std::panic::{self, AssertUnwindSafe};
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::mpsc;
+use std::thread;
+
+use futures::{IntoFuture, Future, Poll, Async};
+use futures::future::lazy;
+use futures::sync::oneshot::{channel, Sender, Receiver};
+use futures::executor::{self, Run, Executor};
+
+/// A thread pool intended to run CPU intensive work.
+///
+/// This thread pool will hand out futures representing the completed work
+/// that happens on the thread pool itself, and the futures can then be later
+/// composed with other work as part of an overall computation.
+///
+/// The worker threads associated with a thread pool are kept alive so long as
+/// there is an open handle to the `CpuPool` or there is work running on them. Once
+/// all work has been drained and all references have gone away the worker
+/// threads will be shut down.
+///
+/// Currently `CpuPool` implements `Clone` which just clones a new reference to
+/// the underlying thread pool.
+///
+/// **Note:** if you use CpuPool inside a library it's better accept a
+/// `Builder` object for thread configuration rather than configuring just
+/// pool size.  This not only future proof for other settings but also allows
+/// user to attach monitoring tools to lifecycle hooks.
+pub struct CpuPool {
+    inner: Arc<Inner>,
+}
+
+/// Thread pool configuration object
+///
+/// Builder starts with a number of workers equal to the number
+/// of CPUs on the host. But you can change it until you call `create()`.
+pub struct Builder {
+    pool_size: usize,
+    name_prefix: Option<String>,
+    after_start: Option<Arc<Fn() + Send + Sync>>,
+    before_stop: Option<Arc<Fn() + Send + Sync>>,
+}
+
+struct MySender<F, T> {
+    fut: F,
+    tx: Option<Sender<T>>,
+    keep_running_flag: Arc<AtomicBool>,
+}
+
+fn _assert() {
+    fn _assert_send<T: Send>() {}
+    fn _assert_sync<T: Sync>() {}
+    _assert_send::<CpuPool>();
+    _assert_sync::<CpuPool>();
+}
+
+struct Inner {
+    tx: Mutex<mpsc::Sender<Message>>,
+    rx: Mutex<mpsc::Receiver<Message>>,
+    cnt: AtomicUsize,
+    size: usize,
+    after_start: Option<Arc<Fn() + Send + Sync>>,
+    before_stop: Option<Arc<Fn() + Send + Sync>>,
+}
+
+/// The type of future returned from the `CpuPool::spawn` function, which
+/// proxies the futures running on the thread pool.
+///
+/// This future will resolve in the same way as the underlying future, and it
+/// will propagate panics.
+#[must_use]
+pub struct CpuFuture<T, E> {
+    inner: Receiver<thread::Result<Result<T, E>>>,
+    keep_running_flag: Arc<AtomicBool>,
+}
+
+enum Message {
+    Run(Run),
+    Close,
+}
+
+impl CpuPool {
+    /// Creates a new thread pool with `size` worker threads associated with it.
+    ///
+    /// The returned handle can use `execute` to run work on this thread pool,
+    /// and clones can be made of it to get multiple references to the same
+    /// thread pool.
+    ///
+    /// This is a shortcut for:
+    /// ```rust
+    /// Builder::new().pool_size(size).create()
+    /// ```
+    ///
+    /// # Panics
+    ///
+    /// Panics if `size == 0`.
+    pub fn new(size: usize) -> CpuPool {
+        Builder::new().pool_size(size).create()
+    }
+
+    /// Creates a new thread pool with a number of workers equal to the number
+    /// of CPUs on the host.
+    ///
+    /// This is a shortcut for:
+    /// ```rust
+    /// Builder::new().create()
+    /// ```
+    pub fn new_num_cpus() -> CpuPool {
+        Builder::new().create()
+    }
+
+    /// Spawns a future to run on this thread pool, returning a future
+    /// representing the produced value.
+    ///
+    /// This function will execute the future `f` on the associated thread
+    /// pool, and return a future representing the finished computation. The
+    /// returned future serves as a proxy to the computation that `F` is
+    /// running.
+    ///
+    /// To simply run an arbitrary closure on a thread pool and extract the
+    /// result, you can use the `future::lazy` combinator to defer work to
+    /// executing on the thread pool itself.
+    ///
+    /// Note that if the future `f` panics it will be caught by default and the
+    /// returned future will propagate the panic. That is, panics will not tear
+    /// down the thread pool and will be propagated to the returned future's
+    /// `poll` method if queried.
+    ///
+    /// If the returned future is dropped then this `CpuPool` will attempt to
+    /// cancel the computation, if possible. That is, if the computation is in
+    /// the middle of working, it will be interrupted when possible.
+    pub fn spawn<F>(&self, f: F) -> CpuFuture<F::Item, F::Error>
+        where F: Future + Send + 'static,
+              F::Item: Send + 'static,
+              F::Error: Send + 'static,
+    {
+        let (tx, rx) = channel();
+        let keep_running_flag = Arc::new(AtomicBool::new(false));
+        // AssertUnwindSafe is used here becuase `Send + 'static` is basically
+        // an alias for an implementation of the `UnwindSafe` trait but we can't
+        // express that in the standard library right now.
+        let sender = MySender {
+            fut: AssertUnwindSafe(f).catch_unwind(),
+            tx: Some(tx),
+            keep_running_flag: keep_running_flag.clone(),
+        };
+        executor::spawn(sender).execute(self.inner.clone());
+        CpuFuture { inner: rx , keep_running_flag: keep_running_flag.clone() }
+    }
+
+    /// Spawns a closure on this thread pool.
+    ///
+    /// This function is a convenience wrapper around the `spawn` function above
+    /// for running a closure wrapped in `future::lazy`. It will spawn the
+    /// function `f` provided onto the thread pool, and continue to run the
+    /// future returned by `f` on the thread pool as well.
+    ///
+    /// The returned future will be a handle to the result produced by the
+    /// future that `f` returns.
+    pub fn spawn_fn<F, R>(&self, f: F) -> CpuFuture<R::Item, R::Error>
+        where F: FnOnce() -> R + Send + 'static,
+              R: IntoFuture + 'static,
+              R::Future: Send + 'static,
+              R::Item: Send + 'static,
+              R::Error: Send + 'static,
+    {
+        self.spawn(lazy(f))
+    }
+}
+
+impl Inner {
+    fn send(&self, msg: Message) {
+        self.tx.lock().unwrap().send(msg).unwrap();
+    }
+
+    fn work(&self) {
+        self.after_start.as_ref().map(|fun| fun());
+        loop {
+            let msg = self.rx.lock().unwrap().recv().unwrap();
+            match msg {
+                Message::Run(r) => r.run(),
+                Message::Close => break,
+            }
+        }
+        self.before_stop.as_ref().map(|fun| fun());
+    }
+}
+
+impl Clone for CpuPool {
+    fn clone(&self) -> CpuPool {
+        self.inner.cnt.fetch_add(1, Ordering::Relaxed);
+        CpuPool { inner: self.inner.clone() }
+    }
+}
+
+impl Drop for CpuPool {
+    fn drop(&mut self) {
+        if self.inner.cnt.fetch_sub(1, Ordering::Relaxed) == 1 {
+            for _ in 0..self.inner.size {
+                self.inner.send(Message::Close);
+            }
+        }
+    }
+}
+
+impl Executor for Inner {
+    fn execute(&self, run: Run) {
+        self.send(Message::Run(run))
+    }
+}
+
+impl<T, E> CpuFuture<T, E> {
+    /// Drop this future without canceling the underlying future.
+    ///
+    /// When `CpuFuture` is dropped, `CpuPool` will try to abort the underlying
+    /// future. This function can be used when user wants to drop but keep
+    /// executing the underlying future.
+    pub fn forget(self) {
+        self.keep_running_flag.store(true, Ordering::SeqCst);
+    }
+}
+
+impl<T: Send + 'static, E: Send + 'static> Future for CpuFuture<T, E> {
+    type Item = T;
+    type Error = E;
+
+    fn poll(&mut self) -> Poll<T, E> {
+        match self.inner.poll().expect("shouldn't be canceled") {
+            Async::Ready(Ok(Ok(e))) => Ok(e.into()),
+            Async::Ready(Ok(Err(e))) => Err(e),
+            Async::Ready(Err(e)) => panic::resume_unwind(e),
+            Async::NotReady => Ok(Async::NotReady),
+        }
+    }
+}
+
+impl<F: Future> Future for MySender<F, Result<F::Item, F::Error>> {
+    type Item = ();
+    type Error = ();
+
+    fn poll(&mut self) -> Poll<(), ()> {
+        if let Ok(Async::Ready(_)) = self.tx.as_mut().unwrap().poll_cancel() {
+            if !self.keep_running_flag.load(Ordering::SeqCst) {
+                // Cancelled, bail out
+                return Ok(().into())
+            }
+        }
+
+        let res = match self.fut.poll() {
+            Ok(Async::Ready(e)) => Ok(e),
+            Ok(Async::NotReady) => return Ok(Async::NotReady),
+            Err(e) => Err(e),
+        };
+
+        // if the receiving end has gone away then that's ok, we just ignore the
+        // send error here.
+        drop(self.tx.take().unwrap().send(res));
+        Ok(Async::Ready(()))
+    }
+}
+
+impl Builder {
+    /// Create a builder a number of workers equal to the number
+    /// of CPUs on the host.
+    pub fn new() -> Builder {
+        Builder {
+            pool_size: num_cpus::get(),
+            name_prefix: None,
+            after_start: None,
+            before_stop: None,
+        }
+    }
+
+    /// Set size of a future CpuPool
+    ///
+    /// The size of a thread pool is the number of worker threads spawned
+    pub fn pool_size(&mut self, size: usize) -> &mut Self {
+        self.pool_size = size;
+        self
+    }
+
+    /// Set thread name prefix of a future CpuPool
+    ///
+    /// Thread name prefix is used for generating thread names. For example, if prefix is
+    /// `my-pool-`, then threads in the pool will get names like `my-pool-1` etc.
+    pub fn name_prefix<S: Into<String>>(&mut self, name_prefix: S) -> &mut Self {
+        self.name_prefix = Some(name_prefix.into());
+        self
+    }
+
+    /// Execute function `f` right after each thread is started but before
+    /// running any jobs on it
+    ///
+    /// This is initially intended for bookkeeping and monitoring uses
+    pub fn after_start<F>(&mut self, f: F) -> &mut Self
+        where F: Fn() + Send + Sync + 'static
+    {
+        self.after_start = Some(Arc::new(f));
+        self
+    }
+
+    /// Execute function `f` before each worker thread stops
+    ///
+    /// This is initially intended for bookkeeping and monitoring uses
+    pub fn before_stop<F>(&mut self, f: F) -> &mut Self
+        where F: Fn() + Send + Sync + 'static
+    {
+        self.before_stop = Some(Arc::new(f));
+        self
+    }
+
+    /// Create CpuPool with configured parameters
+    ///
+    /// # Panics
+    ///
+    /// Panics if `pool_size == 0`.
+    pub fn create(&mut self) -> CpuPool {
+        let (tx, rx) = mpsc::channel();
+        let pool = CpuPool {
+            inner: Arc::new(Inner {
+                tx: Mutex::new(tx),
+                rx: Mutex::new(rx),
+                cnt: AtomicUsize::new(1),
+                size: self.pool_size,
+                after_start: self.after_start.clone(),
+                before_stop: self.before_stop.clone(),
+            }),
+        };
+        assert!(self.pool_size > 0);
+
+        for counter in 0..self.pool_size {
+            let inner = pool.inner.clone();
+            let mut thread_builder = thread::Builder::new();
+            if let Some(ref name_prefix) = self.name_prefix {
+                thread_builder = thread_builder.name(format!("{}{}", name_prefix, counter));
+            }
+            thread_builder.spawn(move || inner.work()).unwrap();
+        }
+
+        return pool
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/futures-cpupool/tests/smoke.rs
@@ -0,0 +1,110 @@
+extern crate futures;
+extern crate futures_cpupool;
+
+use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use std::thread;
+use std::time::Duration;
+
+use futures::future::{Future, BoxFuture};
+use futures_cpupool::{CpuPool, Builder};
+
+fn done<T: Send + 'static>(t: T) -> BoxFuture<T, ()> {
+    futures::future::ok(t).boxed()
+}
+
+#[test]
+fn join() {
+    let pool = CpuPool::new(2);
+    let a = pool.spawn(done(1));
+    let b = pool.spawn(done(2));
+    let res = a.join(b).map(|(a, b)| a + b).wait();
+
+    assert_eq!(res.unwrap(), 3);
+}
+
+#[test]
+fn select() {
+    let pool = CpuPool::new(2);
+    let a = pool.spawn(done(1));
+    let b = pool.spawn(done(2));
+    let (item1, next) = a.select(b).wait().ok().unwrap();
+    let item2 = next.wait().unwrap();
+
+    assert!(item1 != item2);
+    assert!((item1 == 1 && item2 == 2) || (item1 == 2 && item2 == 1));
+}
+
+#[test]
+fn threads_go_away() {
+    static CNT: AtomicUsize = ATOMIC_USIZE_INIT;
+
+    struct A;
+
+    impl Drop for A {
+        fn drop(&mut self) {
+            CNT.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+
+    thread_local!(static FOO: A = A);
+
+    let pool = CpuPool::new(2);
+    let _handle = pool.spawn_fn(|| {
+        FOO.with(|_| ());
+        Ok::<(), ()>(())
+    });
+    drop(pool);
+
+    for _ in 0..100 {
+        if CNT.load(Ordering::SeqCst) == 1 {
+            return
+        }
+        thread::sleep(Duration::from_millis(10));
+    }
+    panic!("thread didn't exit");
+}
+
+#[test]
+fn lifecycle_test() {
+    static NUM_STARTS: AtomicUsize = ATOMIC_USIZE_INIT;
+    static NUM_STOPS: AtomicUsize = ATOMIC_USIZE_INIT;
+
+    fn after_start() {
+        NUM_STARTS.fetch_add(1, Ordering::SeqCst);
+    }
+
+    fn before_stop() {
+        NUM_STOPS.fetch_add(1, Ordering::SeqCst);
+    }
+
+    let pool = Builder::new()
+        .pool_size(4)
+        .after_start(after_start)
+        .before_stop(before_stop)
+        .create();
+    let _handle = pool.spawn_fn(|| {
+        Ok::<(), ()>(())
+    });
+    drop(pool);
+
+    for _ in 0..100 {
+        if NUM_STOPS.load(Ordering::SeqCst) == 4 {
+            assert_eq!(NUM_STARTS.load(Ordering::SeqCst), 4);
+            return;
+        }
+        thread::sleep(Duration::from_millis(10));
+    }
+    panic!("thread didn't exit");
+}
+
+#[test]
+fn thread_name() {
+    let pool = Builder::new()
+        .name_prefix("my-pool-")
+        .create();
+    let future = pool.spawn_fn(|| {
+        assert!(thread::current().name().unwrap().starts_with("my-pool-"));
+        Ok::<(), ()>(())
+    });
+    let _ = future.wait();
+}
--- a/third_party/rust/log/.cargo-checksum.json
+++ b/third_party/rust/log/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{".travis.yml":"985cf95c79f32c65766927fd6ef5079f8c14f235ddb4213e6410d90a86a95811","Cargo.toml":"0a4a756f7ef47f5dfa221a173b21f9ec496b448aafcd9bde08d9d16935b55007","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"aa8356455efcc3d49f66e7fa394eac292c9158164dff074e32c699b64891cb4b","appveyor.yml":"c61473b8c780ad2626282ce2b2ba0ef278082b6afe151a62ff419f33eaf90221","src/lib.rs":"75b44acfc9627b821cd725649db07693a43b2e44b2fac19b79354c6d950c4038","src/macros.rs":"3953610da3ee2dc393262b753f2406d1864a1cbd74d2bd20d279e09aabfe7131","tests/filters.rs":"cc61ed41a6cd77e5aef91cc8c76216b492d8de34f00635254f3835a3d964ce22"},"package":"880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"}
\ No newline at end of file
+{"files":{".travis.yml":"985cf95c79f32c65766927fd6ef5079f8c14f235ddb4213e6410d90a86a95811","Cargo.lock":"3418c448e56694895051838795d37282fffc911ec82ba2f1abaf2524d3f1f997","Cargo.toml":"0a4a756f7ef47f5dfa221a173b21f9ec496b448aafcd9bde08d9d16935b55007","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"aa8356455efcc3d49f66e7fa394eac292c9158164dff074e32c699b64891cb4b","appveyor.yml":"c61473b8c780ad2626282ce2b2ba0ef278082b6afe151a62ff419f33eaf90221","src/lib.rs":"75b44acfc9627b821cd725649db07693a43b2e44b2fac19b79354c6d950c4038","src/macros.rs":"3953610da3ee2dc393262b753f2406d1864a1cbd74d2bd20d279e09aabfe7131","tests/filters.rs":"cc61ed41a6cd77e5aef91cc8c76216b492d8de34f00635254f3835a3d964ce22"},"package":"880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/log/Cargo.lock
@@ -0,0 +1,4 @@
+[root]
+name = "log"
+version = "0.3.8"
+
--- a/third_party/rust/mio-uds/.cargo-checksum.json
+++ b/third_party/rust/mio-uds/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{".travis.yml":"0602d18a229e5bd001e2aaf8ff26c1bdb3dba926f911aec8901c0ee7bed27ca9","Cargo.toml":"e503ea1d349539b2c75e3659660bc6232a447719ce2c7f7b7bec38fbbab6f640","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"322030d7ae24aec8fb2d2c32a7245c7a6dab5b885b439d599d3acd2ddca9bd80","src/datagram.rs":"b4311804bd4e330905fbf3e47e8c738759bbc039bf6ad2045490080a958d48c2","src/lib.rs":"381e167fff02b16d5234fe8bfa3f85684fee4796f83356c2dfdcbfe09fa9a1fe","src/listener.rs":"1cf1d1ca896f4718df27d1affbbc9125d86484c60f3dc479741f50ecb484a290","src/socket.rs":"6f14598a19d66cf76e50fe6a72c17dc840bf46216597a2e055a3bb5efff267e4","src/stream.rs":"7353ebe4a104ed0226c849e638cf9f6922083488b81b2e862c17b59d404ac15f","tests/echo.rs":"3056f97689f0696e970cc401bf0b1f5c0cd4f9952b6fe2dda60831c870f6171c","tests/smoke.rs":"2a6ee54b3f9d58a63cb3beecda8646f17ebdb3d20aa59c740f8c972cc06063e9"},"package":"1731a873077147b626d89cc6c2a0db6288d607496c5d10c0cfcf3adc697ec673"}
\ No newline at end of file
+{"files":{".travis.yml":"0602d18a229e5bd001e2aaf8ff26c1bdb3dba926f911aec8901c0ee7bed27ca9","Cargo.lock":"0182dfb563bbacc21f8b91f4d8894d6f2342bc947a14a6326a44b5a4d7da0051","Cargo.toml":"e503ea1d349539b2c75e3659660bc6232a447719ce2c7f7b7bec38fbbab6f640","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"322030d7ae24aec8fb2d2c32a7245c7a6dab5b885b439d599d3acd2ddca9bd80","src/datagram.rs":"b4311804bd4e330905fbf3e47e8c738759bbc039bf6ad2045490080a958d48c2","src/lib.rs":"381e167fff02b16d5234fe8bfa3f85684fee4796f83356c2dfdcbfe09fa9a1fe","src/listener.rs":"1cf1d1ca896f4718df27d1affbbc9125d86484c60f3dc479741f50ecb484a290","src/socket.rs":"6f14598a19d66cf76e50fe6a72c17dc840bf46216597a2e055a3bb5efff267e4","src/stream.rs":"7353ebe4a104ed0226c849e638cf9f6922083488b81b2e862c17b59d404ac15f","tests/echo.rs":"3056f97689f0696e970cc401bf0b1f5c0cd4f9952b6fe2dda60831c870f6171c","tests/smoke.rs":"2a6ee54b3f9d58a63cb3beecda8646f17ebdb3d20aa59c740f8c972cc06063e9"},"package":"1731a873077147b626d89cc6c2a0db6288d607496c5d10c0cfcf3adc697ec673"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/mio-uds/Cargo.lock
@@ -0,0 +1,169 @@
+[root]
+name = "mio-uds"
+version = "0.6.4"
+dependencies = [
+ "libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bitflags"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "fuchsia-zircon"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "fuchsia-zircon-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "fuchsia-zircon-sys"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "iovec"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "kernel32-sys"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "lazycell"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "log"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "mio"
+version = "0.6.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-zircon-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazycell 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "miow"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "net2"
+version = "0.2.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.3.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "slab"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "tempdir"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-build"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "ws2_32-sys"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[metadata]
+"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
+"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
+"checksum fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f6c0581a4e363262e52b87f59ee2afe3415361c6ec35e665924eb08afe8ff159"
+"checksum fuchsia-zircon-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "43f3795b4bae048dc6123a6b972cadde2e676f9ded08aef6bb77f5f157684a82"
+"checksum iovec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6e8b9c2247fcf6c6a1151f1156932be5606c9fd6f55a2d7f9fc1cb29386b2f7"
+"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
+"checksum lazycell 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b585b7a6811fb03aa10e74b278a0f00f8dd9b45dc681f148bb29fa5cb61859b"
+"checksum libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)" = "36fbc8a8929c632868295d0178dd8f63fc423fd7537ad0738372bd010b3ac9b0"
+"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
+"checksum mio 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0e8411968194c7b139e9105bc4ae7db0bae232af087147e72f0616ebf5fdb9cb"
+"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
+"checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09"
+"checksum rand 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)" = "6475140dfd8655aeb72e1fd4b7a1cc1c202be65d71669476e392fe62532b9edd"
+"checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"
+"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
+"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
+"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
+"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".travis.yml":"3eb5f096dc0fa1f3a3b0abaa000a26296720bf8074b4823eb7daf905ce85660a","Cargo.toml":"ac2be902201c6ed727fc1a35d5d55322127f0f155a9b51dfb9caade37b383b68","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"9d85b54d8b58f56770bb280919b04b6387942c7b1aae6be380e7f6f144817230","appveyor.yml":"da991211b72fa6f231af7adb84c9fb72f5a9131d1c0a3d47b8ceffe5a82c8542","src/lib.rs":"31c17694802e227246b06d64be3b30f3e378e82d2ee010f42896a6734e1f7682"},"package":"f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/.travis.yml
@@ -0,0 +1,23 @@
+language: rust
+rust:
+  - stable
+  - beta
+  - nightly
+sudo: false
+before_script:
+  - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
+script:
+  - cargo build
+  - cargo test
+  - cargo doc --no-deps
+after_success:
+  - travis-cargo --only nightly doc-upload
+env:
+  global:
+    secure: oVWP/awx6NPXC66CmVrkqSi3oF7jDfn9bv49KaGDQ9kOspY26ao1Iq0Xvl7VYNxbGdpniCUPEILgpeIEI4BcsFXet+1DVbLmOJrKeOxJAy8CkkUYLG6x7Wnaa77FR/TExNfMY6X7Ei3N/1ayEuCiiye5bivUCXVLy1JbTcabuwwtkrXmsWWimppm4KpKBefBKbT0/ZNQ/WyIIPdsRoPTiJnTv1U1XZbIr4TQLnrUA7fAVBCxFh3B7gnz5yn4pNYi6Gc8cmYfCZwYptGeBpBdVgqqmW/ImqjQrocMJzH8thOcNiE29ZlQsaACH2BjjLDaOktHZfaVhQF0IjUfcKJ/8v8cCZhWr5kt/ih/SXRiwF3dE6wBXL2vxSw7rSaceQDXgTAlXvTx/i4UFrzzjWgPC+HNwwvBnUrUMfF2AL2pUqnd2L2x0EdIhuAOVyPh4kmOwZAmXrERyUAQhArJGKhm8sbFHhizzPna4rEqsfVwystEXeBsFBwtmBBM3CdawE47OnpKpdlm/DH+PUty98WMYXpwDNuu8oSSybe4fyqNExDfqZ4n3H80XOxAFoXRTdWRp7zexw6YwjSMKF1TwjuGoJjW5NOT7a20PDV/jXNN5KyiKFUxMWbcZcfIdjaCOhiQp7RcHFsRFDvbUnbomrnn7k/jDKaytpKIQVY2RG0pGpY=
+notifications:
+  email:
+    on_success: never
+os:
+  - linux
+  - osx
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "scoped-tls"
+version = "0.1.0"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+license = "MIT/Apache-2.0"
+readme = "README.md"
+repository = "https://github.com/alexcrichton/scoped-tls"
+homepage = "https://github.com/alexcrichton/scoped-tls"
+documentation = "http://alexcrichton.com/scoped-tls"
+description = """
+Library implementation of the standard library's old `scoped_thread_local!`
+macro for providing scoped access to thread local storage (TLS) so any type can
+be stored into TLS.
+"""
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/README.md
@@ -0,0 +1,23 @@
+# scoped-tls
+
+[![Build Status](https://travis-ci.org/alexcrichton/scoped-tls.svg?branch=master)](https://travis-ci.org/alexcrichton/scoped-tls)
+[![Build status](https://ci.appveyor.com/api/projects/status/9tatexq47i3ee13k?svg=true)](https://ci.appveyor.com/project/alexcrichton/flate2-rs)
+
+[Documentation](http://alexcrichton.com/scoped-tls)
+
+A Rust library providing the old standard library's `scoped_thread_local!` macro
+as a library implementation on crates.io.
+
+```toml
+# Cargo.toml
+[dependencies]
+scoped-tls = "0.1"
+```
+
+# License
+
+`scoped-tls` is primarily distributed under the terms of both the MIT license and
+the Apache License (Version 2.0), with portions covered by various BSD-like
+licenses.
+
+See LICENSE-APACHE, and LICENSE-MIT for details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/appveyor.yml
@@ -0,0 +1,17 @@
+environment:
+  matrix:
+  - TARGET: x86_64-pc-windows-msvc
+  - TARGET: i686-pc-windows-msvc
+  - TARGET: i686-pc-windows-gnu
+install:
+  - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe"
+  - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust"
+  - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin
+  - SET PATH=%PATH%;C:\MinGW\bin
+  - rustc -V
+  - cargo -V
+
+build: false
+
+test_script:
+  - cargo test --verbose
new file mode 100644
--- /dev/null
+++ b/third_party/rust/scoped-tls/src/lib.rs
@@ -0,0 +1,249 @@
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Scoped thread-local storage
+//!
+//! This module provides the ability to generate *scoped* thread-local
+//! variables. In this sense, scoped indicates that thread local storage
+//! actually stores a reference to a value, and this reference is only placed
+//! in storage for a scoped amount of time.
+//!
+//! There are no restrictions on what types can be placed into a scoped
+//! variable, but all scoped variables are initialized to the equivalent of
+//! null. Scoped thread local storage is useful when a value is present for a known
+//! period of time and it is not required to relinquish ownership of the
+//! contents.
+//!
+//! # Examples
+//!
+//! ```
+//! #[macro_use]
+//! extern crate scoped_tls;
+//!
+//! scoped_thread_local!(static FOO: u32);
+//!
+//! # fn main() {
+//! // Initially each scoped slot is empty.
+//! assert!(!FOO.is_set());
+//!
+//! // When inserting a value, the value is only in place for the duration
+//! // of the closure specified.
+//! FOO.set(&1, || {
+//!     FOO.with(|slot| {
+//!         assert_eq!(*slot, 1);
+//!     });
+//! });
+//! # }
+//! ```
+
+#![deny(missing_docs, warnings)]
+
+use std::cell::Cell;
+use std::marker;
+use std::thread::LocalKey;
+
+#[macro_export]
+macro_rules! scoped_thread_local {
+    (static $name:ident: $ty:ty) => (
+        static $name: $crate::ScopedKey<$ty> = $crate::ScopedKey {
+            inner: {
+                thread_local!(static FOO: ::std::cell::Cell<usize> = {
+                    ::std::cell::Cell::new(0)
+                });
+                &FOO
+            },
+            _marker: ::std::marker::PhantomData,
+        };
+    )
+}
+
+/// Type representing a thread local storage key corresponding to a reference
+/// to the type parameter `T`.
+///
+/// Keys are statically allocated and can contain a reference to an instance of
+/// type `T` scoped to a particular lifetime. Keys provides two methods, `set`
+/// and `with`, both of which currently use closures to control the scope of
+/// their contents.
+pub struct ScopedKey<T> {
+    #[doc(hidden)]
+    pub inner: &'static LocalKey<Cell<usize>>,
+    #[doc(hidden)]
+    pub _marker: marker::PhantomData<T>,
+}
+
+unsafe impl<T> Sync for ScopedKey<T> {}
+
+impl<T> ScopedKey<T> {
+    /// Inserts a value into this scoped thread local storage slot for a
+    /// duration of a closure.
+    ///
+    /// While `cb` is running, the value `t` will be returned by `get` unless
+    /// this function is called recursively inside of `cb`.
+    ///
+    /// Upon return, this function will restore the previous value, if any
+    /// was available.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #[macro_use]
+    /// extern crate scoped_tls;
+    ///
+    /// scoped_thread_local!(static FOO: u32);
+    ///
+    /// # fn main() {
+    /// FOO.set(&100, || {
+    ///     let val = FOO.with(|v| *v);
+    ///     assert_eq!(val, 100);
+    ///
+    ///     // set can be called recursively
+    ///     FOO.set(&101, || {
+    ///         // ...
+    ///     });
+    ///
+    ///     // Recursive calls restore the previous value.
+    ///     let val = FOO.with(|v| *v);
+    ///     assert_eq!(val, 100);
+    /// });
+    /// # }
+    /// ```
+    pub fn set<F, R>(&'static self, t: &T, f: F) -> R
+        where F: FnOnce() -> R
+    {
+        struct Reset {
+            key: &'static LocalKey<Cell<usize>>,
+            val: usize,
+        }
+        impl Drop for Reset {
+            fn drop(&mut self) {
+                self.key.with(|c| c.set(self.val));
+            }
+        }
+        let prev = self.inner.with(|c| {
+            let prev = c.get();
+            c.set(t as *const T as usize);
+            prev
+        });
+        let _reset = Reset { key: self.inner, val: prev };
+        f()
+    }
+
+    /// Gets a value out of this scoped variable.
+    ///
+    /// This function takes a closure which receives the value of this
+    /// variable.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if `set` has not previously been called.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// #[macro_use]
+    /// extern crate scoped_tls;
+    ///
+    /// scoped_thread_local!(static FOO: u32);
+    ///
+    /// # fn main() {
+    /// FOO.with(|slot| {
+    ///     // work with `slot`
+    /// # drop(slot);
+    /// });
+    /// # }
+    /// ```
+    pub fn with<F, R>(&'static self, f: F) -> R
+        where F: FnOnce(&T) -> R
+    {
+        let val = self.inner.with(|c| c.get());
+        assert!(val != 0, "cannot access a scoped thread local \
+                           variable without calling `set` first");
+        unsafe {
+            f(&*(val as *const T))
+        }
+    }
+
+    /// Test whether this TLS key has been `set` for the current thread.
+    pub fn is_set(&'static self) -> bool {
+        self.inner.with(|c| c.get() != 0)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::cell::Cell;
+    use std::sync::mpsc::{channel, Sender};
+    use std::thread;
+
+    scoped_thread_local!(static FOO: u32);
+
+    #[test]
+    fn smoke() {
+        scoped_thread_local!(static BAR: u32);
+
+        assert!(!BAR.is_set());
+        BAR.set(&1, || {
+            assert!(BAR.is_set());
+            BAR.with(|slot| {
+                assert_eq!(*slot, 1);
+            });
+        });
+        assert!(!BAR.is_set());
+    }
+
+    #[test]
+    fn cell_allowed() {
+        scoped_thread_local!(static BAR: Cell<u32>);
+
+        BAR.set(&Cell::new(1), || {
+            BAR.with(|slot| {
+                assert_eq!(slot.get(), 1);
+            });
+        });
+    }
+
+    #[test]
+    fn scope_item_allowed() {
+        assert!(!FOO.is_set());
+        FOO.set(&1, || {
+            assert!(FOO.is_set());
+            FOO.with(|slot| {
+                assert_eq!(*slot, 1);
+            });
+        });
+        assert!(!FOO.is_set());
+    }
+
+    #[test]
+    fn panic_resets() {
+        struct Check(Sender<u32>);
+        impl Drop for Check {
+            fn drop(&mut self) {
+                FOO.with(|r| {
+                    self.0.send(*r).unwrap();
+                })
+            }
+        }
+
+        let (tx, rx) = channel();
+        let t = thread::spawn(|| {
+            FOO.set(&1, || {
+                let _r = Check(tx);
+
+                FOO.set(&2, || {
+                    panic!()
+                });
+            });
+        });
+
+        assert_eq!(rx.recv().unwrap(), 1);
+        assert!(t.join().is_err());
+    }
+}
--- a/third_party/rust/slab/.cargo-checksum.json
+++ b/third_party/rust/slab/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"4cd8cbaedfe34dd4e0cc476e1484dc950b7ae90b693073fa89a298b014e6c0a1","README.md":"36ba748d4deb1875f5355dbf997be6ef1cb857709d78db7127c24d640e90300a","src/lib.rs":"003277f46755d1870148756841dbaad216109812cd659e4862e220e7a5b0c963"},"package":"17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"}
\ No newline at end of file
+{"files":{"Cargo.lock":"1818357c6fb5ec8fdb98a7e21f2470a1175164c304fb391a818cb87c1f6202ab","Cargo.toml":"4cd8cbaedfe34dd4e0cc476e1484dc950b7ae90b693073fa89a298b014e6c0a1","README.md":"36ba748d4deb1875f5355dbf997be6ef1cb857709d78db7127c24d640e90300a","src/lib.rs":"003277f46755d1870148756841dbaad216109812cd659e4862e220e7a5b0c963"},"package":"17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/slab/Cargo.lock
@@ -0,0 +1,4 @@
+[root]
+name = "slab"
+version = "0.3.0"
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".travis.yml":"2b7fd7a794a404db00e20b9f9fc3f9b45cfa3ab816382b15e8eb0ba9fd0ea043","Cargo.toml":"71e9047397cdbb17762daf98bd12950afd195e7be2b08744349bf18fd2eec15a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"0b79c264429cd07f6a26caf0cd6e053ee054efb78290e3b1ba33dc657a0a2f0b","appveyor.yml":"65fe7e8f9acb2cb6305fe65f8fa5eb88d38dcd18988dd6932f085476c2fb70c7","benches/latency.rs":"fdb479530b48e08ec1437917c6f61b4d516c5dc04e4c01424d0861a724767950","benches/mio-ops.rs":"ea21132d525821f0876c6ba07d53ee3abc80d20c7f961dcdbb5785c011279ffa","examples/chat.rs":"56751b765b167deee1554425c94fa031e654c6c7c5c32459770ed0c4aee7419f","examples/connect.rs":"17fdcb50e10b5826240ff6dab51e4f1cc5856a2827abcca97d572d59c285ceb3","examples/echo-udp.rs":"e79c3e43ce38fc9f53e51a6ad2fb8d48906564ed6ed4b500eee2dbdc870108d3","examples/echo.rs":"b496ae301786f5e864a0edc8434f0183fe4461e38b9ced2ac106dd6293296077","examples/hello.rs":"b9906d61f29d0d3b5bdb78094f8808eb97ee40fc1d148f0e44e5a3468d60a627","examples/proxy.rs":"3cfb742c40d1a8b9441ba6d7ed89032c90850f36b41b0e5e32fcf43d6e457c3d","examples/sink.rs":"c0013ef03e0dcbf83179b233ac289988786e270ac5c1cb5a587646b443de98db","examples/udp-codec.rs":"9c75011293674c7e940496a178fe900c542ec5579ee15cbc36392546fa2be4a3","src/channel.rs":"b41271b362614595bd8a1f40907285075453e44ea5afc403316215ccbc9a24e5","src/heap.rs":"91c7a89cd5eb30618209800b4d9779923b36aa21cffef175d9320bbf8b631e17","src/io/copy.rs":"0e05d1d3fb5989e8bd2f78f5602c96b1fff37e7cdb3630997e0611ae6ff04780","src/io/flush.rs":"861cbdbb40f8d16e3302e40012ae19a0520761e3e77bfe591f1f23b6b19e98fb","src/io/frame.rs":"652fba7c3a56d9c980dc173e067cf36b4fe08e2b15bfebaad303d455467efbb6","src/io/mod.rs":"9ba9c57bca90c4eec5b5673894571dfff97fcc5f9f0fc5a67e9ede82639d8358","src/io/read.rs":"f71c562b50902aa7fad1a597bdcffa5e9028149c4dc243308d6633d35dddcb99","src/io/read_exact.rs":"bdcfe2abfc10db
5fd48135268aaa84f40a86dbfb768ddacdcc6f387acbd0e66b","src/io/read_to_end.rs":"b39c5a24e8a7c8bd83f29cb435b5d7cc65e5ce9bd6ae43190c1ca8bfd0f9c8a8","src/io/read_until.rs":"da944ab44103caddc0b1a40565775fe31a963db7f14508dd7a69d39fea6de3de","src/io/split.rs":"8f6fa075bf5204380dbacce06bad3392f41b3aee76711563f67bdcd8ffff69c4","src/io/window.rs":"263c3eae1f36563edbe2f37d728e0e0d0bcc9ed2f60540efc18e50bef5dd55f6","src/io/write_all.rs":"b6d6a41e92841d9608d6e041d1c191cfb36176a745e49631f51cc540c6297b5e","src/lib.rs":"ff3447535164d6057026dd4d1be78972a8e4e292ad2c0cb997379ea4dd93f425","src/net/mod.rs":"2d250c12416d2cc1b088f6a3a3226ec53861a9cf67bcd1f1c99b25dc5c9e416e","src/net/tcp.rs":"760deea09312f72b2108198d2c291afd96772dcec889ee784880b3861b8d9347","src/net/udp/frame.rs":"4e8e4ea1a3a1bdac03ee6576f523e055137a46c7e4cbdd5e58c7be6ccb818af0","src/net/udp/mod.rs":"1820953251a03ba514e9d4a4af4d27497b3c3f60bb6a57dd7cbbda3410677345","src/reactor/interval.rs":"149f45c1ff982c495860aab17c1a6ffe9f696753caf49825b4c06369f3d073c2","src/reactor/io_token.rs":"ab6c23c79b149d9a7f4bfb02732b12a71dc2a83717baf374000c8599438a85ed","src/reactor/mod.rs":"8f974ba31db656373f1146acf85881b03d0316d9c2ae8be023a17b33d51d9409","src/reactor/poll_evented.rs":"a37657eb4357d112c62d5de128117a2cc9a0642142785f92d65f5eb711032e9c","src/reactor/timeout.rs":"b9730643d236347c29c883235c3b95df957c33dc61b832bc1537badbde7279b0","src/reactor/timeout_token.rs":"2536aa2f8aefb4073a70dbe3e89d81bd1fe27c618d2ea6b6f21594bdee34191c","tests/buffered.rs":"6bb8371b8c35c277fcf33e213a18ed521534b4285c853b8773d94b843fe1e99f","tests/chain.rs":"e40145b41e48da31d6d0ab4dc49036bf5b0582b5b9cccb9e16e621c2c59286ff","tests/echo.rs":"ad686f974455ae5bab9a9745f6ec3dc6b5e9ae3f00c3171894706c2c285dccda","tests/interval.rs":"5bd96d5ce47f9ab5637057cdbc1cb292d1f969bcf7bec4d36c5a0044d64c3f9a","tests/limit.rs":"da99b76fb6ef9c7e94877ed85fcde992be0f368070ea65fc9ea041542c5165a4","tests/line-frames.rs":"67c8f1f6d430c8e27adf1da40de4becef389cf090247bda30153d0cbcb37261b","test
s/pipe-hup.rs":"392b46b77cbf8c4145e75d0ca0f0bbc8d614b949a600b391fbea559ed32efdbd","tests/spawn.rs":"0a167ae55c4f192c87e1d75ed352385ccff78b18c46ca1c021484cd1bbff1563","tests/stream-buffered.rs":"6ea52c26af8abdedf7839d78b30086662d622b9f7a8a36ed5f3554cc48a37c7a","tests/tcp.rs":"c200e00f7ee74f2fe758ac9b7a1febe678bc0a72bd4eb389e2577b7c3cfba823","tests/timeout.rs":"11ecf55a3c6f1956ac30f0e238ffb6262f3f20d751b8ee543c259d98a50d3ffa","tests/udp.rs":"d6cfd67748a497771d604b8059ae7cf71ba17f28c5d611c9f83c914e60715eb3"},"package":"febd81b3e2ef615c6c8077347b33f3f3deec3d708ecd08194c9707b7a1eccfc9"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/.travis.yml
@@ -0,0 +1,27 @@
+language: rust
+
+rust:
+  - stable
+  - beta
+  - nightly
+sudo: false
+before_script:
+  - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
+script:
+  - cargo build
+  - cargo test
+  - cargo doc --no-deps
+  - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo bench ; fi
+
+after_success:
+  - travis-cargo --only nightly doc-upload
+env:
+  global:
+    - secure: "gOETHEX34re+YOgwdPG+wxSWZ1Nn5Q4+pk5b3mpaPS2RRVLdNlm7oJFYJMp1MsO3r4t5z4ntpBQUy/rQXPzzSOUqb0E+wnOtAFD+rspY0z5rJMwOghfdNst/Jsa5+EJeGWHEXd6YNdH1fILg94OCzzzmdjQH59F5UqRtY4EfMZQ9BzxuH0nNrCtys4xf0fstmlezw6mCyKR7DL2JxMf7ux10JeCTsj8BCT/yFKZ4HhFiKGVUpWSSTY3+lESnI4rKLynZEnFAkrHlIMyNRXf+lLfoTCTdmG0LAjf4AMsxLA9sSHVEhz9gvazQB4lX4B+E2Tuq1v/QecKqpRvfb4nM+ldRrsIW6zNf5DGA4J07h1qnhB0DO0TftDNuZNArueDW/yaeO5u6M4TspozdKYRx8QVvHg609WEdQPiDg4HdR2EUHyGBYbWJTVoBbYM+Yv3Pa1zBw8r/82sH4SGj1GtBFfH4QxTwMzGpX8AF4l2HUUFlpLgCrrWwTCwTxuQUsvjUPfrKHIisZPFGeu92qjmMN+YZh8U1a/W9xOLFbrTOH+FVRt9XrkT2Cwtfcia/7TMS2kXWyxrz82zpAwL5SEpP0k84B7GqLGlZrCKboufMBrtE6Chycp2D2quyVM0/kF5x2ev6QHToT1FH2McVB1XwkxJNeCMZhOe4EDpyfovPweQ="
+
+notifications:
+  email:
+    on_success: never
+os:
+  - linux
+  - osx
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "tokio-core"
+version = "0.1.7"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/tokio-rs/tokio-core"
+homepage = "https://tokio.rs"
+documentation = "https://docs.rs/tokio-core/0.1"
+description = """
+Core I/O and event loop primitives for asynchronous I/O in Rust. Foundation for
+the rest of the tokio crates.
+"""
+categories = ["asynchronous"]
+
+[badges]
+travis-ci = { repository = "tokio-rs/tokio-core" }
+appveyor = { repository = "alexcrichton/tokio-core" }
+
+[dependencies]
+bytes = "0.4"
+log = "0.3"
+mio = "0.6.5"
+scoped-tls = "0.1.0"
+slab = "0.3"
+iovec = "0.1"
+tokio-io = "0.1"
+futures = "0.1.11"
+
+[dev-dependencies]
+env_logger = { version = "0.3", default-features = false }
+libc = "0.2"
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 Alex Crichton
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/README.md
@@ -0,0 +1,39 @@
+# tokio-core
+
+Core I/O and event loop abstraction for asynchronous I/O in Rust built on
+`futures` and `mio`.
+
+[![Build Status](https://travis-ci.org/tokio-rs/tokio-core.svg?branch=master)](https://travis-ci.org/tokio-rs/tokio-core)
+[![Build status](https://ci.appveyor.com/api/projects/status/caxmxbg8181kk9mq/branch/master?svg=true)](https://ci.appveyor.com/project/carllerche/tokio-core)
+
+[Documentation](https://docs.rs/tokio-core)
+
+[Tutorial](https://tokio.rs/)
+
+## Usage
+
+First, add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+tokio-core = "0.1"
+```
+
+Next, add this to your crate:
+
+```rust
+extern crate tokio_core;
+```
+
+You can find extensive documentation and examples about how to use this crate
+online at [https://tokio.rs](https://tokio.rs) as well as the `examples` folder
+in this repository. The [API documentation](https://docs.rs/tokio-core) is also
+a great place to get started for the nitty-gritty.
+
+# License
+
+`tokio-core` is primarily distributed under the terms of both the MIT license
+and the Apache License (Version 2.0), with portions covered by various BSD-like
+licenses.
+
+See LICENSE-APACHE, and LICENSE-MIT for details.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/appveyor.yml
@@ -0,0 +1,15 @@
+environment:
+  matrix:
+  - TARGET: x86_64-pc-windows-msvc
+install:
+  - curl -sSf -o rustup-init.exe https://win.rustup.rs/
+  - rustup-init.exe -y --default-host %TARGET%
+  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
+  - rustc -V
+  - cargo -V
+
+build: false
+
+test_script:
+  - cargo build
+  - cargo test
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/benches/latency.rs
@@ -0,0 +1,120 @@
+#![allow(deprecated)]
+#![feature(test)]
+
+extern crate test;
+extern crate futures;
+#[macro_use]
+extern crate tokio_core;
+
+use std::io;
+use std::net::SocketAddr;
+use std::thread;
+
+use futures::sync::oneshot;
+use futures::sync::mpsc;
+use futures::{Future, Poll, Sink, Stream};
+use test::Bencher;
+use tokio_core::net::UdpSocket;
+use tokio_core::reactor::Core;
+
+/// UDP echo server
+struct EchoServer {
+    socket: UdpSocket,
+    buf: Vec<u8>,
+    to_send: Option<(usize, SocketAddr)>,
+}
+
+impl EchoServer {
+    fn new(s: UdpSocket) -> Self {
+        EchoServer {
+            socket: s,
+            to_send: None,
+            buf: vec![0u8; 1600],
+        }
+    }
+}
+
+impl Future for EchoServer {
+    type Item = ();
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(), io::Error> {
+        loop {
+            if let Some(&(size, peer)) = self.to_send.as_ref() {
+                try_nb!(self.socket.send_to(&self.buf[..size], &peer));
+                self.to_send = None;
+            }
+            self.to_send = Some(try_nb!(self.socket.recv_from(&mut self.buf)));
+        }
+    }
+}
+
+#[bench]
+fn udp_echo_latency(b: &mut Bencher) {
+    let any_addr = "127.0.0.1:0".to_string();
+    let any_addr = any_addr.parse::<SocketAddr>().unwrap();
+
+    let (stop_c, stop_p) = oneshot::channel::<()>();
+    let (tx, rx) = oneshot::channel();
+
+    let child = thread::spawn(move || {
+        let mut l = Core::new().unwrap();
+        let handle = l.handle();
+
+        let socket = tokio_core::net::UdpSocket::bind(&any_addr, &handle).unwrap();
+        tx.complete(socket.local_addr().unwrap());
+
+        let server = EchoServer::new(socket);
+        let server = server.select(stop_p.map_err(|_| panic!()));
+        let server = server.map_err(|_| ());
+        l.run(server).unwrap()
+    });
+
+
+    let client = std::net::UdpSocket::bind(&any_addr).unwrap();
+
+    let server_addr = rx.wait().unwrap();
+    let mut buf = [0u8; 1000];
+
+    // warmup phase; for some reason initial couple of
+    // runs are much slower
+    //
+    // TODO: Describe the exact reasons; caching? branch predictor? lazy closures?
+    for _ in 0..8 {
+        client.send_to(&buf, &server_addr).unwrap();
+        let _ = client.recv_from(&mut buf).unwrap();
+    }
+
+    b.iter(|| {
+        client.send_to(&buf, &server_addr).unwrap();
+        let _ = client.recv_from(&mut buf).unwrap();
+    });
+
+    stop_c.complete(());
+    child.join().unwrap();
+}
+
+#[bench]
+fn futures_channel_latency(b: &mut Bencher) {
+    let (mut in_tx, in_rx) = mpsc::channel(32);
+    let (out_tx, out_rx) = mpsc::channel::<_>(32);
+
+    let child = thread::spawn(|| out_tx.send_all(in_rx.then(|r| r.unwrap())).wait());
+    let mut rx_iter = out_rx.wait();
+
+    // warmup phase; for some reason initial couple of runs are much slower
+    //
+    // TODO: Describe the exact reasons; caching? branch predictor? lazy closures?
+    for _ in 0..8 {
+        in_tx.start_send(Ok(1usize)).unwrap();
+        let _ = rx_iter.next();
+    }
+
+    b.iter(|| {
+        in_tx.start_send(Ok(1usize)).unwrap();
+        let _ = rx_iter.next();
+    });
+
+    drop(in_tx);
+    child.join().unwrap().unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/benches/mio-ops.rs
@@ -0,0 +1,57 @@
+// Measure cost of different operations
+// to get a sense of performance tradeoffs
+#![feature(test)]
+
+extern crate test;
+extern crate mio;
+
+use test::Bencher;
+
+use mio::tcp::TcpListener;
+use mio::{Token, Ready, PollOpt};
+
+
+#[bench]
+fn mio_register_deregister(b: &mut Bencher) {
+    let addr = "127.0.0.1:0".parse().unwrap();
+    // Setup the server socket
+    let sock = TcpListener::bind(&addr).unwrap();
+    let poll = mio::Poll::new().unwrap();
+
+    const CLIENT: Token = Token(1);
+
+    b.iter(|| {
+        poll.register(&sock, CLIENT, Ready::readable(),
+              PollOpt::edge()).unwrap();
+        poll.deregister(&sock).unwrap();
+    });
+}
+
+#[bench]
+fn mio_reregister(b: &mut Bencher) {
+    let addr = "127.0.0.1:0".parse().unwrap();
+    // Setup the server socket
+    let sock = TcpListener::bind(&addr).unwrap();
+    let poll = mio::Poll::new().unwrap();
+
+    const CLIENT: Token = Token(1);
+    poll.register(&sock, CLIENT, Ready::readable(),
+    PollOpt::edge()).unwrap();
+
+    b.iter(|| {
+        poll.reregister(&sock, CLIENT, Ready::readable(),
+        PollOpt::edge()).unwrap();
+    });
+    poll.deregister(&sock).unwrap();
+}
+
+#[bench]
+fn mio_poll(b: &mut Bencher) {
+    let poll = mio::Poll::new().unwrap();
+    let timeout = std::time::Duration::new(0, 0);
+    let mut events = mio::Events::with_capacity(1024);
+
+    b.iter(|| {
+        poll.poll(&mut events, Some(timeout)).unwrap();
+    });
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/chat.rs
@@ -0,0 +1,134 @@
+//! A chat server that broadcasts a message to all connections.
+//!
+//! This is a simple line-based server which accepts connections, reads lines
+//! from those connections, and broadcasts the lines to all other connected
+//! clients. In a sense this is a bit of a "poor man's chat server".
+//!
+//! You can test this out by running:
+//!
+//!     cargo run --example chat
+//!
+//! And then in another window run:
+//!
+//!     nc -4 localhost 8080
+//!
+//! You can run the second command in multiple windows and then chat between the
+//! two, seeing the messages from the other client as they're received. For all
+//! connected clients they'll all join the same room and see everyone else's
+//! messages.
+
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::collections::HashMap;
+use std::rc::Rc;
+use std::cell::RefCell;
+use std::iter;
+use std::env;
+use std::io::{Error, ErrorKind, BufReader};
+
+use futures::Future;
+use futures::stream::{self, Stream};
+use tokio_core::net::TcpListener;
+use tokio_core::reactor::Core;
+use tokio_io::io;
+use tokio_io::AsyncRead;
+
+fn main() {
+    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
+    let addr = addr.parse().unwrap();
+
+    // Create the event loop and TCP listener we'll accept connections on.
+    let mut core = Core::new().unwrap();
+    let handle = core.handle();
+    let socket = TcpListener::bind(&addr, &handle).unwrap();
+    println!("Listening on: {}", addr);
+
+    // This is a single-threaded server, so we can just use Rc and RefCell to
+    // store the map of all connections we know about.
+    let connections = Rc::new(RefCell::new(HashMap::new()));
+
+    let srv = socket.incoming().for_each(move |(stream, addr)| {
+        println!("New Connection: {}", addr);
+        let (reader, writer) = stream.split();
+
+        // Create a channel for our stream, which other sockets will use to
+        // send us messages. Then register our address with the stream to send
+        // data to us.
+        let (tx, rx) = futures::sync::mpsc::unbounded();
+        connections.borrow_mut().insert(addr, tx);
+
+        // Define here what we do for the actual I/O. That is, read a bunch of
+        // lines from the socket and dispatch them while we also write any lines
+        // from other sockets.
+        let connections_inner = connections.clone();
+        let reader = BufReader::new(reader);
+
+        // Model the read portion of this socket by mapping an infinite
+        // iterator to each line off the socket. This "loop" is then
+        // terminated with an error once we hit EOF on the socket.
+        let iter = stream::iter(iter::repeat(()).map(Ok::<(), Error>));
+        let socket_reader = iter.fold(reader, move |reader, _| {
+            // Read a line off the socket, failing if we're at EOF
+            let line = io::read_until(reader, b'\n', Vec::new());
+            let line = line.and_then(|(reader, vec)| {
+                if vec.len() == 0 {
+                    Err(Error::new(ErrorKind::BrokenPipe, "broken pipe"))
+                } else {
+                    Ok((reader, vec))
+                }
+            });
+
+            // Convert the bytes we read into a string, and then send that
+            // string to all other connected clients.
+            let line = line.map(|(reader, vec)| {
+                (reader, String::from_utf8(vec))
+            });
+            let connections = connections_inner.clone();
+            line.map(move |(reader, message)| {
+                println!("{}: {:?}", addr, message);
+                let mut conns = connections.borrow_mut();
+                if let Ok(msg) = message {
+                    // For each open connection except the sender, send the
+                    // string via the channel.
+                    let iter = conns.iter_mut()
+                                    .filter(|&(&k, _)| k != addr)
+                                    .map(|(_, v)| v);
+                    for tx in iter {
+                        tx.send(format!("{}: {}", addr, msg)).unwrap();
+                    }
+                } else {
+                    let tx = conns.get_mut(&addr).unwrap();
+                    tx.send("You didn't send valid UTF-8.".to_string()).unwrap();
+                }
+                reader
+            })
+        });
+
+        // Whenever we receive a string on the Receiver, we write it to
+        // `WriteHalf<TcpStream>`.
+        let socket_writer = rx.fold(writer, |writer, msg| {
+            let amt = io::write_all(writer, msg.into_bytes());
+            let amt = amt.map(|(writer, _)| writer);
+            amt.map_err(|_| ())
+        });
+
+        // Now that we've got futures representing each half of the socket, we
+        // use the `select` combinator to wait for either half to be done to
+        // tear down the other. Then we spawn off the result.
+        let connections = connections.clone();
+        let socket_reader = socket_reader.map_err(|_| ());
+        let connection = socket_reader.map(|_| ()).select(socket_writer.map(|_| ()));
+        handle.spawn(connection.then(move |_| {
+            connections.borrow_mut().remove(&addr);
+            println!("Connection {} closed.", addr);
+            Ok(())
+        }));
+
+        Ok(())
+    });
+
+    // execute server
+    core.run(srv).unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/connect.rs
@@ -0,0 +1,132 @@
+//! A simple example of hooking up stdin/stdout to a TCP stream.
+//!
+//! This example will connect to a server specified in the argument list and
+//! then forward all data read on stdin to the server, printing out all data
+//! received on stdout.
+//!
+//! Note that this is not currently optimized for performance, especially around
+//! buffer management. Rather it's intended to show an example of working with a
+//! client.
+
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+extern crate bytes;
+
+use std::env;
+use std::io::{self, Read, Write};
+use std::net::SocketAddr;
+use std::thread;
+
+use bytes::{BufMut, BytesMut};
+use futures::sync::mpsc;
+use futures::{Sink, Future, Stream};
+use tokio_core::net::TcpStream;
+use tokio_core::reactor::Core;
+use tokio_io::AsyncRead;
+use tokio_io::codec::{Encoder, Decoder};
+
+fn main() {
+    // Parse what address we're going to connect to
+    let addr = env::args().nth(1).unwrap_or_else(|| {
+        panic!("this program requires at least one argument")
+    });
+    let addr = addr.parse::<SocketAddr>().unwrap();
+
+    // Create the event loop and initiate the connection to the remote server
+    let mut core = Core::new().unwrap();
+    let handle = core.handle();
+    let tcp = TcpStream::connect(&addr, &handle);
+
+    // Right now Tokio doesn't support a handle to stdin running on the event
+    // loop, so we farm out that work to a separate thread. This thread will
+    // read data from stdin and then send it to the event loop over a standard
+    // futures channel.
+    let (stdin_tx, stdin_rx) = mpsc::channel(0);
+    thread::spawn(|| read_stdin(stdin_tx));
+    let stdin_rx = stdin_rx.map_err(|_| panic!()); // errors not possible on rx
+
+    // After the TCP connection has been established, we set up our client to
+    // start forwarding data.
+    //
+    // First we use the `Io::framed` method with a simple implementation of a
+    // `Codec` (listed below) that just ships bytes around. We then split that
+    // in two to work with the stream and sink separately.
+    //
+    // Half of the work we're going to do is to take all data we receive on
+    // stdin (`stdin_rx`) and send that along the TCP stream (`sink`). The
+    // second half is to take all the data we receive (`stream`) and then write
+    // that to stdout. Currently we just write to stdout in a synchronous
+    // fashion.
+    //
+    // Finally we set the client to terminate once either half of this work
+    // finishes. If we don't have any more data to read or we won't receive any
+    // more work from the remote then we can exit.
+    let mut stdout = io::stdout();
+    let client = tcp.and_then(|stream| {
+        let (sink, stream) = stream.framed(Bytes).split();
+        let send_stdin = stdin_rx.forward(sink);
+        let write_stdout = stream.for_each(move |buf| {
+            stdout.write_all(&buf)
+        });
+
+        send_stdin.map(|_| ())
+                  .select(write_stdout.map(|_| ()))
+                  .then(|_| Ok(()))
+    });
+
+    // And now that we've got our client, we execute it in the event loop!
+    core.run(client).unwrap();
+}
+
+/// A simple `Codec` implementation that just ships bytes around.
+///
+/// This type is used for "framing" a TCP stream of bytes but it's really just a
+/// convenient method for us to work with streams/sinks for now. This'll just
+/// take any data read and interpret it as a "frame" and conversely just shove
+/// data into the output location without looking at it.
+struct Bytes;
+
+impl Decoder for Bytes {
+    type Item = BytesMut;
+    type Error = io::Error;
+
+    fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<BytesMut>> {
+        if buf.len() > 0 {
+            let len = buf.len();
+            Ok(Some(buf.split_to(len)))
+        } else {
+            Ok(None)
+        }
+    }
+
+    fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result<Option<BytesMut>> {
+        self.decode(buf)
+    }
+}
+
+impl Encoder for Bytes {
+    type Item = Vec<u8>;
+    type Error = io::Error;
+
+    fn encode(&mut self, data: Vec<u8>, buf: &mut BytesMut) -> io::Result<()> {
+        buf.put(&data[..]);
+        Ok(())
+    }
+}
+
+// Our helper method which will read data from stdin and send it along the
+// sender provided.
+fn read_stdin(mut tx: mpsc::Sender<Vec<u8>>) {
+    let mut stdin = io::stdin();
+    loop {
+        let mut buf = vec![0; 1024];
+        let n = match stdin.read(&mut buf) {
+            Err(_) |
+            Ok(0) => break,
+            Ok(n) => n,
+        };
+        buf.truncate(n);
+        tx = tx.send(buf).wait().unwrap();
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/echo-udp.rs
@@ -0,0 +1,70 @@
+//! A UDP echo server that just sends back everything that it receives.
+//!
+//! If you're on unix you can test this out by in one terminal executing:
+//!
+//!     cargo run --example echo-udp
+//!
+//! and in another terminal you can run:
+//!
+//!     nc -4u localhost 8080
+//!
+//! Each line you type in to the `nc` terminal should be echo'd back to you!
+
+extern crate futures;
+#[macro_use]
+extern crate tokio_core;
+
+use std::{env, io};
+use std::net::SocketAddr;
+
+use futures::{Future, Poll};
+use tokio_core::net::UdpSocket;
+use tokio_core::reactor::Core;
+
+struct Server {
+    socket: UdpSocket,
+    buf: Vec<u8>,
+    to_send: Option<(usize, SocketAddr)>,
+}
+
+impl Future for Server {
+    type Item = ();
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(), io::Error> {
+        loop {
+            // First we check to see if there's a message we need to echo back.
+            // If so then we try to send it back to the original source, waiting
+            // until it's writable and we're able to do so.
+            if let Some((size, peer)) = self.to_send {
+                let amt = try_nb!(self.socket.send_to(&self.buf[..size], &peer));
+                println!("Echoed {}/{} bytes to {}", amt, size, peer);
+                self.to_send = None;
+            }
+
+            // If we're here then `to_send` is `None`, so we take a look for the
+            // next message we're going to echo back.
+            self.to_send = Some(try_nb!(self.socket.recv_from(&mut self.buf)));
+        }
+    }
+}
+
+fn main() {
+    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
+    let addr = addr.parse::<SocketAddr>().unwrap();
+
+    // Create the event loop that will drive this server, and also bind the
+    // socket we'll be listening to.
+    let mut l = Core::new().unwrap();
+    let handle = l.handle();
+    let socket = UdpSocket::bind(&addr, &handle).unwrap();
+    println!("Listening on: {}", addr);
+
+    // Next we'll create a future to spawn (the one we defined above) and then
+    // we'll run the event loop by running the future.
+    l.run(Server {
+        socket: socket,
+        buf: vec![0; 1024],
+        to_send: None,
+    }).unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/echo.rs
@@ -0,0 +1,131 @@
+//! A "hello world" echo server with tokio-core
+//!
+//! This server will create a TCP listener, accept connections in a loop, and
+//! simply write back everything that's read off of each TCP connection. Each
+//! TCP connection is processed concurrently with all other TCP connections, and
+//! each connection will have its own buffer that it's reading in/out of.
+//!
+//! To see this server in action, you can run this in one terminal:
+//!
+//!     cargo run --example echo
+//!
+//! and in another terminal you can run:
+//!
+//!     cargo run --example connect 127.0.0.1:8080
+//!
+//! Each line you type in to the `connect` terminal should be echo'd back to
+//! you! If you open up multiple terminals running the `connect` example you
+//! should be able to see them all make progress simultaneously.
+
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::env;
+use std::net::SocketAddr;
+
+use futures::Future;
+use futures::stream::Stream;
+use tokio_io::AsyncRead;
+use tokio_io::io::copy;
+use tokio_core::net::TcpListener;
+use tokio_core::reactor::Core;
+
+fn main() {
+    // Allow passing an address to listen on as the first argument of this
+    // program, but otherwise we'll just set up our TCP listener on
+    // 127.0.0.1:8080 for connections.
+    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
+    let addr = addr.parse::<SocketAddr>().unwrap();
+
+    // First up we'll create the event loop that's going to drive this server.
+    // This is done by creating an instance of the `Core` type, tokio-core's
+    // event loop. Most functions in tokio-core return an `io::Result`, and
+    // `Core::new` is no exception. For this example, though, we're mostly just
+    // ignoring errors, so we unwrap the return value.
+    //
+    // After the event loop is created we acquire a handle to it through the
+    // `handle` method. With this handle we'll then later be able to create I/O
+    // objects and spawn futures.
+    let mut core = Core::new().unwrap();
+    let handle = core.handle();
+
+    // Next up we create a TCP listener which will listen for incoming
+    // connections. This TCP listener is bound to the address we determined
+    // above and must be associated with an event loop, so we pass in a handle
+    // to our event loop. After the socket's created we inform that we're ready
+    // to go and start accepting connections.
+    let socket = TcpListener::bind(&addr, &handle).unwrap();
+    println!("Listening on: {}", addr);
+
+    // Here we convert the `TcpListener` to a stream of incoming connections
+    // with the `incoming` method. We then define how to process each element in
+    // the stream with the `for_each` method.
+    //
+    // This combinator, defined on the `Stream` trait, will allow us to define a
+    // computation to happen for all items on the stream (in this case TCP
+    // connections made to the server).  The return value of the `for_each`
+    // method is itself a future representing processing the entire stream of
+    // connections, and ends up being our server.
+    let done = socket.incoming().for_each(move |(socket, addr)| {
+
+        // Once we're inside this closure this represents an accepted client
+        // from our server. The `socket` is the client connection and `addr` is
+        // the remote address of the client (similar to how the standard library
+        // operates).
+        //
+        // We just want to copy all data read from the socket back onto the
+        // socket itself (e.g. "echo"). We can use the standard `io::copy`
+        // combinator in the `tokio-core` crate to do precisely this!
+        //
+        // The `copy` function takes two arguments, where to read from and where
+        // to write to. We only have one argument, though, with `socket`.
+        // Luckily there's a method, `Io::split`, which will split a Read/Write
+        // stream into its two halves. This operation allows us to work with
+        // each stream independently, such as pass them as two arguments to the
+        // `copy` function.
+        //
+        // The `copy` function then returns a future, and this future will be
+        // resolved when the copying operation is complete, resolving to the
+        // amount of data that was copied.
+        let (reader, writer) = socket.split();
+        let amt = copy(reader, writer);
+
+        // After our copy operation is complete we just print out some helpful
+        // information.
+        let msg = amt.then(move |result| {
+            match result {
+                Ok((amt, _, _)) => println!("wrote {} bytes to {}", amt, addr),
+                Err(e) => println!("error on {}: {}", addr, e),
+            }
+
+            Ok(())
+        });
+
+        // And this is where much of the magic of this server happens. We
+        // crucially want all clients to make progress concurrently, rather than
+        // blocking one on completion of another. To achieve this we use the
+        // `spawn` function on `Handle` to essentially execute some work in the
+        // background.
+        //
+        // This function will transfer ownership of the future (`msg` in this
+        // case) to the event loop that `handle` points to. The event loop will
+        // then drive the future to completion.
+        //
+        // Essentially here we're spawning a new task to run concurrently, which
+        // will allow all of our clients to be processed concurrently.
+        handle.spawn(msg);
+
+        Ok(())
+    });
+
+    // And finally now that we've defined what our server is, we run it! We
+    // didn't actually do much I/O up to this point and this `Core::run` method
+    // is responsible for driving the entire server to completion.
+    //
+    // The `run` method will return the result of the future that it's running,
+    // but in our case the `done` future won't ever finish because a TCP
+    // listener is never done accepting clients. That basically just means that
+    // we're going to be running the server until it's killed (e.g. ctrl-c).
+    core.run(done).unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/hello.rs
@@ -0,0 +1,46 @@
+//! A small example of a server that accepts TCP connections and writes out
+//! `Hello!` to them, afterwards closing the connection.
+//!
+//! You can test this out by running:
+//!
+//!     cargo run --example hello
+//!
+//! and then in another terminal executing
+//!
+//!     nc -4 localhost 8080
+//!
+//! You should see `Hello!` printed out and then the `nc` program will exit.
+
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::env;
+use std::net::SocketAddr;
+
+use futures::stream::Stream;
+use tokio_core::reactor::Core;
+use tokio_core::net::TcpListener;
+
+fn main() {
+    env_logger::init().unwrap();
+    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
+    let addr = addr.parse::<SocketAddr>().unwrap();
+
+    let mut core = Core::new().unwrap();
+    let listener = TcpListener::bind(&addr, &core.handle()).unwrap();
+
+    let addr = listener.local_addr().unwrap();
+    println!("Listening for connections on {}", addr);
+
+    let clients = listener.incoming();
+    let welcomes = clients.and_then(|(socket, _peer_addr)| {
+        tokio_io::io::write_all(socket, b"Hello!\n")
+    });
+    let server = welcomes.for_each(|(_socket, _welcome)| {
+        Ok(())
+    });
+
+    core.run(server).unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/proxy.rs
@@ -0,0 +1,112 @@
+//! A proxy that forwards data to another server and forwards that server's
+//! responses back to clients.
+
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::sync::Arc;
+use std::env;
+use std::net::{Shutdown, SocketAddr};
+use std::io::{self, Read, Write};
+
+use futures::stream::Stream;
+use futures::{Future, Poll};
+use tokio_core::net::{TcpListener, TcpStream};
+use tokio_core::reactor::Core;
+use tokio_io::{AsyncRead, AsyncWrite};
+use tokio_io::io::{copy, shutdown};
+
+fn main() {
+    let listen_addr = env::args().nth(1).unwrap_or("127.0.0.1:8081".to_string());
+    let listen_addr = listen_addr.parse::<SocketAddr>().unwrap();
+
+    let server_addr = env::args().nth(2).unwrap_or("127.0.0.1:8080".to_string());
+    let server_addr = server_addr.parse::<SocketAddr>().unwrap();
+
+    // Create the event loop that will drive this server.
+    let mut l = Core::new().unwrap();
+    let handle = l.handle();
+
+    // Create a TCP listener which will listen for incoming connections.
+    let socket = TcpListener::bind(&listen_addr, &l.handle()).unwrap();
+    println!("Listening on: {}", listen_addr);
+    println!("Proxying to: {}", server_addr);
+
+    let done = socket.incoming().for_each(move |(client, client_addr)| {
+        let server = TcpStream::connect(&server_addr, &handle);
+        let amounts = server.and_then(move |server| {
+            // Create separate read/write handles for the TCP clients that we're
+            // proxying data between. Note that typically you'd use
+            // `AsyncRead::split` for this operation, but we want our writer
+            // handles to have a custom implementation of `shutdown` which
+            // actually calls `TcpStream::shutdown` to ensure that EOF is
+            // transmitted properly across the proxied connection.
+            //
+            // As a result, we wrap up our client/server manually in arcs and
+            // use the impls below on our custom `MyTcpStream` type.
+            let client_reader = MyTcpStream(Arc::new(client));
+            let client_writer = client_reader.clone();
+            let server_reader = MyTcpStream(Arc::new(server));
+            let server_writer = server_reader.clone();
+
+            // Copy the data (in parallel) between the client and the server.
+            // After the copy is done we indicate to the remote side that we've
+            // finished by shutting down the connection.
+            let client_to_server = copy(client_reader, server_writer)
+                .and_then(|(n, _, server_writer)| {
+                    shutdown(server_writer).map(move |_| n)
+                });
+
+            let server_to_client = copy(server_reader, client_writer)
+                .and_then(|(n, _, client_writer)| {
+                    shutdown(client_writer).map(move |_| n)
+                });
+
+            client_to_server.join(server_to_client)
+        });
+
+        let msg = amounts.map(move |(from_client, from_server)| {
+            println!("client at {} wrote {} bytes and received {} bytes",
+                     client_addr, from_client, from_server);
+        }).map_err(|e| {
+            // Don't panic. Maybe the client just disconnected too soon.
+            println!("error: {}", e);
+        });
+        handle.spawn(msg);
+
+        Ok(())
+    });
+    l.run(done).unwrap();
+}
+
+// This is a custom type used to have a custom implementation of the
+// `AsyncWrite::shutdown` method which actually calls `TcpStream::shutdown` to
+// notify the remote end that we're done writing.
+#[derive(Clone)]
+struct MyTcpStream(Arc<TcpStream>);
+
+impl Read for MyTcpStream {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        (&*self.0).read(buf)
+    }
+}
+
+impl Write for MyTcpStream {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        (&*self.0).write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+impl AsyncRead for MyTcpStream {}
+
+impl AsyncWrite for MyTcpStream {
+    fn shutdown(&mut self) -> Poll<(), io::Error> {
+        try!(self.0.shutdown(Shutdown::Write));
+        Ok(().into())
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/sink.rs
@@ -0,0 +1,57 @@
+//! A small server that writes as many nul bytes on all connections it receives.
+//!
+//! There is no concurrency in this server, only one connection is written to at
+//! a time. You can use this as a benchmark for the raw performance of writing
+//! data to a socket by measuring how much data is being written on each
+//! connection.
+//!
+//! Typically you'll want to run this example with:
+//!
+//!     cargo run --example sink --release
+//!
+//! And then you can connect to it via:
+//!
+//!     nc -4 localhost 8080 > /dev/null
+//!
+//! You should see your CPUs light up as data's being shoved into the ether.
+
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::env;
+use std::iter;
+use std::net::SocketAddr;
+
+use futures::Future;
+use futures::stream::{self, Stream};
+use tokio_io::IoFuture;
+use tokio_core::net::{TcpListener, TcpStream};
+use tokio_core::reactor::Core;
+
+fn main() {
+    env_logger::init().unwrap();
+    let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
+    let addr = addr.parse::<SocketAddr>().unwrap();
+
+    let mut l = Core::new().unwrap();
+    let socket = TcpListener::bind(&addr, &l.handle()).unwrap();
+    println!("Listening on: {}", addr);
+    let server = socket.incoming().and_then(|(socket, addr)| {
+        println!("got a socket: {}", addr);
+        write(socket).or_else(|_| Ok(()))
+    }).for_each(|()| {
+        println!("lost the socket");
+        Ok(())
+    });
+    l.run(server).unwrap();
+}
+
+fn write(socket: TcpStream) -> IoFuture<()> {
+    static BUF: &'static [u8] = &[0; 64 * 1024];
+    let iter = iter::repeat(()).map(|()| Ok(()));
+    stream::iter(iter).fold(socket, |socket, ()| {
+        tokio_io::io::write_all(socket, BUF).map(|(socket, _)| socket)
+    }).map(|_| ()).boxed()
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/examples/udp-codec.rs
@@ -0,0 +1,79 @@
+//! This is a basic example of leveraging `UdpCodec` to create a simple UDP
+//! client and server which speak a custom protocol.
+//!
+//! Here we're using a custom codec to convert a UDP socket to a stream of
+//! client messages. These messages are then processed and returned back as a
+//! new message with a new destination. Overall, we then use this to construct a
+//! "ping pong" pair where two sockets are sending messages back and forth.
+
+extern crate tokio_core;
+extern crate env_logger;
+extern crate futures;
+
+use std::io;
+use std::net::SocketAddr;
+use std::str;
+
+use futures::{Future, Stream, Sink};
+use tokio_core::net::{UdpSocket, UdpCodec};
+use tokio_core::reactor::Core;
+
+pub struct LineCodec;
+
+impl UdpCodec for LineCodec {
+    type In = (SocketAddr, Vec<u8>);
+    type Out = (SocketAddr, Vec<u8>);
+
+    fn decode(&mut self, addr: &SocketAddr, buf: &[u8]) -> io::Result<Self::In> {
+        Ok((*addr, buf.to_vec()))
+    }
+
+    fn encode(&mut self, (addr, buf): Self::Out, into: &mut Vec<u8>) -> SocketAddr {
+        into.extend(buf);
+        addr
+    }
+}
+
+fn main() {
+    drop(env_logger::init());
+
+    let mut core = Core::new().unwrap();
+    let handle = core.handle();
+
+    let addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
+
+    // Bind both our sockets and then figure out what ports we got.
+    let a = UdpSocket::bind(&addr, &handle).unwrap();
+    let b = UdpSocket::bind(&addr, &handle).unwrap();
+    let b_addr = b.local_addr().unwrap();
+
+    // We're parsing each socket with the `LineCodec` defined above, and then we
+    // `split` each codec into the sink/stream halves.
+    let (a_sink, a_stream) = a.framed(LineCodec).split();
+    let (b_sink, b_stream) = b.framed(LineCodec).split();
+
+    // Start off by sending a ping from a to b, afterwards we just print out
+    // what they send us and continually send pings
+    // let pings = stream::iter((0..5).map(Ok));
+    let a = a_sink.send((b_addr, b"PING".to_vec())).and_then(|a_sink| {
+        let mut i = 0;
+        let a_stream = a_stream.take(4).map(move |(addr, msg)| {
+            i += 1;
+            println!("[a] recv: {}", String::from_utf8_lossy(&msg));
+            (addr, format!("PING {}", i).into_bytes())
+        });
+        a_sink.send_all(a_stream)
+    });
+
+    // The second client we have will receive the pings from `a` and then send
+    // back pongs.
+    let b_stream = b_stream.map(|(addr, msg)| {
+        println!("[b] recv: {}", String::from_utf8_lossy(&msg));
+        (addr, b"PONG".to_vec())
+    });
+    let b = b_sink.send_all(b_stream);
+
+    // Spawn the sender of pongs and then wait for our pinger to finish.
+    handle.spawn(b.then(|_| Ok(())));
+    drop(core.run(a));
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/channel.rs
@@ -0,0 +1,128 @@
+//! In-memory evented channels.
+//!
+//! This module contains a `Sender` and `Receiver` pair types which can be used
+//! to send messages between different future tasks.
+
+#![deprecated(since = "0.1.1", note = "use `futures::sync::mpsc` instead")]
+#![allow(deprecated)]
+#![cfg(feature = "with-deprecated")]
+
+use std::io;
+use std::sync::mpsc::TryRecvError;
+
+use futures::{Poll, Async, Sink, AsyncSink, StartSend, Stream};
+use mio::channel;
+
+use reactor::{Handle, PollEvented};
+
+/// The transmission half of a channel used for sending messages to a receiver.
+///
+/// A `Sender` can be `clone`d to have multiple threads or instances sending
+/// messages to one receiver.
+///
+/// This type is created by the [`channel`] function.
+///
+/// [`channel`]: fn.channel.html
+pub struct Sender<T> {
+    tx: channel::Sender<T>,
+}
+
+/// The receiving half of a channel used for processing messages sent by a
+/// `Sender`.
+///
+/// A `Receiver` cannot be cloned, so only one thread can receive messages at a
+/// time.
+///
+/// This type is created by the [`channel`] function and implements the
+/// `Stream` trait to represent received messages.
+///
+/// [`channel`]: fn.channel.html
+pub struct Receiver<T> {
+    rx: PollEvented<channel::Receiver<T>>,
+}
+
+/// Creates a new in-memory channel used for sending data across `Send +
+/// 'static` boundaries, frequently threads.
+///
+/// This type can be used to conveniently send messages between futures.
+/// Unlike the futures crate `channel` method and types, the returned tx/rx
+/// pair is a multi-producer single-consumer (mpsc) channel *with no
+/// backpressure*. Currently it's left up to the application to implement a
+/// mechanism, if necessary, to avoid messages piling up.
+///
+/// The returned `Sender` can be used to send messages that are processed by
+/// the returned `Receiver`. The `Sender` can be cloned to send messages
+/// from multiple sources simultaneously.
+pub fn channel<T>(handle: &Handle) -> io::Result<(Sender<T>, Receiver<T>)>
+    where T: Send + 'static,
+{
+    let (tx, rx) = channel::channel();
+    let rx = try!(PollEvented::new(rx, handle));
+    Ok((Sender { tx: tx }, Receiver { rx: rx }))
+}
+
+impl<T> Sender<T> {
+    /// Sends a message to the corresponding receiver of this sender.
+    ///
+    /// The message provided will be enqueued on the channel immediately, and
+    /// this function will return immediately. Keep in mind that the
+    /// underlying channel has infinite capacity, and this may not always be
+    /// desired.
+    ///
+    /// If an I/O error happens while sending the message, or if the receiver
+    /// has gone away, then an error will be returned. Note that I/O errors here
+    /// are generally quite abnormal.
+    pub fn send(&self, t: T) -> io::Result<()> {
+        self.tx.send(t).map_err(|e| {
+            match e {
+                channel::SendError::Io(e) => e,
+                channel::SendError::Disconnected(_) => {
+                    io::Error::new(io::ErrorKind::Other,
+                                   "channel has been disconnected")
+                }
+            }
+        })
+    }
+}
+
+impl<T> Sink for Sender<T> {
+    type SinkItem = T;
+    type SinkError = io::Error;
+
+    fn start_send(&mut self, t: T) -> StartSend<T, io::Error> {
+        Sender::send(self, t).map(|()| AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), io::Error> {
+        Ok(().into())
+    }
+
+    fn close(&mut self) -> Poll<(), io::Error> {
+        Ok(().into())
+    }
+}
+
+impl<T> Clone for Sender<T> {
+    fn clone(&self) -> Sender<T> {
+        Sender { tx: self.tx.clone() }
+    }
+}
+
+impl<T> Stream for Receiver<T> {
+    type Item = T;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Option<T>, io::Error> {
+        if let Async::NotReady = self.rx.poll_read() {
+            return Ok(Async::NotReady)
+        }
+        match self.rx.get_ref().try_recv() {
+            Ok(t) => Ok(Async::Ready(Some(t))),
+            Err(TryRecvError::Empty) => {
+                self.rx.need_read();
+                Ok(Async::NotReady)
+            }
+            Err(TryRecvError::Disconnected) => Ok(Async::Ready(None)),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/heap.rs
@@ -0,0 +1,305 @@
+//! A simple binary heap with support for removal of arbitrary elements
+//!
+//! This heap is used to manage timer state in the event loop. All timeouts go
+//! into this heap and we also cancel timeouts from this heap. The crucial
+//! feature of this heap over the standard library's `BinaryHeap` is the ability
+//! to remove arbitrary elements. (e.g. when a timer is canceled)
+//!
+//! Note that this heap is not at all optimized right now, it should hopefully
+//! just work.
+
+use std::mem;
+
+use slab::Slab;
+
+pub struct Heap<T> {
+    // Binary heap of items, plus the slab index indicating what position in the
+    // list they're in.
+    items: Vec<(T, usize)>,
+
+    // A map from a slab index (assigned to an item above) to the actual index
+    // in the array the item appears at.
+    index: Slab<usize>,
+}
+
+/// A token returned by `Heap::push` identifying the pushed element; it can
+/// later be passed to `Heap::remove` to take that element back out of the
+/// heap regardless of where it has moved to.
+pub struct Slot {
+    idx: usize,
+}
+
+impl<T: Ord> Heap<T> {
+    /// Creates an empty min-heap.
+    pub fn new() -> Heap<T> {
+        Heap {
+            items: Vec::new(),
+            index: Slab::with_capacity(128),
+        }
+    }
+
+    /// Pushes an element onto this heap, returning a slot token indicating
+    /// where it was pushed on to.
+    ///
+    /// The slot can later get passed to `remove` to remove the element from the
+    /// heap, but only if the element was previously not removed from the heap.
+    pub fn push(&mut self, t: T) -> Slot {
+        self.assert_consistent();
+        let len = self.items.len();
+        // Grow the slab when it runs out of room so the insertion below
+        // cannot fail.
+        if self.index.available() == 0 {
+            self.index.reserve_exact(len);
+        }
+        let slot_idx = self.index.insert(len).unwrap();
+        self.items.push((t, slot_idx));
+        self.percolate_up(len);
+        self.assert_consistent();
+        Slot { idx: slot_idx }
+    }
+
+    /// Returns a reference to the smallest element in the heap, if any.
+    pub fn peek(&self) -> Option<&T> {
+        self.assert_consistent();
+        self.items.get(0).map(|i| &i.0)
+    }
+
+    /// Removes and returns the smallest element in the heap, or `None` if
+    /// the heap is empty.
+    pub fn pop(&mut self) -> Option<T> {
+        self.assert_consistent();
+        if self.items.len() == 0 {
+            return None
+        }
+        // The root is removed by synthesizing its slot and delegating to
+        // `remove`, which handles re-establishing the heap invariant.
+        let slot = Slot { idx: self.items[0].1 };
+        Some(self.remove(slot))
+    }
+
+    /// Removes the element identified by `slot` from an arbitrary position
+    /// in the heap, returning it.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the slot no longer refers to an element in the heap.
+    pub fn remove(&mut self, slot: Slot) -> T {
+        self.assert_consistent();
+        let idx = self.index.remove(slot.idx).unwrap();
+        // Swap the last element into the vacated position, then restore the
+        // heap invariant by moving it in whichever direction is required.
+        let (item, slot_idx) = self.items.swap_remove(idx);
+        debug_assert_eq!(slot.idx, slot_idx);
+        if idx < self.items.len() {
+            self.index[self.items[idx].1] = idx;
+            if self.items[idx].0 < item {
+                self.percolate_up(idx);
+            } else {
+                self.percolate_down(idx);
+            }
+        }
+        self.assert_consistent();
+        return item
+    }
+
+    /// Moves the element at `idx` towards the root until its parent is no
+    /// larger, keeping `self.index` in sync; returns the final position.
+    fn percolate_up(&mut self, mut idx: usize) -> usize {
+        while idx > 0 {
+            let parent = (idx - 1) / 2;
+            if self.items[idx].0 >= self.items[parent].0 {
+                break
+            }
+            // `split_at_mut` lets us hold mutable references to both the
+            // parent (inside `a`) and the child (first element of `b`).
+            let (a, b) = self.items.split_at_mut(idx);
+            mem::swap(&mut a[parent], &mut b[0]);
+            self.index[a[parent].1] = parent;
+            self.index[b[0].1] = idx;
+            idx = parent;
+        }
+        return idx
+    }
+
+    /// Moves the element at `idx` towards the leaves, swapping with its
+    /// smaller child, until both children are no smaller; returns the final
+    /// position.
+    fn percolate_down(&mut self, mut idx: usize) -> usize {
+        loop {
+            let left = 2 * idx + 1;
+            let right = 2 * idx + 2;
+
+            let mut swap_left = true;
+            match (self.items.get(left), self.items.get(right)) {
+                (Some(left), None) => {
+                    if left.0 >= self.items[idx].0 {
+                        break
+                    }
+                }
+                (Some(left), Some(right)) => {
+                    if left.0 < self.items[idx].0 {
+                        if right.0 < left.0 {
+                            swap_left = false;
+                        }
+                    } else if right.0 < self.items[idx].0 {
+                        swap_left = false;
+                    } else {
+                        break
+                    }
+                }
+
+                (None, None) => break,
+                // A binary heap is a complete tree, so a node can never
+                // have a right child without a left child.
+                (None, Some(_right)) => panic!("not possible"),
+            }
+
+            let (a, b) = if swap_left {
+                self.items.split_at_mut(left)
+            } else {
+                self.items.split_at_mut(right)
+            };
+            mem::swap(&mut a[idx], &mut b[0]);
+            self.index[a[idx].1] = idx;
+            self.index[b[0].1] = a.len();
+            idx = a.len();
+        }
+        return idx
+    }
+
+    /// Exhaustively checks the heap-ordering and index-map invariants; this
+    /// compiles to a no-op in release builds.
+    fn assert_consistent(&self) {
+        if cfg!(not(debug_assertions)) {
+            return
+        }
+
+        assert_eq!(self.items.len(), self.index.len());
+
+        for (i, &(_, j)) in self.items.iter().enumerate() {
+            if self.index[j] != i {
+                panic!("self.index[j] != i : i={} j={} self.index[j]={}",
+                       i, j, self.index[j]);
+            }
+        }
+
+        for (i, &(ref item, _)) in self.items.iter().enumerate() {
+            if i > 0 {
+                assert!(*item >= self.items[(i - 1) / 2].0, "bad at index: {}", i);
+            }
+            if let Some(left) = self.items.get(2 * i + 1) {
+                assert!(*item <= left.0, "bad left at index: {}", i);
+            }
+            if let Some(right) = self.items.get(2 * i + 2) {
+                assert!(*item <= right.0, "bad right at index: {}", i);
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Heap;
+
+    // These tests exercise the heap as a min-heap: `pop` must always yield
+    // the smallest remaining element, and `remove` must work mid-sequence.
+    #[test]
+    fn simple() {
+        let mut h = Heap::new();
+        h.push(1);
+        h.push(2);
+        h.push(8);
+        h.push(4);
+        assert_eq!(h.pop(), Some(1));
+        assert_eq!(h.pop(), Some(2));
+        assert_eq!(h.pop(), Some(4));
+        assert_eq!(h.pop(), Some(8));
+        assert_eq!(h.pop(), None);
+        assert_eq!(h.pop(), None);
+    }
+
+    #[test]
+    fn simple2() {
+        let mut h = Heap::new();
+        h.push(5);
+        h.push(4);
+        h.push(3);
+        h.push(2);
+        h.push(1);
+        assert_eq!(h.pop(), Some(1));
+        h.push(8);
+        assert_eq!(h.pop(), Some(2));
+        h.push(1);
+        assert_eq!(h.pop(), Some(1));
+        assert_eq!(h.pop(), Some(3));
+        assert_eq!(h.pop(), Some(4));
+        h.push(5);
+        assert_eq!(h.pop(), Some(5));
+        assert_eq!(h.pop(), Some(5));
+        assert_eq!(h.pop(), Some(8));
+    }
+
+    #[test]
+    fn remove() {
+        let mut h = Heap::new();
+        h.push(5);
+        h.push(4);
+        h.push(3);
+        let two = h.push(2);
+        h.push(1);
+        assert_eq!(h.pop(), Some(1));
+        assert_eq!(h.remove(two), 2);
+        h.push(1);
+        assert_eq!(h.pop(), Some(1));
+        assert_eq!(h.pop(), Some(3));
+    }
+
+    // Helper: builds a heap from a vector by pushing every element.
+    fn vec2heap<T: Ord>(v: Vec<T>) -> Heap<T> {
+        let mut h = Heap::new();
+        for t in v {
+            h.push(t);
+        }
+        return h
+    }
+
+    #[test]
+    fn test_peek_and_pop() {
+        let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
+        let mut sorted = data.clone();
+        sorted.sort();
+        let mut heap = vec2heap(data);
+        while heap.peek().is_some() {
+            assert_eq!(heap.peek().unwrap(), sorted.first().unwrap());
+            assert_eq!(heap.pop().unwrap(), sorted.remove(0));
+        }
+    }
+
+    #[test]
+    fn test_push() {
+        let mut heap = Heap::new();
+        heap.push(-2);
+        heap.push(-4);
+        heap.push(-9);
+        assert!(*heap.peek().unwrap() == -9);
+        heap.push(-11);
+        assert!(*heap.peek().unwrap() == -11);
+        heap.push(-5);
+        assert!(*heap.peek().unwrap() == -11);
+        heap.push(-27);
+        assert!(*heap.peek().unwrap() == -27);
+        heap.push(-3);
+        assert!(*heap.peek().unwrap() == -27);
+        heap.push(-103);
+        assert!(*heap.peek().unwrap() == -103);
+    }
+
+    // Helper: popping everything must yield the data in sorted order.
+    fn check_to_vec(mut data: Vec<i32>) {
+        let mut heap = Heap::new();
+        for data in data.iter() {
+            heap.push(*data);
+        }
+        data.sort();
+        let mut v = Vec::new();
+        while let Some(i) = heap.pop() {
+            v.push(i);
+        }
+        assert_eq!(v, data);
+    }
+
+    #[test]
+    fn test_to_vec() {
+        check_to_vec(vec![]);
+        check_to_vec(vec![5]);
+        check_to_vec(vec![3, 2]);
+        check_to_vec(vec![2, 3]);
+        check_to_vec(vec![5, 1, 2]);
+        check_to_vec(vec![1, 100, 2, 3]);
+        check_to_vec(vec![1, 3, 5, 7, 9, 2, 4, 6, 8, 0]);
+        check_to_vec(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+        check_to_vec(vec![9, 11, 9, 9, 9, 9, 11, 2, 3, 4, 11, 9, 0, 0, 0, 0]);
+        check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+        check_to_vec(vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+        check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 2]);
+        check_to_vec(vec![5, 4, 3, 2, 1, 5, 4, 3, 2, 1, 5, 4, 3, 2, 1]);
+    }
+
+    #[test]
+    fn test_empty_pop() {
+        let mut heap = Heap::<i32>::new();
+        assert!(heap.pop().is_none());
+    }
+
+    #[test]
+    fn test_empty_peek() {
+        let empty = Heap::<i32>::new();
+        assert!(empty.peek().is_none());
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/copy.rs
@@ -0,0 +1,84 @@
+use std::io::{self, Read, Write};
+
+use futures::{Future, Poll};
+
+/// A future which will copy all data from a reader into a writer.
+///
+/// Created by the [`copy`] function, this future will resolve to the number of
+/// bytes copied or an error if one happens.
+///
+/// [`copy`]: fn.copy.html
+pub struct Copy<R, W> {
+    reader: R,
+    // Set once `reader` has returned EOF (a read of 0 bytes).
+    read_done: bool,
+    writer: W,
+    // `buf[pos..cap]` holds bytes that have been read but not yet written.
+    pos: usize,
+    cap: usize,
+    // Total number of bytes copied so far; this is the future's result.
+    amt: u64,
+    // Fixed-size intermediate buffer used to shuttle data across.
+    buf: Box<[u8]>,
+}
+
+/// Creates a future which represents copying all the bytes from one object to
+/// another.
+///
+/// The returned future will copy all the bytes read from `reader` into the
+/// `writer` specified. This future will only complete once the `reader` has hit
+/// EOF and all bytes have been written to and flushed from the `writer`
+/// provided.
+///
+/// On success the number of bytes is returned and the `reader` and `writer` are
+/// consumed. On error the error is returned and the I/O objects are consumed as
+/// well.
+pub fn copy<R, W>(reader: R, writer: W) -> Copy<R, W>
+    where R: Read,
+          W: Write,
+{
+    Copy {
+        reader: reader,
+        read_done: false,
+        writer: writer,
+        amt: 0,
+        pos: 0,
+        cap: 0,
+        // 2KiB scratch buffer; the size is an internal implementation
+        // detail, not part of the API contract.
+        buf: Box::new([0; 2048]),
+    }
+}
+
+impl<R, W> Future for Copy<R, W>
+    where R: Read,
+          W: Write,
+{
+    type Item = u64;
+    type Error = io::Error;
+
+    /// Drives the copy forward: reads a chunk, writes it out, and repeats
+    /// until EOF, then flushes the writer and resolves with the byte count.
+    fn poll(&mut self) -> Poll<u64, io::Error> {
+        loop {
+            // If our buffer is empty, then we need to read some data to
+            // continue.
+            if self.pos == self.cap && !self.read_done {
+                let n = try_nb!(self.reader.read(&mut self.buf));
+                if n == 0 {
+                    self.read_done = true;
+                } else {
+                    self.pos = 0;
+                    self.cap = n;
+                }
+            }
+
+            // If our buffer has some data, let's write it out!
+            while self.pos < self.cap {
+                let i = try_nb!(self.writer.write(&self.buf[self.pos..self.cap]));
+                // A successful zero-length write can never make progress, so
+                // previously this loop would spin forever. Surface it as an
+                // error instead, matching the behavior of `std::io::copy`.
+                if i == 0 {
+                    return Err(io::Error::new(io::ErrorKind::WriteZero,
+                                              "write zero byte into writer"));
+                }
+                self.pos += i;
+                self.amt += i as u64;
+            }
+
+            // If we've written all the data and we've seen EOF, flush out the
+            // data and finish the transfer.
+            if self.pos == self.cap && self.read_done {
+                try_nb!(self.writer.flush());
+                return Ok(self.amt.into())
+            }
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/flush.rs
@@ -0,0 +1,41 @@
+use std::io::{self, Write};
+
+use futures::{Poll, Future, Async};
+
+/// A future used to fully flush an I/O object.
+///
+/// Resolves to the underlying I/O object once the flush operation is complete.
+///
+/// Created by the [`flush`] function.
+///
+/// [`flush`]: fn.flush.html
+pub struct Flush<A> {
+    // The I/O object being flushed; held in an `Option` so it can be moved
+    // out and yielded to the caller once the flush completes.
+    a: Option<A>,
+}
+
+/// Creates a future which will entirely flush an I/O object and then yield the
+/// object itself.
+///
+/// This function will consume the object provided if an error happens, and
+/// otherwise it will repeatedly call `flush` until it sees `Ok(())`, scheduling
+/// a retry if `WouldBlock` is seen along the way.
+pub fn flush<A>(a: A) -> Flush<A>
+    where A: Write,
+{
+    Flush {
+        a: Some(a),
+    }
+}
+
+impl<A> Future for Flush<A>
+    where A: Write,
+{
+    type Item = A;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<A, io::Error> {
+        // Polling again after the future has resolved panics on the first
+        // `unwrap`, which is the standard contract for futures.
+        try_nb!(self.a.as_mut().unwrap().flush());
+        Ok(Async::Ready(self.a.take().unwrap()))
+    }
+}
+
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/frame.rs
@@ -0,0 +1,587 @@
+use std::fmt;
+use std::io;
+use std::hash;
+use std::mem;
+use std::cmp;
+use std::ops::{Deref, DerefMut};
+use std::sync::Arc;
+
+use futures::{Async, Poll, Stream, Sink, StartSend, AsyncSink};
+
+use io::Io;
+
+/// Initial capacity, in bytes, of a freshly allocated buffer; also used by
+/// `Framed` as the write-buffer backpressure threshold.
+const INITIAL_CAPACITY: usize = 8 * 1024;
+
+/// A reference counted buffer of bytes.
+///
+/// An `EasyBuf` is a representation of a byte buffer where sub-slices of it can
+/// be handed out efficiently, each with a `'static` lifetime which keeps the
+/// data alive. The buffer also supports mutation but may require bytes to be
+/// copied to complete the operation.
+#[derive(Clone, Eq)]
+pub struct EasyBuf {
+    // Shared backing storage; cloning an `EasyBuf` bumps the reference
+    // count rather than copying the bytes.
+    buf: Arc<Vec<u8>>,
+    // This handle's view of the data is `buf[start..end]`.
+    start: usize,
+    end: usize,
+}
+
+/// An RAII object returned from `get_mut` which provides mutable access to the
+/// underlying `Vec<u8>`.
+pub struct EasyBufMut<'a> {
+    buf: &'a mut Vec<u8>,
+    // On drop, the vector's final length is written back through this
+    // pointer so the owning `EasyBuf`'s window covers the whole buffer.
+    end: &'a mut usize,
+}
+
+impl EasyBuf {
+    /// Creates a new EasyBuf with no data and the default capacity.
+    pub fn new() -> EasyBuf {
+        EasyBuf::with_capacity(INITIAL_CAPACITY)
+    }
+
+    /// Creates a new EasyBuf with `cap` capacity.
+    pub fn with_capacity(cap: usize) -> EasyBuf {
+        EasyBuf {
+            buf: Arc::new(Vec::with_capacity(cap)),
+            start: 0,
+            end: 0,
+        }
+    }
+
+    /// Changes the starting index of this window to the index specified.
+    ///
+    /// Returns the window back to chain multiple calls to this method.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `start` is out of bounds for the underlying
+    /// slice or if it comes after the `end` configured in this window.
+    fn set_start(&mut self, start: usize) -> &mut EasyBuf {
+        assert!(start <= self.buf.as_ref().len());
+        assert!(start <= self.end);
+        self.start = start;
+        self
+    }
+
+    /// Changes the end index of this window to the index specified.
+    ///
+    /// Returns the window back to chain multiple calls to this method.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `end` is out of bounds for the underlying
+    /// slice or if it comes before the `start` configured in this window.
+    fn set_end(&mut self, end: usize) -> &mut EasyBuf {
+        assert!(end <= self.buf.len());
+        assert!(self.start <= end);
+        self.end = end;
+        self
+    }
+
+    /// Returns the number of bytes contained in this `EasyBuf`.
+    pub fn len(&self) -> usize {
+        self.end - self.start
+    }
+
+    /// Returns the inner contents of this `EasyBuf` as a slice.
+    pub fn as_slice(&self) -> &[u8] {
+        self.as_ref()
+    }
+
+    /// Splits the buffer into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[0, at)`, and the returned `EasyBuf`
+    /// contains elements `[at, len)`.
+    ///
+    /// This is an O(1) operation that just increases the reference count and
+    /// sets a few indexes.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`
+    pub fn split_off(&mut self, at: usize) -> EasyBuf {
+        let mut other = EasyBuf { buf: self.buf.clone(), ..*self };
+        let idx = self.start + at;
+        other.set_start(idx);
+        self.set_end(idx);
+        return other
+    }
+
+    /// Splits the buffer into two at the given index.
+    ///
+    /// Afterwards `self` contains elements `[at, len)`, and the returned `EasyBuf`
+    /// contains elements `[0, at)`.
+    ///
+    /// This is an O(1) operation that just increases the reference count and
+    /// sets a few indexes.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `at > len`
+    pub fn drain_to(&mut self, at: usize) -> EasyBuf {
+        let mut other = EasyBuf { buf: self.buf.clone(), ..*self };
+        let idx = self.start + at;
+        other.set_end(idx);
+        self.set_start(idx);
+        return other
+    }
+
+    /// Returns a mutable reference to the underlying growable buffer of bytes.
+    ///
+    /// If this `EasyBuf` is the only instance pointing at the underlying buffer
+    /// of bytes, a direct mutable reference will be returned. Otherwise the
+    /// contents of this `EasyBuf` will be reallocated in a fresh `Vec<u8>`
+    /// allocation with at least the capacity of an `EasyBuf` created with
+    /// `EasyBuf::new()`, and that allocation will be returned.
+    ///
+    /// This operation **is not O(1)** as it may clone the entire contents of
+    /// this buffer.
+    ///
+    /// The returned `EasyBufMut` type implements `Deref` and `DerefMut` to
+    /// `Vec<u8>`, so the byte buffer can be manipulated using the standard
+    /// `Vec<u8>` methods.
+    pub fn get_mut(&mut self) -> EasyBufMut {
+        // Fast path if we can get mutable access to our own current
+        // buffer.
+        //
+        // TODO: this should be a match or an if-let
+        if Arc::get_mut(&mut self.buf).is_some() {
+            let buf = Arc::get_mut(&mut self.buf).unwrap();
+            // Trim the vector down to exactly this window so the caller
+            // sees only the bytes this handle owns.
+            buf.drain(self.end..);
+            buf.drain(..self.start);
+            self.start = 0;
+            return EasyBufMut { buf: buf, end: &mut self.end }
+        }
+
+        // If we couldn't get access above then we give ourself a new buffer
+        // here.
+        let mut v = Vec::with_capacity(cmp::max(INITIAL_CAPACITY, self.as_ref().len()));
+        v.extend_from_slice(self.as_ref());
+        self.start = 0;
+        self.buf = Arc::new(v);
+        EasyBufMut {
+            buf: Arc::get_mut(&mut self.buf).unwrap(),
+            end: &mut self.end,
+        }
+    }
+}
+
+impl AsRef<[u8]> for EasyBuf {
+    fn as_ref(&self) -> &[u8] {
+        // Only this handle's window into the shared storage is exposed.
+        &self.buf[self.start..self.end]
+    }
+}
+
+impl<'a> Deref for EasyBufMut<'a> {
+    type Target = Vec<u8>;
+
+    fn deref(&self) -> &Vec<u8> {
+        self.buf
+    }
+}
+
+impl<'a> DerefMut for EasyBufMut<'a> {
+    fn deref_mut(&mut self) -> &mut Vec<u8> {
+        self.buf
+    }
+}
+
+impl From<Vec<u8>> for EasyBuf {
+    fn from(vec: Vec<u8>) -> EasyBuf {
+        // The window initially covers the entire vector.
+        let end = vec.len();
+        EasyBuf {
+            buf: Arc::new(vec),
+            start: 0,
+            end: end,
+        }
+    }
+}
+
+// Equality/ordering/hashing all operate on the *visible* bytes only, so two
+// windows over the same underlying vector can compare unequal.
+impl<T: AsRef<[u8]>> PartialEq<T> for EasyBuf {
+    fn eq(&self, other: &T) -> bool {
+        self.as_slice().eq(other.as_ref())
+    }
+}
+
+impl Ord for EasyBuf {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        self.as_slice().cmp(other.as_slice())
+    }
+}
+
+impl<T: AsRef<[u8]>> PartialOrd<T> for EasyBuf {
+    fn partial_cmp(&self, other: &T) -> Option<cmp::Ordering> {
+        self.as_slice().partial_cmp(other.as_ref())
+    }
+}
+
+impl hash::Hash for EasyBuf {
+    fn hash<H: hash::Hasher>(&self, state: &mut H) {
+        self.as_slice().hash(state)
+    }
+}
+
+impl<'a> Drop for EasyBufMut<'a> {
+    fn drop(&mut self) {
+        // Propagate the vector's (possibly changed) length back to the
+        // owning `EasyBuf` so its window covers everything in the buffer.
+        *self.end = self.buf.len();
+    }
+}
+
+impl fmt::Debug for EasyBuf {
+    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        let bytes = self.as_ref();
+        let len = self.len();
+        if len < 10 {
+            write!(formatter, "EasyBuf{{len={}/{} {:?}}}", self.len(), self.buf.len(), bytes)
+        } else { // choose a more compact representation
+            write!(formatter, "EasyBuf{{len={}/{} [{}, {}, {}, {}, ..., {}, {}, {}, {}]}}", self.len(), self.buf.len(), bytes[0], bytes[1], bytes[2], bytes[3], bytes[len-4], bytes[len-3], bytes[len-2], bytes[len-1])
+        }
+    }
+}
+
+impl Into<Vec<u8>> for EasyBuf {
+    fn into(mut self) -> Vec<u8> {
+        // `get_mut` reuses the underlying `Vec` when this handle is the sole
+        // owner, and otherwise copies the visible bytes out first.
+        mem::replace(self.get_mut().buf, vec![])
+    }
+}
+
+/// Encoding and decoding of frames via buffers.
+///
+/// This trait is used when constructing an instance of `Framed`. It provides
+/// two types: `In`, for decoded input frames, and `Out`, for outgoing frames
+/// that need to be encoded. It also provides methods to actually perform the
+/// encoding and decoding, which work with corresponding buffer types.
+///
+/// The trait itself is implemented on a type that can track state for decoding
+/// or encoding, which is particularly useful for streaming parsers. In many
+/// cases, though, this type will simply be a unit struct (e.g. `struct
+/// HttpCodec`).
+pub trait Codec {
+    /// The type of decoded frames.
+    type In;
+
+    /// The type of frames to be encoded.
+    type Out;
+
+    /// Attempts to decode a frame from the provided buffer of bytes.
+    ///
+    /// This method is called by `Framed` whenever bytes are ready to be parsed.
+    /// The provided buffer of bytes is what's been read so far, and this
+    /// instance of `Decode` can determine whether an entire frame is in the
+    /// buffer and is ready to be returned.
+    ///
+    /// If an entire frame is available, then this instance will remove those
+    /// bytes from the buffer provided and return them as a decoded
+    /// frame. Note that removing bytes from the provided buffer doesn't always
+    /// necessarily copy the bytes, so this should be an efficient operation in
+    /// most circumstances.
+    ///
+    /// If the bytes look valid, but a frame isn't fully available yet, then
+    /// `Ok(None)` is returned. This indicates to the `Framed` instance that
+    /// it needs to read some more bytes before calling this method again.
+    ///
+    /// Finally, if the bytes in the buffer are malformed then an error is
+    /// returned indicating why. This informs `Framed` that the stream is now
+    /// corrupt and should be terminated.
+    fn decode(&mut self, buf: &mut EasyBuf) -> io::Result<Option<Self::In>>;
+
+    /// A default method available to be called when there are no more bytes
+    /// available to be read from the underlying I/O.
+    ///
+    /// This method defaults to calling `decode` and returns an error if
+    /// `Ok(None)` is returned. Typically this doesn't need to be implemented
+    /// unless the framing protocol differs near the end of the stream.
+    ///
+    /// `Framed` only calls this once the underlying I/O has hit EOF while
+    /// undecoded bytes still remain in the read buffer.
+    fn decode_eof(&mut self, buf: &mut EasyBuf) -> io::Result<Self::In> {
+        match try!(self.decode(buf)) {
+            Some(frame) => Ok(frame),
+            None => Err(io::Error::new(io::ErrorKind::Other,
+                                       "bytes remaining on stream")),
+        }
+    }
+
+    /// Encodes a frame into the buffer provided.
+    ///
+    /// This method will encode `msg` into the byte buffer provided by `buf`.
+    /// The `buf` provided is an internal buffer of the `Framed` instance and
+    /// will be written out when possible.
+    fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> io::Result<()>;
+}
+
+/// A unified `Stream` and `Sink` interface to an underlying `Io` object, using
+/// the `Codec` trait to encode and decode frames.
+///
+/// You can acquire a `Framed` instance by using the `Io::framed` adapter.
+pub struct Framed<T, C> {
+    upstream: T,
+    codec: C,
+    // Set once `upstream` has reached EOF.
+    eof: bool,
+    // True when `rd` may hold a decodable frame, i.e. `poll` should try
+    // `decode` before reading more data.
+    is_readable: bool,
+    // Bytes read from `upstream` but not yet decoded.
+    rd: EasyBuf,
+    // Encoded bytes not yet written to `upstream`.
+    wr: Vec<u8>,
+}
+
+impl<T: Io, C: Codec> Stream for Framed<T, C> {
+    type Item = C::In;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Option<C::In>, io::Error> {
+        loop {
+            // If the read buffer has any pending data, then it could be
+            // possible that `decode` will return a new frame. We leave it to
+            // the decoder to optimize detecting that more data is required.
+            if self.is_readable {
+                if self.eof {
+                    // At EOF: either the buffer is fully drained (stream is
+                    // done) or the codec gets one last chance via
+                    // `decode_eof` to produce a frame from the remainder.
+                    if self.rd.len() == 0 {
+                        return Ok(None.into())
+                    } else {
+                        let frame = try!(self.codec.decode_eof(&mut self.rd));
+                        return Ok(Async::Ready(Some(frame)))
+                    }
+                }
+                trace!("attempting to decode a frame");
+                if let Some(frame) = try!(self.codec.decode(&mut self.rd)) {
+                    trace!("frame decoded from buffer");
+                    return Ok(Async::Ready(Some(frame)));
+                }
+                self.is_readable = false;
+            }
+
+            assert!(!self.eof);
+
+            // Otherwise, try to read more data and try again
+            //
+            // TODO: shouldn't read_to_end, that may read a lot
+            let before = self.rd.len();
+            let ret = self.upstream.read_to_end(&mut self.rd.get_mut());
+            match ret {
+                // `read_to_end` only returns `Ok` once the reader reports
+                // EOF.
+                Ok(_n) => self.eof = true,
+                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+                    // A partial read may have landed in `rd` before the
+                    // `WouldBlock`; only yield if nothing new arrived.
+                    if self.rd.len() == before {
+                        return Ok(Async::NotReady)
+                    }
+                }
+                Err(e) => return Err(e),
+            }
+            self.is_readable = true;
+        }
+    }
+}
+
+impl<T: Io, C: Codec> Sink for Framed<T, C> {
+    type SinkItem = C::Out;
+    type SinkError = io::Error;
+
+    fn start_send(&mut self, item: C::Out) -> StartSend<C::Out, io::Error> {
+        // If the buffer is already over 8KiB, then attempt to flush it. If after flushing it's
+        // *still* over 8KiB, then apply backpressure (reject the send).
+        const BACKPRESSURE_BOUNDARY: usize = INITIAL_CAPACITY;
+        if self.wr.len() > BACKPRESSURE_BOUNDARY {
+            try!(self.poll_complete());
+            if self.wr.len() > BACKPRESSURE_BOUNDARY {
+                return Ok(AsyncSink::NotReady(item));
+            }
+        }
+
+        try!(self.codec.encode(item, &mut self.wr));
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), io::Error> {
+        trace!("flushing framed transport");
+
+        while !self.wr.is_empty() {
+            trace!("writing; remaining={}", self.wr.len());
+            let n = try_nb!(self.upstream.write(&self.wr));
+            // A successful zero-length write would never make progress, so
+            // treat it as an error rather than looping forever.
+            if n == 0 {
+                return Err(io::Error::new(io::ErrorKind::WriteZero,
+                                          "failed to write frame to transport"));
+            }
+            self.wr.drain(..n);
+        }
+
+        // Try flushing the underlying IO
+        try_nb!(self.upstream.flush());
+
+        trace!("framed transport flushed");
+        return Ok(Async::Ready(()));
+    }
+
+    fn close(&mut self) -> Poll<(), io::Error> {
+        // Flushes any buffered frames; note that this does not shut down
+        // the underlying I/O object itself.
+        try_ready!(self.poll_complete());
+        Ok(().into())
+    }
+}
+
+/// Constructs a `Framed` from an I/O object and a codec, starting with empty
+/// read and write buffers.
+pub fn framed<T, C>(io: T, codec: C) -> Framed<T, C> {
+    Framed {
+        upstream: io,
+        codec: codec,
+        eof: false,
+        is_readable: false,
+        rd: EasyBuf::new(),
+        wr: Vec::with_capacity(INITIAL_CAPACITY),
+    }
+}
+
+impl<T, C> Framed<T, C> {
+
+    /// Returns a reference to the underlying I/O stream wrapped by `Framed`.
+    ///
+    /// Note that care should be taken to not tamper with the underlying stream
+    /// of data coming in as it may corrupt the stream of frames otherwise being
+    /// worked with.
+    pub fn get_ref(&self) -> &T {
+        &self.upstream
+    }
+
+    /// Returns a mutable reference to the underlying I/O stream wrapped by
+    /// `Framed`.
+    ///
+    /// Note that care should be taken to not tamper with the underlying stream
+    /// of data coming in as it may corrupt the stream of frames otherwise being
+    /// worked with.
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.upstream
+    }
+
+    /// Consumes the `Framed`, returning its underlying I/O stream.
+    ///
+    /// Note that care should be taken to not tamper with the underlying stream
+    /// of data coming in as it may corrupt the stream of frames otherwise being
+    /// worked with.
+    pub fn into_inner(self) -> T {
+        self.upstream
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{INITIAL_CAPACITY, EasyBuf};
+    use std::mem;
+
+    // These tests cover `EasyBuf`'s Debug formatting, windowing via
+    // `split_off`/`drain_to`, the copy-on-write behavior of `get_mut`,
+    // `Into<Vec<u8>>` allocation reuse, and content-based equality.
+    #[test]
+    fn debug_empty_easybuf() {
+        let buf: EasyBuf = vec![].into();
+        assert_eq!("EasyBuf{len=0/0 []}", format!("{:?}", buf));
+    }
+
+    #[test]
+    fn debug_small_easybuf() {
+        let buf: EasyBuf = vec![1, 2, 3, 4, 5, 6].into();
+        assert_eq!("EasyBuf{len=6/6 [1, 2, 3, 4, 5, 6]}", format!("{:?}", buf));
+    }
+
+    #[test]
+    fn debug_small_easybuf_split() {
+        let mut buf: EasyBuf = vec![1, 2, 3, 4, 5, 6].into();
+        let split = buf.split_off(4);
+        assert_eq!("EasyBuf{len=4/6 [1, 2, 3, 4]}", format!("{:?}", buf));
+        assert_eq!("EasyBuf{len=2/6 [5, 6]}", format!("{:?}", split));
+    }
+
+    #[test]
+    fn debug_large_easybuf() {
+        let vec: Vec<u8> = (0u8..255u8).collect();
+        let buf: EasyBuf = vec.into();
+        assert_eq!("EasyBuf{len=255/255 [0, 1, 2, 3, ..., 251, 252, 253, 254]}", format!("{:?}", buf));
+    }
+
+    #[test]
+    fn easybuf_get_mut_sliced() {
+        let vec: Vec<u8> = (0u8..10u8).collect();
+        let mut buf: EasyBuf = vec.into();
+        buf.split_off(9);
+        buf.drain_to(3);
+        assert_eq!(*buf.get_mut(), [3, 4, 5, 6, 7, 8]);
+    }
+
+    #[test]
+    fn easybuf_get_mut_sliced_allocating_at_least_initial_capacity() {
+        let vec: Vec<u8> = (0u8..10u8).collect();
+        let mut buf: EasyBuf = vec.into();
+        buf.split_off(9);
+        buf.drain_to(3);
+        // Clone to make shared
+        let clone = buf.clone();
+        assert_eq!(*buf.get_mut(), [3, 4, 5, 6, 7, 8]);
+        assert_eq!(buf.get_mut().buf.capacity(), INITIAL_CAPACITY);
+        mem::drop(clone); // prevent unused warning
+    }
+
+    #[test]
+    fn easybuf_get_mut_sliced_allocating_required_capacity() {
+        let vec: Vec<u8> = (0..INITIAL_CAPACITY * 2).map(|_|0u8).collect();
+        let mut buf: EasyBuf = vec.into();
+        buf.drain_to(INITIAL_CAPACITY / 2);
+        let clone = buf.clone();
+        assert_eq!(buf.get_mut().buf.capacity(), INITIAL_CAPACITY + INITIAL_CAPACITY / 2);
+        mem::drop(clone)
+    }
+
+    #[test]
+    fn easybuf_into_vec_simple() {
+        let vec: Vec<u8> = (0u8..10u8).collect();
+        let reference = vec.clone();
+        let buf: EasyBuf = vec.into();
+        let original_pointer = buf.buf.as_ref().as_ptr();
+        let result: Vec<u8> = buf.into();
+        assert_eq!(result, reference);
+        let new_pointer = result.as_ptr();
+        assert_eq!(original_pointer, new_pointer, "Into<Vec<u8>> should reuse the exclusive Vec");
+    }
+
+    #[test]
+    fn easybuf_into_vec_sliced() {
+        let vec: Vec<u8> = (0u8..10u8).collect();
+        let mut buf: EasyBuf = vec.into();
+        let original_pointer = buf.buf.as_ref().as_ptr();
+        buf.split_off(9);
+        buf.drain_to(3);
+        let result: Vec<u8> = buf.into();
+        let reference: Vec<u8> = (3u8..9u8).collect();
+        assert_eq!(result, reference);
+        let new_pointer = result.as_ptr();
+        assert_eq!(original_pointer, new_pointer, "Into<Vec<u8>> should reuse the exclusive Vec");
+    }
+
+    #[test]
+    fn easybuf_into_vec_sliced_allocating() {
+        let vec: Vec<u8> = (0u8..10u8).collect();
+        let mut buf: EasyBuf = vec.into();
+        let original_pointer = buf.buf.as_ref().as_ptr();
+        // Create a clone to create second reference to this EasyBuf and force allocation
+        let original = buf.clone();
+        buf.split_off(9);
+        buf.drain_to(3);
+        let result: Vec<u8> = buf.into();
+        let reference: Vec<u8> = (3u8..9u8).collect();
+        assert_eq!(result, reference);
+        let original_reference: EasyBuf =(0u8..10u8).collect::<Vec<u8>>().into();
+        assert_eq!(original.as_ref(), original_reference.as_ref());
+        let new_pointer = result.as_ptr();
+        assert_ne!(original_pointer, new_pointer, "A new vec should be allocated");
+    }
+
+    #[test]
+    fn easybuf_equality_same_underlying_vec() {
+        let mut buf: EasyBuf = (0u8..10).collect::<Vec<_>>().into();
+        assert_eq!(buf, buf);
+        let other = buf.drain_to(5);
+        assert_ne!(buf, other);
+
+        let buf: EasyBuf = (0u8..5).collect::<Vec<_>>().into();
+        assert_eq!(buf, other);
+    }
+
+    #[test]
+    fn easybuf_equality_different_underlying_vec() {
+        let mut buf: EasyBuf = (0u8..10).collect::<Vec<_>>().into();
+        let mut other: EasyBuf = (0u8..10).collect::<Vec<_>>().into();
+        assert_eq!(buf, other);
+
+        buf = buf.drain_to(5);
+        assert_ne!(buf, other);
+
+        other = other.drain_to(5);
+        assert_eq!(buf, other);
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/mod.rs
@@ -0,0 +1,254 @@
+//! I/O conveniences when working with primitives in `tokio-core`
+//!
+//! Contains various combinators to work with I/O objects and type definitions
+//! as well.
+//!
+//! A description of the high-level I/O combinators can be [found online] in
+//! addition to a description of the [low level details].
+//!
+//! [found online]: https://tokio.rs/docs/getting-started/core/
+//! [low level details]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/
+
+#![deprecated(note = "moved to the `tokio-io` crate")]
+
+use std::io;
+
+use futures::{Async, Poll};
+use futures::future::BoxFuture;
+use futures::stream::BoxStream;
+use iovec::IoVec;
+
+/// A convenience typedef around a boxed `Future` whose error component is
+/// `io::Error`.
+pub type IoFuture<T> = BoxFuture<T, io::Error>;
+
+/// A convenience typedef around a boxed `Stream` whose error component is
+/// `io::Error`.
+pub type IoStream<T> = BoxStream<T, io::Error>;
+
+/// A convenience macro for working with `io::Result<T>` from the `Read` and
+/// `Write` traits.
+///
+/// This macro takes `io::Result<T>` as input, and returns `T` as the output. If
+/// the input type is of the `Err` variant, then `Poll::NotReady` is returned if
+/// it indicates `WouldBlock` or otherwise `Err` is returned.
+#[macro_export]
+macro_rules! try_nb {
+    ($e:expr) => (match $e {
+        Ok(t) => t,
+        // `WouldBlock` is not a failure in the futures model: it means the
+        // operation is simply not ready yet, so surface it as `NotReady`.
+        Err(ref e) if e.kind() == ::std::io::ErrorKind::WouldBlock => {
+            return Ok(::futures::Async::NotReady)
+        }
+        Err(e) => return Err(e.into()),
+    })
+}
+
+// Combinator submodules; each provides a free function plus its future type.
+mod copy;
+mod frame;
+mod flush;
+mod read_exact;
+mod read_to_end;
+mod read;
+mod read_until;
+mod split;
+mod window;
+mod write_all;
+// Public re-exports so users can reach everything through `tokio_core::io`.
+pub use self::copy::{copy, Copy};
+pub use self::frame::{EasyBuf, EasyBufMut, Framed, Codec};
+pub use self::flush::{flush, Flush};
+pub use self::read_exact::{read_exact, ReadExact};
+pub use self::read_to_end::{read_to_end, ReadToEnd};
+pub use self::read::{read, Read};
+pub use self::read_until::{read_until, ReadUntil};
+pub use self::split::{ReadHalf, WriteHalf};
+pub use self::window::Window;
+pub use self::write_all::{write_all, WriteAll};
+
+/// A trait for read/write I/O objects
+///
+/// This trait represents I/O object which are readable and writable.
+/// Additionally, they're associated with the ability to test whether they're
+/// readable or writable.
+///
+/// Importantly, the methods of this trait are intended to be used in conjunction
+/// with the current task of a future. Namely whenever any of them return a
+/// value that indicates "would block" the current future's task is arranged to
+/// receive a notification when the method would otherwise not indicate that it
+/// would block.
+pub trait Io: io::Read + io::Write {
+    /// Tests to see if this I/O object may be readable.
+    ///
+    /// This method returns an `Async<()>` indicating whether the object
+    /// **might** be readable. It is possible that even if this method returns
+    /// `Async::Ready` that a call to `read` would return a `WouldBlock` error.
+    ///
+    /// There is a default implementation for this function which always
+    /// indicates that an I/O object is readable, but objects which can
+    /// implement a finer grained version of this are recommended to do so.
+    ///
+    /// If this function returns `Async::NotReady` then the current future's
+    /// task is arranged to receive a notification when it might not return
+    /// `NotReady`.
+    ///
+    /// # Panics
+    ///
+    /// This method is likely to panic if called from outside the context of a
+    /// future's task.
+    fn poll_read(&mut self) -> Async<()> {
+        // Conservative default: always claim readiness; the actual `read`
+        // call may still report `WouldBlock`.
+        Async::Ready(())
+    }
+
+    /// Tests to see if this I/O object may be writable.
+    ///
+    /// This method returns an `Async<()>` indicating whether the object
+    /// **might** be writable. It is possible that even if this method returns
+    /// `Async::Ready` that a call to `write` would return a `WouldBlock` error.
+    ///
+    /// There is a default implementation for this function which always
+    /// indicates that an I/O object is writable, but objects which can
+    /// implement a finer grained version of this are recommended to do so.
+    ///
+    /// If this function returns `Async::NotReady` then the current future's
+    /// task is arranged to receive a notification when it might not return
+    /// `NotReady`.
+    ///
+    /// # Panics
+    ///
+    /// This method is likely to panic if called from outside the context of a
+    /// future's task.
+    fn poll_write(&mut self) -> Async<()> {
+        // Conservative default: always claim readiness; the actual `write`
+        // call may still report `WouldBlock`.
+        Async::Ready(())
+    }
+
+    /// Read in a list of buffers all at once.
+    ///
+    /// This operation will attempt to read bytes from this socket and place
+    /// them into the list of buffers provided. Note that each buffer is an
+    /// `IoVec` which can be created from a byte slice.
+    ///
+    /// The buffers provided will be filled in sequentially. A buffer will be
+    /// entirely filled up before the next is written to.
+    ///
+    /// The number of bytes read is returned, if successful, or an error is
+    /// returned otherwise. If no bytes are available to be read yet then
+    /// a "would block" error is returned. This operation should not block.
+    ///
+    /// There is a default implementation for this function which treats this
+    /// as a single read using the first buffer in the list, but objects which
+    /// can implement this as an atomic read using all the buffers are
+    /// recommended to do so. For example, `TcpStream` can implement this
+    /// using the `readv` syscall.
+    fn read_vec(&mut self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+        if bufs.is_empty() {
+            Ok(0)
+        } else {
+            // Default: degrade to a plain `read` into the first buffer only.
+            self.read(&mut bufs[0])
+        }
+    }
+
+    /// Write a list of buffers all at once.
+    ///
+    /// This operation will attempt to write a list of byte buffers to this
+    /// socket. Note that each buffer is an `IoVec` which can be created from a
+    /// byte slice.
+    ///
+    /// The buffers provided will be written sequentially. A buffer will be
+    /// entirely written before the next is written.
+    ///
+    /// The number of bytes written is returned, if successful, or an error is
+    /// returned otherwise. If the socket is not currently writable then a
+    /// "would block" error is returned. This operation should not block.
+    ///
+    /// There is a default implementation for this function which writes the
+    /// first buffer only, but objects which can implement this as an atomic
+    /// write using all the buffers are recommended to do so. For example,
+    /// `TcpStream` can implement this using the `writev` syscall.
+    fn write_vec(&mut self, bufs: &[&IoVec]) -> io::Result<usize> {
+        if bufs.is_empty() {
+            Ok(0)
+        } else {
+            // Default: degrade to a plain `write` of the first buffer only.
+            self.write(&bufs[0])
+        }
+    }
+
+    /// Provides a `Stream` and `Sink` interface for reading and writing to this
+    /// `Io` object, using `Decode` and `Encode` to read and write the raw data.
+    ///
+    /// Raw I/O objects work with byte sequences, but higher-level code usually
+    /// wants to batch these into meaningful chunks, called "frames". This
+    /// method layers framing on top of an I/O object, by using the `Codec`
+    /// traits to handle encoding and decoding of messages frames. Note that
+    /// the incoming and outgoing frame types may be distinct.
+    ///
+    /// This function returns a *single* object that is both `Stream` and
+    /// `Sink`; grouping this into a single object is often useful for layering
+    /// things like gzip or TLS, which require both read and write access to the
+    /// underlying object.
+    ///
+    /// If you want to work more directly with the streams and sink, consider
+    /// calling `split` on the `Framed` returned by this method, which will
+    /// break them into separate objects, allowing them to interact more easily.
+    fn framed<C: Codec>(self, codec: C) -> Framed<Self, C>
+        where Self: Sized,
+    {
+        frame::framed(self, codec)
+    }
+
+    /// Helper method for splitting this read/write object into two halves.
+    ///
+    /// The two halves returned implement the `Read` and `Write` traits,
+    /// respectively.
+    fn split(self) -> (ReadHalf<Self>, WriteHalf<Self>)
+        where Self: Sized
+    {
+        split::split(self)
+    }
+}
+
+/// A trait for framed reading and writing.
+///
+/// Most implementations of `FramedIo` are for doing protocol level
+/// serialization and deserialization.
+///
+/// Importantly, the methods of this trait are intended to be used in conjunction
+/// with the current task of a future. Namely whenever any of them return a
+/// value that indicates "would block" the current future's task is arranged to
+/// receive a notification when the method would otherwise not indicate that it
+/// would block.
+///
+/// For a sample implementation of `FramedIo` you can take a look at the
+/// `Framed` type in the `frame` module of this crate.
+#[doc(hidden)]
+#[deprecated(since = "0.1.1", note = "replaced by Sink + Stream")]
+pub trait FramedIo {
+    /// Messages written
+    type In;
+
+    /// Messages read
+    type Out;
+
+    /// Tests to see if this `FramedIo` may be readable.
+    fn poll_read(&mut self) -> Async<()>;
+
+    /// Read a message frame from the `FramedIo`
+    fn read(&mut self) -> Poll<Self::Out, io::Error>;
+
+    /// Tests to see if this `FramedIo` may be writable.
+    ///
+    /// Unlike most other calls to poll readiness, it is important that when
+    /// `FramedIo::poll_write` returns `Async::Ready` that a write will
+    /// succeed.
+    fn poll_write(&mut self) -> Async<()>;
+
+    /// Write a message frame to the `FramedIo`
+    fn write(&mut self, req: Self::In) -> Poll<(), io::Error>;
+
+    /// Flush pending writes or do any other work not driven by reading /
+    /// writing.
+    ///
+    /// Since the backing source is non-blocking, there is no guarantee that a
+    /// call to `FramedIo::write` is able to write the full message to the
+    /// backing source immediately. In this case, the `FramedIo` will need to
+    /// buffer the remaining data to write. Calls to `FramedIo::flush` attempt
+    /// to write any remaining data in the write buffer to the underlying
+    /// source.
+    fn flush(&mut self) -> Poll<(), io::Error>;
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/read.rs
@@ -0,0 +1,51 @@
+use std::mem;
+
+use futures::{Future, Poll};
+
+// Internal state of a `Read` future: still pending with the reader and
+// buffer, or already resolved (`Empty`).
+enum State<R, T> {
+    Pending {
+        rd: R,
+        buf: T,
+    },
+    Empty,
+}
+
+/// Tries to read some bytes directly into the given `buf` in asynchronous
+/// manner, returning a future type.
+///
+/// The returned future will resolve to both the I/O stream and the buffer
+/// as well as the number of bytes read once the read operation is completed.
+pub fn read<R, T>(rd: R, buf: T) -> Read<R, T>
+    where R: ::std::io::Read,
+          T: AsMut<[u8]>
+{
+    Read { state: State::Pending { rd: rd, buf: buf } }
+}
+
+/// A future which can be used to easily read available number of bytes to fill
+/// a buffer.
+///
+/// Created by the [`read`] function.
+///
+/// [`read`]: fn.read.html
+pub struct Read<R, T> {
+    state: State<R, T>,
+}
+
+impl<R, T> Future for Read<R, T>
+    where R: ::std::io::Read,
+          T: AsMut<[u8]>
+{
+    type Item = (R, T, usize);
+    type Error = ::std::io::Error;
+
+    fn poll(&mut self) -> Poll<(R, T, usize), ::std::io::Error> {
+        // Perform a single non-blocking read; `try_nb!` returns `NotReady`
+        // (leaving the state intact) if the reader would block.
+        let nread = match self.state {
+            State::Pending { ref mut rd, ref mut buf } => try_nb!(rd.read(&mut buf.as_mut()[..])),
+            State::Empty => panic!("poll a Read after it's done"),
+        };
+
+        // Read complete: take the reader and buffer back out of the state
+        // machine and yield them along with the byte count.
+        match mem::replace(&mut self.state, State::Empty) {
+            State::Pending { rd, buf } => Ok((rd, buf, nread).into()),
+            State::Empty => panic!("invalid internal state"),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/read_exact.rs
@@ -0,0 +1,79 @@
+use std::io::{self, Read};
+use std::mem;
+
+use futures::{Poll, Future};
+
+/// A future which can be used to easily read exactly enough bytes to fill
+/// a buffer.
+///
+/// Created by the [`read_exact`] function.
+///
+/// [`read_exact`]: fn.read_exact.html
+pub struct ReadExact<A, T> {
+    state: State<A, T>,
+}
+
+// Internal state: still reading with a cursor into `buf`, or resolved.
+enum State<A, T> {
+    Reading {
+        a: A,
+        buf: T,
+        // Number of bytes filled so far; index of the next byte to write.
+        pos: usize,
+    },
+    Empty,
+}
+
+/// Creates a future which will read exactly enough bytes to fill `buf`,
+/// returning an error if EOF is hit sooner.
+///
+/// The returned future will resolve to both the I/O stream as well as the
+/// buffer once the read operation is completed.
+///
+/// In the case of an error the buffer and the object will be discarded, with
+/// the error yielded. In the case of success the object will be destroyed and
+/// the buffer will be returned, with all data read from the stream appended to
+/// the buffer.
+pub fn read_exact<A, T>(a: A, buf: T) -> ReadExact<A, T>
+    where A: Read,
+          T: AsMut<[u8]>,
+{
+    ReadExact {
+        state: State::Reading {
+            a: a,
+            buf: buf,
+            pos: 0,
+        },
+    }
+}
+
+// Error returned when the reader reaches EOF before `buf` is full.
+fn eof() -> io::Error {
+    io::Error::new(io::ErrorKind::UnexpectedEof, "early eof")
+}
+
+impl<A, T> Future for ReadExact<A, T>
+    where A: Read,
+          T: AsMut<[u8]>,
+{
+    type Item = (A, T);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(A, T), io::Error> {
+        match self.state {
+            State::Reading { ref mut a, ref mut buf, ref mut pos } => {
+                let buf = buf.as_mut();
+                // Keep reading until the buffer is full; `try_nb!` bails out
+                // with `NotReady` (state preserved) on `WouldBlock`.
+                while *pos < buf.len() {
+                    let n = try_nb!(a.read(&mut buf[*pos..]));
+                    *pos += n;
+                    // A zero-length read before the buffer is full means EOF.
+                    if n == 0 {
+                        return Err(eof())
+                    }
+                }
+            }
+            State::Empty => panic!("poll a ReadExact after it's done"),
+        }
+
+        // Fully read: return ownership of the reader and buffer.
+        match mem::replace(&mut self.state, State::Empty) {
+            State::Reading { a, buf, .. } => Ok((a, buf).into()),
+            State::Empty => panic!(),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/read_to_end.rs
@@ -0,0 +1,64 @@
+use std::io::{self, Read};
+use std::mem;
+
+use futures::{Poll, Future};
+
+/// A future which can be used to easily read the entire contents of a stream
+/// into a vector.
+///
+/// Created by the [`read_to_end`] function.
+///
+/// [`read_to_end`]: fn.read_to_end.html
+pub struct ReadToEnd<A> {
+    state: State<A>,
+}
+
+// Internal state: still reading into `buf`, or already resolved.
+enum State<A> {
+    Reading {
+        a: A,
+        buf: Vec<u8>,
+    },
+    Empty,
+}
+
+/// Creates a future which will read all the bytes associated with the I/O
+/// object `A` into the buffer provided.
+///
+/// In the case of an error the buffer and the object will be discarded, with
+/// the error yielded. In the case of success the object will be destroyed and
+/// the buffer will be returned, with all data read from the stream appended to
+/// the buffer.
+pub fn read_to_end<A>(a: A, buf: Vec<u8>) -> ReadToEnd<A>
+    where A: Read,
+{
+    ReadToEnd {
+        state: State::Reading {
+            a: a,
+            buf: buf,
+        }
+    }
+}
+
+impl<A> Future for ReadToEnd<A>
+    where A: Read,
+{
+    type Item = (A, Vec<u8>);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(A, Vec<u8>), io::Error> {
+        match self.state {
+            State::Reading { ref mut a, ref mut buf } => {
+                // If we get `Ok`, then we know the stream hit EOF and we're done. If we
+                // hit "would block" then all the read data so far is in our buffer, and
+                // otherwise we propagate errors
+                try_nb!(a.read_to_end(buf));
+            },
+            State::Empty => panic!("poll ReadToEnd after it's done"),
+        }
+
+        // EOF reached: hand the reader and the filled buffer back.
+        match mem::replace(&mut self.state, State::Empty) {
+            State::Reading { a, buf } => Ok((a, buf).into()),
+            State::Empty => unreachable!(),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/read_until.rs
@@ -0,0 +1,70 @@
+use std::io::{self, Read, BufRead};
+use std::mem;
+
+use futures::{Poll, Future};
+
+/// A future which can be used to easily read the contents of a stream into a
+/// vector until the delimiter is reached.
+///
+/// Created by the [`read_until`] function.
+///
+/// [`read_until`]: fn.read_until.html
+pub struct ReadUntil<A> {
+    state: State<A>,
+}
+
+// Internal state: still reading up to the delimiter `byte`, or resolved.
+enum State<A> {
+    Reading {
+        a: A,
+        byte: u8,
+        buf: Vec<u8>,
+    },
+    Empty,
+}
+
+/// Creates a future which will read all the bytes associated with the I/O
+/// object `A` into the buffer provided until the delimiter `byte` is reached.
+/// This method is the async equivalent to [`BufRead::read_until`].
+///
+/// In case of an error the buffer and the object will be discarded, with
+/// the error yielded. In the case of success the object will be destroyed and
+/// the buffer will be returned, with all bytes up to, and including, the delimiter
+/// (if found).
+///
+/// [`BufRead::read_until`]: https://doc.rust-lang.org/std/io/trait.BufRead.html#method.read_until
+pub fn read_until<A>(a: A, byte: u8, buf: Vec<u8>) -> ReadUntil<A>
+    where A: BufRead
+{
+    ReadUntil {
+        state: State::Reading {
+            a: a,
+            byte: byte,
+            buf: buf,
+        }
+    }
+}
+
+impl<A> Future for ReadUntil<A>
+    where A: Read + BufRead
+{
+    type Item = (A, Vec<u8>);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(A, Vec<u8>), io::Error> {
+        match self.state {
+            State::Reading { ref mut a, byte, ref mut buf } => {
+                // If we get `Ok(n)`, then we know the stream hit EOF or the delimiter.
+                // and just return it, as we are finished.
+                // If we hit "would block" then all the read data so far
+                // is in our buffer, and otherwise we propagate errors.
+                try_nb!(a.read_until(byte, buf));
+            },
+            State::Empty => panic!("poll ReadUntil after it's done"),
+        }
+
+        // Delimiter (or EOF) reached: return the reader and buffer.
+        match mem::replace(&mut self.state, State::Empty) {
+            State::Reading { a, byte: _, buf } => Ok((a, buf).into()),
+            State::Empty => unreachable!(),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/split.rs
@@ -0,0 +1,68 @@
+use std::io::{self, Read, Write};
+
+use futures::Async;
+use futures::sync::BiLock;
+
+use io::Io;
+
+/// The readable half of an object returned from `Io::split`.
+pub struct ReadHalf<T> {
+    // Futures-aware lock shared with the corresponding `WriteHalf`.
+    handle: BiLock<T>,
+}
+
+/// The writable half of an object returned from `Io::split`.
+pub struct WriteHalf<T> {
+    // Futures-aware lock shared with the corresponding `ReadHalf`.
+    handle: BiLock<T>,
+}
+
+// Splits `t` into read/write halves that share it through a `BiLock`.
+pub fn split<T: Io>(t: T) -> (ReadHalf<T>, WriteHalf<T>) {
+    let (a, b) = BiLock::new(t);
+    (ReadHalf { handle: a }, WriteHalf { handle: b })
+}
+
+impl<T: Io> ReadHalf<T> {
+    /// Calls the underlying `poll_read` function on this handle, testing to
+    /// see if it's ready to be read from.
+    pub fn poll_read(&mut self) -> Async<()> {
+        match self.handle.poll_lock() {
+            Async::Ready(mut l) => l.poll_read(),
+            Async::NotReady => Async::NotReady,
+        }
+    }
+}
+
+impl<T: Io> WriteHalf<T> {
+    /// Calls the underlying `poll_write` function on this handle, testing to
+    /// see if it's ready to be written to.
+    pub fn poll_write(&mut self) -> Async<()> {
+        match self.handle.poll_lock() {
+            Async::Ready(mut l) => l.poll_write(),
+            Async::NotReady => Async::NotReady,
+        }
+    }
+}
+
+impl<T: Read> Read for ReadHalf<T> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        // If the other half currently holds the lock, report `WouldBlock`
+        // rather than waiting for it.
+        match self.handle.poll_lock() {
+            Async::Ready(mut l) => l.read(buf),
+            Async::NotReady => Err(::would_block()),
+        }
+    }
+}
+
+impl<T: Write> Write for WriteHalf<T> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        match self.handle.poll_lock() {
+            Async::Ready(mut l) => l.write(buf),
+            Async::NotReady => Err(::would_block()),
+        }
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        match self.handle.poll_lock() {
+            Async::Ready(mut l) => l.flush(),
+            Async::NotReady => Err(::would_block()),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/window.rs
@@ -0,0 +1,116 @@
+use std::ops;
+
+/// An owned window around an underlying buffer.
+///
+/// Normally slices work great for considering sub-portions of a buffer, but
+/// unfortunately a slice is a *borrowed* type in Rust which has an associated
+/// lifetime. When working with future and async I/O these lifetimes are not
+/// always appropriate, and are sometimes difficult to store in tasks. This
+/// type strives to fill this gap by providing an "owned slice" around an
+/// underlying buffer of bytes.
+///
+/// A `Window<T>` wraps an underlying buffer, `T`, and has configurable
+/// start/end indexes to alter the behavior of the `AsRef<[u8]>` implementation
+/// that this type carries.
+///
+/// This type can be particularly useful when working with the `write_all`
+/// combinator in this crate. Data can be sliced via `Window`, consumed by
+/// `write_all`, and then earned back once the write operation finishes through
+/// the `into_inner` method on this type.
+pub struct Window<T> {
+    inner: T,
+    // Half-open byte range of `inner` exposed by `as_ref`/`as_mut`.
+    range: ops::Range<usize>,
+}
+
+impl<T: AsRef<[u8]>> Window<T> {
+    /// Creates a new window around the buffer `t` defaulting to the entire
+    /// slice.
+    ///
+    /// Further methods can be called on the returned `Window<T>` to alter the
+    /// window into the data provided.
+    pub fn new(t: T) -> Window<T> {
+        Window {
+            range: 0..t.as_ref().len(),
+            inner: t,
+        }
+    }
+
+    /// Gets a shared reference to the underlying buffer inside of this
+    /// `Window`.
+    pub fn get_ref(&self) -> &T {
+        &self.inner
+    }
+
+    /// Gets a mutable reference to the underlying buffer inside of this
+    /// `Window`.
+    pub fn get_mut(&mut self) -> &mut T {
+        &mut self.inner
+    }
+
+    /// Consumes this `Window`, returning the underlying buffer.
+    pub fn into_inner(self) -> T {
+        self.inner
+    }
+
+    /// Returns the starting index of this window into the underlying buffer
+    /// `T`.
+    pub fn start(&self) -> usize {
+        self.range.start
+    }
+
+    /// Returns the end index of this window into the underlying buffer
+    /// `T`.
+    pub fn end(&self) -> usize {
+        self.range.end
+    }
+
+    /// Changes the starting index of this window to the index specified.
+    ///
+    /// Returns the window back to chain multiple calls to this method.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `start` is out of bounds for the underlying
+    /// slice or if it comes after the `end` configured in this window.
+    pub fn set_start(&mut self, start: usize) -> &mut Window<T> {
+        assert!(start <= self.inner.as_ref().len());
+        assert!(start <= self.range.end);
+        self.range.start = start;
+        self
+    }
+
+    /// Changes the end index of this window to the index specified.
+    ///
+    /// Returns the window back to chain multiple calls to this method.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if `end` is out of bounds for the underlying
+    /// slice or if it comes before the `start` configured in this window.
+    pub fn set_end(&mut self, end: usize) -> &mut Window<T> {
+        assert!(end <= self.inner.as_ref().len());
+        assert!(self.range.start <= end);
+        self.range.end = end;
+        self
+    }
+
+    // TODO: how about a generic set() method along the lines of:
+    //
+    //       buffer.set(..3)
+    //             .set(0..2)
+    //             .set(4..)
+    //
+    // etc.
+}
+
+impl<T: AsRef<[u8]>> AsRef<[u8]> for Window<T> {
+    fn as_ref(&self) -> &[u8] {
+        &self.inner.as_ref()[self.range.start..self.range.end]
+    }
+}
+
+impl<T: AsMut<[u8]>> AsMut<[u8]> for Window<T> {
+    fn as_mut(&mut self) -> &mut [u8] {
+        &mut self.inner.as_mut()[self.range.start..self.range.end]
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/io/write_all.rs
@@ -0,0 +1,82 @@
+use std::io::{self, Write};
+use std::mem;
+
+use futures::{Poll, Future};
+
+/// A future used to write the entire contents of some data to a stream.
+///
+/// This is created by the [`write_all`] top-level method.
+///
+/// [`write_all`]: fn.write_all.html
+pub struct WriteAll<A, T> {
+    state: State<A, T>,
+}
+
+// Internal state: still writing with a cursor into `buf`, or resolved.
+enum State<A, T> {
+    Writing {
+        a: A,
+        buf: T,
+        // Number of bytes written so far; index of the next byte to write.
+        pos: usize,
+    },
+    Empty,
+}
+
+/// Creates a future that will write the entire contents of the buffer `buf` to
+/// the stream `a` provided.
+///
+/// The returned future will not return until all the data has been written, and
+/// the future will resolve to the stream as well as the buffer (for reuse if
+/// needed).
+///
+/// Any error which happens during writing will cause both the stream and the
+/// buffer to get destroyed.
+///
+/// The `buf` parameter here only requires the `AsRef<[u8]>` trait, which should
+/// be broadly applicable to accepting data which can be converted to a slice.
+/// The `Window` struct is also available in this crate to provide a different
+/// window into a slice if necessary.
+pub fn write_all<A, T>(a: A, buf: T) -> WriteAll<A, T>
+    where A: Write,
+          T: AsRef<[u8]>,
+{
+    WriteAll {
+        state: State::Writing {
+            a: a,
+            buf: buf,
+            pos: 0,
+        },
+    }
+}
+
+// Error returned when the writer accepts zero bytes, which would otherwise
+// loop forever.
+fn zero_write() -> io::Error {
+    io::Error::new(io::ErrorKind::WriteZero, "zero-length write")
+}
+
+impl<A, T> Future for WriteAll<A, T>
+    where A: Write,
+          T: AsRef<[u8]>,
+{
+    type Item = (A, T);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(A, T), io::Error> {
+        match self.state {
+            State::Writing { ref mut a, ref buf, ref mut pos } => {
+                let buf = buf.as_ref();
+                // Keep writing until everything is flushed out; `try_nb!`
+                // bails out with `NotReady` (state preserved) on `WouldBlock`.
+                while *pos < buf.len() {
+                    let n = try_nb!(a.write(&buf[*pos..]));
+                    *pos += n;
+                    if n == 0 {
+                        return Err(zero_write())
+                    }
+                }
+            }
+            State::Empty => panic!("poll a WriteAll after it's done"),
+        }
+
+        // Fully written: return ownership of the writer and buffer.
+        match mem::replace(&mut self.state, State::Empty) {
+            State::Writing { a, buf, .. } => Ok((a, buf).into()),
+            State::Empty => panic!(),
+        }
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/lib.rs
@@ -0,0 +1,120 @@
+//! `Future`-powered I/O at the core of Tokio
+//!
+//! This crate uses the `futures` crate to provide an event loop ("reactor
+//! core") which can be used to drive I/O like TCP and UDP, spawned future
+//! tasks, and other events like channels/timeouts. All asynchronous I/O is
+//! powered by the `mio` crate.
+//!
+//! The concrete types provided in this crate are relatively bare bones but are
+//! intended to be the essential foundation for further projects needing an
+//! event loop. In this crate you'll find:
+//!
+//! * TCP, both streams and listeners
+//! * UDP sockets
+//! * Timeouts
+//! * An event loop to run futures
+//!
+//! More functionality is likely to be added over time, but otherwise the crate
+//! is intended to be flexible, with the `PollEvented` type accepting any
+//! type that implements `mio::Evented`. For example, the `tokio-uds` crate
+//! uses `PollEvented` to provide support for Unix domain sockets.
+//!
+//! Some other important tasks covered by this crate are:
+//!
+//! * The ability to spawn futures into an event loop. The `Handle` and `Remote`
+//!   types have a `spawn` method which allows executing a future on an event
+//!   loop. The `Handle::spawn` method crucially does not require the future
+//!   itself to be `Send`.
+//!
+//! * The `Io` trait serves as an abstraction for future crates to build on top
+//!   of. This packages up `Read` and `Write` functionality as well as the
+//!   ability to poll for readiness on both ends.
+//!
+//! * All I/O is futures-aware. If any action in this crate returns "not ready"
+//!   or "would block", then the current future task is scheduled to receive a
+//!   notification when it would otherwise make progress.
+//!
+//! You can find more extensive documentation in terms of tutorials at
+//! [https://tokio.rs](https://tokio.rs).
+//!
+//! # Examples
+//!
+//! A simple TCP echo server:
+//!
+//! ```no_run
+//! extern crate futures;
+//! extern crate tokio_core;
+//!
+//! use futures::{Future, Stream};
+//! use tokio_core::io::{copy, Io};
+//! use tokio_core::net::TcpListener;
+//! use tokio_core::reactor::Core;
+//!
+//! fn main() {
+//!     // Create the event loop that will drive this server
+//!     let mut core = Core::new().unwrap();
+//!     let handle = core.handle();
+//!
+//!     // Bind the server's socket
+//!     let addr = "127.0.0.1:12345".parse().unwrap();
+//!     let listener = TcpListener::bind(&addr, &handle).unwrap();
+//!
+//!     // Pull out a stream of sockets for incoming connections
+//!     let server = listener.incoming().for_each(|(sock, _)| {
+//!         // Split up the reading and writing parts of the
+//!         // socket
+//!         let (reader, writer) = sock.split();
+//!
+//!         // A future that echos the data and returns how
+//!         // many bytes were copied...
+//!         let bytes_copied = copy(reader, writer);
+//!
+//!         // ... after which we'll print what happened
+//!         let handle_conn = bytes_copied.map(|amt| {
+//!             println!("wrote {} bytes", amt)
+//!         }).map_err(|err| {
+//!             println!("IO error {:?}", err)
+//!         });
+//!
+//!         // Spawn the future as a concurrent task
+//!         handle.spawn(handle_conn);
+//!
+//!         Ok(())
+//!     });
+//!
+//!     // Spin up the server on the event loop
+//!     core.run(server).unwrap();
+//! }
+//! ```
+
+#![doc(html_root_url = "https://docs.rs/tokio-core/0.1")]
+#![deny(missing_docs)]
+
+extern crate bytes;
+#[macro_use]
+extern crate futures;
+extern crate iovec;
+extern crate mio;
+extern crate slab;
+extern crate tokio_io;
+
+#[macro_use]
+extern crate scoped_tls;
+
+#[macro_use]
+extern crate log;
+
+// Declared with `#[macro_use]` so the `try_nb!` macro is visible to the
+// rest of the crate.
+#[macro_use]
+pub mod io;
+
+mod heap;
+#[doc(hidden)]
+pub mod channel;
+pub mod net;
+pub mod reactor;
+
+use std::io as sio;
+
+// Crate-internal helper for constructing the `WouldBlock` error used by the
+// split read/write halves when the shared lock is unavailable.
+fn would_block() -> sio::Error {
+    sio::Error::new(sio::ErrorKind::WouldBlock, "would block")
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/net/mod.rs
@@ -0,0 +1,11 @@
+//! TCP/UDP bindings for `tokio-core`
+//!
+//! This module contains the TCP/UDP networking types, similar to the standard
+//! library, which can be used to implement networking protocols.
+
+mod tcp;
+mod udp;
+
+// Flatten the TCP and UDP types into `tokio_core::net` for users.
+pub use self::tcp::{TcpStream, TcpStreamNew};
+pub use self::tcp::{TcpListener, Incoming};
+pub use self::udp::{UdpSocket, UdpCodec, UdpFramed, SendDgram, RecvDgram};
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/net/tcp.rs
@@ -0,0 +1,664 @@
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::mem;
+use std::net::{self, SocketAddr, Shutdown};
+
+use bytes::{Buf, BufMut};
+use futures::stream::Stream;
+use futures::sync::oneshot;
+use futures::{Future, Poll, Async};
+use iovec::IoVec;
+use mio;
+use tokio_io::{AsyncRead, AsyncWrite};
+
+use reactor::{Handle, PollEvented};
+
+/// An I/O object representing a TCP socket listening for incoming connections.
+///
+/// This object can be converted into a stream of incoming connections for
+/// various forms of processing.
+pub struct TcpListener {
+    io: PollEvented<mio::tcp::TcpListener>,
+    pending_accept: Option<oneshot::Receiver<io::Result<(TcpStream, SocketAddr)>>>,
+}
+
+/// Stream returned by the `TcpListener::incoming` function representing the
+/// stream of sockets received from a listener.
+pub struct Incoming {
+    inner: TcpListener,
+}
+
+impl TcpListener {
+    /// Create a new TCP listener associated with this event loop.
+    ///
+    /// The TCP listener will bind to the provided `addr` address, if available.
+    /// If the result is `Ok`, the socket has successfully bound.
+    pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result<TcpListener> {
+        let l = try!(mio::tcp::TcpListener::bind(addr));
+        TcpListener::new(l, handle)
+    }
+
+    /// Attempt to accept a connection and create a new connected `TcpStream` if
+    /// successful.
+    ///
+    /// This function will attempt an accept operation, but will not block
+    /// waiting for it to complete. If the operation would block then a "would
+    /// block" error is returned. Additionally, if this method would block, it
+    /// registers the current task to receive a notification when it would
+    /// otherwise not block.
+    ///
+    /// Note that typically for simple usage it's easier to treat incoming
+    /// connections as a `Stream` of `TcpStream`s with the `incoming` method
+    /// below.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if it is called outside the context of a
+    /// future's task. It's recommended to only call this from the
+    /// implementation of a `Future::poll`, if necessary.
+    pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> {
+        loop {
+            if let Some(mut pending) = self.pending_accept.take() {
+                match pending.poll().expect("shouldn't be canceled") {
+                    Async::NotReady => {
+                        self.pending_accept = Some(pending);
+                        return Err(::would_block())
+                    },
+                    Async::Ready(r) => return r,
+                }
+            }
+
+            if let Async::NotReady = self.io.poll_read() {
+                return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready"))
+            }
+
+            match self.io.get_ref().accept() {
+                Err(e) => {
+                    if e.kind() == io::ErrorKind::WouldBlock {
+                        self.io.need_read();
+                    }
+                    return Err(e)
+                },
+                Ok((sock, addr)) => {
+                    // Fast path if we haven't left the event loop
+                    if let Some(handle) = self.io.remote().handle() {
+                        let io = try!(PollEvented::new(sock, &handle));
+                        return Ok((TcpStream { io: io }, addr))
+                    }
+
+                    // If we're off the event loop then send the socket back
+                    // over there to get registered and then we'll get it back
+                    // eventually.
+                    let (tx, rx) = oneshot::channel();
+                    let remote = self.io.remote().clone();
+                    remote.spawn(move |handle| {
+                        let res = PollEvented::new(sock, handle)
+                            .map(move |io| {
+                                (TcpStream { io: io }, addr)
+                            });
+                        drop(tx.send(res));
+                        Ok(())
+                    });
+                    self.pending_accept = Some(rx);
+                    // continue to polling the `rx` at the beginning of the loop
+                }
+            }
+        }
+    }
+
+    /// Create a new TCP listener from the standard library's TCP listener.
+    ///
+    /// This method can be used when the `Handle::tcp_listen` method isn't
+    /// sufficient because perhaps some more configuration is needed in terms of
+    /// before the calls to `bind` and `listen`.
+    ///
+    /// This API is typically paired with the `net2` crate and the `TcpBuilder`
+    /// type to build up and customize a listener before it's shipped off to the
+    /// backing event loop. This allows configuration of options like
+    /// `SO_REUSEPORT`, binding to multiple addresses, etc.
+    ///
+    /// The `addr` argument here is one of the addresses that `listener` is
+    /// bound to and the listener will only be guaranteed to accept connections
+    /// of the same address type currently.
+    ///
+    /// Finally, the `handle` argument is the event loop that this listener will
+    /// be bound to.
+    ///
+    /// The platform specific behavior of this function looks like:
+    ///
+    /// * On Unix, the socket is placed into nonblocking mode and connections
+    ///   can be accepted as normal
+    ///
+    /// * On Windows, the address is stored internally and all future accepts
+    ///   will only be for the same IP version as `addr` specified. That is, if
+    ///   `addr` is an IPv4 address then all sockets accepted will be IPv4 as
+    ///   well (same for IPv6).
+    pub fn from_listener(listener: net::TcpListener,
+                         addr: &SocketAddr,
+                         handle: &Handle) -> io::Result<TcpListener> {
+        let l = try!(mio::tcp::TcpListener::from_listener(listener, addr));
+        TcpListener::new(l, handle)
+    }
+
+    fn new(listener: mio::tcp::TcpListener, handle: &Handle)
+           -> io::Result<TcpListener> {
+        let io = try!(PollEvented::new(listener, handle));
+        Ok(TcpListener { io: io, pending_accept: None })
+    }
+
+    /// Test whether this socket is ready to be read or not.
+    pub fn poll_read(&self) -> Async<()> {
+        self.io.poll_read()
+    }
+
+    /// Returns the local address that this listener is bound to.
+    ///
+    /// This can be useful, for example, when binding to port 0 to figure out
+    /// which port was actually bound.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        self.io.get_ref().local_addr()
+    }
+
+    /// Consumes this listener, returning a stream of the sockets this listener
+    /// accepts.
+    ///
+    /// This method returns an implementation of the `Stream` trait which
+    /// resolves to the sockets the are accepted on this listener.
+    pub fn incoming(self) -> Incoming {
+        Incoming { inner: self }
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.io.get_ref().set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.io.get_ref().ttl()
+    }
+
+    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+    ///
+    /// If this is set to `true` then the socket is restricted to sending and
+    /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications
+    /// can bind the same port at the same time.
+    ///
+    /// If this is set to `false` then the socket can be used to send and
+    /// receive packets from an IPv4-mapped IPv6 address.
+    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+        self.io.get_ref().set_only_v6(only_v6)
+    }
+
+    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+    ///
+    /// For more information about this option, see [`set_only_v6`][link].
+    ///
+    /// [link]: #method.set_only_v6
+    pub fn only_v6(&self) -> io::Result<bool> {
+        self.io.get_ref().only_v6()
+    }
+}
+
+impl fmt::Debug for TcpListener {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.io.get_ref().fmt(f)
+    }
+}
+
+impl Stream for Incoming {
+    type Item = (TcpStream, SocketAddr);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Option<Self::Item>, io::Error> {
+        Ok(Async::Ready(Some(try_nb!(self.inner.accept()))))
+    }
+}
+
+/// An I/O object representing a TCP stream connected to a remote endpoint.
+///
+/// A TCP stream can either be created by connecting to an endpoint or by
+/// accepting a connection from a listener. Inside the stream is access to the
+/// raw underlying I/O object as well as streams for the read/write
+/// notifications on the stream itself.
+pub struct TcpStream {
+    io: PollEvented<mio::tcp::TcpStream>,
+}
+
+/// Future returned by `TcpStream::connect` which will resolve to a `TcpStream`
+/// when the stream is connected.
+pub struct TcpStreamNew {
+    inner: TcpStreamNewState,
+}
+
+enum TcpStreamNewState {
+    Waiting(TcpStream),
+    Error(io::Error),
+    Empty,
+}
+
+impl TcpStream {
+    /// Create a new TCP stream connected to the specified address.
+    ///
+    /// This function will create a new TCP socket and attempt to connect it to
+    /// the `addr` provided. The returned future will be resolved once the
+    /// stream has successfully connected. If an error happens during the
+    /// connection or during the socket creation, that error will be returned to
+    /// the future instead.
+    pub fn connect(addr: &SocketAddr, handle: &Handle) -> TcpStreamNew {
+        let inner = match mio::tcp::TcpStream::connect(addr) {
+            Ok(tcp) => TcpStream::new(tcp, handle),
+            Err(e) => TcpStreamNewState::Error(e),
+        };
+        TcpStreamNew { inner: inner }
+    }
+
+    fn new(connected_stream: mio::tcp::TcpStream, handle: &Handle)
+           -> TcpStreamNewState {
+        match PollEvented::new(connected_stream, handle) {
+            Ok(io) => TcpStreamNewState::Waiting(TcpStream { io: io }),
+            Err(e) => TcpStreamNewState::Error(e),
+        }
+    }
+
+    /// Create a new `TcpStream` from a `net::TcpStream`.
+    ///
+    /// This function will convert a TCP stream in the standard library to a TCP
+    /// stream ready to be used with the provided event loop handle. The object
+    /// returned is associated with the event loop and ready to perform I/O.
+    pub fn from_stream(stream: net::TcpStream, handle: &Handle)
+                       -> io::Result<TcpStream> {
+        let inner = try!(mio::tcp::TcpStream::from_stream(stream));
+        Ok(TcpStream {
+            io: try!(PollEvented::new(inner, handle)),
+        })
+    }
+
+    /// Creates a new `TcpStream` from the pending socket inside the given
+    /// `std::net::TcpStream`, connecting it to the address specified.
+    ///
+    /// This constructor allows configuring the socket before it's actually
+    /// connected, and this function will transfer ownership to the returned
+    /// `TcpStream` if successful. An unconnected `TcpStream` can be created
+    /// with the `net2::TcpBuilder` type (and also configured via that route).
+    ///
+    /// The platform specific behavior of this function looks like:
+    ///
+    /// * On Unix, the socket is placed into nonblocking mode and then a
+    ///   `connect` call is issued.
+    ///
+    /// * On Windows, the address is stored internally and the connect operation
+    ///   is issued when the returned `TcpStream` is registered with an event
+    ///   loop. Note that on Windows you must `bind` a socket before it can be
+    ///   connected, so if a custom `TcpBuilder` is used it should be bound
+    ///   (perhaps to `INADDR_ANY`) before this method is called.
+    pub fn connect_stream(stream: net::TcpStream,
+                          addr: &SocketAddr,
+                          handle: &Handle)
+                          -> Box<Future<Item=TcpStream, Error=io::Error> + Send> {
+        let state = match mio::tcp::TcpStream::connect_stream(stream, addr) {
+            Ok(tcp) => TcpStream::new(tcp, handle),
+            Err(e) => TcpStreamNewState::Error(e),
+        };
+        state.boxed()
+    }
+
+    /// Test whether this socket is ready to be read or not.
+    ///
+    /// If the socket is *not* readable then the current task is scheduled to
+    /// get a notification when the socket does become readable. That is, this
+    /// is only suitable for calling in a `Future::poll` method and will
+    /// automatically handle ensuring a retry once the socket is readable again.
+    pub fn poll_read(&self) -> Async<()> {
+        self.io.poll_read()
+    }
+
+    /// Test whether this socket is ready to be written to or not.
+    ///
+    /// If the socket is *not* writable then the current task is scheduled to
+    /// get a notification when the socket does become writable. That is, this
+    /// is only suitable for calling in a `Future::poll` method and will
+    /// automatically handle ensuring a retry once the socket is writable again.
+    pub fn poll_write(&self) -> Async<()> {
+        self.io.poll_write()
+    }
+
+    /// Returns the local address that this stream is bound to.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        self.io.get_ref().local_addr()
+    }
+
+    /// Returns the remote address that this stream is connected to.
+    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
+        self.io.get_ref().peer_addr()
+    }
+
+    /// Shuts down the read, write, or both halves of this connection.
+    ///
+    /// This function will cause all pending and future I/O on the specified
+    /// portions to return immediately with an appropriate value (see the
+    /// documentation of `Shutdown`).
+    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+        self.io.get_ref().shutdown(how)
+    }
+
+    /// Sets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// If set, this option disables the Nagle algorithm. This means that
+    /// segments are always sent as soon as possible, even if there is only a
+    /// small amount of data. When not set, data is buffered until there is a
+    /// sufficient amount to send out, thereby avoiding the frequent sending of
+    /// small packets.
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        self.io.get_ref().set_nodelay(nodelay)
+    }
+
+    /// Gets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// For more information about this option, see [`set_nodelay`][link].
+    ///
+    /// [link]: #method.set_nodelay
+    pub fn nodelay(&self) -> io::Result<bool> {
+        self.io.get_ref().nodelay()
+    }
+
+    /// Sets whether keepalive messages are enabled to be sent on this socket.
+    ///
+    /// On Unix, this option will set the `SO_KEEPALIVE` as well as the
+    /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
+    /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
+    ///
+    /// If `None` is specified then keepalive messages are disabled, otherwise
+    /// the number of milliseconds specified will be the time to remain idle
+    /// before sending a TCP keepalive probe.
+    ///
+    /// Some platforms specify this value in seconds, so sub-second millisecond
+    /// specifications may be omitted.
+    pub fn set_keepalive_ms(&self, keepalive: Option<u32>) -> io::Result<()> {
+        self.io.get_ref().set_keepalive_ms(keepalive)
+    }
+
+    /// Returns whether keepalive messages are enabled on this socket, and if so
+    /// the amount of milliseconds between them.
+    ///
+    /// For more information about this option, see [`set_keepalive_ms`][link].
+    ///
+    /// [link]: #method.set_keepalive_ms
+    pub fn keepalive_ms(&self) -> io::Result<Option<u32>> {
+        self.io.get_ref().keepalive_ms()
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.io.get_ref().set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.io.get_ref().ttl()
+    }
+}
+
+impl Read for TcpStream {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.io.read(buf)
+    }
+}
+
+impl Write for TcpStream {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.io.write(buf)
+    }
+    fn flush(&mut self) -> io::Result<()> {
+        self.io.flush()
+    }
+}
+
+impl AsyncRead for TcpStream {
+    unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
+        false
+    }
+
+    fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
+        <&TcpStream>::read_buf(&mut &*self, buf)
+    }
+}
+
+impl AsyncWrite for TcpStream {
+    fn shutdown(&mut self) -> Poll<(), io::Error> {
+        <&TcpStream>::shutdown(&mut &*self)
+    }
+
+    fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
+        <&TcpStream>::write_buf(&mut &*self, buf)
+    }
+}
+
+#[allow(deprecated)]
+impl ::io::Io for TcpStream {
+    fn poll_read(&mut self) -> Async<()> {
+        <TcpStream>::poll_read(self)
+    }
+
+    fn poll_write(&mut self) -> Async<()> {
+        <TcpStream>::poll_write(self)
+    }
+
+    fn read_vec(&mut self, bufs: &mut [&mut IoVec]) -> io::Result<usize> {
+        if let Async::NotReady = <TcpStream>::poll_read(self) {
+            return Err(::would_block())
+        }
+        let r = self.io.get_ref().read_bufs(bufs);
+        if is_wouldblock(&r) {
+            self.io.need_read();
+        }
+        return r
+    }
+
+    fn write_vec(&mut self, bufs: &[&IoVec]) -> io::Result<usize> {
+        if let Async::NotReady = <TcpStream>::poll_write(self) {
+            return Err(::would_block())
+        }
+        let r = self.io.get_ref().write_bufs(bufs);
+        if is_wouldblock(&r) {
+            self.io.need_write();
+        }
+        return r
+    }
+}
+
+fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
+    match *r {
+        Ok(_) => false,
+        Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
+    }
+}
+
+impl<'a> Read for &'a TcpStream {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        (&self.io).read(buf)
+    }
+}
+
+impl<'a> Write for &'a TcpStream {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        (&self.io).write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        (&self.io).flush()
+    }
+}
+
+impl<'a> AsyncRead for &'a TcpStream {
+    unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
+        false
+    }
+
+    fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
+        if let Async::NotReady = <TcpStream>::poll_read(self) {
+            return Ok(Async::NotReady)
+        }
+        let r = unsafe {
+            let mut bufs: [_; 16] = Default::default();
+            let n = buf.bytes_vec_mut(&mut bufs);
+            self.io.get_ref().read_bufs(&mut bufs[..n])
+        };
+
+        match r {
+            Ok(n) => {
+                unsafe { buf.advance_mut(n); }
+                Ok(Async::Ready(n))
+            }
+            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+                self.io.need_read();
+                Ok(Async::NotReady)
+            }
+            Err(e) => Err(e),
+        }
+    }
+}
+
+impl<'a> AsyncWrite for &'a TcpStream {
+    fn shutdown(&mut self) -> Poll<(), io::Error> {
+        Ok(().into())
+    }
+
+    fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
+        if let Async::NotReady = <TcpStream>::poll_write(self) {
+            return Ok(Async::NotReady)
+        }
+        let r = {
+            let mut bufs: [_; 16] = Default::default();
+            let n = buf.bytes_vec(&mut bufs);
+            self.io.get_ref().write_bufs(&bufs[..n])
+        };
+        match r {
+            Ok(n) => {
+                buf.advance(n);
+                Ok(Async::Ready(n))
+            }
+            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
+                self.io.need_write();
+                Ok(Async::NotReady)
+            }
+            Err(e) => Err(e),
+        }
+    }
+}
+
+#[allow(deprecated)]
+impl<'a> ::io::Io for &'a TcpStream {
+    fn poll_read(&mut self) -> Async<()> {
+        <TcpStream>::poll_read(self)
+    }
+
+    fn poll_write(&mut self) -> Async<()> {
+        <TcpStream>::poll_write(self)
+    }
+}
+
+impl fmt::Debug for TcpStream {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.io.get_ref().fmt(f)
+    }
+}
+
+impl Future for TcpStreamNew {
+    type Item = TcpStream;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<TcpStream, io::Error> {
+        self.inner.poll()
+    }
+}
+
+impl Future for TcpStreamNewState {
+    type Item = TcpStream;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<TcpStream, io::Error> {
+        {
+            let stream = match *self {
+                TcpStreamNewState::Waiting(ref s) => s,
+                TcpStreamNewState::Error(_) => {
+                    let e = match mem::replace(self, TcpStreamNewState::Empty) {
+                        TcpStreamNewState::Error(e) => e,
+                        _ => panic!(),
+                    };
+                    return Err(e)
+                }
+                TcpStreamNewState::Empty => panic!("can't poll TCP stream twice"),
+            };
+
+            // Once we've connected, wait for the stream to be writable as
+            // that's when the actual connection has been initiated. Once we're
+            // writable we check for `take_socket_error` to see if the connect
+            // actually hit an error or not.
+            //
+            // If all that succeeded then we ship everything on up.
+            if let Async::NotReady = stream.io.poll_write() {
+                return Ok(Async::NotReady)
+            }
+            if let Some(e) = try!(stream.io.get_ref().take_error()) {
+                return Err(e)
+            }
+        }
+        match mem::replace(self, TcpStreamNewState::Empty) {
+            TcpStreamNewState::Waiting(stream) => Ok(Async::Ready(stream)),
+            _ => panic!(),
+        }
+    }
+}
+
+#[cfg(unix)]
+mod sys {
+    use std::os::unix::prelude::*;
+    use super::{TcpStream, TcpListener};
+
+    impl AsRawFd for TcpStream {
+        fn as_raw_fd(&self) -> RawFd {
+            self.io.get_ref().as_raw_fd()
+        }
+    }
+
+    impl AsRawFd for TcpListener {
+        fn as_raw_fd(&self) -> RawFd {
+            self.io.get_ref().as_raw_fd()
+        }
+    }
+}
+
+#[cfg(windows)]
+mod sys {
+    // TODO: let's land these upstream with mio and then we can add them here.
+    //
+    // use std::os::windows::prelude::*;
+    // use super::{TcpStream, TcpListener};
+    //
+    // impl AsRawHandle for TcpStream {
+    //     fn as_raw_handle(&self) -> RawHandle {
+    //         self.io.get_ref().as_raw_handle()
+    //     }
+    // }
+    //
+    // impl AsRawHandle for TcpListener {
+    //     fn as_raw_handle(&self) -> RawHandle {
+    //         self.listener.io().as_raw_handle()
+    //     }
+    // }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/net/udp/frame.rs
@@ -0,0 +1,159 @@
+use std::io;
+use std::net::{SocketAddr, Ipv4Addr, SocketAddrV4};
+
+use futures::{Async, Poll, Stream, Sink, StartSend, AsyncSink};
+
+use net::UdpSocket;
+
+/// Encoding of frames via buffers.
+///
+/// This trait is used when constructing an instance of `UdpFramed` and provides
+/// the `In` and `Out` types which are decoded and encoded from the socket,
+/// respectively.
+///
+/// Because UDP is a connectionless protocol, the `decode` method receives the
+/// address where data came from and the `encode` method is also responsible for
+/// determining the remote host to which the datagram should be sent
+///
+/// The trait itself is implemented on a type that can track state for decoding
+/// or encoding, which is particularly useful for streaming parsers. In many
+/// cases, though, this type will simply be a unit struct (e.g. `struct
+/// HttpCodec`).
+pub trait UdpCodec {
+    /// The type of decoded frames.
+    type In;
+
+    /// The type of frames to be encoded.
+    type Out;
+
+    /// Attempts to decode a frame from the provided buffer of bytes.
+    ///
+    /// This method is called by `UdpFramed` on a single datagram which has been
+    /// read from a socket. The `buf` argument contains the data that was
+    /// received from the remote address, and `src` is the address the data came
+    /// from. Note that typically this method should require the entire contents
+    /// of `buf` to be valid or otherwise return an error with trailing data.
+    ///
+    /// Finally, if the bytes in the buffer are malformed then an error is
+    /// returned indicating why. This informs `Framed` that the stream is now
+    /// corrupt and should be terminated.
+    fn decode(&mut self, src: &SocketAddr, buf: &[u8]) -> io::Result<Self::In>;
+
+    /// Encodes a frame into the buffer provided.
+    ///
+    /// This method will encode `msg` into the byte buffer provided by `buf`.
+    /// The `buf` provided is an internal buffer of the `Framed` instance and
+    /// will be written out when possible.
+    ///
+    /// The encode method also determines the destination to which the buffer
+    /// should be directed, which will be returned as a `SocketAddr`.
+    fn encode(&mut self, msg: Self::Out, buf: &mut Vec<u8>) -> SocketAddr;
+}
+
+/// A unified `Stream` and `Sink` interface to an underlying `UdpSocket`, using
+/// the `UdpCodec` trait to encode and decode frames.
+///
+/// You can acquire a `UdpFramed` instance by using the `UdpSocket::framed`
+/// adapter.
+pub struct UdpFramed<C> {
+    socket: UdpSocket,
+    codec: C,
+    rd: Vec<u8>,
+    wr: Vec<u8>,
+    out_addr: SocketAddr,
+}
+
+impl<C: UdpCodec> Stream for UdpFramed<C> {
+    type Item = C::In;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Option<C::In>, io::Error> {
+        let (n, addr) = try_nb!(self.socket.recv_from(&mut self.rd));
+        trace!("received {} bytes, decoding", n);
+        let frame = try!(self.codec.decode(&addr, &self.rd[..n]));
+        trace!("frame decoded from buffer");
+        Ok(Async::Ready(Some(frame)))
+    }
+}
+
+impl<C: UdpCodec> Sink for UdpFramed<C> {
+    type SinkItem = C::Out;
+    type SinkError = io::Error;
+
+    fn start_send(&mut self, item: C::Out) -> StartSend<C::Out, io::Error> {
+        if self.wr.len() > 0 {
+            try!(self.poll_complete());
+            if self.wr.len() > 0 {
+                return Ok(AsyncSink::NotReady(item));
+            }
+        }
+
+        self.out_addr = self.codec.encode(item, &mut self.wr);
+        Ok(AsyncSink::Ready)
+    }
+
+    fn poll_complete(&mut self) -> Poll<(), io::Error> {
+        trace!("flushing framed transport");
+
+        if self.wr.is_empty() {
+            return Ok(Async::Ready(()))
+        }
+
+        trace!("writing; remaining={}", self.wr.len());
+        let n = try_nb!(self.socket.send_to(&self.wr, &self.out_addr));
+        trace!("written {}", n);
+        let wrote_all = n == self.wr.len();
+        self.wr.clear();
+        if wrote_all {
+            Ok(Async::Ready(()))
+        } else {
+            Err(io::Error::new(io::ErrorKind::Other,
+                               "failed to write entire datagram to socket"))
+        }
+    }
+
+    fn close(&mut self) -> Poll<(), io::Error> {
+        try_ready!(self.poll_complete());
+        Ok(().into())
+    }
+}
+
+pub fn new<C: UdpCodec>(socket: UdpSocket, codec: C) -> UdpFramed<C> {
+    UdpFramed {
+        socket: socket,
+        codec: codec,
+        out_addr: SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0)),
+        rd: vec![0; 64 * 1024],
+        wr: Vec::with_capacity(8 * 1024),
+    }
+}
+
+impl<C> UdpFramed<C> {
+    /// Returns a reference to the underlying I/O stream wrapped by `Framed`.
+    ///
+    /// Note that care should be taken to not tamper with the underlying stream
+    /// of data coming in as it may corrupt the stream of frames otherwise being
+    /// worked with.
+    pub fn get_ref(&self) -> &UdpSocket {
+        &self.socket
+    }
+
+    /// Returns a mutable reference to the underlying I/O stream wrapped by
+    /// `Framed`.
+    ///
+    /// Note that care should be taken to not tamper with the underlying stream
+    /// of data coming in as it may corrupt the stream of frames otherwise being
+    /// worked with.
+    pub fn get_mut(&mut self) -> &mut UdpSocket {
+        &mut self.socket
+    }
+
+    /// Consumes the `Framed`, returning its underlying I/O stream.
+    ///
+    /// Note that care should be taken to not tamper with the underlying stream
+    /// of data coming in as it may corrupt the stream of frames otherwise being
+    /// worked with.
+    pub fn into_inner(self) -> UdpSocket {
+        self.socket
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/net/udp/mod.rs
@@ -0,0 +1,440 @@
+use std::io;
+use std::mem;
+use std::net::{self, SocketAddr, Ipv4Addr, Ipv6Addr};
+use std::fmt;
+
+use futures::{Async, Future, Poll};
+use mio;
+
+use reactor::{Handle, PollEvented};
+
+/// An I/O object representing a UDP socket.
+pub struct UdpSocket {
+    io: PollEvented<mio::udp::UdpSocket>,
+}
+
+mod frame;
+pub use self::frame::{UdpFramed, UdpCodec};
+
+impl UdpSocket {
+    /// Create a new UDP socket bound to the specified address.
+    ///
+    /// This function will create a new UDP socket and attempt to bind it to the
+    /// `addr` provided. If the result is `Ok`, the socket has successfully bound.
+    pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result<UdpSocket> {
+        let udp = try!(mio::udp::UdpSocket::bind(addr));
+        UdpSocket::new(udp, handle)
+    }
+
+    fn new(socket: mio::udp::UdpSocket, handle: &Handle) -> io::Result<UdpSocket> {
+        let io = try!(PollEvented::new(socket, handle));
+        Ok(UdpSocket { io: io })
+    }
+
+    /// Creates a new `UdpSocket` from the previously bound socket provided.
+    ///
+    /// The socket given will be registered with the event loop that `handle` is
+    /// associated with. This function requires that `socket` has previously
+    /// been bound to an address to work correctly.
+    ///
+    /// This can be used in conjunction with net2's `UdpBuilder` interface to
+    /// configure a socket before it's handed off, such as setting options like
+    /// `reuse_address` or binding to multiple addresses.
+    pub fn from_socket(socket: net::UdpSocket,
+                       handle: &Handle) -> io::Result<UdpSocket> {
+        let udp = try!(mio::udp::UdpSocket::from_socket(socket));
+        UdpSocket::new(udp, handle)
+    }
+
+    /// Provides a `Stream` and `Sink` interface for reading and writing to this
+    /// `UdpSocket` object, using the provided `UdpCodec` to read and write the
+    /// raw data.
+    ///
+    /// Raw UDP sockets work with datagrams, but higher-level code usually
+    /// wants to batch these into meaningful chunks, called "frames". This
+    /// method layers framing on top of this socket by using the `UdpCodec`
+    /// trait to handle encoding and decoding of message frames. Note that
+    /// the incoming and outgoing frame types may be distinct.
+    ///
+    /// This function returns a *single* object that is both `Stream` and
+    /// `Sink`; grouping this into a single object is often useful for layering
+    /// things which require both read and write access to the underlying
+    /// object.
+    ///
+    /// If you want to work more directly with the streams and sink, consider
+    /// calling `split` on the `UdpFramed` returned by this method, which will
+    /// break them into separate objects, allowing them to interact more
+    /// easily.
+    pub fn framed<C: UdpCodec>(self, codec: C) -> UdpFramed<C> {
+        frame::new(self, codec)
+    }
+
+    /// Returns the local address that this stream is bound to.
+    pub fn local_addr(&self) -> io::Result<SocketAddr> {
+        self.io.get_ref().local_addr()
+    }
+
+    /// Test whether this socket is ready to be read or not.
+    ///
+    /// If the socket is *not* readable then the current task is scheduled to
+    /// get a notification when the socket does become readable. That is, this
+    /// is only suitable for calling in a `Future::poll` method and will
+    /// automatically handle ensuring a retry once the socket is readable again.
+    pub fn poll_read(&self) -> Async<()> {
+        self.io.poll_read()
+    }
+
+    /// Test whether this socket is ready to be written to or not.
+    ///
+    /// If the socket is *not* writable then the current task is scheduled to
+    /// get a notification when the socket does become writable. That is, this
+    /// is only suitable for calling in a `Future::poll` method and will
+    /// automatically handle ensuring a retry once the socket is writable again.
+    pub fn poll_write(&self) -> Async<()> {
+        self.io.poll_write()
+    }
+
+    /// Sends data on the socket to the given address. On success, returns the
+    /// number of bytes written.
+    ///
+    /// Address type can be any implementor of `ToSocketAddrs` trait. See its
+    /// documentation for concrete examples.
+    pub fn send_to(&self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
+        if let Async::NotReady = self.io.poll_write() {
+            return Err(::would_block())
+        }
+        match self.io.get_ref().send_to(buf, target) {
+            Ok(Some(n)) => Ok(n),
+            Ok(None) => {
+                self.io.need_write();
+                Err(::would_block())
+            }
+            Err(e) => Err(e),
+        }
+    }
+
+    /// Creates a future that will write the entire contents of the buffer
+    /// `buf` provided as a datagram to this socket.
+    ///
+    /// The returned future will return after data has been written to the
+    /// outbound socket.  The future will resolve to the stream as well as the
+    /// buffer (for reuse if needed).
+    ///
+    /// Any error which happens during writing will cause both the stream and
+    /// the buffer to get destroyed. Note that failure to write the entire
+    /// buffer is considered an error for the purposes of sending a datagram.
+    ///
+    /// The `buf` parameter here only requires the `AsRef<[u8]>` trait, which
+    /// should be broadly applicable to accepting data which can be converted
+    /// to a slice.  The `Window` struct is also available in this crate to
+    /// provide a different window into a slice if necessary.
+    pub fn send_dgram<T>(self, buf: T, addr: SocketAddr) -> SendDgram<T>
+        where T: AsRef<[u8]>,
+    {
+        SendDgram {
+            state: SendState::Writing {
+                sock: self,
+                addr: addr,
+                buf: buf,
+            },
+        }
+    }
+
+    /// Receives data from the socket. On success, returns the number of bytes
+    /// read and the address from whence the data came.
+    pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
+        if let Async::NotReady = self.io.poll_read() {
+            return Err(::would_block())
+        }
+        match self.io.get_ref().recv_from(buf) {
+            Ok(Some(n)) => Ok(n),
+            Ok(None) => {
+                self.io.need_read();
+                Err(::would_block())
+            }
+            Err(e) => Err(e),
+        }
+    }
+
+    /// Creates a future that receive a datagram to be written to the buffer
+    /// provided.
+    ///
+    /// The returned future will return after a datagram has been received on
+    /// this socket. The future will resolve to the socket, the buffer, the
+    /// amount of data read, and the address the data was received from.
+    ///
+    /// An error during reading will cause the socket and buffer to get
+    /// destroyed and the socket will be returned.
+    ///
+    /// The `buf` parameter here only requires the `AsMut<[u8]>` trait, which
+    /// should be broadly applicable to accepting data which can be converted
+    /// to a slice.  The `Window` struct is also available in this crate to
+    /// provide a different window into a slice if necessary.
+    pub fn recv_dgram<T>(self, buf: T) -> RecvDgram<T>
+        where T: AsMut<[u8]>,
+    {
+        RecvDgram {
+            state: RecvState::Reading {
+                sock: self,
+                buf: buf,
+            },
+        }
+    }
+
+    /// Gets the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_broadcast`][link].
+    ///
+    /// [link]: #method.set_broadcast
+    pub fn broadcast(&self) -> io::Result<bool> {
+        self.io.get_ref().broadcast()
+    }
+
+    /// Sets the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// When enabled, this socket is allowed to send packets to a broadcast
+    /// address.
+    pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
+        self.io.get_ref().set_broadcast(on)
+    }
+
+    /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_loop_v4`][link].
+    ///
+    /// [link]: #method.set_multicast_loop_v4
+    pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+        self.io.get_ref().multicast_loop_v4()
+    }
+
+    /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// If enabled, multicast packets will be looped back to the local socket.
+    /// Note that this may not have any effect on IPv6 sockets.
+    pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
+        self.io.get_ref().set_multicast_loop_v4(on)
+    }
+
+    /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_ttl_v4`][link].
+    ///
+    /// [link]: #method.set_multicast_ttl_v4
+    pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+        self.io.get_ref().multicast_ttl_v4()
+    }
+
+    /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// Indicates the time-to-live value of outgoing multicast packets for
+    /// this socket. The default value is 1 which means that multicast packets
+    /// don't leave the local network unless explicitly requested.
+    ///
+    /// Note that this may not have any effect on IPv6 sockets.
+    pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
+        self.io.get_ref().set_multicast_ttl_v4(ttl)
+    }
+
+    /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_loop_v6`][link].
+    ///
+    /// [link]: #method.set_multicast_loop_v6
+    pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+        self.io.get_ref().multicast_loop_v6()
+    }
+
+    /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// Controls whether this socket sees the multicast packets it sends itself.
+    /// Note that this may not have any effect on IPv4 sockets.
+    pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
+        self.io.get_ref().set_multicast_loop_v6(on)
+    }
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.io.get_ref().ttl()
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.io.get_ref().set_ttl(ttl)
+    }
+
+    /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// address of the local interface with which the system should join the
+    /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+    /// interface is chosen by the system.
+    pub fn join_multicast_v4(&self,
+                             multiaddr: &Ipv4Addr,
+                             interface: &Ipv4Addr) -> io::Result<()> {
+        self.io.get_ref().join_multicast_v4(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// index of the interface to join/leave (or 0 to indicate any interface).
+    pub fn join_multicast_v6(&self,
+                             multiaddr: &Ipv6Addr,
+                             interface: u32) -> io::Result<()> {
+        self.io.get_ref().join_multicast_v6(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+    ///
+    /// For more information about this option, see
+    /// [`join_multicast_v4`][link].
+    ///
+    /// [link]: #method.join_multicast_v4
+    pub fn leave_multicast_v4(&self,
+                              multiaddr: &Ipv4Addr,
+                              interface: &Ipv4Addr) -> io::Result<()> {
+        self.io.get_ref().leave_multicast_v4(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+    ///
+    /// For more information about this option, see
+    /// [`join_multicast_v6`][link].
+    ///
+    /// [link]: #method.join_multicast_v6
+    pub fn leave_multicast_v6(&self,
+                              multiaddr: &Ipv6Addr,
+                              interface: u32) -> io::Result<()> {
+        self.io.get_ref().leave_multicast_v6(multiaddr, interface)
+    }
+}
+
+impl fmt::Debug for UdpSocket {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.io.get_ref().fmt(f)
+    }
+}
+
+/// A future used to write the entire contents of some data to a UDP socket.
+///
+/// This is created by the `UdpSocket::send_dgram` method.
+pub struct SendDgram<T> {
+    state: SendState<T>,
+}
+
+enum SendState<T> {
+    Writing {
+        sock: UdpSocket,
+        buf: T,
+        addr: SocketAddr,
+    },
+    Empty,
+}
+
+fn incomplete_write(reason: &str) -> io::Error {
+    io::Error::new(io::ErrorKind::Other, reason)
+}
+
+impl<T> Future for SendDgram<T>
+    where T: AsRef<[u8]>,
+{
+    type Item = (UdpSocket, T);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(UdpSocket, T), io::Error> {
+        match self.state {
+            SendState::Writing { ref sock, ref buf, ref addr } => {
+                let n = try_nb!(sock.send_to(buf.as_ref(), addr));
+                if n != buf.as_ref().len() {
+                    return Err(incomplete_write("failed to send entire message \
+                                                 in datagram"))
+                }
+            }
+            SendState::Empty => panic!("poll a SendDgram after it's done"),
+        }
+
+        match mem::replace(&mut self.state, SendState::Empty) {
+            SendState::Writing { sock, buf, addr: _ } => {
+                Ok(Async::Ready((sock, buf)))
+            }
+            SendState::Empty => panic!(),
+        }
+    }
+}
+
+/// A future used to receive a datagram from a UDP socket.
+///
+/// This is created by the `UdpSocket::recv_dgram` method.
+pub struct RecvDgram<T> {
+    state: RecvState<T>,
+}
+
+enum RecvState<T> {
+    Reading {
+        sock: UdpSocket,
+        buf: T,
+    },
+    Empty,
+}
+
+impl<T> Future for RecvDgram<T>
+    where T: AsMut<[u8]>,
+{
+    type Item = (UdpSocket, T, usize, SocketAddr);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, io::Error> {
+        let (n, addr) = match self.state {
+            RecvState::Reading { ref sock, ref mut buf } => {
+                try_nb!(sock.recv_from(buf.as_mut()))
+            }
+            RecvState::Empty => panic!("poll a RecvDgram after it's done"),
+        };
+
+        match mem::replace(&mut self.state, RecvState::Empty) {
+            RecvState::Reading { sock, buf } => {
+                Ok(Async::Ready((sock, buf, n, addr)))
+            }
+            RecvState::Empty => panic!(),
+        }
+    }
+}
+
+#[cfg(unix)]
+mod sys {
+    use std::os::unix::prelude::*;
+    use super::UdpSocket;
+
+    impl AsRawFd for UdpSocket {
+        fn as_raw_fd(&self) -> RawFd {
+            self.io.get_ref().as_raw_fd()
+        }
+    }
+}
+
+#[cfg(windows)]
+mod sys {
+    // TODO: let's land these upstream with mio and then we can add them here.
+    //
+    // use std::os::windows::prelude::*;
+    // use super::UdpSocket;
+    //
+    // impl AsRawHandle for UdpSocket {
+    //     fn as_raw_handle(&self) -> RawHandle {
+    //         self.io.get_ref().as_raw_handle()
+    //     }
+    // }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/reactor/interval.rs
@@ -0,0 +1,171 @@
+//! Support for creating futures that represent intervals.
+//!
+//! This module contains the `Interval` type which is a stream that will
+//! resolve at fixed intervals in the future
+
+use std::io;
+use std::time::{Duration, Instant};
+
+use futures::{Poll, Async};
+use futures::stream::{Stream};
+
+use reactor::{Remote, Handle};
+use reactor::timeout_token::TimeoutToken;
+
+/// A stream representing notifications at a fixed interval
+///
+/// Intervals are created through the `Interval::new` or
+/// `Interval::new_at` methods indicating when a first notification
+/// should be triggered and when it will be repeated.
+///
+/// Note that timeouts are not intended for high resolution timers, but rather
+/// they will likely fire some granularity after the exact instant that they're
+/// otherwise indicated to fire at.
+pub struct Interval {
+    token: TimeoutToken,
+    next: Instant,
+    interval: Duration,
+    handle: Remote,
+}
+
+impl Interval {
+    /// Creates a new interval which will fire at `dur` time into the future,
+    /// and will repeat every `dur` interval after
+    ///
+    /// This function will return a future that will resolve to the actual
+    /// interval object. The interval object itself is then a stream which will
+    /// be set to fire at the specified intervals
+    pub fn new(dur: Duration, handle: &Handle) -> io::Result<Interval> {
+        Interval::new_at(Instant::now() + dur, dur, handle)
+    }
+
+    /// Creates a new interval which will fire at the time specified by `at`,
+    /// and then will repeat every `dur` interval after
+    ///
+    /// This function will return a future that will resolve to the actual
+    /// timeout object. The timeout object itself is then a future which will be
+    /// set to fire at the specified point in the future.
+    pub fn new_at(at: Instant, dur: Duration, handle: &Handle)
+        -> io::Result<Interval>
+    {
+        Ok(Interval {
+            token: try!(TimeoutToken::new(at, &handle)),
+            next: at,
+            interval: dur,
+            handle: handle.remote().clone(),
+        })
+    }
+}
+
+impl Stream for Interval {
+    type Item = ();
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Option<()>, io::Error> {
+        // TODO: is this fast enough?
+        let now = Instant::now();
+        if self.next <= now {
+            self.next = next_interval(self.next, now, self.interval);
+            self.token.reset_timeout(self.next, &self.handle);
+            Ok(Async::Ready(Some(())))
+        } else {
+            self.token.update_timeout(&self.handle);
+            Ok(Async::NotReady)
+        }
+    }
+}
+
+impl Drop for Interval {
+    fn drop(&mut self) {
+        self.token.cancel_timeout(&self.handle);
+    }
+}
+
+/// Converts Duration object to raw nanoseconds if possible
+///
+/// This is useful to divide intervals.
+///
+/// While technically for large duration it's impossible to represent any
+/// duration as nanoseconds, the largest duration we can represent is about
+/// 427_000 years. Large enough for any interval we would use or calculate in
+/// tokio.
+fn duration_to_nanos(dur: Duration) -> Option<u64> {
+    // Overflow-checked: whole seconds -> nanoseconds, then add the sub-second part.
+    let whole_secs_ns = dur.as_secs().checked_mul(1_000_000_000);
+    whole_secs_ns.and_then(|ns| ns.checked_add(dur.subsec_nanos() as u64))
+}
+
+/// Next tick at or after `now`, fast-forwarding past fully-elapsed intervals.
+fn next_interval(prev: Instant, now: Instant, interval: Duration) -> Instant {
+    let new = prev + interval;
+    if new > now {
+        return new;
+    }
+    let spent_ns = duration_to_nanos(now.duration_since(prev))
+        .expect("interval should be expired");
+    let interval_ns = duration_to_nanos(interval)
+        .expect("interval is less than 427 thousand years");
+    let mult = spent_ns / interval_ns + 1;
+    assert!(mult < (1 << 32),
+        "can't skip more than 4 billion intervals of {:?} (trying to skip {})",
+        interval, mult);
+    prev + interval * (mult as u32)
+}
+
+#[cfg(test)]
+mod test {
+    use std::time::{Instant, Duration};
+    use super::next_interval;
+
+    struct Timeline(Instant);
+
+    impl Timeline {
+        fn new() -> Timeline {
+            Timeline(Instant::now())
+        }
+        fn at(&self, millis: u64) -> Instant {
+            self.0 + Duration::from_millis(millis)
+        }
+        fn at_ns(&self, sec: u64, nanos: u32) -> Instant {
+            self.0 + Duration::new(sec, nanos)
+        }
+    }
+
+    fn dur(millis: u64) -> Duration {
+        Duration::from_millis(millis)
+    }
+
+    #[test]
+    fn norm_next() {
+        let tm = Timeline::new();
+        assert_eq!(next_interval(tm.at(1), tm.at(2), dur(10)), tm.at(11));
+        assert_eq!(next_interval(tm.at(7777), tm.at(7788), dur(100)),
+                                 tm.at(7877));
+        assert_eq!(next_interval(tm.at(1), tm.at(1000), dur(2100)),
+                                 tm.at(2101));
+    }
+
+    #[test]
+    fn fast_forward() {
+        let tm = Timeline::new();
+        assert_eq!(next_interval(tm.at(1), tm.at(1000), dur(10)),
+                                 tm.at(1001));
+        assert_eq!(next_interval(tm.at(7777), tm.at(8888), dur(100)),
+                                 tm.at(8977));
+        assert_eq!(next_interval(tm.at(1), tm.at(10000), dur(2100)),
+                                 tm.at(10501));
+    }
+
+    /// TODO: this test actually should be successful, but since we can't
+    ///       easily multiply a Duration by anything larger than u32 we decided
+    ///       to allow it to fail for now
+    #[test]
+    #[should_panic(expected = "can't skip more than 4 billion intervals")]
+    fn large_skip() {
+        let tm = Timeline::new();
+        assert_eq!(next_interval(
+            tm.at_ns(0, 1), tm.at_ns(25, 0), Duration::new(0, 2)),
+            tm.at_ns(25, 1));
+    }
+
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/reactor/io_token.rs
@@ -0,0 +1,141 @@
+use std::sync::Arc;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::io;
+
+use futures::task;
+use mio::event::Evented;
+
+use reactor::{Message, Remote, Handle, Direction};
+
+/// A token that identifies an active timeout.
+pub struct IoToken {
+    token: usize,
+    // TODO: can we avoid this allocation? It's kind of a bummer...
+    readiness: Arc<AtomicUsize>,
+}
+
+impl IoToken {
+    /// Add a new source to an event loop, returning a future which will resolve
+    /// to the token that can be used to identify this source.
+    ///
+    /// When a new I/O object is created it needs to be communicated to the
+    /// event loop to ensure that it's registered and ready to receive
+    /// notifications. The event loop with then respond back with the I/O object
+    /// and a token which can be used to send more messages to the event loop.
+    ///
+    /// The token returned is then passed in turn to each of the methods below
+    /// to interact with notifications on the I/O object itself.
+    ///
+    /// # Panics
+    ///
+    /// The returned future will panic if the event loop this handle is
+    /// associated with has gone away, or if there is an error communicating
+    /// with the event loop.
+    pub fn new(source: &Evented, handle: &Handle) -> io::Result<IoToken> {
+        match handle.inner.upgrade() {
+            Some(inner) => {
+                let (ready, token) = try!(inner.borrow_mut().add_source(source));
+                Ok(IoToken { token: token, readiness: ready })
+            }
+            None => Err(io::Error::new(io::ErrorKind::Other, "event loop gone")),
+        }
+    }
+
+    /// Consumes the last readiness notification registered for the token
+    /// this source is associated with.
+    ///
+    /// Currently sources receive readiness notifications on an edge-basis. That
+    /// is, once you receive a notification that an object can be read, you
+    /// won't receive any more notifications until all of that data has been
+    /// read.
+    ///
+    /// The event loop will fill in this information and then inform futures
+    /// that they're ready to go with the `schedule` method, and then the `poll`
+    /// method can use this to figure out what happened.
+    ///
+    /// > **Note**: This method should generally not be used directly, but
+    /// >           rather the `ReadinessStream` type should be used instead.
+    // TODO: this should really return a proper newtype/enum, not a usize
+    pub fn take_readiness(&self) -> usize {
+        self.readiness.swap(0, Ordering::SeqCst)
+    }
+
+    /// Schedule the current future task to receive a notification when the
+    /// corresponding I/O object is readable.
+    ///
+    /// Once an I/O object has been registered with the event loop through the
+    /// `add_source` method, this method can be used with the assigned token to
+    /// notify the current future task when the next read notification comes in.
+    ///
+    /// The current task will only receive a notification **once** and to
+    /// receive further notifications it will need to call `schedule_read`
+    /// again.
+    ///
+    /// > **Note**: This method should generally not be used directly, but
+    /// >           rather the `ReadinessStream` type should be used instead.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the event loop this handle is associated
+    /// with has gone away, or if there is an error communicating with the event
+    /// loop.
+    ///
+    /// This function will also panic if there is not a currently running future
+    /// task.
+    pub fn schedule_read(&self, handle: &Remote) {
+        handle.send(Message::Schedule(self.token, task::park(), Direction::Read));
+    }
+
+    /// Schedule the current future task to receive a notification when the
+    /// corresponding I/O object is writable.
+    ///
+    /// Once an I/O object has been registered with the event loop through the
+    /// `add_source` method, this method can be used with the assigned token to
+    /// notify the current future task when the next write notification comes
+    /// in.
+    ///
+    /// The current task will only receive a notification **once** and to
+    /// receive further notifications it will need to call `schedule_write`
+    /// again.
+    ///
+    /// > **Note**: This method should generally not be used directly, but
+    /// >           rather the `ReadinessStream` type should be used instead.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the event loop this handle is associated
+    /// with has gone away, or if there is an error communicating with the event
+    /// loop.
+    ///
+    /// This function will also panic if there is not a currently running future
+    /// task.
+    pub fn schedule_write(&self, handle: &Remote) {
+        handle.send(Message::Schedule(self.token, task::park(), Direction::Write));
+    }
+
+    /// Unregister all information associated with a token on an event loop,
+    /// deallocating all internal resources assigned to the given token.
+    ///
+    /// This method should be called whenever a source of events is being
+    /// destroyed. This will ensure that the event loop can reuse `tok` for
+    /// another I/O object if necessary and also remove it from any poll
+    /// notifications and callbacks.
+    ///
+    /// Note that wake callbacks may still be invoked after this method is
+    /// called as it may take some time for the message to drop a source to
+    /// reach the event loop. Despite this fact, this method will attempt to
+    /// ensure that the callbacks are **not** invoked, so pending scheduled
+    /// callbacks cannot be relied upon to get called.
+    ///
+    /// > **Note**: This method should generally not be used directly, but
+    /// >           rather the `ReadinessStream` type should be used instead.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the event loop this handle is associated
+    /// with has gone away, or if there is an error communicating with the event
+    /// loop.
+    pub fn drop_source(&self, handle: &Remote) {
+        handle.send(Message::DropSource(self.token));
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/reactor/mod.rs
@@ -0,0 +1,782 @@
+//! The core reactor driving all I/O
+//!
+//! This module contains the `Core` type which is the reactor for all I/O
+//! happening in `tokio-core`. This reactor (or event loop) is used to run
+//! futures, schedule tasks, issue I/O requests, etc.
+
+use std::cell::RefCell;
+use std::cmp;
+use std::fmt;
+use std::io::{self, ErrorKind};
+use std::mem;
+use std::rc::{Rc, Weak};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::time::{Instant, Duration};
+
+use futures::{Future, IntoFuture, Async};
+use futures::future;
+use futures::executor::{self, Spawn, Unpark};
+use futures::sync::mpsc;
+use futures::task::Task;
+use mio;
+use mio::event::Evented;
+use slab::Slab;
+
+use heap::{Heap, Slot};
+
+mod io_token;
+mod timeout_token;
+
+mod poll_evented;
+mod timeout;
+mod interval;
+pub use self::poll_evented::PollEvented;
+pub use self::timeout::Timeout;
+pub use self::interval::Interval;
+
+static NEXT_LOOP_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+scoped_thread_local!(static CURRENT_LOOP: Core);
+
+/// An event loop.
+///
+/// The event loop is the main source of blocking in an application which drives
+/// all other I/O events and notifications happening. Each event loop can have
+/// multiple handles pointing to it, each of which can then be used to create
+/// various I/O objects to interact with the event loop in interesting ways.
+// TODO: expand this
+pub struct Core {
+    // Buffer of OS events filled in by each call to `mio::Poll::poll`.
+    events: mio::Events,
+    // Sending half of the message channel; cloned into each `Remote`.
+    tx: mpsc::UnboundedSender<Message>,
+    // Receiving half of the message channel, wrapped for manual polling.
+    rx: RefCell<Spawn<mpsc::UnboundedReceiver<Message>>>,
+    // Keeps the `TOKEN_MESSAGES` registration alive; never touched again.
+    _rx_registration: mio::Registration,
+    // Used to wake the loop up when a message is queued on `tx`.
+    rx_readiness: Arc<MySetReadiness>,
+
+    // Shared mutable state; also weakly referenced by `Handle`s.
+    inner: Rc<RefCell<Inner>>,
+
+    // Used for determining when the future passed to `run` is ready. Once the
+    // registration is passed to `io` above we never touch it again, just keep
+    // it alive.
+    _future_registration: mio::Registration,
+    future_readiness: Arc<MySetReadiness>,
+}
+
+// State shared between a `Core` and the `Handle`s pointing at it.
+struct Inner {
+    // Globally unique ID of this core (see `NEXT_LOOP_ID`).
+    id: usize,
+    // The OS-level poller all I/O objects are registered with.
+    io: mio::Poll,
+
+    // Dispatch slabs for I/O and futures events
+    io_dispatch: Slab<ScheduledIo>,
+    task_dispatch: Slab<ScheduledTask>,
+
+    // Timer wheel keeping track of all timeouts. The `usize` stored in the
+    // timer wheel is an index into the slab below.
+    //
+    // The slab below keeps track of the timeouts themselves as well as the
+    // state of the timeout itself. The `TimeoutToken` type is an index into the
+    // `timeouts` slab.
+    timer_heap: Heap<(Instant, usize)>,
+    timeouts: Slab<(Option<Slot>, TimeoutState)>,
+}
+
+/// A unique ID for a Core
+///
+/// An ID by which different cores may be distinguished. Can be compared and used as an index in
+/// a `HashMap`.
+///
+/// The ID is globally unique and never reused.
+#[derive(Clone,Copy,Eq,PartialEq,Hash,Debug)]
+pub struct CoreId(usize);
+
+/// Handle to an event loop, used to construct I/O objects, send messages, and
+/// otherwise interact indirectly with the event loop itself.
+///
+/// Handles can be cloned, and when cloned they will still refer to the
+/// same underlying event loop.
+#[derive(Clone)]
+pub struct Remote {
+    // ID of the `Core` this remote belongs to, used to detect whether we are
+    // currently running on that core's thread.
+    id: usize,
+    // Channel for sending messages to the core from other threads.
+    tx: mpsc::UnboundedSender<Message>,
+}
+
+/// A non-sendable handle to an event loop, useful for manufacturing instances
+/// of `LoopData`.
+#[derive(Clone)]
+pub struct Handle {
+    remote: Remote,
+    // Weak reference so a lingering `Handle` doesn't keep the loop alive.
+    inner: Weak<RefCell<Inner>>,
+}
+
+// Per-source bookkeeping for a registered I/O object.
+struct ScheduledIo {
+    // Current readiness bits (see `Readiness`), shared via `Arc` with the
+    // I/O object handed back from `Inner::add_source`.
+    readiness: Arc<AtomicUsize>,
+    // Task blocked waiting for read readiness, if any.
+    reader: Option<Task>,
+    // Task blocked waiting for write readiness, if any.
+    writer: Option<Task>,
+}
+
+// Per-task bookkeeping for a future spawned onto the loop.
+struct ScheduledTask {
+    // Keeps the mio registration alive; never touched after `Inner::spawn`.
+    _registration: mio::Registration,
+    // The spawned future; taken out (`None`) while it is being polled.
+    spawn: Option<Spawn<Box<Future<Item=(), Error=()>>>>,
+    // Readiness handle used to queue this task for another poll.
+    wake: Arc<MySetReadiness>,
+}
+
+// Lifecycle of a single timeout entry in `Inner::timeouts`.
+enum TimeoutState {
+    NotFired,
+    Fired,
+    Waiting(Task),
+}
+
+// Which half of an I/O object a task is blocked on.
+enum Direction {
+    Read,
+    Write,
+}
+
+// Requests sent from `Remote` handles to the event loop.
+enum Message {
+    DropSource(usize),
+    Schedule(usize, Task, Direction),
+    UpdateTimeout(usize, Task),
+    ResetTimeout(usize, Instant),
+    CancelTimeout(usize),
+    Run(Box<FnBox>),
+}
+
+// Bit flags stored in `ScheduledIo::readiness`.
+#[repr(usize)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum Readiness {
+    Readable = 1,
+    Writable = 2,
+}
+
+// Tokens 0 and 1 are reserved for the message channel and the `run` future;
+// tokens for user I/O objects and spawned tasks start at `TOKEN_START`.
+const TOKEN_MESSAGES: mio::Token = mio::Token(0);
+const TOKEN_FUTURE: mio::Token = mio::Token(1);
+const TOKEN_START: usize = 2;
+
+impl Core {
+    /// Creates a new event loop, returning any error that happened during the
+    /// creation.
+    pub fn new() -> io::Result<Core> {
+        let io = try!(mio::Poll::new());
+        // Registration used to wake the loop when the future given to `run`
+        // is ready to be polled again.
+        let future_pair = mio::Registration::new2();
+        try!(io.register(&future_pair.0,
+                         TOKEN_FUTURE,
+                         mio::Ready::readable(),
+                         mio::PollOpt::level()));
+        // Message channel paired with a registration so `poll` wakes up
+        // whenever a `Remote` queues a message from another thread.
+        let (tx, rx) = mpsc::unbounded();
+        let channel_pair = mio::Registration::new2();
+        try!(io.register(&channel_pair.0,
+                         TOKEN_MESSAGES,
+                         mio::Ready::readable(),
+                         mio::PollOpt::level()));
+        let rx_readiness = Arc::new(MySetReadiness(channel_pair.1));
+        // Mark the channel readable up front so the first loop turn drains
+        // any messages sent before `run`/`turn` was called.
+        rx_readiness.unpark();
+
+        Ok(Core {
+            events: mio::Events::with_capacity(1024),
+            tx: tx,
+            rx: RefCell::new(executor::spawn(rx)),
+            _rx_registration: channel_pair.0,
+            rx_readiness: rx_readiness,
+
+            _future_registration: future_pair.0,
+            future_readiness: Arc::new(MySetReadiness(future_pair.1)),
+
+            inner: Rc::new(RefCell::new(Inner {
+                id: NEXT_LOOP_ID.fetch_add(1, Ordering::Relaxed),
+                io: io,
+                io_dispatch: Slab::with_capacity(1),
+                task_dispatch: Slab::with_capacity(1),
+                timeouts: Slab::with_capacity(1),
+                timer_heap: Heap::new(),
+            })),
+        })
+    }
+
+    /// Returns a handle to this event loop which cannot be sent across threads
+    /// but can be used as a proxy to the event loop itself.
+    ///
+    /// Handles are cloneable and clones always refer to the same event loop.
+    /// This handle is typically passed into functions that create I/O objects
+    /// to bind them to this event loop.
+    pub fn handle(&self) -> Handle {
+        Handle {
+            remote: self.remote(),
+            inner: Rc::downgrade(&self.inner),
+        }
+    }
+
+    /// Generates a remote handle to this event loop which can be used to spawn
+    /// tasks from other threads into this event loop.
+    pub fn remote(&self) -> Remote {
+        Remote {
+            id: self.inner.borrow().id,
+            tx: self.tx.clone(),
+        }
+    }
+
+    /// Runs a future until completion, driving the event loop while we're
+    /// otherwise waiting for the future to complete.
+    ///
+    /// This function will begin executing the event loop and will finish once
+    /// the provided future is resolved. Note that the future argument here
+    /// crucially does not require the `'static` nor `Send` bounds. As a result
+    /// the future will be "pinned" to not only this thread but also this stack
+    /// frame.
+    ///
+    /// This function will return the value that the future resolves to once
+    /// the future has finished. If the future never resolves then this function
+    /// will never return.
+    ///
+    /// # Panics
+    ///
+    /// This method will **not** catch panics from polling the future `f`. If
+    /// the future panics then it's the responsibility of the caller to catch
+    /// that panic and handle it as appropriate.
+    pub fn run<F>(&mut self, f: F) -> Result<F::Item, F::Error>
+        where F: Future,
+    {
+        let mut task = executor::spawn(f);
+        let ready = self.future_readiness.clone();
+        // Poll once immediately; afterwards only re-poll when `poll` reports
+        // that `TOKEN_FUTURE` fired (i.e. the future was unparked).
+        let mut future_fired = true;
+
+        loop {
+            if future_fired {
+                let res = try!(CURRENT_LOOP.set(self, || {
+                    task.poll_future(ready.clone())
+                }));
+                if let Async::Ready(e) = res {
+                    return Ok(e)
+                }
+            }
+            future_fired = self.poll(None);
+        }
+    }
+
+    /// Performs one iteration of the event loop, blocking on waiting for events
+    /// for at most `max_wait` (forever if `None`).
+    ///
+    /// It only makes sense to call this method if you've previously spawned
+    /// a future onto this event loop.
+    ///
+    /// `loop { lp.turn(None) }` is equivalent to calling `run` with an
+    /// empty future (one that never finishes).
+    pub fn turn(&mut self, max_wait: Option<Duration>) {
+        self.poll(max_wait);
+    }
+
+    // Runs one iteration of the event loop, returning whether the future
+    // driven by `run` (TOKEN_FUTURE) was woken during this iteration.
+    fn poll(&mut self, max_wait: Option<Duration>) -> bool {
+        // Given the `max_wait` variable specified, figure out the actual
+        // timeout that we're going to pass to `poll`. This involves taking a
+        // look at active timers on our heap as well.
+        let start = Instant::now();
+        let timeout = self.inner.borrow_mut().timer_heap.peek().map(|t| {
+            if t.0 < start {
+                Duration::new(0, 0)
+            } else {
+                t.0 - start
+            }
+        });
+        let timeout = match (max_wait, timeout) {
+            (Some(d1), Some(d2)) => Some(cmp::min(d1, d2)),
+            (max_wait, timeout) => max_wait.or(timeout),
+        };
+
+        // Block waiting for an event to happen, peeling out how many events
+        // happened.
+        let amt = match self.inner.borrow_mut().io.poll(&mut self.events, timeout) {
+            Ok(a) => a,
+            Err(ref e) if e.kind() == ErrorKind::Interrupted => return false,
+            Err(e) => panic!("error in poll: {}", e),
+        };
+
+        let after_poll = Instant::now();
+        debug!("loop poll - {:?}", after_poll - start);
+        debug!("loop time - {:?}", after_poll);
+
+        // Process all timeouts that may have just occurred, updating the
+        // current time since
+        self.consume_timeouts(after_poll);
+
+        // Process all the events that came in, dispatching appropriately
+        let mut fired = false;
+        for i in 0..self.events.len() {
+            let event = self.events.get(i).unwrap();
+            let token = event.token();
+            trace!("event {:?} {:?}", event.readiness(), event.token());
+
+            if token == TOKEN_MESSAGES {
+                // Clear readiness first so messages sent during processing
+                // re-arm the level-triggered registration.
+                self.rx_readiness.0.set_readiness(mio::Ready::empty()).unwrap();
+                CURRENT_LOOP.set(&self, || self.consume_queue());
+            } else if token == TOKEN_FUTURE {
+                self.future_readiness.0.set_readiness(mio::Ready::empty()).unwrap();
+                fired = true;
+            } else {
+                self.dispatch(token, event.readiness());
+            }
+        }
+        debug!("loop process - {} events, {:?}", amt, after_poll.elapsed());
+        return fired
+    }
+
+    // I/O sources get even tokens (see `Inner::add_source`) and spawned tasks
+    // get odd tokens (see `Inner::spawn`); the parity picks the slab.
+    fn dispatch(&mut self, token: mio::Token, ready: mio::Ready) {
+        let token = usize::from(token) - TOKEN_START;
+        if token % 2 == 0 {
+            self.dispatch_io(token / 2, ready)
+        } else {
+            self.dispatch_task(token / 2)
+        }
+    }
+
+    // Records readiness bits for an I/O source and wakes any blocked tasks.
+    fn dispatch_io(&mut self, token: usize, ready: mio::Ready) {
+        let mut reader = None;
+        let mut writer = None;
+        let mut inner = self.inner.borrow_mut();
+        if let Some(io) = inner.io_dispatch.get_mut(token) {
+            if ready.is_readable() || platform::is_hup(&ready) {
+                reader = io.reader.take();
+                io.readiness.fetch_or(Readiness::Readable as usize,
+                    Ordering::Relaxed);
+            }
+            if ready.is_writable() {
+                writer = io.writer.take();
+                io.readiness.fetch_or(Readiness::Writable as usize,
+                    Ordering::Relaxed);
+            }
+        }
+        // Release the borrow before notifying: waking a task may re-enter the
+        // reactor and borrow `inner` again.
+        drop(inner);
+        // TODO: don't notify the same task twice
+        if let Some(reader) = reader {
+            self.notify_handle(reader);
+        }
+        if let Some(writer) = writer {
+            self.notify_handle(writer);
+        }
+    }
+
+    // Polls a spawned task, restoring it to the slab if it's not done yet.
+    fn dispatch_task(&mut self, token: usize) {
+        let mut inner = self.inner.borrow_mut();
+        // Take the future out of its slot while polling it; the slot stays
+        // occupied so the token remains valid.
+        let (task, wake) = match inner.task_dispatch.get_mut(token) {
+            Some(slot) => (slot.spawn.take(), slot.wake.clone()),
+            None => return,
+        };
+        wake.0.set_readiness(mio::Ready::empty()).unwrap();
+        let mut task = match task {
+            Some(task) => task,
+            None => return,
+        };
+        // Poll without holding the borrow — the future may use the reactor.
+        drop(inner);
+        let res = CURRENT_LOOP.set(self, || task.poll_future(wake));
+        // Declared before re-borrowing `inner` so a finished task is dropped
+        // only after the borrow below is released (dropping a future may
+        // itself re-enter the reactor).
+        let _task_to_drop;
+        inner = self.inner.borrow_mut();
+        match res {
+            Ok(Async::NotReady) => {
+                assert!(inner.task_dispatch[token].spawn.is_none());
+                inner.task_dispatch[token].spawn = Some(task);
+            }
+            Ok(Async::Ready(())) |
+            Err(()) => {
+                _task_to_drop = inner.task_dispatch.remove(token).unwrap();
+            }
+        }
+        drop(inner);
+    }
+
+    // Fires every timeout with a deadline at or before `now`, waking the task
+    // blocked on each one (outside the `inner` borrow).
+    fn consume_timeouts(&mut self, now: Instant) {
+        loop {
+            let mut inner = self.inner.borrow_mut();
+            match inner.timer_heap.peek() {
+                Some(head) if head.0 <= now => {}
+                Some(_) => break,
+                None => break,
+            };
+            let (_, slab_idx) = inner.timer_heap.pop().unwrap();
+
+            trace!("firing timeout: {}", slab_idx);
+            inner.timeouts[slab_idx].0.take().unwrap();
+            let handle = inner.timeouts[slab_idx].1.fire();
+            drop(inner);
+            if let Some(handle) = handle {
+                self.notify_handle(handle);
+            }
+        }
+    }
+
+    /// Method used to notify a task handle.
+    ///
+    /// Note that this should be used instead of `handle.unpark()` to ensure
+    /// that the `CURRENT_LOOP` variable is set appropriately.
+    fn notify_handle(&self, handle: Task) {
+        debug!("notifying a task handle");
+        CURRENT_LOOP.set(&self, || handle.unpark());
+    }
+
+    // Drains and processes all messages currently queued by `Remote` handles.
+    fn consume_queue(&self) {
+        debug!("consuming notification queue");
+        // TODO: can we do better than `.unwrap()` here?
+        let unpark = self.rx_readiness.clone();
+        loop {
+            let msg = self.rx.borrow_mut().poll_stream(unpark.clone()).unwrap();
+            match msg {
+                Async::Ready(Some(msg)) => self.notify(msg),
+                Async::NotReady |
+                Async::Ready(None) => break,
+            }
+        }
+    }
+
+    // Processes a single message sent from a `Remote`, waking any task that
+    // became ready as a result.
+    fn notify(&self, msg: Message) {
+        match msg {
+            Message::DropSource(tok) => self.inner.borrow_mut().drop_source(tok),
+            Message::Schedule(tok, wake, dir) => {
+                let task = self.inner.borrow_mut().schedule(tok, wake, dir);
+                if let Some(task) = task {
+                    self.notify_handle(task);
+                }
+            }
+            Message::UpdateTimeout(t, handle) => {
+                let task = self.inner.borrow_mut().update_timeout(t, handle);
+                if let Some(task) = task {
+                    self.notify_handle(task);
+                }
+            }
+            Message::ResetTimeout(t, at) => {
+                self.inner.borrow_mut().reset_timeout(t, at);
+            }
+            Message::CancelTimeout(t) => {
+                self.inner.borrow_mut().cancel_timeout(t)
+            }
+            Message::Run(r) => r.call_box(self),
+        }
+    }
+
+    /// Get the ID of this loop
+    pub fn id(&self) -> CoreId {
+        CoreId(self.inner.borrow().id)
+    }
+}
+
+impl fmt::Debug for Core {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // Only the loop's ID is meaningful to show; the rest is internal state.
+        let mut builder = f.debug_struct("Core");
+        builder.field("id", &self.id());
+        builder.finish()
+    }
+}
+
+impl Inner {
+    // Registers a new I/O source with the poller, assigning it an even token
+    // (`TOKEN_START + index * 2`). Returns the shared readiness word and the
+    // slab index used as this source's token elsewhere.
+    fn add_source(&mut self, source: &Evented)
+                  -> io::Result<(Arc<AtomicUsize>, usize)> {
+        debug!("adding a new I/O source");
+        let sched = ScheduledIo {
+            readiness: Arc::new(AtomicUsize::new(0)),
+            reader: None,
+            writer: None,
+        };
+        // Grow the slab by doubling: reserving `len` extra entries when full.
+        if self.io_dispatch.vacant_entry().is_none() {
+            let amt = self.io_dispatch.len();
+            self.io_dispatch.reserve_exact(amt);
+        }
+        let entry = self.io_dispatch.vacant_entry().unwrap();
+        try!(self.io.register(source,
+                              mio::Token(TOKEN_START + entry.index() * 2),
+                              mio::Ready::readable() |
+                                mio::Ready::writable() |
+                                platform::hup(),
+                              mio::PollOpt::edge()));
+        Ok((sched.readiness.clone(), entry.insert(sched).index()))
+    }
+
+    // Removes a source from the poller (the slab entry is dropped separately
+    // via `drop_source`).
+    fn deregister_source(&mut self, source: &Evented) -> io::Result<()> {
+        self.io.deregister(source)
+    }
+
+    // Frees the slab entry for a source; panics if the token is not present.
+    fn drop_source(&mut self, token: usize) {
+        debug!("dropping I/O source: {}", token);
+        self.io_dispatch.remove(token).unwrap();
+    }
+
+    // Blocks `wake` on read/write readiness of `token`. If the relevant
+    // readiness bit is already set, returns the task so the caller can notify
+    // it immediately instead of parking it.
+    fn schedule(&mut self, token: usize, wake: Task, dir: Direction)
+                -> Option<Task> {
+        debug!("scheduling direction for: {}", token);
+        let sched = self.io_dispatch.get_mut(token).unwrap();
+        let (slot, bit) = match dir {
+            Direction::Read => (&mut sched.reader, Readiness::Readable as usize),
+            Direction::Write => (&mut sched.writer, Readiness::Writable as usize),
+        };
+        if sched.readiness.load(Ordering::SeqCst) & bit != 0 {
+            *slot = None;
+            Some(wake)
+        } else {
+            *slot = Some(wake);
+            None
+        }
+    }
+
+    // Registers a new timeout firing at `at`, returning its slab index
+    // (wrapped by `TimeoutToken` elsewhere).
+    fn add_timeout(&mut self, at: Instant) -> usize {
+        // Grow the slab by doubling, as in `add_source`.
+        if self.timeouts.vacant_entry().is_none() {
+            let len = self.timeouts.len();
+            self.timeouts.reserve_exact(len);
+        }
+        let entry = self.timeouts.vacant_entry().unwrap();
+        let slot = self.timer_heap.push((at, entry.index()));
+        let entry = entry.insert((Some(slot), TimeoutState::NotFired));
+        debug!("added a timeout: {}", entry.index());
+        return entry.index();
+    }
+
+    // Parks `handle` on the timeout, or returns it if the timeout has already
+    // fired so the caller can notify it immediately.
+    fn update_timeout(&mut self, token: usize, handle: Task) -> Option<Task> {
+        debug!("updating a timeout: {}", token);
+        self.timeouts[token].1.block(handle)
+    }
+
+    // Re-arms an existing timeout to fire at a new instant.
+    fn reset_timeout(&mut self, token: usize, at: Instant) {
+        let pair = &mut self.timeouts[token];
+        // TODO: avoid remove + push and instead just do one sift of the heap?
+        // In theory we could update it in place and then do the percolation
+        // as necessary
+        if let Some(slot) = pair.0.take() {
+            self.timer_heap.remove(slot);
+        }
+        let slot = self.timer_heap.push((at, token));
+        *pair = (Some(slot), TimeoutState::NotFired);
+        debug!("set a timeout: {}", token);
+    }
+
+    // Removes a timeout entirely, including its heap entry if still pending.
+    fn cancel_timeout(&mut self, token: usize) {
+        debug!("cancel a timeout: {}", token);
+        let pair = self.timeouts.remove(token);
+        if let Some((Some(slot), _state)) = pair {
+            self.timer_heap.remove(slot);
+        }
+    }
+
+    // Spawns a boxed future onto the loop with an odd token
+    // (`TOKEN_START + 2 * index + 1`), and queues it for an initial poll.
+    fn spawn(&mut self, future: Box<Future<Item=(), Error=()>>) {
+        // Grow the slab by doubling, as in `add_source`.
+        if self.task_dispatch.vacant_entry().is_none() {
+            let len = self.task_dispatch.len();
+            self.task_dispatch.reserve_exact(len);
+        }
+        let entry = self.task_dispatch.vacant_entry().unwrap();
+        let token = TOKEN_START + 2 * entry.index() + 1;
+        let pair = mio::Registration::new2();
+        self.io.register(&pair.0,
+                         mio::Token(token),
+                         mio::Ready::readable(),
+                         mio::PollOpt::level())
+            .expect("cannot fail future registration with mio");
+        let unpark = Arc::new(MySetReadiness(pair.1));
+        let entry = entry.insert(ScheduledTask {
+            spawn: Some(executor::spawn(future)),
+            wake: unpark,
+            _registration: pair.0,
+        });
+        // Kick the task so it gets polled for the first time.
+        entry.get().wake.clone().unpark();
+    }
+}
+
+impl Remote {
+    // Delivers a message to the event loop: processed inline when we're
+    // already running on the loop's thread, otherwise queued on the channel.
+    fn send(&self, msg: Message) {
+        self.with_loop(|lp| {
+            match lp {
+                Some(lp) => {
+                    // Need to execute all existing requests first, to ensure
+                    // that our message is processed "in order"
+                    lp.consume_queue();
+                    lp.notify(msg);
+                }
+                None => {
+                    match mpsc::UnboundedSender::send(&self.tx, msg) {
+                        Ok(()) => {}
+
+                        // TODO: this error should punt upwards and we should
+                        //       notify the caller that the message wasn't
+                        //       received. This is tokio-core#17
+                        Err(e) => drop(e),
+                    }
+                }
+            }
+        })
+    }
+
+    // Invokes `f` with `Some(core)` when the currently-running event loop (if
+    // any) is the one this remote points at, and `None` otherwise.
+    fn with_loop<F, R>(&self, f: F) -> R
+        where F: FnOnce(Option<&Core>) -> R
+    {
+        if CURRENT_LOOP.is_set() {
+            CURRENT_LOOP.with(|lp| {
+                let same = lp.inner.borrow().id == self.id;
+                if same {
+                    f(Some(lp))
+                } else {
+                    f(None)
+                }
+            })
+        } else {
+            f(None)
+        }
+    }
+
+    /// Spawns a new future into the event loop this remote is associated with.
+    ///
+    /// This function takes a closure which is executed within the context of
+    /// the I/O loop itself. The future returned by the closure will be
+    /// scheduled on the event loop and run to completion.
+    ///
+    /// Note that while the closure, `F`, requires the `Send` bound as it might
+    /// cross threads, the future `R` does not.
+    pub fn spawn<F, R>(&self, f: F)
+        where F: FnOnce(&Handle) -> R + Send + 'static,
+              R: IntoFuture<Item=(), Error=()>,
+              R::Future: 'static,
+    {
+        self.send(Message::Run(Box::new(|lp: &Core| {
+            let f = f(&lp.handle());
+            lp.inner.borrow_mut().spawn(Box::new(f.into_future()));
+        })));
+    }
+
+    /// Return the ID of the represented Core
+    pub fn id(&self) -> CoreId {
+        CoreId(self.id)
+    }
+
+    /// Attempts to "promote" this remote to a handle, if possible.
+    ///
+    /// This function is intended for structures which typically work through a
+    /// `Remote` but want to optimize runtime when the remote doesn't actually
+    /// leave the thread of the original reactor. This will attempt to return a
+    /// handle if the `Remote` is on the same thread as the event loop and the
+    /// event loop is running.
+    ///
+    /// If this `Remote` has moved to a different thread or if the event loop
+    /// isn't currently running, then `None` may be returned. If you need to
+    /// guarantee access to a `Handle`, then you can call this function and
+    /// fall back to using `spawn` above if it returns `None`.
+    pub fn handle(&self) -> Option<Handle> {
+        if CURRENT_LOOP.is_set() {
+            CURRENT_LOOP.with(|lp| {
+                let same = lp.inner.borrow().id == self.id;
+                if same {
+                    Some(lp.handle())
+                } else {
+                    None
+                }
+            })
+        } else {
+            None
+        }
+    }
+}
+
+impl fmt::Debug for Remote {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // Mirror `Core`'s Debug output: just the loop ID this remote targets.
+        let mut builder = f.debug_struct("Remote");
+        builder.field("id", &self.id());
+        builder.finish()
+    }
+}
+
+impl Handle {
+    /// Returns a reference to the underlying remote handle to the event loop.
+    pub fn remote(&self) -> &Remote {
+        &self.remote
+    }
+
+    /// Spawns a new future on the event loop this handle is associated with.
+    pub fn spawn<F>(&self, f: F)
+        where F: Future<Item=(), Error=()> + 'static,
+    {
+        // If the event loop has already been dropped there's nowhere to run
+        // the future, so silently discard it.
+        if let Some(inner) = self.inner.upgrade() {
+            inner.borrow_mut().spawn(Box::new(f));
+        }
+    }
+
+    /// Spawns a closure on this event loop.
+    ///
+    /// Convenience wrapper around `spawn`: the closure `f` is wrapped in
+    /// `futures::lazy` so it runs on the event loop, and the future it
+    /// returns is then driven to completion on the loop as well.
+    pub fn spawn_fn<F, R>(&self, f: F)
+        where F: FnOnce() -> R + 'static,
+              R: IntoFuture<Item=(), Error=()> + 'static,
+    {
+        let deferred = future::lazy(f);
+        self.spawn(deferred)
+    }
+
+    /// Return the ID of the represented Core
+    pub fn id(&self) -> CoreId {
+        self.remote.id()
+    }
+}
+
+impl fmt::Debug for Handle {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // Mirror `Core`'s Debug output: just the loop ID this handle targets.
+        let mut builder = f.debug_struct("Handle");
+        builder.field("id", &self.id());
+        builder.finish()
+    }
+}
+
+impl TimeoutState {
+    // Parks `handle` to be woken when this timeout fires. If the timeout has
+    // already fired, hands the task straight back so the caller can notify it
+    // immediately.
+    fn block(&mut self, handle: Task) -> Option<Task> {
+        if let TimeoutState::Fired = *self {
+            return Some(handle);
+        }
+        *self = TimeoutState::Waiting(handle);
+        None
+    }
+
+    // Transitions to `Fired`, returning the task that was waiting (if any).
+    // A timeout must not fire twice.
+    fn fire(&mut self) -> Option<Task> {
+        let previous = mem::replace(self, TimeoutState::Fired);
+        match previous {
+            TimeoutState::NotFired => None,
+            TimeoutState::Fired => panic!("fired twice?"),
+            TimeoutState::Waiting(handle) => Some(handle),
+        }
+    }
+}
+
+// Adapter implementing the futures `Unpark` trait in terms of a mio
+// `SetReadiness`: unparking a task marks its registration readable, which
+// wakes `Core::poll` so the task gets polled again.
+struct MySetReadiness(mio::SetReadiness);
+
+impl Unpark for MySetReadiness {
+    fn unpark(&self) {
+        self.0.set_readiness(mio::Ready::readable())
+              .expect("failed to set readiness")
+    }
+}
+
+// Object-safe stand-in allowing a boxed `FnOnce(&Core)` closure to be invoked
+// through `Box<FnBox>` (used by `Message::Run`).
+trait FnBox: Send + 'static {
+    fn call_box(self: Box<Self>, lp: &Core);
+}
+
+impl<F: FnOnce(&Core) + Send + 'static> FnBox for F {
+    fn call_box(self: Box<Self>, lp: &Core) {
+        (*self)(lp)
+    }
+}
+
+#[cfg(unix)]
+mod platform {
+    use mio::Ready;
+    use mio::unix::UnixReady;
+
+    /// Returns whether the given readiness includes a hangup (HUP) event.
+    pub fn is_hup(event: &Ready) -> bool {
+        let unix = UnixReady::from(*event);
+        unix.is_hup()
+    }
+
+    /// Readiness flag used to request hangup notifications at registration.
+    pub fn hup() -> Ready {
+        Ready::from(UnixReady::hup())
+    }
+}
+
+// On Windows there is no hangup readiness to observe, so these are no-ops
+// mirroring the unix module's interface.
+#[cfg(windows)]
+mod platform {
+    use mio::Ready;
+
+    pub fn is_hup(_event: &Ready) -> bool {
+        false
+    }
+
+    pub fn hup() -> Ready {
+        Ready::empty()
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/reactor/poll_evented.rs
@@ -0,0 +1,328 @@
+//! Readiness tracking streams, backing I/O objects.
+//!
+//! This module contains the core type which is used to back all I/O objects
+//! in `tokio-core`. The `PollEvented` type is the implementation detail of
+//! all I/O. Each `PollEvented` manages registration with a reactor,
+//! acquisition of a token, and tracking of the readiness state on the
+//! underlying I/O primitive.
+
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use futures::{Async, Poll};
+use mio::event::Evented;
+use tokio_io::{AsyncRead, AsyncWrite};
+
+use reactor::{Handle, Remote};
+use reactor::Readiness::*;
+use reactor::io_token::IoToken;
+
+/// A concrete implementation of a stream of readiness notifications for I/O
+/// objects that originates from an event loop.
+///
+/// Created by the `PollEvented::new` method, each `PollEvented` is
+/// associated with a specific event loop and source of events that will be
+/// registered with an event loop.
+///
+/// Each readiness stream has a number of methods to test whether the underlying
+/// object is readable or writable. Once the methods return that an object is
+/// readable/writable, then it will continue to do so until the `need_read` or
+/// `need_write` methods are called.
+///
+/// That is, this object is typically wrapped in another form of I/O object.
+/// It's the responsibility of the wrapper to inform the readiness stream when a
+/// "would block" I/O event is seen. The readiness stream will then take care of
+/// any scheduling necessary to get notified when the event is ready again.
+///
+/// You can find more information about creating a custom I/O object [online].
+///
+/// [online]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/#custom-io
+pub struct PollEvented<E> {
+    token: IoToken,
+    handle: Remote,
+    readiness: AtomicUsize,
+    io: E,
+}
+
+impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("PollEvented")
+         .field("io", &self.io)
+         .finish()
+    }
+}
+
+impl<E: Evented> PollEvented<E> {
+    /// Creates a new readiness stream associated with the provided
+    /// `loop_handle` and for the given `source`.
+    ///
+    /// This method returns a future which will resolve to the readiness stream
+    /// when it's ready.
+    pub fn new(io: E, handle: &Handle) -> io::Result<PollEvented<E>> {
+        Ok(PollEvented {
+            token: try!(IoToken::new(&io, handle)),
+            handle: handle.remote().clone(),
+            readiness: AtomicUsize::new(0),
+            io: io,
+        })
+    }
+
+    /// Deregisters this source of events from the reactor core specified.
+    ///
+    /// This method can optionally be called to unregister the underlying I/O
+    /// object with the event loop that the `handle` provided points to.
+    /// Typically this method is not required as this automatically happens when
+    /// `E` is dropped, but for some use cases the `E` object doesn't represent
+    /// an owned reference, so dropping it won't automatically unregister with
+    /// the event loop.
+    ///
+    /// This consumes `self` as it will no longer provide events after the
+    /// method is called, and will likely return an error if this `PollEvented`
+    /// was created on a separate event loop from the `handle` specified.
+    pub fn deregister(self, handle: &Handle) -> io::Result<()> {
+        let inner = match handle.inner.upgrade() {
+            Some(inner) => inner,
+            None => return Ok(()),
+        };
+        let ret = inner.borrow_mut().deregister_source(&self.io);
+        return ret
+    }
+}
+
+impl<E> PollEvented<E> {
+    /// Tests to see if this source is ready to be read from or not.
+    ///
+    /// If this stream is not ready for a read then `NotReady` will be returned
+    /// and the current task will be scheduled to receive a notification when
+    /// the stream is readable again. In other words, this method is only safe
+    /// to call from within the context of a future's task, typically done in a
+    /// `Future::poll` method.
+    pub fn poll_read(&self) -> Async<()> {
+        if self.readiness.load(Ordering::SeqCst) & Readable as usize != 0 {
+            return Async::Ready(())
+        }
+        self.readiness.fetch_or(self.token.take_readiness(), Ordering::SeqCst);
+        if self.readiness.load(Ordering::SeqCst) & Readable as usize != 0 {
+            Async::Ready(())
+        } else {
+            self.token.schedule_read(&self.handle);
+            Async::NotReady
+        }
+    }
+
+    /// Tests to see if this source is ready to be written to or not.
+    ///
+    /// If this stream is not ready for a write then `NotReady` will be returned
+    /// and the current task will be scheduled to receive a notification when
+    /// the stream is writable again. In other words, this method is only safe
+    /// to call from within the context of a future's task, typically done in a
+    /// `Future::poll` method.
+    pub fn poll_write(&self) -> Async<()> {
+        if self.readiness.load(Ordering::SeqCst) & Writable as usize != 0 {
+            return Async::Ready(())
+        }
+        self.readiness.fetch_or(self.token.take_readiness(), Ordering::SeqCst);
+        if self.readiness.load(Ordering::SeqCst) & Writable as usize != 0 {
+            Async::Ready(())
+        } else {
+            self.token.schedule_write(&self.handle);
+            Async::NotReady
+        }
+    }
+
+    /// Indicates to this source of events that the corresponding I/O object is
+    /// no longer readable, but it needs to be.
+    ///
+    /// This function, like `poll_read`, is only safe to call from the context
+    /// of a future's task (typically in a `Future::poll` implementation). It
+    /// informs this readiness stream that the underlying object is no longer
+    /// readable, typically because a "would block" error was seen.
+    ///
+    /// The flag indicating that this stream is readable is unset and the
+    /// current task is scheduled to receive a notification when the stream is
+    /// then again readable.
+    ///
+    /// Note that it is also only valid to call this method if `poll_read`
+    /// previously indicated that the object is readable. That is, this function
+    /// must always be paired with calls to `poll_read` previously.
+    pub fn need_read(&self) {
+        self.readiness.fetch_and(!(Readable as usize), Ordering::SeqCst);
+        self.token.schedule_read(&self.handle)
+    }
+
+    /// Indicates to this source of events that the corresponding I/O object is
+    /// no longer writable, but it needs to be.
+    ///
+    /// This function, like `poll_write`, is only safe to call from the context
+    /// of a future's task (typically in a `Future::poll` implementation). It
+    /// informs this readiness stream that the underlying object is no longer
+    /// writable, typically because a "would block" error was seen.
+    ///
+    /// The flag indicating that this stream is writable is unset and the
+    /// current task is scheduled to receive a notification when the stream is
+    /// then again writable.
+    ///
+    /// Note that it is also only valid to call this method if `poll_write`
+    /// previously indicated that the object is writable. That is, this function
+    /// must always be paired with calls to `poll_write` previously.
+    pub fn need_write(&self) {
+        self.readiness.fetch_and(!(Writable as usize), Ordering::SeqCst);
+        self.token.schedule_write(&self.handle)
+    }
+
+    /// Returns a reference to the event loop handle that this readiness stream
+    /// is associated with.
+    pub fn remote(&self) -> &Remote {
+        &self.handle
+    }
+
+    /// Returns a shared reference to the underlying I/O object this readiness
+    /// stream is wrapping.
+    pub fn get_ref(&self) -> &E {
+        &self.io
+    }
+
+    /// Returns a mutable reference to the underlying I/O object this readiness
+    /// stream is wrapping.
+    pub fn get_mut(&mut self) -> &mut E {
+        &mut self.io
+    }
+}
+
+impl<E: Read> Read for PollEvented<E> {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        if let Async::NotReady = self.poll_read() {
+            return Err(::would_block())
+        }
+        let r = self.get_mut().read(buf);
+        if is_wouldblock(&r) {
+            self.need_read();
+        }
+        return r
+    }
+}
+
+impl<E: Write> Write for PollEvented<E> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        if let Async::NotReady = self.poll_write() {
+            return Err(::would_block())
+        }
+        let r = self.get_mut().write(buf);
+        if is_wouldblock(&r) {
+            self.need_write();
+        }
+        return r
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        if let Async::NotReady = self.poll_write() {
+            return Err(::would_block())
+        }
+        let r = self.get_mut().flush();
+        if is_wouldblock(&r) {
+            self.need_write();
+        }
+        return r
+    }
+}
+
+impl<E: Read> AsyncRead for PollEvented<E> {
+}
+
+impl<E: Write> AsyncWrite for PollEvented<E> {
+    fn shutdown(&mut self) -> Poll<(), io::Error> {
+        Ok(().into())
+    }
+}
+
+#[allow(deprecated)]
+impl<E: Read + Write> ::io::Io for PollEvented<E> {
+    fn poll_read(&mut self) -> Async<()> {
+        <PollEvented<E>>::poll_read(self)
+    }
+
+    fn poll_write(&mut self) -> Async<()> {
+        <PollEvented<E>>::poll_write(self)
+    }
+}
+
+impl<'a, E> Read for &'a PollEvented<E>
+    where &'a E: Read,
+{
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        if let Async::NotReady = self.poll_read() {
+            return Err(::would_block())
+        }
+        let r = self.get_ref().read(buf);
+        if is_wouldblock(&r) {
+            self.need_read();
+        }
+        return r
+    }
+}
+
+impl<'a, E> Write for &'a PollEvented<E>
+    where &'a E: Write,
+{
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        if let Async::NotReady = self.poll_write() {
+            return Err(::would_block())
+        }
+        let r = self.get_ref().write(buf);
+        if is_wouldblock(&r) {
+            self.need_write();
+        }
+        return r
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        if let Async::NotReady = self.poll_write() {
+            return Err(::would_block())
+        }
+        let r = self.get_ref().flush();
+        if is_wouldblock(&r) {
+            self.need_write();
+        }
+        return r
+    }
+}
+
+impl<'a, E> AsyncRead for &'a PollEvented<E>
+    where &'a E: Read,
+{
+}
+
+impl<'a, E> AsyncWrite for &'a PollEvented<E>
+    where &'a E: Write,
+{
+    fn shutdown(&mut self) -> Poll<(), io::Error> {
+        Ok(().into())
+    }
+}
+
+#[allow(deprecated)]
+impl<'a, E> ::io::Io for &'a PollEvented<E>
+    where &'a E: Read + Write,
+{
+    fn poll_read(&mut self) -> Async<()> {
+        <PollEvented<E>>::poll_read(self)
+    }
+
+    fn poll_write(&mut self) -> Async<()> {
+        <PollEvented<E>>::poll_write(self)
+    }
+}
+
+fn is_wouldblock<T>(r: &io::Result<T>) -> bool {
+    match *r {
+        Ok(_) => false,
+        Err(ref e) => e.kind() == io::ErrorKind::WouldBlock,
+    }
+}
+
+impl<E> Drop for PollEvented<E> {
+    fn drop(&mut self) {
+        self.token.drop_source(&self.handle);
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/reactor/timeout.rs
@@ -0,0 +1,71 @@
+//! Support for creating futures that represent timeouts.
+//!
+//! This module contains the `Timeout` type which is a future that will resolve
+//! at a particular point in the future.
+
+use std::io;
+use std::time::{Duration, Instant};
+
+use futures::{Future, Poll, Async};
+
+use reactor::{Remote, Handle};
+use reactor::timeout_token::TimeoutToken;
+
+/// A future representing the notification that a timeout has occurred.
+///
+/// Timeouts are created through the `Timeout::new` or
+/// `Timeout::new_at` methods indicating when a timeout should fire.
+/// Note that timeouts are not intended for high resolution timers, but rather
+/// they will likely fire some granularity after the exact instant that they're
+/// otherwise indicated to fire at.
+pub struct Timeout {
+    token: TimeoutToken,
+    when: Instant,
+    handle: Remote,
+}
+
+impl Timeout {
+    /// Creates a new timeout which will fire at `dur` time into the future.
+    ///
+    /// This function will return a future that will resolve to the actual
+    /// timeout object. The timeout object itself is then a future which will be
+    /// set to fire at the specified point in the future.
+    pub fn new(dur: Duration, handle: &Handle) -> io::Result<Timeout> {
+        Timeout::new_at(Instant::now() + dur, handle)
+    }
+
+    /// Creates a new timeout which will fire at the time specified by `at`.
+    ///
+    /// This function will return a future that will resolve to the actual
+    /// timeout object. The timeout object itself is then a future which will be
+    /// set to fire at the specified point in the future.
+    pub fn new_at(at: Instant, handle: &Handle) -> io::Result<Timeout> {
+        Ok(Timeout {
+            token: try!(TimeoutToken::new(at, &handle)),
+            when: at,
+            handle: handle.remote().clone(),
+        })
+    }
+}
+
+impl Future for Timeout {
+    type Item = ();
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(), io::Error> {
+        // TODO: is this fast enough?
+        let now = Instant::now();
+        if self.when <= now {
+            Ok(Async::Ready(()))
+        } else {
+            self.token.update_timeout(&self.handle);
+            Ok(Async::NotReady)
+        }
+    }
+}
+
+impl Drop for Timeout {
+    fn drop(&mut self) {
+        self.token.cancel_timeout(&self.handle);
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/src/reactor/timeout_token.rs
@@ -0,0 +1,56 @@
+use std::io;
+use std::time::Instant;
+
+use futures::task;
+
+use reactor::{Message, Handle, Remote};
+
+/// A token that identifies an active timeout.
+pub struct TimeoutToken {
+    token: usize,
+}
+
+impl TimeoutToken {
+    /// Adds a new timeout to get fired at the specified instant, notifying the
+    /// specified task.
+    pub fn new(at: Instant, handle: &Handle) -> io::Result<TimeoutToken> {
+        match handle.inner.upgrade() {
+            Some(inner) => {
+                let token = inner.borrow_mut().add_timeout(at);
+                Ok(TimeoutToken { token: token })
+            }
+            None => Err(io::Error::new(io::ErrorKind::Other, "event loop gone")),
+        }
+    }
+
+    /// Updates a previously added timeout to notify a new task instead.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if the timeout specified was not created by this
+    /// loop handle's `add_timeout` method.
+    pub fn update_timeout(&self, handle: &Remote) {
+        handle.send(Message::UpdateTimeout(self.token, task::park()))
+    }
+
+    /// Resets a previously added (or fired) timeout to a new timeout
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if the timeout specified was not created by this
+    /// loop handle's `add_timeout` method.
+    pub fn reset_timeout(&mut self, at: Instant, handle: &Remote) {
+        handle.send(Message::ResetTimeout(self.token, at));
+    }
+
+    /// Cancel a previously added timeout.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if the timeout specified was not created by this
+    /// loop handle's `add_timeout` method.
+    pub fn cancel_timeout(&self, handle: &Remote) {
+        debug!("cancel timeout {}", self.token);
+        handle.send(Message::CancelTimeout(self.token))
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/buffered.rs
@@ -0,0 +1,65 @@
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::net::TcpStream;
+use std::thread;
+use std::io::{Read, Write, BufReader, BufWriter};
+
+use futures::Future;
+use futures::stream::Stream;
+use tokio_io::io::copy;
+use tokio_core::net::TcpListener;
+use tokio_core::reactor::Core;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn echo_server() {
+    const N: usize = 1024;
+    drop(env_logger::init());
+
+    let mut l = t!(Core::new());
+    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let addr = t!(srv.local_addr());
+
+    let msg = "foo bar baz";
+    let t = thread::spawn(move || {
+        let mut s = t!(TcpStream::connect(&addr));
+
+        let t2 = thread::spawn(move || {
+            let mut s = t!(TcpStream::connect(&addr));
+            let mut b = vec![0; msg.len() * N];
+            t!(s.read_exact(&mut b));
+            b
+        });
+
+        let mut expected = Vec::<u8>::new();
+        for _i in 0..N {
+            expected.extend(msg.as_bytes());
+            assert_eq!(t!(s.write(msg.as_bytes())), msg.len());
+        }
+        (expected, t2)
+    });
+
+    let clients = srv.incoming().take(2).map(|e| e.0).collect();
+    let copied = clients.and_then(|clients| {
+        let mut clients = clients.into_iter();
+        let a = BufReader::new(clients.next().unwrap());
+        let b = BufWriter::new(clients.next().unwrap());
+        copy(a, b)
+    });
+
+    let (amt, _, _) = t!(l.run(copied));
+    let (expected, t2) = t.join().unwrap();
+    let actual = t2.join().unwrap();
+
+    assert!(expected == actual);
+    assert_eq!(amt, msg.len() as u64 * 1024);
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/chain.rs
@@ -0,0 +1,51 @@
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::net::TcpStream;
+use std::thread;
+use std::io::{Write, Read};
+
+use futures::Future;
+use futures::stream::Stream;
+use tokio_io::io::read_to_end;
+use tokio_core::net::TcpListener;
+use tokio_core::reactor::Core;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn chain_clients() {
+    let mut l = t!(Core::new());
+    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let addr = t!(srv.local_addr());
+
+    let t = thread::spawn(move || {
+        let mut s1 = TcpStream::connect(&addr).unwrap();
+        s1.write_all(b"foo ").unwrap();
+        let mut s2 = TcpStream::connect(&addr).unwrap();
+        s2.write_all(b"bar ").unwrap();
+        let mut s3 = TcpStream::connect(&addr).unwrap();
+        s3.write_all(b"baz").unwrap();
+    });
+
+    let clients = srv.incoming().map(|e| e.0).take(3);
+    let copied = clients.collect().and_then(|clients| {
+        let mut clients = clients.into_iter();
+        let a = clients.next().unwrap();
+        let b = clients.next().unwrap();
+        let c = clients.next().unwrap();
+
+        read_to_end(a.chain(b).chain(c), Vec::new())
+    });
+
+    let (_, data) = t!(l.run(copied));
+    t.join().unwrap();
+
+    assert_eq!(data, b"foo bar baz");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/echo.rs
@@ -0,0 +1,53 @@
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::io::{Read, Write};
+use std::net::TcpStream;
+use std::thread;
+
+use futures::Future;
+use futures::stream::Stream;
+use tokio_core::net::TcpListener;
+use tokio_core::reactor::Core;
+use tokio_io::AsyncRead;
+use tokio_io::io::copy;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn echo_server() {
+    drop(env_logger::init());
+
+    let mut l = t!(Core::new());
+    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let addr = t!(srv.local_addr());
+
+    let msg = "foo bar baz";
+    let t = thread::spawn(move || {
+        let mut s = TcpStream::connect(&addr).unwrap();
+
+        for _i in 0..1024 {
+            assert_eq!(t!(s.write(msg.as_bytes())), msg.len());
+            let mut buf = [0; 1024];
+            assert_eq!(t!(s.read(&mut buf)), msg.len());
+            assert_eq!(&buf[..msg.len()], msg.as_bytes());
+        }
+    });
+
+    let clients = srv.incoming();
+    let client = clients.into_future().map(|e| e.0.unwrap()).map_err(|e| e.0);
+    let halves = client.map(|s| s.0.split());
+    let copied = halves.and_then(|(a, b)| copy(a, b));
+
+    let (amt, _, _) = t!(l.run(copied));
+    t.join().unwrap();
+
+    assert_eq!(amt, msg.len() as u64 * 1024);
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/interval.rs
@@ -0,0 +1,38 @@
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+
+use std::time::{Instant, Duration};
+
+use futures::stream::{Stream};
+use tokio_core::reactor::{Core, Interval};
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn single() {
+    drop(env_logger::init());
+    let mut l = t!(Core::new());
+    let dur = Duration::from_millis(10);
+    let interval = t!(Interval::new(dur, &l.handle()));
+    let start = Instant::now();
+    t!(l.run(interval.take(1).collect()));
+    assert!(start.elapsed() >= dur);
+}
+
+#[test]
+fn two_times() {
+    drop(env_logger::init());
+    let mut l = t!(Core::new());
+    let dur = Duration::from_millis(10);
+    let interval = t!(Interval::new(dur, &l.handle()));
+    let start = Instant::now();
+    let result = t!(l.run(interval.take(2).collect()));
+    assert!(start.elapsed() >= dur*2);
+    assert_eq!(result, vec![(), ()]);
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/limit.rs
@@ -0,0 +1,45 @@
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::net::TcpStream;
+use std::thread;
+use std::io::{Write, Read};
+
+use futures::Future;
+use futures::stream::Stream;
+use tokio_io::io::read_to_end;
+use tokio_core::net::TcpListener;
+use tokio_core::reactor::Core;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn limit() {
+    let mut l = t!(Core::new());
+    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let addr = t!(srv.local_addr());
+
+    let t = thread::spawn(move || {
+        let mut s1 = TcpStream::connect(&addr).unwrap();
+        s1.write_all(b"foo bar baz").unwrap();
+    });
+
+    let clients = srv.incoming().map(|e| e.0).take(1);
+    let copied = clients.collect().and_then(|clients| {
+        let mut clients = clients.into_iter();
+        let a = clients.next().unwrap();
+
+        read_to_end(a.take(4), Vec::new())
+    });
+
+    let (_, data) = t!(l.run(copied));
+    t.join().unwrap();
+
+    assert_eq!(data, b"foo ");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/line-frames.rs
@@ -0,0 +1,86 @@
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+extern crate bytes;
+
+use std::io;
+use std::net::Shutdown;
+
+use bytes::{BytesMut, BufMut};
+use futures::{Future, Stream, Sink};
+use tokio_core::net::{TcpListener, TcpStream};
+use tokio_core::reactor::Core;
+use tokio_io::codec::{Encoder, Decoder};
+use tokio_io::io::{write_all, read};
+use tokio_io::AsyncRead;
+
+pub struct LineCodec;
+
+impl Decoder for LineCodec {
+    type Item = BytesMut;
+    type Error = io::Error;
+
+    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<BytesMut>, io::Error> {
+        match buf.iter().position(|&b| b == b'\n') {
+            Some(i) => Ok(Some(buf.split_to(i + 1).into())),
+            None => Ok(None),
+        }
+    }
+
+    fn decode_eof(&mut self, buf: &mut BytesMut) -> io::Result<Option<BytesMut>> {
+        if buf.len() == 0 {
+            Ok(None)
+        } else {
+            let amt = buf.len();
+            Ok(Some(buf.split_to(amt)))
+        }
+    }
+}
+
+impl Encoder for LineCodec {
+    type Item = BytesMut;
+    type Error = io::Error;
+
+    fn encode(&mut self, item: BytesMut, into: &mut BytesMut) -> io::Result<()> {
+        into.put(&item[..]);
+        Ok(())
+    }
+}
+
+#[test]
+fn echo() {
+    drop(env_logger::init());
+
+    let mut core = Core::new().unwrap();
+    let handle = core.handle();
+
+    let listener = TcpListener::bind(&"127.0.0.1:0".parse().unwrap(), &handle).unwrap();
+    let addr = listener.local_addr().unwrap();
+    let srv = listener.incoming().for_each(move |(socket, _)| {
+        let (sink, stream) = socket.framed(LineCodec).split();
+        handle.spawn(sink.send_all(stream).map(|_| ()).map_err(|_| ()));
+        Ok(())
+    });
+
+    let handle = core.handle();
+    handle.spawn(srv.map_err(|e| panic!("srv error: {}", e)));
+
+    let client = TcpStream::connect(&addr, &handle);
+    let client = core.run(client).unwrap();
+    let (client, _) = core.run(write_all(client, b"a\n")).unwrap();
+    let (client, buf, amt) = core.run(read(client, vec![0; 1024])).unwrap();
+    assert_eq!(amt, 2);
+    assert_eq!(&buf[..2], b"a\n");
+
+    let (client, _) = core.run(write_all(client, b"\n")).unwrap();
+    let (client, buf, amt) = core.run(read(client, buf)).unwrap();
+    assert_eq!(amt, 1);
+    assert_eq!(&buf[..1], b"\n");
+
+    let (client, _) = core.run(write_all(client, b"b")).unwrap();
+    client.shutdown(Shutdown::Write).unwrap();
+    let (_client, buf, amt) = core.run(read(client, buf)).unwrap();
+    assert_eq!(amt, 1);
+    assert_eq!(&buf[..1], b"b");
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/pipe-hup.rs
@@ -0,0 +1,87 @@
+#![cfg(unix)]
+
+extern crate env_logger;
+extern crate futures;
+extern crate libc;
+extern crate mio;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::fs::File;
+use std::io::{self, Write};
+use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::thread;
+use std::time::Duration;
+
+use mio::unix::{UnixReady, EventedFd};
+use mio::{PollOpt, Ready, Token};
+use mio::event::Evented;
+use tokio_core::reactor::{Core, PollEvented};
+use tokio_io::io::read_to_end;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+struct MyFile(File);
+
+impl MyFile {
+    fn new(file: File) -> MyFile {
+        unsafe {
+            let r = libc::fcntl(file.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK);
+            assert!(r != -1, "fcntl error: {}", io::Error::last_os_error());
+        }
+        MyFile(file)
+    }
+}
+
+impl io::Read for MyFile {
+    fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
+        self.0.read(bytes)
+    }
+}
+
+impl Evented for MyFile {
+    fn register(&self, poll: &mio::Poll, token: Token, interest: Ready, opts: PollOpt)
+                -> io::Result<()> {
+        let hup: Ready = UnixReady::hup().into();
+        EventedFd(&self.0.as_raw_fd()).register(poll, token, interest | hup, opts)
+    }
+    fn reregister(&self, poll: &mio::Poll, token: Token, interest: Ready, opts: PollOpt)
+                  -> io::Result<()> {
+        let hup: Ready = UnixReady::hup().into();
+        EventedFd(&self.0.as_raw_fd()).reregister(poll, token, interest | hup, opts)
+    }
+    fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
+        EventedFd(&self.0.as_raw_fd()).deregister(poll)
+    }
+}
+
+#[test]
+fn hup() {
+    drop(env_logger::init());
+
+    let mut l = t!(Core::new());
+    unsafe {
+        let mut pipes = [0; 2];
+        assert!(libc::pipe(pipes.as_mut_ptr()) != -1,
+                "pipe error: {}", io::Error::last_os_error());
+        let read = File::from_raw_fd(pipes[0]);
+        let mut write = File::from_raw_fd(pipes[1]);
+        let t = thread::spawn(move || {
+            write.write_all(b"Hello!\n").unwrap();
+            write.write_all(b"Good bye!\n").unwrap();
+            thread::sleep(Duration::from_millis(100));
+        });
+
+        let source = PollEvented::new(MyFile::new(read), &l.handle()).unwrap();
+
+        let reader = read_to_end(source, Vec::new());
+        let (_, content) = t!(l.run(reader));
+        assert_eq!(&b"Hello!\nGood bye!\n"[..], &content[..]);
+        t.join().unwrap();
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/spawn.rs
@@ -0,0 +1,147 @@
+extern crate tokio_core;
+extern crate env_logger;
+extern crate futures;
+
+use std::any::Any;
+use std::sync::mpsc;
+use std::thread;
+use std::time::Duration;
+
+use futures::{Future, Poll};
+use futures::future;
+use futures::sync::oneshot;
+use tokio_core::reactor::{Core, Timeout};
+
+#[test]
+fn simple() {
+    drop(env_logger::init());
+    let mut lp = Core::new().unwrap();
+
+    let (tx1, rx1) = oneshot::channel();
+    let (tx2, rx2) = oneshot::channel();
+    lp.handle().spawn(future::lazy(|| {
+        tx1.send(1).unwrap();
+        Ok(())
+    }));
+    lp.remote().spawn(|_| {
+        future::lazy(|| {
+            tx2.send(2).unwrap();
+            Ok(())
+        })
+    });
+
+    assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2));
+}
+
+#[test]
+fn simple_core_poll() {
+    drop(env_logger::init());
+    let mut lp = Core::new().unwrap();
+
+    let (tx, rx) = mpsc::channel();
+    let (tx1, tx2) = (tx.clone(), tx.clone());
+
+    lp.turn(Some(Duration::new(0, 0)));
+    lp.handle().spawn(future::lazy(move || {
+        tx1.send(1).unwrap();
+        Ok(())
+    }));
+    lp.turn(Some(Duration::new(0, 0)));
+    lp.handle().spawn(future::lazy(move || {
+        tx2.send(2).unwrap();
+        Ok(())
+    }));
+    assert_eq!(rx.try_recv().unwrap(), 1);
+    assert!(rx.try_recv().is_err());
+    lp.turn(Some(Duration::new(0, 0)));
+    assert_eq!(rx.try_recv().unwrap(), 2);
+}
+
+#[test]
+fn spawn_in_poll() {
+    drop(env_logger::init());
+    let mut lp = Core::new().unwrap();
+
+    let (tx1, rx1) = oneshot::channel();
+    let (tx2, rx2) = oneshot::channel();
+    let remote = lp.remote();
+    lp.handle().spawn(future::lazy(move || {
+        tx1.send(1).unwrap();
+        remote.spawn(|_| {
+            future::lazy(|| {
+                tx2.send(2).unwrap();
+                Ok(())
+            })
+        });
+        Ok(())
+    }));
+
+    assert_eq!(lp.run(rx1.join(rx2)).unwrap(), (1, 2));
+}
+
+#[test]
+fn drop_timeout_in_spawn() {
+    drop(env_logger::init());
+    let mut lp = Core::new().unwrap();
+
+    let (tx, rx) = oneshot::channel();
+    let remote = lp.remote();
+    thread::spawn(move || {
+        remote.spawn(|handle| {
+            drop(Timeout::new(Duration::new(1, 0), handle));
+            tx.send(()).unwrap();
+            Ok(())
+        });
+    });
+
+    lp.run(rx).unwrap();
+}
+
+#[test]
+fn spawn_in_drop() {
+    drop(env_logger::init());
+    let mut lp = Core::new().unwrap();
+
+    let (tx, rx) = oneshot::channel();
+    let remote = lp.remote();
+
+    struct OnDrop<F: FnMut()>(F);
+
+    impl<F: FnMut()> Drop for OnDrop<F> {
+        fn drop(&mut self) {
+            (self.0)();
+        }
+    }
+
+    struct MyFuture {
+        _data: Box<Any>,
+    }
+
+    impl Future for MyFuture {
+        type Item = ();
+        type Error = ();
+
+        fn poll(&mut self) -> Poll<(), ()> {
+            Ok(().into())
+        }
+    }
+
+    thread::spawn(move || {
+        let mut tx = Some(tx);
+        remote.spawn(|handle| {
+            let handle = handle.clone();
+            MyFuture {
+                _data: Box::new(OnDrop(move || {
+                    let mut tx = tx.take();
+                    handle.spawn_fn(move || {
+                        tx.take().unwrap().send(()).unwrap();
+                        Ok(())
+                    });
+                })),
+
+            }
+        });
+    });
+
+    lp.run(rx).unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/stream-buffered.rs
@@ -0,0 +1,56 @@
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+extern crate tokio_io;
+
+use std::io::{Read, Write};
+use std::net::TcpStream;
+use std::thread;
+
+use futures::Future;
+use futures::stream::Stream;
+use tokio_io::io::copy;
+use tokio_io::AsyncRead;
+use tokio_core::net::TcpListener;
+use tokio_core::reactor::Core;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn echo_server() {
+    drop(env_logger::init());
+
+    let mut l = t!(Core::new());
+    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let addr = t!(srv.local_addr());
+
+    let t = thread::spawn(move || {
+        let mut s1 = t!(TcpStream::connect(&addr));
+        let mut s2 = t!(TcpStream::connect(&addr));
+
+        let msg = b"foo";
+        assert_eq!(t!(s1.write(msg)), msg.len());
+        assert_eq!(t!(s2.write(msg)), msg.len());
+        let mut buf = [0; 1024];
+        assert_eq!(t!(s1.read(&mut buf)), msg.len());
+        assert_eq!(&buf[..msg.len()], msg);
+        assert_eq!(t!(s2.read(&mut buf)), msg.len());
+        assert_eq!(&buf[..msg.len()], msg);
+    });
+
+    let future = srv.incoming()
+                    .map(|s| s.0.split())
+                    .map(|(a, b)| copy(a, b).map(|_| ()))
+                    .buffered(10)
+                    .take(2)
+                    .collect();
+
+    t!(l.run(future));
+
+    t.join().unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/tcp.rs
@@ -0,0 +1,85 @@
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+
+use std::net;
+use std::sync::mpsc::channel;
+use std::thread;
+
+use futures::Future;
+use futures::stream::Stream;
+use tokio_core::reactor::Core;
+use tokio_core::net::{TcpListener, TcpStream};
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn connect() {
+    drop(env_logger::init());
+    let mut l = t!(Core::new());
+    let srv = t!(net::TcpListener::bind("127.0.0.1:0"));
+    let addr = t!(srv.local_addr());
+    let t = thread::spawn(move || {
+        t!(srv.accept()).0
+    });
+
+    let stream = TcpStream::connect(&addr, &l.handle());
+    let mine = t!(l.run(stream));
+    let theirs = t.join().unwrap();
+
+    assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr()));
+    assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr()));
+}
+
+#[test]
+fn accept() {
+    drop(env_logger::init());
+    let mut l = t!(Core::new());
+    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let addr = t!(srv.local_addr());
+
+    let (tx, rx) = channel();
+    let client = srv.incoming().map(move |t| {
+        tx.send(()).unwrap();
+        t.0
+    }).into_future().map_err(|e| e.0);
+    assert!(rx.try_recv().is_err());
+    let t = thread::spawn(move || {
+        net::TcpStream::connect(&addr).unwrap()
+    });
+
+    let (mine, _remaining) = t!(l.run(client));
+    let mine = mine.unwrap();
+    let theirs = t.join().unwrap();
+
+    assert_eq!(t!(mine.local_addr()), t!(theirs.peer_addr()));
+    assert_eq!(t!(theirs.local_addr()), t!(mine.peer_addr()));
+}
+
+#[test]
+fn accept2() {
+    drop(env_logger::init());
+    let mut l = t!(Core::new());
+    let srv = t!(TcpListener::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let addr = t!(srv.local_addr());
+
+    let t = thread::spawn(move || {
+        net::TcpStream::connect(&addr).unwrap()
+    });
+
+    let (tx, rx) = channel();
+    let client = srv.incoming().map(move |t| {
+        tx.send(()).unwrap();
+        t.0
+    }).into_future().map_err(|e| e.0);
+    assert!(rx.try_recv().is_err());
+
+    let (mine, _remaining) = t!(l.run(client));
+    mine.unwrap();
+    t.join().unwrap();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/timeout.rs
@@ -0,0 +1,37 @@
+extern crate env_logger;
+extern crate futures;
+extern crate tokio_core;
+
+use std::time::{Instant, Duration};
+
+use tokio_core::reactor::{Core, Timeout};
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn smoke() {
+    drop(env_logger::init());
+    let mut l = t!(Core::new());
+    let dur = Duration::from_millis(10);
+    let timeout = t!(Timeout::new(dur, &l.handle()));
+    let start = Instant::now();
+    t!(l.run(timeout));
+    assert!(start.elapsed() >= (dur / 2));
+}
+
+#[test]
+fn two() {
+    drop(env_logger::init());
+
+    let mut l = t!(Core::new());
+    let dur = Duration::from_millis(10);
+    let timeout = t!(Timeout::new(dur, &l.handle()));
+    t!(l.run(timeout));
+    let timeout = t!(Timeout::new(dur, &l.handle()));
+    t!(l.run(timeout));
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-core/tests/udp.rs
@@ -0,0 +1,65 @@
+extern crate futures;
+#[macro_use]
+extern crate tokio_core;
+
+use std::io;
+use std::net::SocketAddr;
+
+use futures::{Future, Poll};
+use tokio_core::net::UdpSocket;
+use tokio_core::reactor::Core;
+
+macro_rules! t {
+    ($e:expr) => (match $e {
+        Ok(e) => e,
+        Err(e) => panic!("{} failed with {:?}", stringify!($e), e),
+    })
+}
+
+#[test]
+fn send_messages() {
+    let mut l = t!(Core::new());
+    let a = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let b = t!(UdpSocket::bind(&t!("127.0.0.1:0".parse()), &l.handle()));
+    let a_addr = t!(a.local_addr());
+    let b_addr = t!(b.local_addr());
+
+    let send = SendMessage { socket: a, addr: b_addr };
+    let recv = RecvMessage { socket: b, expected_addr: a_addr };
+    t!(l.run(send.join(recv)));
+}
+
+struct SendMessage {
+    socket: UdpSocket,
+    addr: SocketAddr,
+}
+
+impl Future for SendMessage {
+    type Item = ();
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(), io::Error> {
+        let n = try_nb!(self.socket.send_to(b"1234", &self.addr));
+        assert_eq!(n, 4);
+        Ok(().into())
+    }
+}
+
+struct RecvMessage {
+    socket: UdpSocket,
+    expected_addr: SocketAddr,
+}
+
+impl Future for RecvMessage {
+    type Item = ();
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<(), io::Error> {
+        let mut buf = [0; 32];
+        let (n, addr) = try_nb!(self.socket.recv_from(&mut buf));
+        assert_eq!(n, 4);
+        assert_eq!(&buf[..4], b"1234");
+        assert_eq!(addr, self.expected_addr);
+        Ok(().into())
+    }
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-io/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{".travis.yml":"471c401b386c3fe94c943a0ac3704d4d92ea02fa0b037169a0102b3f40c69f69","Cargo.toml":"ba04305fe1a3dadfbf0717a65b88e9c8ab75a276c35b2579f37213908ecc9ec4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"cce9a15791ab2ad9f67f8e441f5c9bd9a8ac51f6b37d46029a43710175ab8248","src/codec.rs":"f71e713df0055765c3187fed54463d94a33864833cb252286b88c9141bdcbcfe","src/copy.rs":"c7a8a530da6d6ecbb33fa502d1bd97c552e7d874570b3ef47ded52d97d779bc3","src/flush.rs":"b354745517e3679a380699e3b799f896bc818b6baccebb526e1e6c33f04252d6","src/framed.rs":"72d65dad3c132d79c550e76b0d0426c24b95f4d78650a90468da259089c7f1ee","src/framed_read.rs":"165d30cc7d9256fd5705e7490ea709a5c5d6fed2159adaefc37f28a2f1fb9244","src/framed_write.rs":"0d84e58184f565f6b5e8344f3e7a9a561833c68b89d2b42924c090d0a1d658c6","src/io.rs":"90b14300e9849a959d2b09de0bf467d51460822bee80d5054728b8018cf75ffb","src/length_delimited.rs":"16e282101a067350e7e4df0052fbcf58cda6b5665a83e6b7a7941eb2c0135894","src/lib.rs":"8e0dd19afd29c38efebd303e805cadce16b35c876a08f166c8527128c8f26c53","src/lines.rs":"9e3970714f4a6496fbfcfbf07ca24b8f7c7f5203a5778f037736df3bece1640c","src/read.rs":"345d8430416b55c506b213f717c86e531465159ce779bea654c605f5b6508f96","src/read_exact.rs":"79a39e865e271e31f789d94a282b76d58682dcf64b737e55a8d3952191df06bc","src/read_to_end.rs":"54e7696e3427351ec168d7d3dc820169a7acfdafe28106e14f2d18316b104671","src/read_until.rs":"ecc3d7e967fd3b92efa6dce43198d719c7628a01e70facfdfe0d2806d060eabe","src/shutdown.rs":"d708bcd6a54ecb871769c6c7e31db2c4dfdf40c7a76b855e0a45833c68114e77","src/split.rs":"00232b018571ecb8ebf0c5c927e78f3aee573632ec976f27d65c63fe0e50e89b","src/window.rs":"e6fca0e2e8d99c76b4fe3ab9956997c6fd8167005d4dec01cae0c7879203e3c5","src/write_all.rs":"269d4f744ce038a62cc6c49272af6035a98f37b041bda154a40b5144392b7891","tests/async_read.rs":"79aafcd01876c0eaa642950556482e00598e6bc8b55
60a04d2df4b6aafd5a8c4","tests/framed.rs":"705eea7cab52b00ce680da96a5f329ecce1b0ac13dea06504ebed0188d0b97d8","tests/framed_read.rs":"5c3a0f82b31807a3a7994b6c9867d3dbad1b06ead4125cef2eec414b46c1783f","tests/framed_write.rs":"e6c85612726d69f48d95c15d30906e72b5d7a4e2cea9e874c65c38df3cea8691","tests/length_delimited.rs":"fc6a9ba729459bc7962ae8b1ad7c3d15df7f8d8941694d1a4ea965bedb61c1e0"},"package":"b4ab83e7adb5677e42e405fa4ceff75659d93c4d7d7dd22f52fcec59ee9f02af"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-io/.travis.yml
@@ -0,0 +1,24 @@
+language: rust
+
+rust:
+  - stable
+  - beta
+  - nightly
+sudo: false
+before_script:
+  - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
+script:
+  - cargo build
+  - cargo test
+  - cargo test --no-default-features
+  - cargo doc --no-deps
+
+after_success:
+  - travis-cargo --only nightly doc-upload
+env:
+  global:
+    - secure: "aDOBmkUbJR3kY1EDzchDhxdzV2HBt8yUHicNlWDZUh+JOmeV/2ezqPt4bre2fgludm98P7tmTG7GHbtnYtMaU4MTw8EarmEXR3tXqUQdDoWzSsMbUsJVZp3wRUWEF2UUZMv7u+xsvSDrIwF2ux6LcySEN6j2gmlobphmOod5NzhJp8d4ap7yLZ6UW4cnJ3m69HtD4yYa8wy3kGvfYOgcFBoB1HODSu2J9sFCzVrdxe9tm3aBvvl/dR0RAmRXyM7ZNE8Fv6aiISJ91M3EaulN1jzggdYEkN3bU0oxnzHvzrFDt1zmi30uR8jBYJmbBlSKYnhSoQqCKZMS7QEATqMDxGl1/M8QJPnaKg+Hu3w0i5yH5QInLW/8j+myzhMzLM8/IDrppZS4fuEb1XcJ/5m+ip3XjLSrXQzFRioA908NvcOUL5t71Yx1uey2kSccUOsGh3wETRbSWWs5SQPxt4BYP9jd8zpVZIInJRgztLFwqGcTDdDSrVHpKpzVNJSMmdgOG8lNubGjdwyrC8J2EyPlWa+QOyx7CoSoyygm4nV4a/UpGPeNgHHkbj/qrf3dhueypLnlj8nJyBk2Lzug8PVCszyCwfv2wXVJ9OCO40lp01XTvxT0cLzgWinn+TvmRn+Mhyt13u2urLjqfjKjA93v6OGZUqnvDG+2FiwGNP3GS+E="
+
+notifications:
+  email:
+    on_success: never
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-io/Cargo.toml
@@ -0,0 +1,30 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "tokio-io"
+version = "0.1.3"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+description = "Core I/O primitives for asynchronous I/O in Rust.\n"
+homepage = "https://tokio.rs"
+documentation = "https://docs.rs/tokio-io/0.1"
+categories = ["asynchronous"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/tokio-rs/tokio-io"
+[dependencies.log]
+version = "0.3"
+
+[dependencies.bytes]
+version = "0.4"
+
+[dependencies.futures]
+version = "0.1.11"
new file mode 100644
--- /dev/null
+++ b/third_party/rust/tokio-io/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the<