diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000000..00642f8379 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +github: seanmonstar diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index b4647c632e..aa6d7c62fa 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -24,6 +24,8 @@ jobs: - ffi - ffi-header - doc + - check-external-types + - udeps steps: - run: exit 0 @@ -32,7 +34,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable @@ -40,10 +42,11 @@ jobs: components: rustfmt - name: cargo fmt --check - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check + run: | + if ! rustfmt --check --edition 2021 $(git ls-files '*.rs'); then + printf "Please run \`rustfmt --edition 2021 \$(git ls-files '*.rs')\` to fix rustfmt errors.\nSee CONTRIBUTING.md for more details.\n" >&2 + exit 1 + fi test: name: Test ${{ matrix.rust }} on ${{ matrix.os }} @@ -73,7 +76,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@master @@ -81,17 +84,11 @@ jobs: toolchain: ${{ matrix.rust }} - name: Test - uses: actions-rs/cargo@v1 - with: - command: test - args: ${{ matrix.features }} + run: cargo test ${{ matrix.features }} - name: Test all benches if: matrix.benches - uses: actions-rs/cargo@v1 - with: - command: test - args: --benches ${{ matrix.features }} + run: cargo test --benches ${{ matrix.features }} msrv: name: Check MSRV @@ -101,7 +98,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get MSRV from package metadata id: msrv @@ -113,10 +110,7 @@ jobs: toolchain: ${{ steps.msrv.outputs.version }} - name: Check - uses: actions-rs/cargo@v1 - with: - command: check - args: --features full + run: cargo check --features full miri: name: Test with Miri @@ -125,7 +119,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@nightly @@ -142,16 +136,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust - uses: dtolnay/rust-toolchain@nightly + uses: dtolnay/rust-toolchain@stable - name: Install cargo-hack - run: cargo install cargo-hack + uses: taiki-e/install-action@cargo-hack - name: check --feature-powerset - run: cargo hack check --feature-powerset --depth 2 --skip ffi -Z avoid-dev-deps + run: cargo hack --no-dev-deps check --feature-powerset --depth 2 --skip ffi,tracing ffi: name: Test C API (FFI) @@ -159,59 +153,48 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable - name: Build FFI - uses: actions-rs/cargo@v1 env: RUSTFLAGS: --cfg hyper_unstable_ffi - with: - command: rustc - args: --features ffi --crate-type cdylib --release + run: cargo rustc --features ffi --crate-type cdylib --release - name: Make Examples run: cd capi/examples && make - name: Run FFI unit tests - uses: actions-rs/cargo@v1 env: RUSTFLAGS: --cfg hyper_unstable_ffi - with: - command: test - args: --features ffi --lib + run: cargo test --features ffi --lib ffi-header: name: Verify hyper.h is up to date runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: 
actions/checkout@v4 - name: Install Rust - uses: dtolnay/rust-toolchain@nightly + uses: dtolnay/rust-toolchain@stable - name: Install cbindgen - uses: actions-rs/cargo@v1 + uses: taiki-e/cache-cargo-install-action@v1 with: - command: install - args: cbindgen + tool: cbindgen - name: Install cargo-expand - uses: actions-rs/cargo@v1 + uses: taiki-e/cache-cargo-install-action@v1 with: - command: install - args: cargo-expand + tool: cargo-expand - name: Build FFI - uses: actions-rs/cargo@v1 env: RUSTFLAGS: --cfg hyper_unstable_ffi - with: - command: build - args: --features client,http1,http2,ffi + run: cargo build --features client,http1,http2,ffi - name: Ensure that hyper.h is up to date run: ./capi/gen_header.sh --verify @@ -222,13 +205,50 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@nightly - name: cargo doc - uses: actions-rs/cargo@v1 + run: cargo rustdoc --features full,ffi -- --cfg docsrs --cfg hyper_unstable_ffi -D broken-intra-doc-links + + check-external-types: + name: Check exposed types + needs: [style, test] + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: nightly-2023-05-31 # Compatible version for cargo-check-external-types + + - name: Install cargo-check-external-types + uses: taiki-e/cache-cargo-install-action@v1 with: - command: rustdoc - args: --features full,ffi -- --cfg docsrs --cfg hyper_unstable_ffi -D broken-intra-doc-links + tool: cargo-check-external-types@0.1.7 + + - name: check-external-types + run: cargo check-external-types --config .github/workflows/external-types.toml + + udeps: + needs: [style] + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@nightly + + - name: Install cargo-udeps + uses: taiki-e/install-action@cargo-udeps + + - name: Check unused dependencies on default features + run: cargo udeps + + - name: Check unused dependencies on full features + run: cargo udeps --features full diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index d866605750..b4a00eab96 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -15,7 +15,7 @@ jobs: #- end_to_end #- pipeline steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@nightly diff --git a/.github/workflows/external-types.toml b/.github/workflows/external-types.toml new file mode 100644 index 0000000000..23a83c1f02 --- /dev/null +++ b/.github/workflows/external-types.toml @@ -0,0 +1,15 @@ +allowed_external_types = [ + "bytes::buf::buf_impl::Buf", + "bytes::bytes::Bytes", + "http::header", + "http::header::map::HeaderMap", + "http::method::Method", + "http::request::Request", + "http::response::Response", + "http::status::StatusCode", + "http::uri::Uri", + "http::version::Version", + "http_body::Body", + "http_body::frame::Frame", + "http_body::size_hint::SizeHint", +] diff --git a/CHANGELOG.md b/CHANGELOG.md index fcf2e98be7..815bcec794 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,40 @@ +### v1.0.0-rc.4 (2023-07-10) + + +#### Bug Fixes + +* **http1:** + * http1 server graceful shutdown fix (#3261) ([f4b51300](https://github.com/hyperium/hyper/commit/f4b513009d81083081d1c60c1981847bbb17dd5d)) + * send error on Incoming body when connection errors (#3256) 
([52f19259](https://github.com/hyperium/hyper/commit/52f192593fb9ebcf6d3894e0c85cbf710da4decd), closes [#3253](https://github.com/hyperium/hyper/issues/3253)) + * properly end chunked bodies when it was known to be empty (#3254) ([fec64cf0](https://github.com/hyperium/hyper/commit/fec64cf0abdc678e30ca5f1b310c5118b2e01999), closes [#3252](https://github.com/hyperium/hyper/issues/3252)) + + +#### Features + +* **client:** Make clients able to use non-Send executor (#3184) ([d977f209](https://github.com/hyperium/hyper/commit/d977f209bc6068d8f878b22803fc42d90c887fcc), closes [#3017](https://github.com/hyperium/hyper/issues/3017)) +* **rt:** + * replace IO traits with hyper::rt ones (#3230) ([f9f65b7a](https://github.com/hyperium/hyper/commit/f9f65b7aa67fa3ec0267fe015945973726285bc2), closes [#3110](https://github.com/hyperium/hyper/issues/3110)) + * add downcast on `Sleep` trait (#3125) ([d92d3917](https://github.com/hyperium/hyper/commit/d92d3917d950e4c61c37c2170f3ce273d2a0f7d1), closes [#3027](https://github.com/hyperium/hyper/issues/3027)) +* **service:** change Service::call to take &self (#3223) ([d894439e](https://github.com/hyperium/hyper/commit/d894439e009aa75103f6382a7ba98fb17da72f02), closes [#3040](https://github.com/hyperium/hyper/issues/3040)) + + +#### Breaking Changes + +* Any IO transport type provided must not implement `hyper::rt::{Read, Write}` instead of + `tokio::io` traits. You can grab a helper type from `hyper-util` to wrap Tokio types, or implement the traits yourself, + if it's a custom type. + ([f9f65b7a](https://github.com/hyperium/hyper/commit/f9f65b7aa67fa3ec0267fe015945973726285bc2)) +* `client::conn::http2` types now use another generic for an `Executor`. + Code that names `Connection` needs to include the additional generic parameter. + ([d977f209](https://github.com/hyperium/hyper/commit/d977f209bc6068d8f878b22803fc42d90c887fcc)) +* The Service::call function no longer takes a mutable reference to self. + The FnMut trait bound on the service::util::service_fn function and the trait bound + on the impl for the ServiceFn struct were changed from FnMut to Fn. + + ([d894439e](https://github.com/hyperium/hyper/commit/d894439e009aa75103f6382a7ba98fb17da72f02)) + + + ### v1.0.0-rc.3 (2023-02-23) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 56e31dd2bb..ae904d6b50 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -9,3 +9,13 @@ You want to contribute? You're awesome! Don't know where to start? Check the [li - [Submitting a Pull Request](./docs/PULL_REQUESTS.md#submitting-a-pull-request) - [Commit Guidelines](./docs/COMMITS.md) + +## Cargo fmt +`cargo fmt --all` does not work in hyper. Please use the following commands: +```txt +# Mac or Linux +rustfmt --check --edition 2018 $(git ls-files '*.rs') + +# Powershell +Get-ChildItem . -Filter "*.rs" -Recurse | foreach { rustfmt --check --edition 2018 $_.FullName } +``` \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 9cfc62e074..8841c914f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "1.0.0-rc.3" +version = "1.0.0-rc.4" description = "A fast and correct HTTP library." 
readme = "README.md" homepage = "https://hyper.rs" @@ -11,7 +11,7 @@ authors = ["Sean McArthur "] keywords = ["http", "hyper", "hyperium"] categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"] edition = "2018" -rust-version = "1.56" # keep in sync with MSRV.md dev doc +rust-version = "1.63" # keep in sync with MSRV.md dev doc include = [ "Cargo.toml", @@ -21,31 +21,28 @@ include = [ [dependencies] bytes = "1" -futures-core = { version = "0.3", default-features = false } futures-channel = "0.3" futures-util = { version = "0.3", default-features = false } http = "0.2" http-body = "=1.0.0-rc.2" -http-body-util = { version = "=0.1.0-rc.2", optional = true } -httpdate = "1.0" -httparse = "1.8" -h2 = { version = "0.3.9", optional = true } -itoa = "1" -tracing = { version = "0.1", default-features = false, features = ["std"] } pin-project-lite = "0.2.4" tokio = { version = "1", features = ["sync"] } -want = "0.3" # Optional +h2 = { version = "0.3.9", optional = true } +http-body-util = { version = "=0.1.0-rc.3", optional = true } +httparse = { version = "1.8", optional = true } +httpdate = { version = "1.0", optional = true } +itoa = { version = "1", optional = true } libc = { version = "0.2", optional = true } +tracing = { version = "0.1", default-features = false, features = ["std"], optional = true } +want = { version = "0.3", optional = true } [dev-dependencies] futures-util = { version = "0.3", default-features = false, features = ["alloc"] } -http-body-util = "=0.1.0-rc.2" -matches = "0.1" -num_cpus = "1.0" -pretty_env_logger = "0.4" +http-body-util = "=0.1.0-rc.3" +pretty_env_logger = "0.5" spmc = "0.3" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -62,7 +59,6 @@ tokio = { version = "1", features = [ "test-util", ] } tokio-test = "0.4" -tokio-util = { version = "0.7", features = ["codec"] } url = "2.2" [features] @@ -73,22 +69,25 @@ default = [] full = ["client", "http1", "http2", "server"] # HTTP versions -http1 = [] -http2 = ["h2"] +http1 = ["dep:httparse", "dep:itoa"] +http2 = ["dep:h2"] # Client/Server -client = [] -server = [] +client = ["dep:want"] +server = ["dep:httpdate"] # C-API support (currently unstable (no semver)) -ffi = ["libc", "full", "http-body-util"] +ffi = ["dep:libc", "full", "dep:http-body-util"] + +# Utilize tracing (currently unstable) +tracing = ["dep:tracing"] # internal features used in CI nightly = [] [package.metadata.docs.rs] -features = ["ffi", "full"] -rustdoc-args = ["--cfg", "docsrs", "--cfg", "hyper_unstable_ffi"] +features = ["ffi", "full", "tracing"] +rustdoc-args = ["--cfg", "docsrs", "--cfg", "hyper_unstable_ffi", "--cfg", "hyper_unstable_tracing"] [package.metadata.playground] features = ["full"] @@ -121,6 +120,11 @@ name = "gateway" path = "examples/gateway.rs" required-features = ["full"] +[[example]] +name = "graceful_shutdown" +path = "examples/graceful_shutdown.rs" +required-features = ["full"] + [[example]] name = "hello" path = "examples/hello.rs" diff --git a/README.md b/README.md index 79038abca2..f1b1743353 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ A **fast** and **correct** HTTP implementation for Rust. - Extensive production use - Client and Server APIs -**Get started** by looking over the [guides](https://hyper.rs/guides). +**Get started** by looking over the [guides](https://hyper.rs/guides/1/). ## "Low-level" @@ -27,8 +27,8 @@ hyper is a relatively low-level library, meant to be a building block for libraries and applications. 
If you are looking for a convenient HTTP client, then you may wish to consider -[reqwest](https://github.com/seanmonstar/reqwest). If you are looking for a -convenient HTTP server, then you may wish to consider [warp](https://github.com/seanmonstar/warp). +[reqwest](https://github.com/seanmonstar/reqwest). +If you are not sure what HTTP server to choose, then you may want to consider [axum](https://github.com/tokio-rs/axum) or [warp](https://github.com/seanmonstar/warp), the latter taking a more functional approach. Both are built on top of this library. ## Contributing diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs index 89c3caf4e2..3558e5c611 100644 --- a/benches/end_to_end.rs +++ b/benches/end_to_end.rs @@ -4,8 +4,7 @@ extern crate test; mod support; -// TODO: Reimplement Opts::bench using hyper::server::conn and hyper::client::conn -// (instead of Server and HttpClient). +// TODO: Reimplement parallel for HTTP/1 use std::convert::Infallible; use std::net::SocketAddr; @@ -315,7 +314,8 @@ impl Opts { let mut client = rt.block_on(async { if self.http2 { - let io = tokio::net::TcpStream::connect(&addr).await.unwrap(); + let tcp = tokio::net::TcpStream::connect(&addr).await.unwrap(); + let io = support::TokioIo::new(tcp); let (tx, conn) = hyper::client::conn::http2::Builder::new(support::TokioExecutor) .initial_stream_window_size(self.http2_stream_window) .initial_connection_window_size(self.http2_conn_window) @@ -328,7 +328,8 @@ impl Opts { } else if self.parallel_cnt > 1 { todo!("http/1 parallel >1"); } else { - let io = tokio::net::TcpStream::connect(&addr).await.unwrap(); + let tcp = tokio::net::TcpStream::connect(&addr).await.unwrap(); + let io = support::TokioIo::new(tcp); let (tx, conn) = hyper::client::conn::http1::Builder::new() .handshake(io) .await @@ -414,6 +415,7 @@ fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr { let opts = opts.clone(); rt.spawn(async move { while let Ok((sock, _)) = listener.accept().await { + let io = support::TokioIo::new(sock); if opts.http2 { tokio::spawn( hyper::server::conn::http2::Builder::new(support::TokioExecutor) @@ -421,7 +423,7 @@ fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr { .initial_connection_window_size(opts.http2_conn_window) .adaptive_window(opts.http2_adaptive_window) .serve_connection( - sock, + io, service_fn(move |req: Request| async move { let mut req_body = req.into_body(); while let Some(_chunk) = req_body.frame().await {} @@ -433,7 +435,7 @@ fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr { ); } else { tokio::spawn(hyper::server::conn::http1::Builder::new().serve_connection( - sock, + io, service_fn(move |req: Request| async move { let mut req_body = req.into_body(); while let Some(_chunk) = req_body.frame().await {} diff --git a/benches/pipeline.rs b/benches/pipeline.rs index a60100fa51..b79232de9b 100644 --- a/benches/pipeline.rs +++ b/benches/pipeline.rs @@ -3,6 +3,8 @@ extern crate test; +mod support; + use std::convert::Infallible; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpStream}; @@ -40,11 +42,12 @@ fn hello_world_16(b: &mut test::Bencher) { rt.spawn(async move { loop { let (stream, _addr) = listener.accept().await.expect("accept"); + let io = support::TokioIo::new(stream); http1::Builder::new() .pipeline_flush(true) .serve_connection( - stream, + io, service_fn(|_| async { Ok::<_, Infallible>(Response::new(Full::new(Bytes::from( "Hello, World!", diff --git a/benches/server.rs b/benches/server.rs index 
17eefa0694..c5424105a8 100644 --- a/benches/server.rs +++ b/benches/server.rs @@ -3,6 +3,8 @@ extern crate test; +mod support; + use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener, TcpStream}; use std::sync::mpsc; @@ -38,10 +40,11 @@ macro_rules! bench_server { rt.spawn(async move { loop { let (stream, _) = listener.accept().await.expect("accept"); + let io = support::TokioIo::new(stream); http1::Builder::new() .serve_connection( - stream, + io, service_fn(|_| async { Ok::<_, hyper::Error>( Response::builder() diff --git a/benches/support/mod.rs b/benches/support/mod.rs index 48e8048e8b..85cb67fd33 100644 --- a/benches/support/mod.rs +++ b/benches/support/mod.rs @@ -1,2 +1,2 @@ mod tokiort; -pub use tokiort::{TokioExecutor, TokioTimer}; +pub use tokiort::{TokioExecutor, TokioIo, TokioTimer}; diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs index 816f9a6a4e..b6f32ff733 100644 --- a/benches/support/tokiort.rs +++ b/benches/support/tokiort.rs @@ -1,12 +1,12 @@ #![allow(dead_code)] //! Various runtimes for hyper use std::{ + future::Future, pin::Pin, task::{Context, Poll}, time::{Duration, Instant}, }; -use futures_util::Future; use hyper::rt::{Sleep, Timer}; #[derive(Clone)] @@ -36,6 +36,12 @@ impl Timer for TokioTimer { fn sleep_until(&self, deadline: Instant) -> Pin> { Box::pin(tokio::time::sleep_until(deadline.into())) } + + fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { + if let Some(sleep) = sleep.as_mut().downcast_mut_pin::() { + sleep.reset(new_deadline.into()) + } + } } struct TokioTimeout { @@ -52,3 +58,172 @@ where self.inner.as_mut().poll(context) } } + +// Use TokioSleep to get tokio::time::Sleep to implement Unpin. +// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html +pin_project! { + pub(crate) struct TokioSleep { + #[pin] + pub(crate) inner: tokio::time::Sleep, + } +} + +impl Future for TokioSleep { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.project().inner.poll(cx) + } +} + +impl TokioSleep { + pub fn reset(self: Pin<&mut Self>, deadline: Instant) { + self.project().inner.as_mut().reset(deadline.into()); + } +} + +pin_project! 
{ + #[derive(Debug)] + pub struct TokioIo { + #[pin] + inner: T, + } +} + +impl TokioIo { + pub fn new(inner: T) -> Self { + Self { inner } + } + + pub fn inner(self) -> T { + self.inner + } +} + +impl hyper::rt::Read for TokioIo +where + T: tokio::io::AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut buf: hyper::rt::ReadBufCursor<'_>, + ) -> Poll> { + let n = unsafe { + let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut()); + match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) { + Poll::Ready(Ok(())) => tbuf.filled().len(), + other => return other, + } + }; + + unsafe { + buf.advance(n); + } + Poll::Ready(Ok(())) + } +} + +impl hyper::rt::Write for TokioIo +where + T: tokio::io::AsyncWrite, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + tokio::io::AsyncWrite::poll_flush(self.project().inner, cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) + } + + fn is_write_vectored(&self) -> bool { + tokio::io::AsyncWrite::is_write_vectored(&self.inner) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs) + } +} + +impl tokio::io::AsyncRead for TokioIo +where + T: hyper::rt::Read, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + tbuf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + //let init = tbuf.initialized().len(); + let filled = tbuf.filled().len(); + let sub_filled = unsafe { + let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut()); + + match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) { + Poll::Ready(Ok(())) => buf.filled().len(), + other => return other, + } + }; + + let n_filled = filled + sub_filled; + // At least sub_filled bytes had to have been initialized. 
+ let n_init = sub_filled; + unsafe { + tbuf.assume_init(n_init); + tbuf.set_filled(n_filled); + } + + Poll::Ready(Ok(())) + } +} + +impl tokio::io::AsyncWrite for TokioIo +where + T: hyper::rt::Write, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + hyper::rt::Write::poll_write(self.project().inner, cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + hyper::rt::Write::poll_flush(self.project().inner, cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + hyper::rt::Write::poll_shutdown(self.project().inner, cx) + } + + fn is_write_vectored(&self) -> bool { + hyper::rt::Write::is_write_vectored(&self.inner) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs) + } +} diff --git a/capi/examples/.gitignore b/capi/examples/.gitignore new file mode 100644 index 0000000000..d1da6f3056 --- /dev/null +++ b/capi/examples/.gitignore @@ -0,0 +1,4 @@ +/*.o +/server +/client +/upload diff --git a/capi/examples/server.c b/capi/examples/server.c index 39f089f8f8..ad06228fce 100644 --- a/capi/examples/server.c +++ b/capi/examples/server.c @@ -345,7 +345,7 @@ static void server_callback( 8 ); - if (!strcmp((char*)method, "GET")) { + if (!strncmp((char*)method, "GET", method_len)) { // ...add a body hyper_body* body = hyper_body_new(); hyper_body_set_data_func(body, send_each_body_chunk); diff --git a/capi/include/hyper.h b/capi/include/hyper.h index a162d7699f..cc97c84e3c 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -234,11 +234,11 @@ typedef struct hyper_waker hyper_waker; typedef int (*hyper_body_foreach_callback)(void*, const struct hyper_buf*); /* - Many hyper entites can be given userdata to allow user callbacks to corellate work together. - Since much of hyper is asychronous it's often useful to treat these userdata objects as "owned" - by the hyper entity (and hence to be cleaned up when that entity is dropped). + Many hyper entities can be given userdata to allow user callbacks to correlate work together. + Since much of hyper is asynchronous it's often useful to treat these userdata objects as + "owned" by the hyper entity (and hence to be cleaned up when that entity is dropped). - To acheive this a `hyepr_userdata_drop` callback is passed by calling code alongside the + To achieve this a `hyper_userdata_drop` callback is passed by calling code alongside the userdata to register a cleanup function. This function may be provided as NULL if the calling code wants to manage memory lifetimes @@ -287,11 +287,17 @@ const char *hyper_version(void); Create a new "empty" body. If not configured, this body acts as an empty payload. + + To avoid a memory leak, the body must eventually be consumed by + `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`. */ struct hyper_body *hyper_body_new(void); /* - Free a `hyper_body *`. + Free a body. + + This should only be used if the request isn't consumed by + `hyper_body_foreach` or `hyper_request_set_body`. */ void hyper_body_free(struct hyper_body *body); @@ -304,6 +310,10 @@ void hyper_body_free(struct hyper_body *body); - `HYPER_TASK_ERROR`: An error retrieving the data. - `HYPER_TASK_EMPTY`: The body has finished streaming data. 
+ To avoid a memory leak, the task must eventually be consumed by + `hyper_task_free`, or taken ownership of by `hyper_executor_push` + without subsequently being given back by `hyper_executor_poll`. + This does not consume the `hyper_body *`, so it may be used to again. However, it MUST NOT be used or freed until the related task completes. */ @@ -313,6 +323,10 @@ struct hyper_task *hyper_body_data(struct hyper_body *body); Return a task that will poll the body and execute the callback with each body chunk that is received. + To avoid a memory leak, the task must eventually be consumed by + `hyper_task_free`, or taken ownership of by `hyper_executor_push` + without subsequently being given back by `hyper_executor_poll`. + The `hyper_buf` pointer is only a borrowed reference, it cannot live outside the execution of the callback. You must make a copy to retain it. @@ -358,7 +372,10 @@ void hyper_body_set_data_func(struct hyper_body *body, hyper_body_data_callback Create a new `hyper_buf *` by copying the provided bytes. This makes an owned copy of the bytes, so the `buf` argument can be - freed or changed afterwards. + freed (with `hyper_buf_free`) or changed afterwards. + + To avoid a memory leak, the copy must eventually be consumed by + `hyper_buf_free`. This returns `NULL` if allocating a new buffer fails. */ @@ -382,6 +399,8 @@ size_t hyper_buf_len(const struct hyper_buf *buf); /* Free this buffer. + + This should be used for any buffer once it is no longer needed. */ void hyper_buf_free(struct hyper_buf *buf); @@ -390,9 +409,14 @@ void hyper_buf_free(struct hyper_buf *buf); and options. Both the `io` and the `options` are consumed in this function call. + They should not be used or freed afterwards. + + The returned task must be polled with an executor until the handshake + completes, at which point the value can be taken. - The returned `hyper_task *` must be polled with an executor until the - handshake completes, at which point the value can be taken. + To avoid a memory leak, the task must eventually be consumed by + `hyper_task_free`, or taken ownership of by `hyper_executor_push` + without subsequently being given back by `hyper_executor_poll`. */ struct hyper_task *hyper_clientconn_handshake(struct hyper_io *io, struct hyper_clientconn_options *options); @@ -400,18 +424,30 @@ struct hyper_task *hyper_clientconn_handshake(struct hyper_io *io, /* Send a request on the client connection. + This consumes the request. You should not use or free the request + afterwards. + Returns a task that needs to be polled until it is ready. When ready, the task yields a `hyper_response *`. + + To avoid a memory leak, the task must eventually be consumed by + `hyper_task_free`, or taken ownership of by `hyper_executor_push` + without subsequently being given back by `hyper_executor_poll`. */ struct hyper_task *hyper_clientconn_send(struct hyper_clientconn *conn, struct hyper_request *req); /* Free a `hyper_clientconn *`. + + This should be used for any connection once it is no longer needed. */ void hyper_clientconn_free(struct hyper_clientconn *conn); /* Creates a new set of HTTP clientconn options to be used in a handshake. + + To avoid a memory leak, the options must eventually be consumed by + `hyper_clientconn_options_free` or `hyper_clientconn_handshake`. */ struct hyper_clientconn_options *hyper_clientconn_options_new(void); @@ -432,7 +468,10 @@ void hyper_clientconn_options_set_preserve_header_order(struct hyper_clientconn_ int enabled); /* - Free a `hyper_clientconn_options *`. 
+ Free a set of HTTP clientconn options. + + This should only be used if the options aren't consumed by + `hyper_clientconn_handshake`. */ void hyper_clientconn_options_free(struct hyper_clientconn_options *opts); @@ -535,7 +574,7 @@ enum hyper_code hyper_http1_serverconn_options_header_read_timeout(struct hyper_ Setting this to true will force hyper to use queued strategy which may eliminate unnecessary cloning on some TLS backends. - Default is to automatically guess which mode to use, this function overrides the huristic. + Default is to automatically guess which mode to use, this function overrides the heuristic. */ enum hyper_code hyper_http1_serverconn_options_writev(struct hyper_http1_serverconn_options *opts, bool enabled); @@ -614,7 +653,7 @@ enum hyper_code hyper_http2_serverconn_options_max_concurrent_streams(struct hyp /* Sets an interval for HTTP/2 Ping frames should be sent to keep a connection alive. - Default is to not use keepalive pings. Passing `0` will use this default. + Default is to not use keep-alive pings. Passing `0` will use this default. */ enum hyper_code hyper_http2_serverconn_options_keep_alive_interval(struct hyper_http2_serverconn_options *opts, uint64_t interval_seconds); @@ -729,6 +768,8 @@ void hyper_response_channel_send(struct hyper_response_channel *channel, /* Frees a `hyper_error`. + + This should be used for any error once it is no longer needed. */ void hyper_error_free(struct hyper_error *err); @@ -749,11 +790,17 @@ size_t hyper_error_print(const struct hyper_error *err, uint8_t *dst, size_t dst /* Construct a new HTTP request. + + To avoid a memory leak, the request must eventually be consumed by + `hyper_request_free` or `hyper_clientconn_send`. */ struct hyper_request *hyper_request_new(void); /* - Free an HTTP request if not going to send it on a client. + Free an HTTP request. + + This should only be used if the request isn't consumed by + `hyper_clientconn_send`. */ void hyper_request_free(struct hyper_request *req); @@ -817,7 +864,7 @@ enum hyper_code hyper_request_set_uri_parts(struct hyper_request *req, Get the URI of the request split into scheme, authority and path/query strings. Each of `scheme`, `authority` and `path_and_query` may be pointers to buffers that this - function will populate with the appopriate values from the request. If one of these + function will populate with the appropriate values from the request. If one of these pointers is non-NULL then the associated `_len` field must be a pointer to a `size_t` which, on call, is populated with the maximum length of the buffer and, on successful response, will be set to the actual length of the value written into the buffer. @@ -911,7 +958,9 @@ enum hyper_code hyper_request_on_informational(struct hyper_request *req, struct hyper_response *hyper_response_new(void); /* - Free an HTTP response after using it. + Free an HTTP response. + + This should be used for any response once it is no longer needed. */ void hyper_response_free(struct hyper_response *resp); @@ -991,6 +1040,9 @@ enum hyper_code hyper_response_set_body(struct hyper_response *rsp, struct hyper Take ownership of the body of this response. It is safe to free the response even after taking ownership of its body. + + To avoid a memory leak, the body must eventually be consumed by + `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`. 
*/ struct hyper_body *hyper_response_body(struct hyper_response *resp); @@ -1034,14 +1086,17 @@ enum hyper_code hyper_headers_add(struct hyper_headers *headers, The read and write functions of this transport should be set with `hyper_io_set_read` and `hyper_io_set_write`. + + To avoid a memory leak, the IO handle must eventually be consumed by + `hyper_io_free` or `hyper_clientconn_handshake`. */ struct hyper_io *hyper_io_new(void); /* - Free an unused `hyper_io *`. + Free an IO handle. - This is typically only useful if you aren't going to pass ownership - of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. + This should only be used if the request isn't consumed by + `hyper_clientconn_handshake`. */ void hyper_io_free(struct hyper_io *io); @@ -1051,8 +1106,8 @@ void hyper_io_free(struct hyper_io *io); This value is passed as an argument to the read and write callbacks. If passed, the `drop_func` will be called on the `userdata` when the - `hyper_io` is destroyed (either explicitely by `hyper_io_free` or - implicitely by an associated hyper task completing). + `hyper_io` is destroyed (either explicitly by `hyper_io_free` or + implicitly by an associated hyper task completing). */ void hyper_io_set_userdata(struct hyper_io *io, void *data, hyper_userdata_drop drop_func); @@ -1104,18 +1159,23 @@ void hyper_io_set_write(struct hyper_io *io, hyper_io_write_callback func); /* Creates a new task executor. + + To avoid a memory leak, the executor must eventually be consumed by + `hyper_executor_free`. */ const struct hyper_executor *hyper_executor_new(void); /* Frees an executor and any incomplete tasks still part of it. + + This should be used for any executor once it is no longer needed. */ void hyper_executor_free(const struct hyper_executor *exec); /* Push a task onto the executor. - The executor takes ownership of the task, it should not be accessed + The executor takes ownership of the task, which should not be accessed again unless returned back to the user with `hyper_executor_poll`. */ enum hyper_code hyper_executor_push(const struct hyper_executor *exec, struct hyper_task *task); @@ -1126,6 +1186,10 @@ enum hyper_code hyper_executor_push(const struct hyper_executor *exec, struct hy If ready, returns a task from the executor that has completed. + To avoid a memory leak, the task must eventually be consumed by + `hyper_task_free`, or taken ownership of by `hyper_executor_push` + without subsequently being given back by `hyper_executor_poll`. + If there are no ready tasks, this returns `NULL`. */ struct hyper_task *hyper_executor_poll(const struct hyper_executor *exec); @@ -1142,6 +1206,10 @@ int hyper_executor_next_timer_pop(const struct hyper_executor *exec); /* Free a task. + + This should only be used if the task isn't consumed by + `hyper_clientconn_handshake` or taken ownership of by + `hyper_executor_push`. */ void hyper_task_free(struct hyper_task *task); @@ -1152,6 +1220,11 @@ void hyper_task_free(struct hyper_task *task); this task. Use `hyper_task_type` to determine the type of the `void *` return value. + + To avoid a memory leak, a non-empty return value must eventually be + consumed by a function appropriate for its type, one of + `hyper_error_free`, `hyper_clientconn_free`, `hyper_response_free`, or + `hyper_buf_free`. */ void *hyper_task_value(struct hyper_task *task); @@ -1175,11 +1248,17 @@ void *hyper_task_userdata(struct hyper_task *task); /* Copies a waker out of the task context. 
+ + To avoid a memory leak, the waker must eventually be consumed by + `hyper_waker_free` or `hyper_waker_wake`. */ struct hyper_waker *hyper_context_waker(struct hyper_context *cx); /* - Free a waker that hasn't been woken. + Free a waker. + + This should only be used if the request isn't consumed by + `hyper_waker_wake`. */ void hyper_waker_free(struct hyper_waker *waker); diff --git a/docs/MAINTAINERS.md b/docs/MAINTAINERS.md index 5726faf710..8db1e081a8 100644 --- a/docs/MAINTAINERS.md +++ b/docs/MAINTAINERS.md @@ -6,6 +6,7 @@ To see what these roles do, and how to become one, look at [GOVERNANCE](./GOVERN - Eray Karatay (@programatik29) - Oddbjørn Grødem (@oddgrd) +- Noah Kennedy (@Noah-Kennedy) ## Collaborators diff --git a/docs/MSRV.md b/docs/MSRV.md index 65127c99bd..70752c9138 100644 --- a/docs/MSRV.md +++ b/docs/MSRV.md @@ -6,4 +6,4 @@ hyper. It is possible that an older compiler can work, but that is not guaranteed. We try to increase the MSRV responsibly, only when a significant new feature is needed. -The current MSRV is: **1.56**. +The current MSRV is: **1.63**. diff --git a/examples/README.md b/examples/README.md index 2d08936c1c..5010b39ca6 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,25 +1,34 @@ # Examples of using hyper -These examples show of how to do common tasks using `hyper`. You may also find the [Guides](https://hyper.rs/guides) helpful. +These examples show how to do common tasks using `hyper`. You may also find the [Guides](https://hyper.rs/guides/1/) helpful. -If you checkout this repository, you can run any of the examples `cargo run --example example_name`. +If you checkout this repository, you can run any of the examples with the command: + + `cargo run --example {example_name} --features="full"` ### Dependencies -Most of these examples use these dependencies: +A complete list of dependencies used across these examples: ```toml [dependencies] -hyper = { version = "0.14", features = ["full"] } +hyper = { version = "1.0.0-rc.3", features = ["full"] } tokio = { version = "1", features = ["full"] } pretty_env_logger = "0.4" +http-body-util = "0.1.0-rc.2" +bytes = "1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +url = "2.2" +http = "0.2" +futures-util = { version = "0.3", default-features = false, features = ["alloc"] } ``` ## Getting Started ### Clients -* [`client`](client.rs) - A simple CLI http client that request the url passed in parameters and outputs the response content and details to the stdout, reading content chunk-by-chunk. +* [`client`](client.rs) - A simple CLI http client that requests the url passed in parameters and outputs the response content and details to the stdout, reading content chunk-by-chunk. * [`client_json`](client_json.rs) - A simple program that GETs some json, reads the body asynchronously, parses it with serde and outputs the result. @@ -33,6 +42,8 @@ pretty_env_logger = "0.4" * [`gateway`](gateway.rs) - A server gateway (reverse proxy) that proxies to the `hello` service above. +* [`graceful_shutdown`](graceful_shutdown.rs) - A server that has a timeout for incoming connections and does graceful connection shutdown. + * [`http_proxy`](http_proxy.rs) - A simple HTTP(S) proxy that handle and upgrade `CONNECT` requests and then proxy data between client and remote server. * [`multi_server`](multi_server.rs) - A server that listens to two different ports, a different `Service` per port. 
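Before the per-example diffs below, it may help to see the one pattern they all share: the raw Tokio stream is wrapped in the `TokioIo` helper from `benches/support/tokiort.rs` (shown earlier in this patch) so it implements `hyper::rt::{Read, Write}` before being handed to hyper's connection builders. The following is only a condensed sketch of that pattern, mirroring the names already used in `examples/hello.rs` in this diff, not an additional example:

```rust
// Condensed sketch of the adapter pattern the updated examples share:
// wrap the accepted Tokio stream in `TokioIo` so it implements
// `hyper::rt::{Read, Write}` before passing it to the connection builder.
use std::convert::Infallible;
use std::net::SocketAddr;

use bytes::Bytes;
use http_body_util::Full;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Request, Response};
use tokio::net::TcpListener;

#[path = "../benches/support/mod.rs"]
mod support;
use support::TokioIo;

async fn hello(_: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
    Ok(Response::new(Full::new(Bytes::from("Hello World!"))))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let listener = TcpListener::bind(addr).await?;
    loop {
        let (tcp, _) = listener.accept().await?;
        // The adapter is the only new step compared to the rc.3 examples.
        let io = TokioIo::new(tcp);
        tokio::task::spawn(async move {
            if let Err(err) = http1::Builder::new()
                .serve_connection(io, service_fn(hello))
                .await
            {
                println!("Error serving connection: {:?}", err);
            }
        });
    }
}
```

The adapter is needed because rc.4 replaces the `tokio::io` traits with hyper's own `rt::Read`/`rt::Write` traits (see the CHANGELOG breaking-change entry above); `hyper-util` is expected to ship an equivalent wrapper for downstream users.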
diff --git a/examples/client.rs b/examples/client.rs index ffcc026719..046f59de02 100644 --- a/examples/client.rs +++ b/examples/client.rs @@ -8,6 +8,10 @@ use hyper::Request; use tokio::io::{self, AsyncWriteExt as _}; use tokio::net::TcpStream; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + // A simple type alias so as to DRY. type Result = std::result::Result>; @@ -40,8 +44,9 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> { let port = url.port_u16().unwrap_or(80); let addr = format!("{}:{}", host, port); let stream = TcpStream::connect(addr).await?; + let io = TokioIo::new(stream); - let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?; + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?; tokio::task::spawn(async move { if let Err(err) = conn.await { println!("Connection failed: {:?}", err); diff --git a/examples/client_json.rs b/examples/client_json.rs index 4ba6787a6e..6a6753528c 100644 --- a/examples/client_json.rs +++ b/examples/client_json.rs @@ -7,6 +7,10 @@ use hyper::{body::Buf, Request}; use serde::Deserialize; use tokio::net::TcpStream; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + // A simple type alias so as to DRY. type Result = std::result::Result>; @@ -29,8 +33,9 @@ async fn fetch_json(url: hyper::Uri) -> Result> { let addr = format!("{}:{}", host, port); let stream = TcpStream::connect(addr).await?; + let io = TokioIo::new(stream); - let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?; + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?; tokio::task::spawn(async move { if let Err(err) = conn.await { println!("Connection failed: {:?}", err); diff --git a/examples/echo.rs b/examples/echo.rs index a644996b3c..60d03b368d 100644 --- a/examples/echo.rs +++ b/examples/echo.rs @@ -10,6 +10,10 @@ use hyper::service::service_fn; use hyper::{body::Body, Method, Request, Response, StatusCode}; use tokio::net::TcpListener; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + /// This is our service handler. It receives a Request, routes on its /// path, and returns a Future of a Response. async fn echo( @@ -18,7 +22,7 @@ async fn echo( match (req.method(), req.uri().path()) { // Serve some instructions at / (&Method::GET, "/") => Ok(Response::new(full( - "Try POSTing data to /echo such as: `curl localhost:3000/echo -XPOST -d 'hello world'`", + "Try POSTing data to /echo such as: `curl localhost:3000/echo -XPOST -d \"hello world\"`", ))), // Simply echo the body back to the client. 
@@ -92,10 +96,11 @@ async fn main() -> Result<(), Box> { println!("Listening on http://{}", addr); loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() - .serve_connection(stream, service_fn(echo)) + .serve_connection(io, service_fn(echo)) .await { println!("Error serving connection: {:?}", err); diff --git a/examples/gateway.rs b/examples/gateway.rs index 907f2fdba2..e0e3e053d0 100644 --- a/examples/gateway.rs +++ b/examples/gateway.rs @@ -4,6 +4,10 @@ use hyper::{server::conn::http1, service::service_fn}; use std::net::SocketAddr; use tokio::net::{TcpListener, TcpStream}; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + #[tokio::main] async fn main() -> Result<(), Box> { pretty_env_logger::init(); @@ -20,6 +24,7 @@ async fn main() -> Result<(), Box> { loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); // This is the `Service` that will handle the connection. // `service_fn` is a helper to convert a function that @@ -42,9 +47,9 @@ async fn main() -> Result<(), Box> { async move { let client_stream = TcpStream::connect(addr).await.unwrap(); + let io = TokioIo::new(client_stream); - let (mut sender, conn) = - hyper::client::conn::http1::handshake(client_stream).await?; + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?; tokio::task::spawn(async move { if let Err(err) = conn.await { println!("Connection failed: {:?}", err); @@ -56,10 +61,7 @@ async fn main() -> Result<(), Box> { }); tokio::task::spawn(async move { - if let Err(err) = http1::Builder::new() - .serve_connection(stream, service) - .await - { + if let Err(err) = http1::Builder::new().serve_connection(io, service).await { println!("Failed to serve the connection: {:?}", err); } }); diff --git a/examples/graceful_shutdown.rs b/examples/graceful_shutdown.rs new file mode 100644 index 0000000000..47fbb8da20 --- /dev/null +++ b/examples/graceful_shutdown.rs @@ -0,0 +1,95 @@ +#![deny(warnings)] + +use std::convert::Infallible; +use std::net::SocketAddr; +use std::time::Duration; + +use bytes::Bytes; +use http_body_util::Full; +use hyper::server::conn::http1; +use hyper::service::service_fn; +use hyper::{Request, Response}; +use tokio::net::TcpListener; +use tokio::pin; + +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + +// An async function that consumes a request, does nothing with it and returns a +// response. +async fn hello(_: Request) -> Result>, Infallible> { + // Sleep for 6 seconds to simulate long processing. + // This is longer than the initial 5 second connection timeout, + // but within the 2 second graceful shutdown timeout. + println!("in hello before sleep"); + tokio::time::sleep(Duration::from_secs(6)).await; + println!("in hello after sleep"); + Ok(Response::new(Full::new(Bytes::from("Hello World!")))) +} + +#[tokio::main] +pub async fn main() -> Result<(), Box> { + pretty_env_logger::init(); + + // This address is localhost + let addr: SocketAddr = ([127, 0, 0, 1], 3000).into(); + + // Use a 5 second timeout for incoming connections to the server. + // If a request is in progress when the 5 second timeout elapses, + // use a 2 second timeout for processing the final request and graceful shutdown. 
+ let connection_timeouts = vec![Duration::from_secs(5), Duration::from_secs(2)]; + + // Bind to the port and listen for incoming TCP connections + let listener = TcpListener::bind(addr).await?; + println!("Listening on http://{}", addr); + loop { + // When an incoming TCP connection is received grab a TCP stream for + // client<->server communication. + let (tcp, remote_address) = listener.accept().await?; + + // Use an adapter to access something implementing `tokio::io` traits as if they implement + // `hyper::rt` IO traits. + let io = TokioIo::new(tcp); + + // Print the remote address connecting to our server. + println!("accepted connection from {:?}", remote_address); + + // Clone the connection_timeouts so they can be passed to the new task. + let connection_timeouts_clone = connection_timeouts.clone(); + + // Spin up a new task in Tokio so we can continue to listen for new TCP connection on the + // current task without waiting for the processing of the HTTP1 connection we just received + // to finish + tokio::task::spawn(async move { + // Pin the connection object so we can use tokio::select! below. + let conn = http1::Builder::new().serve_connection(io, service_fn(hello)); + pin!(conn); + + // Iterate the timeouts. Use tokio::select! to wait on the + // result of polling the connection itself, + // and also on tokio::time::sleep for the current timeout duration. + for (iter, sleep_duration) in connection_timeouts_clone.iter().enumerate() { + println!("iter = {} sleep_duration = {:?}", iter, sleep_duration); + tokio::select! { + res = conn.as_mut() => { + // Polling the connection returned a result. + // In this case print either the successful or error result for the connection + // and break out of the loop. + match res { + Ok(()) => println!("after polling conn, no error"), + Err(e) => println!("error serving connection: {:?}", e), + }; + break; + } + _ = tokio::time::sleep(*sleep_duration) => { + // tokio::time::sleep returned a result. + // Call graceful_shutdown on the connection and continue the loop. + println!("iter = {} got timeout_interval, calling conn.graceful_shutdown", iter); + conn.as_mut().graceful_shutdown(); + } + } + } + }); + } +} diff --git a/examples/hello.rs b/examples/hello.rs index a11199adb8..d9d6b8c4c7 100644 --- a/examples/hello.rs +++ b/examples/hello.rs @@ -10,6 +10,10 @@ use hyper::service::service_fn; use hyper::{Request, Response}; use tokio::net::TcpListener; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + // An async function that consumes a request, does nothing with it and returns a // response. async fn hello(_: Request) -> Result>, Infallible> { @@ -35,7 +39,10 @@ pub async fn main() -> Result<(), Box> { // has work to do. In this case, a connection arrives on the port we are listening on and // the task is woken up, at which point the task is then put back on a thread, and is // driven forward by the runtime, eventually yielding a TCP stream. - let (stream, _) = listener.accept().await?; + let (tcp, _) = listener.accept().await?; + // Use an adapter to access something implementing `tokio::io` traits as if they implement + // `hyper::rt` IO traits. 
+ let io = TokioIo::new(tcp); // Spin up a new task in Tokio so we can continue to listen for new TCP connection on the // current task without waiting for the processing of the HTTP1 connection we just received @@ -44,7 +51,7 @@ pub async fn main() -> Result<(), Box> { // Handle the connection from the client using HTTP1 and pass any // HTTP requests received on that connection to the `hello` function if let Err(err) = http1::Builder::new() - .serve_connection(stream, service_fn(hello)) + .serve_connection(io, service_fn(hello)) .await { println!("Error serving connection: {:?}", err); diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs index 0b4a6818b8..c36cc23778 100644 --- a/examples/http_proxy.rs +++ b/examples/http_proxy.rs @@ -12,6 +12,10 @@ use hyper::{Method, Request, Response}; use tokio::net::{TcpListener, TcpStream}; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + // To try this example: // 1. cargo run --example http_proxy // 2. config http_proxy in command line @@ -28,12 +32,13 @@ async fn main() -> Result<(), Box> { loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() .preserve_header_case(true) .title_case_headers(true) - .serve_connection(stream, service_fn(proxy)) + .serve_connection(io, service_fn(proxy)) .with_upgrades() .await { @@ -88,11 +93,12 @@ async fn proxy( let addr = format!("{}:{}", host, port); let stream = TcpStream::connect(addr).await.unwrap(); + let io = TokioIo::new(stream); let (mut sender, conn) = Builder::new() .preserve_header_case(true) .title_case_headers(true) - .handshake(stream) + .handshake(io) .await?; tokio::task::spawn(async move { if let Err(err) = conn.await { @@ -123,9 +129,10 @@ fn full>(chunk: T) -> BoxBody { // Create a TCP connection to host:port, build a tunnel between the connection and // the upgraded connection -async fn tunnel(mut upgraded: Upgraded, addr: String) -> std::io::Result<()> { +async fn tunnel(upgraded: Upgraded, addr: String) -> std::io::Result<()> { // Connect to remote server let mut server = TcpStream::connect(addr).await?; + let mut upgraded = TokioIo::new(upgraded); // Proxying data let (from_client, from_server) = diff --git a/examples/multi_server.rs b/examples/multi_server.rs index 5eb520dbdb..51e6c39ca7 100644 --- a/examples/multi_server.rs +++ b/examples/multi_server.rs @@ -11,6 +11,10 @@ use hyper::service::service_fn; use hyper::{Request, Response}; use tokio::net::TcpListener; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + static INDEX1: &[u8] = b"The 1st service!"; static INDEX2: &[u8] = b"The 2nd service!"; @@ -33,10 +37,11 @@ async fn main() -> Result<(), Box> { let listener = TcpListener::bind(addr1).await.unwrap(); loop { let (stream, _) = listener.accept().await.unwrap(); + let io = TokioIo::new(stream); tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() - .serve_connection(stream, service_fn(index1)) + .serve_connection(io, service_fn(index1)) .await { println!("Error serving connection: {:?}", err); @@ -49,10 +54,11 @@ async fn main() -> Result<(), Box> { let listener = TcpListener::bind(addr2).await.unwrap(); loop { let (stream, _) = listener.accept().await.unwrap(); + let io = TokioIo::new(stream); tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() - .serve_connection(stream, service_fn(index2)) + .serve_connection(io, service_fn(index2)) .await { println!("Error serving connection: 
{:?}", err); diff --git a/examples/params.rs b/examples/params.rs index a902867f2e..3ba39326a1 100644 --- a/examples/params.rs +++ b/examples/params.rs @@ -13,6 +13,10 @@ use std::convert::Infallible; use std::net::SocketAddr; use url::form_urlencoded; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + static INDEX: &[u8] = b"
Name:
Number:
"; static MISSING: &[u8] = b"Missing field"; static NOTNUMERIC: &[u8] = b"Number field is not numeric"; @@ -124,10 +128,11 @@ async fn main() -> Result<(), Box> { println!("Listening on http://{}", addr); loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() - .serve_connection(stream, service_fn(param_example)) + .serve_connection(io, service_fn(param_example)) .await { println!("Error serving connection: {:?}", err); diff --git a/examples/send_file.rs b/examples/send_file.rs index a4514eb52b..ec489ec34f 100644 --- a/examples/send_file.rs +++ b/examples/send_file.rs @@ -10,6 +10,10 @@ use http_body_util::Full; use hyper::service::service_fn; use hyper::{Method, Request, Response, Result, StatusCode}; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + static INDEX: &str = "examples/send_file_index.html"; static NOTFOUND: &[u8] = b"Not Found"; @@ -24,10 +28,11 @@ async fn main() -> std::result::Result<(), Box> { loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() - .serve_connection(stream, service_fn(response_examples)) + .serve_connection(io, service_fn(response_examples)) .await { println!("Failed to serve connection: {:?}", err); diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs index 50fd3ab749..fc0f79356c 100644 --- a/examples/service_struct_impl.rs +++ b/examples/service_struct_impl.rs @@ -8,6 +8,11 @@ use tokio::net::TcpListener; use std::future::Future; use std::net::SocketAddr; use std::pin::Pin; +use std::sync::Mutex; + +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; type Counter = i32; @@ -20,10 +25,16 @@ async fn main() -> Result<(), Box> { loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); tokio::task::spawn(async move { if let Err(err) = http1::Builder::new() - .serve_connection(stream, Svc { counter: 81818 }) + .serve_connection( + io, + Svc { + counter: Mutex::new(81818), + }, + ) .await { println!("Failed to serve connection: {:?}", err); @@ -33,7 +44,7 @@ async fn main() -> Result<(), Box> { } struct Svc { - counter: Counter, + counter: Mutex, } impl Service> for Svc { @@ -41,7 +52,7 @@ impl Service> for Svc { type Error = hyper::Error; type Future = Pin> + Send>>; - fn call(&mut self, req: Request) -> Self::Future { + fn call(&self, req: Request) -> Self::Future { fn mk_response(s: String) -> Result>, hyper::Error> { Ok(Response::builder().body(Full::new(Bytes::from(s))).unwrap()) } @@ -58,7 +69,7 @@ impl Service> for Svc { }; if req.uri().path() != "/favicon.ico" { - self.counter += 1; + *self.counter.lock().expect("lock poisoned") += 1; } Box::pin(async { res }) diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs index ee109d54fa..40e8d942b2 100644 --- a/examples/single_threaded.rs +++ b/examples/single_threaded.rs @@ -1,17 +1,36 @@ #![deny(warnings)] - +/// This example shows how to use hyper with a single-threaded runtime. +/// This example exists also to test if the code compiles when `Body` is not `Send`. +/// +/// This Example includes HTTP/1 and HTTP/2 server and client. +/// +/// In HTTP/1 it is possible to use a `!Send` `Body`type. +/// In HTTP/2 it is possible to use a `!Send` `Body` and `IO` type. 
+/// +/// The `Body` and `IOTypeNotSend` structs in this example are `!Send` +/// +/// For HTTP/2 this only works if the `Executor` trait is implemented without the `Send` bound. +use http_body_util::BodyExt; use hyper::server::conn::http2; use std::cell::Cell; use std::net::SocketAddr; use std::rc::Rc; +use tokio::io::{self, AsyncWriteExt}; use tokio::net::TcpListener; use hyper::body::{Body as HttpBody, Bytes, Frame}; use hyper::service::service_fn; +use hyper::Request; use hyper::{Error, Response}; use std::marker::PhantomData; use std::pin::Pin; use std::task::{Context, Poll}; +use std::thread; +use tokio::net::TcpStream; + +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; struct Body { // Our Body type is !Send and !Sync: @@ -40,30 +59,181 @@ impl HttpBody for Body { } } -fn main() -> Result<(), Box> { +fn main() { pretty_env_logger::init(); - // Configure a runtime that runs everything on the current thread - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("build runtime"); + let server_http2 = thread::spawn(move || { + // Configure a runtime for the server that runs everything on the current thread + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("build runtime"); + + // Combine it with a `LocalSet, which means it can spawn !Send futures... + let local = tokio::task::LocalSet::new(); + local.block_on(&rt, http2_server()).unwrap(); + }); + + let client_http2 = thread::spawn(move || { + // Configure a runtime for the client that runs everything on the current thread + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("build runtime"); + + // Combine it with a `LocalSet, which means it can spawn !Send futures... + let local = tokio::task::LocalSet::new(); + local + .block_on( + &rt, + http2_client("http://localhost:3000".parse::().unwrap()), + ) + .unwrap(); + }); + + let server_http1 = thread::spawn(move || { + // Configure a runtime for the server that runs everything on the current thread + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("build runtime"); - // Combine it with a `LocalSet, which means it can spawn !Send futures... - let local = tokio::task::LocalSet::new(); - local.block_on(&rt, run()) + // Combine it with a `LocalSet, which means it can spawn !Send futures... + let local = tokio::task::LocalSet::new(); + local.block_on(&rt, http1_server()).unwrap(); + }); + + let client_http1 = thread::spawn(move || { + // Configure a runtime for the client that runs everything on the current thread + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("build runtime"); + + // Combine it with a `LocalSet, which means it can spawn !Send futures... + let local = tokio::task::LocalSet::new(); + local + .block_on( + &rt, + http1_client("http://localhost:3001".parse::().unwrap()), + ) + .unwrap(); + }); + + server_http2.join().unwrap(); + client_http2.join().unwrap(); + + server_http1.join().unwrap(); + client_http1.join().unwrap(); } -async fn run() -> Result<(), Box> { - let addr: SocketAddr = ([127, 0, 0, 1], 3000).into(); +async fn http1_server() -> Result<(), Box> { + let addr = SocketAddr::from(([127, 0, 0, 1], 3001)); + + let listener = TcpListener::bind(addr).await?; + + // For each connection, clone the counter to use in our service... 
+ let counter = Rc::new(Cell::new(0)); + + loop { + let (stream, _) = listener.accept().await?; + + let io = TokioIo::new(stream); + + let cnt = counter.clone(); + + let service = service_fn(move |_| { + let prev = cnt.get(); + cnt.set(prev + 1); + let value = cnt.get(); + async move { Ok::<_, Error>(Response::new(Body::from(format!("Request #{}", value)))) } + }); + + tokio::task::spawn_local(async move { + if let Err(err) = hyper::server::conn::http1::Builder::new() + .serve_connection(io, service) + .await + { + println!("Error serving connection: {:?}", err); + } + }); + } +} + +async fn http1_client(url: hyper::Uri) -> Result<(), Box> { + let host = url.host().expect("uri has no host"); + let port = url.port_u16().unwrap_or(80); + let addr = format!("{}:{}", host, port); + let stream = TcpStream::connect(addr).await?; + + let io = TokioIo::new(stream); + + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?; + + tokio::task::spawn_local(async move { + if let Err(err) = conn.await { + let mut stdout = io::stdout(); + stdout + .write_all(format!("Connection failed: {:?}", err).as_bytes()) + .await + .unwrap(); + stdout.flush().await.unwrap(); + } + }); + + let authority = url.authority().unwrap().clone(); + + // Make 4 requests + for _ in 0..4 { + let req = Request::builder() + .uri(url.clone()) + .header(hyper::header::HOST, authority.as_str()) + .body(Body::from("test".to_string()))?; + let mut res = sender.send_request(req).await?; + + let mut stdout = io::stdout(); + stdout + .write_all(format!("Response: {}\n", res.status()).as_bytes()) + .await + .unwrap(); + stdout + .write_all(format!("Headers: {:#?}\n", res.headers()).as_bytes()) + .await + .unwrap(); + stdout.flush().await.unwrap(); + + // Print the response body + while let Some(next) = res.frame().await { + let frame = next?; + if let Some(chunk) = frame.data_ref() { + stdout.write_all(&chunk).await.unwrap(); + } + } + stdout.write_all(b"\n-----------------\n").await.unwrap(); + stdout.flush().await.unwrap(); + } + Ok(()) +} + +async fn http2_server() -> Result<(), Box> { + let mut stdout = io::stdout(); + + let addr: SocketAddr = ([127, 0, 0, 1], 3000).into(); // Using a !Send request counter is fine on 1 thread... let counter = Rc::new(Cell::new(0)); let listener = TcpListener::bind(addr).await?; - println!("Listening on http://{}", addr); + + stdout + .write_all(format!("Listening on http://{}", addr).as_bytes()) + .await + .unwrap(); + stdout.flush().await.unwrap(); + loop { let (stream, _) = listener.accept().await?; + let io = IOTypeNotSend::new(TokioIo::new(stream)); // For each connection, clone the counter to use in our service... 
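Aside (illustrative, not from the patch): the client loop above pulls the response body frame by frame with `frame()` and prints each data chunk. The same loop can be tried against an in-memory `http_body_util::Full` body, which is convenient when experimenting without a live connection:

```rust
use bytes::Bytes;
use http_body_util::{BodyExt, Full};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Full yields its contents as a single data frame and then ends.
    let mut body = Full::new(Bytes::from_static(b"hello world"));

    while let Some(next) = body.frame().await {
        let frame = next?;
        if let Some(chunk) = frame.data_ref() {
            println!("data frame: {} bytes", chunk.len());
        }
    }
    Ok(())
}
```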
let cnt = counter.clone(); @@ -77,15 +247,76 @@ async fn run() -> Result<(), Box> { tokio::task::spawn_local(async move { if let Err(err) = http2::Builder::new(LocalExec) - .serve_connection(stream, service) + .serve_connection(io, service) .await { - println!("Error serving connection: {:?}", err); + let mut stdout = io::stdout(); + stdout + .write_all(format!("Error serving connection: {:?}", err).as_bytes()) + .await + .unwrap(); + stdout.flush().await.unwrap(); } }); } } +async fn http2_client(url: hyper::Uri) -> Result<(), Box> { + let host = url.host().expect("uri has no host"); + let port = url.port_u16().unwrap_or(80); + let addr = format!("{}:{}", host, port); + let stream = TcpStream::connect(addr).await?; + + let stream = IOTypeNotSend::new(TokioIo::new(stream)); + + let (mut sender, conn) = hyper::client::conn::http2::handshake(LocalExec, stream).await?; + + tokio::task::spawn_local(async move { + if let Err(err) = conn.await { + let mut stdout = io::stdout(); + stdout + .write_all(format!("Connection failed: {:?}", err).as_bytes()) + .await + .unwrap(); + stdout.flush().await.unwrap(); + } + }); + + let authority = url.authority().unwrap().clone(); + + // Make 4 requests + for _ in 0..4 { + let req = Request::builder() + .uri(url.clone()) + .header(hyper::header::HOST, authority.as_str()) + .body(Body::from("test".to_string()))?; + + let mut res = sender.send_request(req).await?; + + let mut stdout = io::stdout(); + stdout + .write_all(format!("Response: {}\n", res.status()).as_bytes()) + .await + .unwrap(); + stdout + .write_all(format!("Headers: {:#?}\n", res.headers()).as_bytes()) + .await + .unwrap(); + stdout.flush().await.unwrap(); + + // Print the response body + while let Some(next) = res.frame().await { + let frame = next?; + if let Some(chunk) = frame.data_ref() { + stdout.write_all(&chunk).await.unwrap(); + } + } + stdout.write_all(b"\n-----------------\n").await.unwrap(); + stdout.flush().await.unwrap(); + } + Ok(()) +} + // NOTE: This part is only needed for HTTP/2. HTTP/1 doesn't need an executor. 
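Aside (a sketch, not patch content): the example's `LocalExec` implements `hyper::rt::Executor` with `tokio::task::spawn_local` so HTTP/2 background tasks can be `!Send`. For comparison, a `Send`-capable counterpart for the multi-threaded runtime would look like this; the name `TokioExecutor` is illustrative:

```rust
use std::future::Future;

// Spawns connection tasks onto the multi-threaded Tokio runtime.
#[derive(Clone)]
struct TokioExecutor;

impl<F> hyper::rt::Executor<F> for TokioExecutor
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        tokio::spawn(fut);
    }
}

fn main() {
    // Nothing to run here; the sketch only needs to type-check. In real code
    // the executor is handed to something like http2::Builder::new(...).
    let _exec = TokioExecutor;
}
```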
// // Since the Server needs to spawn some background tasks, we needed @@ -102,3 +333,51 @@ where tokio::task::spawn_local(fut); } } + +struct IOTypeNotSend { + _marker: PhantomData<*const ()>, + stream: TokioIo, +} + +impl IOTypeNotSend { + fn new(stream: TokioIo) -> Self { + Self { + _marker: PhantomData, + stream, + } + } +} + +impl hyper::rt::Write for IOTypeNotSend { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.stream).poll_write(cx, buf) + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut self.stream).poll_flush(cx) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut self.stream).poll_shutdown(cx) + } +} + +impl hyper::rt::Read for IOTypeNotSend { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: hyper::rt::ReadBufCursor<'_>, + ) -> Poll> { + Pin::new(&mut self.stream).poll_read(cx, buf) + } +} diff --git a/examples/state.rs b/examples/state.rs index 7d060efe1d..5263efdadc 100644 --- a/examples/state.rs +++ b/examples/state.rs @@ -12,6 +12,10 @@ use hyper::{server::conn::http1, service::service_fn}; use hyper::{Error, Response}; use tokio::net::TcpListener; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + #[tokio::main] async fn main() -> Result<(), Box> { pretty_env_logger::init(); @@ -26,6 +30,7 @@ async fn main() -> Result<(), Box> { println!("Listening on http://{}", addr); loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); // Each connection could send multiple requests, so // the `Service` needs a clone to handle later requests. @@ -46,10 +51,7 @@ async fn main() -> Result<(), Box> { } }); - if let Err(err) = http1::Builder::new() - .serve_connection(stream, service) - .await - { + if let Err(err) = http1::Builder::new().serve_connection(io, service).await { println!("Error serving connection: {:?}", err); } } diff --git a/examples/upgrades.rs b/examples/upgrades.rs index 92a80d7567..f9754e5d49 100644 --- a/examples/upgrades.rs +++ b/examples/upgrades.rs @@ -16,11 +16,16 @@ use hyper::service::service_fn; use hyper::upgrade::Upgraded; use hyper::{Request, Response, StatusCode}; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + // A simple type alias so as to DRY. type Result = std::result::Result>; /// Handle server-side I/O after HTTP upgraded. -async fn server_upgraded_io(mut upgraded: Upgraded) -> Result<()> { +async fn server_upgraded_io(upgraded: Upgraded) -> Result<()> { + let mut upgraded = TokioIo::new(upgraded); // we have an upgraded connection that we can read and // write on directly. // @@ -75,7 +80,8 @@ async fn server_upgrade(mut req: Request) -> Result Result<()> { +async fn client_upgraded_io(upgraded: Upgraded) -> Result<()> { + let mut upgraded = TokioIo::new(upgraded); // We've gotten an upgraded connection that we can read // and write directly on. Let's start out 'foobar' protocol. upgraded.write_all(b"foo=bar").await?; @@ -97,7 +103,8 @@ async fn client_upgrade_request(addr: SocketAddr) -> Result<()> { .unwrap(); let stream = TcpStream::connect(addr).await?; - let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?; + let io = TokioIo::new(stream); + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?; tokio::task::spawn(async move { if let Err(err) = conn.await { @@ -146,10 +153,11 @@ async fn main() { tokio::select! 
{ res = listener.accept() => { let (stream, _) = res.expect("Failed to accept"); + let io = TokioIo::new(stream); let mut rx = rx.clone(); tokio::task::spawn(async move { - let conn = http1::Builder::new().serve_connection(stream, service_fn(server_upgrade)); + let conn = http1::Builder::new().serve_connection(io, service_fn(server_upgrade)); // Don't forget to enable upgrades on the connection. let mut conn = conn.with_upgrades(); diff --git a/examples/web_api.rs b/examples/web_api.rs index 79834a0acd..91d9e9b72f 100644 --- a/examples/web_api.rs +++ b/examples/web_api.rs @@ -9,6 +9,10 @@ use hyper::service::service_fn; use hyper::{body::Incoming as IncomingBody, header, Method, Request, Response, StatusCode}; use tokio::net::{TcpListener, TcpStream}; +#[path = "../benches/support/mod.rs"] +mod support; +use support::TokioIo; + type GenericError = Box; type Result = std::result::Result; type BoxBody = http_body_util::combinators::BoxBody; @@ -30,8 +34,9 @@ async fn client_request_response() -> Result> { let host = req.uri().host().expect("uri has no host"); let port = req.uri().port_u16().expect("uri has no port"); let stream = TcpStream::connect(format!("{}:{}", host, port)).await?; + let io = TokioIo::new(stream); - let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?; + let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?; tokio::task::spawn(async move { if let Err(err) = conn.await { @@ -109,14 +114,12 @@ async fn main() -> Result<()> { println!("Listening on http://{}", addr); loop { let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); tokio::task::spawn(async move { let service = service_fn(move |req| response_examples(req)); - if let Err(err) = http1::Builder::new() - .serve_connection(stream, service) - .await - { + if let Err(err) = http1::Builder::new().serve_connection(io, service).await { println!("Failed to serve connection: {:?}", err); } }); diff --git a/src/body/incoming.rs b/src/body/incoming.rs index bf953d4c69..cdebd3db58 100644 --- a/src/body/incoming.rs +++ b/src/body/incoming.rs @@ -3,7 +3,7 @@ use std::fmt; use bytes::Bytes; use futures_channel::mpsc; use futures_channel::oneshot; -use futures_core::{FusedStream, Stream}; // for mpsc::Receiver +use futures_util::{stream::FusedStream, Stream}; // for mpsc::Receiver use http::HeaderMap; use http_body::{Body, Frame, SizeHint}; @@ -201,7 +201,16 @@ impl Body for Incoming { ping.record_data(bytes.len()); return Poll::Ready(Some(Ok(Frame::data(bytes)))); } - Some(Err(e)) => return Poll::Ready(Some(Err(crate::Error::new_body(e)))), + Some(Err(e)) => { + return match e.reason() { + // These reasons should cause the body reading to stop, but not fail it. + // The same logic as for `Read for H2Upgraded` is applied here. + Some(h2::Reason::NO_ERROR) | Some(h2::Reason::CANCEL) => { + Poll::Ready(None) + } + _ => Poll::Ready(Some(Err(crate::Error::new_body(e)))), + }; + } None => { *data_done = true; // fall through to trailers @@ -337,19 +346,17 @@ impl Sender { .map_err(|err| err.into_inner().expect("just sent Ok")) } - /// Aborts the body in an abnormal fashion. 
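Aside (not from the patch): web_api.rs aliases a boxed body type built on `http_body_util::combinators::BoxBody`. One common way to produce such a boxed body from a concrete one is `BodyExt::map_err` followed by `boxed()`; a small, hedged sketch with illustrative helper names:

```rust
use bytes::Bytes;
use http_body_util::combinators::BoxBody;
use http_body_util::{BodyExt, Empty, Full};

type GenericError = Box<dyn std::error::Error + Send + Sync>;

fn full(data: &'static str) -> BoxBody<Bytes, GenericError> {
    // Full's error type is Infallible, so map_err only converts the error
    // type before erasing the concrete body type with boxed().
    Full::new(Bytes::from_static(data.as_bytes()))
        .map_err(|never| match never {})
        .boxed()
}

fn empty() -> BoxBody<Bytes, GenericError> {
    Empty::<Bytes>::new().map_err(|never| match never {}).boxed()
}

#[tokio::main]
async fn main() {
    let body = full("hello");
    let bytes = body.collect().await.expect("infallible").to_bytes();
    assert_eq!(&bytes[..], b"hello");
    assert!(empty().collect().await.unwrap().to_bytes().is_empty());
}
```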
#[allow(unused)] - pub(crate) fn abort(self) { + pub(crate) fn abort(mut self) { + self.send_error(crate::Error::new_body_write_aborted()); + } + + pub(crate) fn send_error(&mut self, err: crate::Error) { let _ = self .data_tx // clone so the send works even if buffer is full .clone() - .try_send(Err(crate::Error::new_body_write_aborted())); - } - - #[cfg(feature = "http1")] - pub(crate) fn send_error(&mut self, err: crate::Error) { - let _ = self.data_tx.try_send(Err(err)); + .try_send(Err(err)); } } diff --git a/src/body/length.rs b/src/body/length.rs index e2bbee8039..2e46e4b309 100644 --- a/src/body/length.rs +++ b/src/body/length.rs @@ -50,8 +50,6 @@ impl DecodedLength { /// Checks the `u64` is within the maximum allowed for content-length. #[cfg(any(feature = "http1", feature = "http2"))] pub(crate) fn checked_new(len: u64) -> Result { - use tracing::warn; - if len <= MAX_LEN { Ok(DecodedLength(len)) } else { diff --git a/src/body/mod.rs b/src/body/mod.rs index 60c9914596..54f85c173c 100644 --- a/src/body/mod.rs +++ b/src/body/mod.rs @@ -10,9 +10,14 @@ //! - **The [`Body`](Body) trait** describes all possible bodies. //! hyper allows any body type that implements `Body`, allowing //! applications to have fine-grained control over their streaming. -//! - **The [`Incoming`](Incoming) concrete type**, which is an implementation of -//! `Body`, and returned by hyper as a "receive stream" (so, for server +//! - **The [`Incoming`](Incoming) concrete type**, which is an implementation +//! of `Body`, and returned by hyper as a "receive stream" (so, for server //! requests and client responses). +//! +//! There are additional implementations available in [`http-body-util`][], +//! such as a `Full` or `Empty` body. +//! +//! [`http-body-util`]: https://docs.rs/http-body-util pub use bytes::{Buf, Bytes}; pub use http_body::Body; diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs index ed87a991f9..4887f86663 100644 --- a/src/client/conn/http1.rs +++ b/src/client/conn/http1.rs @@ -3,16 +3,14 @@ use std::error::Error as StdError; use std::fmt; +use crate::rt::{Read, Write}; use bytes::Bytes; use http::{Request, Response}; use httparse::ParserConfig; -use tokio::io::{AsyncRead, AsyncWrite}; use super::super::dispatch; use crate::body::{Body, Incoming as IncomingBody}; -use crate::common::{ - task, Future, Pin, Poll, -}; +use crate::common::{task, Future, Pin, Poll}; use crate::proto; use crate::upgrade::Upgraded; @@ -51,7 +49,7 @@ pub struct Parts { #[must_use = "futures do nothing unless polled"] pub struct Connection where - T: AsyncRead + AsyncWrite + Send + 'static, + T: Read + Write + Send + 'static, B: Body + 'static, { inner: Option>, @@ -59,7 +57,7 @@ where impl Connection where - T: AsyncRead + AsyncWrite + Send + Unpin + 'static, + T: Read + Write + Send + Unpin + 'static, B: Body + 'static, B::Error: Into>, { @@ -116,7 +114,7 @@ pub struct Builder { /// See [`client::conn`](crate::client::conn) for more. 
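Aside (illustrative only): the new body module docs point readers at `http-body-util` for ready-made `Body` implementations such as `Full` and `Empty`. For instance, a plain-text response built with a `Full` body, the pattern most of the updated examples use:

```rust
use bytes::Bytes;
use http_body_util::Full;
use hyper::{Response, StatusCode};

// Build a small text/plain response carrying a Full body.
fn text_response(s: &str) -> Response<Full<Bytes>> {
    Response::builder()
        .status(StatusCode::OK)
        .header("content-type", "text/plain")
        .body(Full::new(Bytes::from(s.to_owned())))
        .expect("valid response")
}

fn main() {
    let res = text_response("hello");
    assert_eq!(res.status(), StatusCode::OK);
}
```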
pub async fn handshake(io: T) -> crate::Result<(SendRequest, Connection)> where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin + Send + 'static, B: Body + 'static, B::Data: Send, B::Error: Into>, @@ -193,8 +191,7 @@ where Err(_canceled) => panic!("dispatch dropped without returning error"), }, Err(_req) => { - tracing::debug!("connection was not ready"); - + debug!("connection was not ready"); Err(crate::Error::new_canceled().with("connection was not ready")) } } @@ -221,7 +218,7 @@ where })) } Err(req) => { - tracing::debug!("connection was not ready"); + debug!("connection was not ready"); let err = crate::Error::new_canceled().with("connection was not ready"); Either::Right(future::err((err, Some(req)))) } @@ -240,7 +237,7 @@ impl fmt::Debug for SendRequest { impl fmt::Debug for Connection where - T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + T: Read + Write + fmt::Debug + Send + 'static, B: Body + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -250,8 +247,8 @@ where impl Future for Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Send + 'static, + T: Read + Write + Unpin + Send + 'static, + B: Body + 'static, B::Data: Send, B::Error: Into>, { @@ -319,10 +316,7 @@ impl Builder { /// Default is false. /// /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn allow_spaces_after_header_name_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { + pub fn allow_spaces_after_header_name_in_responses(&mut self, enabled: bool) -> &mut Builder { self.h1_parser_config .allow_spaces_after_header_name_in_responses(enabled); self @@ -360,10 +354,7 @@ impl Builder { /// Default is false. /// /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 - pub fn allow_obsolete_multiline_headers_in_responses( - &mut self, - enabled: bool, - ) -> &mut Builder { + pub fn allow_obsolete_multiline_headers_in_responses(&mut self, enabled: bool) -> &mut Builder { self.h1_parser_config .allow_obsolete_multiline_headers_in_responses(enabled); self @@ -478,7 +469,7 @@ impl Builder { io: T, ) -> impl Future, Connection)>> where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin + Send + 'static, B: Body + 'static, B::Data: Send, B::Error: Into>, @@ -486,7 +477,7 @@ impl Builder { let opts = self.clone(); async move { - tracing::trace!("client handshake HTTP/1"); + trace!("client handshake HTTP/1"); let (tx, rx) = dispatch::channel(); let mut conn = proto::Conn::new(io); diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs index c45b67dffd..edb99cfeff 100644 --- a/src/client/conn/http2.rs +++ b/src/client/conn/http2.rs @@ -1,23 +1,21 @@ //! HTTP/2 client connections -use std::error::Error as StdError; +use std::error::Error; use std::fmt; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; +use crate::rt::{Read, Write}; use http::{Request, Response}; -use tokio::io::{AsyncRead, AsyncWrite}; use super::super::dispatch; use crate::body::{Body, Incoming as IncomingBody}; use crate::common::time::Time; -use crate::common::{ - exec::{BoxSendFuture, Exec}, - task, Future, Pin, Poll, -}; +use crate::common::{task, Future, Pin, Poll}; use crate::proto; -use crate::rt::{Executor, Timer}; +use crate::rt::bounds::ExecutorClient; +use crate::rt::Timer; /// The sender side of an established connection. 
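Aside (a toy sketch, not patch content): `handshake` now accepts any transport implementing hyper's own `Read`/`Write` traits instead of Tokio's. To show the shape of those traits, here is a minimal in-memory sink implementing `hyper::rt::Write` only (`Read` is omitted because its `ReadBufCursor` handling needs unsafe code); `VecSink` is an illustrative name:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

// An in-memory sink: every write is appended to the inner Vec.
struct VecSink(Vec<u8>);

impl hyper::rt::Write for VecSink {
    fn poll_write(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        self.0.extend_from_slice(buf);
        Poll::Ready(Ok(buf.len()))
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        Poll::Ready(Ok(()))
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        Poll::Ready(Ok(()))
    }
}

#[tokio::main]
async fn main() {
    let mut sink = VecSink(Vec::new());
    // Drive a single poll_write call with std::future::poll_fn.
    let n = std::future::poll_fn(|cx| {
        hyper::rt::Write::poll_write(Pin::new(&mut sink), cx, b"hello")
    })
    .await
    .expect("write");
    assert_eq!(n, 5);
    assert_eq!(sink.0, b"hello".to_vec());
}
```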
pub struct SendRequest { @@ -26,7 +24,9 @@ pub struct SendRequest { impl Clone for SendRequest { fn clone(&self) -> SendRequest { - SendRequest { dispatch: self.dispatch.clone() } + SendRequest { + dispatch: self.dispatch.clone(), + } } } @@ -35,20 +35,22 @@ impl Clone for SendRequest { /// In most cases, this should just be spawned into an executor, so that it /// can process incoming and outgoing messages, notice hangups, and the like. #[must_use = "futures do nothing unless polled"] -pub struct Connection +pub struct Connection where - T: AsyncRead + AsyncWrite + Send + 'static, + T: Read + Write + 'static + Unpin, B: Body + 'static, + E: ExecutorClient + Unpin, + B::Error: Into>, { - inner: (PhantomData, proto::h2::ClientTask), + inner: (PhantomData, proto::h2::ClientTask), } /// A builder to configure an HTTP connection. /// /// After setting options, the builder is used to create a handshake future. #[derive(Clone, Debug)] -pub struct Builder { - pub(super) exec: Exec, +pub struct Builder { + pub(super) exec: Ex, pub(super) timer: Time, h2_builder: proto::h2::client::Config, } @@ -60,13 +62,13 @@ pub struct Builder { pub async fn handshake( exec: E, io: T, -) -> crate::Result<(SendRequest, Connection)> +) -> crate::Result<(SendRequest, Connection)> where - E: Executor + Send + Sync + 'static, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin + 'static, B: Body + 'static, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + E: ExecutorClient + Unpin + Clone, { Builder::new(exec).handshake(io).await } @@ -144,7 +146,7 @@ where Err(_canceled) => panic!("dispatch dropped without returning error"), }, Err(_req) => { - tracing::debug!("connection was not ready"); + debug!("connection was not ready"); Err(crate::Error::new_canceled().with("connection was not ready")) } @@ -172,7 +174,7 @@ where })) } Err(req) => { - tracing::debug!("connection was not ready"); + debug!("connection was not ready"); let err = crate::Error::new_canceled().with("connection was not ready"); Either::Right(future::err((err, Some(req)))) } @@ -189,12 +191,13 @@ impl fmt::Debug for SendRequest { // ===== impl Connection -impl Connection +impl Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Unpin + Send + 'static, + T: Read + Write + Unpin + 'static, + B: Body + Unpin + 'static, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + E: ExecutorClient + Unpin, { /// Returns whether the [extended CONNECT protocol][1] is enabled or not. /// @@ -210,22 +213,26 @@ where } } -impl fmt::Debug for Connection +impl fmt::Debug for Connection where - T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + T: Read + Write + fmt::Debug + 'static + Unpin, B: Body + 'static, + E: ExecutorClient + Unpin, + B::Error: Into>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Connection").finish() } } -impl Future for Connection +impl Future for Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - B: Body + Send + 'static, + T: Read + Write + Unpin + 'static, + B: Body + 'static + Unpin, B::Data: Send, - B::Error: Into>, + E: Unpin, + B::Error: Into>, + E: ExecutorClient + 'static + Send + Sync + Unpin, { type Output = crate::Result<()>; @@ -240,22 +247,22 @@ where // ===== impl Builder -impl Builder { +impl Builder +where + Ex: Clone, +{ /// Creates a new connection builder. 
#[inline] - pub fn new(exec: E) -> Builder - where - E: Executor + Send + Sync + 'static, - { + pub fn new(exec: Ex) -> Builder { Builder { - exec: Exec::new(exec), + exec, timer: Time::Empty, h2_builder: Default::default(), } } /// Provide a timer to execute background HTTP2 tasks. - pub fn timer(&mut self, timer: M) -> &mut Builder + pub fn timer(&mut self, timer: M) -> &mut Builder where M: Timer + Send + Sync + 'static, { @@ -270,7 +277,7 @@ impl Builder { /// /// If not set, hyper will use a default. /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; @@ -284,10 +291,7 @@ impl Builder { /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. - pub fn initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; self.h2_builder.initial_conn_window_size = sz; @@ -329,10 +333,7 @@ impl Builder { /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. - pub fn keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { self.h2_builder.keep_alive_interval = interval.into(); self } @@ -395,17 +396,18 @@ impl Builder { pub fn handshake( &self, io: T, - ) -> impl Future, Connection)>> + ) -> impl Future, Connection)>> where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin + 'static, B: Body + 'static, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + Ex: ExecutorClient + Unpin, { let opts = self.clone(); async move { - tracing::trace!("client handshake HTTP/1"); + trace!("client handshake HTTP/1"); let (tx, rx) = dispatch::channel(); let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec, opts.timer) diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs index f60bce4080..eda436a8b8 100644 --- a/src/client/conn/mod.rs +++ b/src/client/conn/mod.rs @@ -9,7 +9,9 @@ //! higher-level [Client](super) API. //! //! ## Example -//! A simple example that uses the `SendRequest` struct to talk HTTP over a Tokio TCP stream +//! +//! A simple example that uses the `SendRequest` struct to talk HTTP over some TCP stream. +//! //! ```no_run //! # #[cfg(all(feature = "client", feature = "http1"))] //! # mod rt { @@ -17,38 +19,38 @@ //! use http::{Request, StatusCode}; //! use http_body_util::Empty; //! use hyper::client::conn; -//! use tokio::net::TcpStream; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let target_stream = TcpStream::connect("example.com:80").await?; -//! -//! let (mut request_sender, connection) = conn::http1::handshake(target_stream).await?; -//! -//! // spawn a task to poll the connection and drive the HTTP state -//! tokio::spawn(async move { -//! if let Err(e) = connection.await { -//! eprintln!("Error in connection: {}", e); -//! } -//! }); -//! -//! let request = Request::builder() -//! // We need to manually add the host header because SendRequest does not -//! .header("Host", "example.com") -//! .method("GET") -//! .body(Empty::::new())?; -//! let response = request_sender.send_request(request).await?; -//! 
assert!(response.status() == StatusCode::OK); -//! -//! let request = Request::builder() -//! .header("Host", "example.com") -//! .method("GET") -//! .body(Empty::::new())?; -//! let response = request_sender.send_request(request).await?; -//! assert!(response.status() == StatusCode::OK); -//! Ok(()) -//! } -//! +//! # use hyper::rt::{Read, Write}; +//! # async fn run(tcp: I) -> Result<(), Box> +//! # where +//! # I: Read + Write + Unpin + Send + 'static, +//! # { +//! let (mut request_sender, connection) = conn::http1::handshake(tcp).await?; +//! +//! // spawn a task to poll the connection and drive the HTTP state +//! tokio::spawn(async move { +//! if let Err(e) = connection.await { +//! eprintln!("Error in connection: {}", e); +//! } +//! }); +//! +//! let request = Request::builder() +//! // We need to manually add the host header because SendRequest does not +//! .header("Host", "example.com") +//! .method("GET") +//! .body(Empty::::new())?; +//! +//! let response = request_sender.send_request(request).await?; +//! assert!(response.status() == StatusCode::OK); +//! +//! let request = Request::builder() +//! .header("Host", "example.com") +//! .method("GET") +//! .body(Empty::::new())?; +//! +//! let response = request_sender.send_request(request).await?; +//! assert!(response.status() == StatusCode::OK); +//! # Ok(()) +//! # } //! # } //! ``` @@ -56,4 +58,3 @@ pub mod http1; #[cfg(feature = "http2")] pub mod http2; - diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index 3aef84012f..ef9bce181c 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -1,11 +1,17 @@ #[cfg(feature = "http2")] use std::future::Future; +use http::{Request, Response}; +use http_body::Body; +use pin_project_lite::pin_project; use tokio::sync::{mpsc, oneshot}; +use crate::{ + body::Incoming, + common::{task, Poll}, +}; #[cfg(feature = "http2")] -use crate::common::Pin; -use crate::common::{task, Poll}; +use crate::{common::Pin, proto::h2::client::ResponseFutMap}; #[cfg(test)] pub(crate) type RetryPromise = oneshot::Receiver)>>; @@ -266,37 +272,57 @@ impl Callback { } } } +} - #[cfg(feature = "http2")] - pub(crate) async fn send_when( - self, - mut when: impl Future)>> + Unpin, - ) { - use futures_util::future; - use tracing::trace; - - let mut cb = Some(self); - - // "select" on this callback being canceled, and the future completing - future::poll_fn(move |cx| { - match Pin::new(&mut when).poll(cx) { - Poll::Ready(Ok(res)) => { - cb.take().expect("polled after complete").send(Ok(res)); - Poll::Ready(()) - } - Poll::Pending => { - // check if the callback is canceled - ready!(cb.as_mut().unwrap().poll_canceled(cx)); - trace!("send_when canceled"); - Poll::Ready(()) - } - Poll::Ready(Err(err)) => { - cb.take().expect("polled after complete").send(Err(err)); - Poll::Ready(()) - } +#[cfg(feature = "http2")] +pin_project! 
{ + pub struct SendWhen + where + B: Body, + B: 'static, + { + #[pin] + pub(crate) when: ResponseFutMap, + #[pin] + pub(crate) call_back: Option, Response>>, + } +} + +#[cfg(feature = "http2")] +impl Future for SendWhen +where + B: Body + 'static, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + + let mut call_back = this.call_back.take().expect("polled after complete"); + + match Pin::new(&mut this.when).poll(cx) { + Poll::Ready(Ok(res)) => { + call_back.send(Ok(res)); + Poll::Ready(()) } - }) - .await + Poll::Pending => { + // check if the callback is canceled + match call_back.poll_canceled(cx) { + Poll::Ready(v) => v, + Poll::Pending => { + // Move call_back back to struct before return + this.call_back.set(Some(call_back)); + return std::task::Poll::Pending; + } + }; + trace!("send_when canceled"); + Poll::Ready(()) + } + Poll::Ready(Err(err)) => { + call_back.send(Err(err)); + Poll::Ready(()) + } + } } } diff --git a/src/common/date.rs b/src/common/date.rs index a436fc07c0..6eae674695 100644 --- a/src/common/date.rs +++ b/src/common/date.rs @@ -29,13 +29,15 @@ pub(crate) fn update_and_header_value() -> HeaderValue { CACHED.with(|cache| { let mut cache = cache.borrow_mut(); cache.check(); - HeaderValue::from_bytes(cache.buffer()).expect("Date format should be valid HeaderValue") + cache.header_value.clone() }) } struct CachedDate { bytes: [u8; DATE_VALUE_LENGTH], pos: usize, + #[cfg(feature = "http2")] + header_value: HeaderValue, next_update: SystemTime, } @@ -46,6 +48,8 @@ impl CachedDate { let mut cache = CachedDate { bytes: [0; DATE_VALUE_LENGTH], pos: 0, + #[cfg(feature = "http2")] + header_value: HeaderValue::from_static(""), next_update: SystemTime::now(), }; cache.update(cache.next_update); @@ -72,7 +76,17 @@ impl CachedDate { self.pos = 0; let _ = write!(self, "{}", HttpDate::from(now)); debug_assert!(self.pos == DATE_VALUE_LENGTH); + self.render_http2(); } + + #[cfg(feature = "http2")] + fn render_http2(&mut self) { + self.header_value = HeaderValue::from_bytes(self.buffer()) + .expect("Date format should be valid HeaderValue"); + } + + #[cfg(not(feature = "http2"))] + fn render_http2(&mut self) {} } impl fmt::Write for CachedDate { diff --git a/src/common/exec.rs b/src/common/exec.rs index ef006c9d84..69d19e9bb7 100644 --- a/src/common/exec.rs +++ b/src/common/exec.rs @@ -1,50 +1,14 @@ -use std::fmt; use std::future::Future; use std::pin::Pin; -use std::sync::Arc; - -use crate::rt::Executor; - -pub(crate) type BoxSendFuture = Pin + Send>>; - -// Executor must be provided by the user -#[derive(Clone)] -pub(crate) struct Exec(Arc + Send + Sync>); - -// ===== impl Exec ===== - -impl Exec { - pub(crate) fn new(exec: E) -> Self - where - E: Executor + Send + Sync + 'static, - { - Self(Arc::new(exec)) - } - - pub(crate) fn execute(&self, fut: F) - where - F: Future + Send + 'static, - { - self.0.execute(Box::pin(fut)) - } -} - -impl fmt::Debug for Exec { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Exec").finish() - } -} // If http2 is not enable, we just have a stub here, so that the trait bounds // that *would* have been needed are still checked. Why? // // Because enabling `http2` shouldn't suddenly add new trait bounds that cause // a compilation error. 
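Aside (not part of the patch): the new `SendWhen` future keeps its response future behind `pin_project_lite` projection. For readers unfamiliar with that pattern, a minimal, self-contained example of projecting a pinned field inside a hand-written `Future` (the `Doubled` type is illustrative, not hyper's):

```rust
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

pin_project! {
    // A future that polls `inner` and doubles its output.
    struct Doubled<F> {
        #[pin]
        inner: F,
    }
}

impl<F> Future for Doubled<F>
where
    F: Future<Output = u32>,
{
    type Output = u32;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // project() hands back a Pin<&mut F> for the #[pin] field.
        let this = self.project();
        match this.inner.poll(cx) {
            Poll::Ready(v) => Poll::Ready(v * 2),
            Poll::Pending => Poll::Pending,
        }
    }
}

#[tokio::main]
async fn main() {
    let doubled = Doubled { inner: async { 21u32 } };
    assert_eq!(doubled.await, 42);
}
```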
-#[cfg(not(feature = "http2"))] -#[allow(missing_debug_implementations)] + pub struct H2Stream(std::marker::PhantomData<(F, B)>); -#[cfg(not(feature = "http2"))] impl Future for H2Stream where F: Future, E>>, diff --git a/src/common/io/compat.rs b/src/common/io/compat.rs new file mode 100644 index 0000000000..3320e4ff44 --- /dev/null +++ b/src/common/io/compat.rs @@ -0,0 +1,150 @@ +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// This adapts from `hyper` IO traits to the ones in Tokio. +/// +/// This is currently used by `h2`, and by hyper internal unit tests. +#[derive(Debug)] +pub(crate) struct Compat(pub(crate) T); + +pub(crate) fn compat(io: T) -> Compat { + Compat(io) +} + +impl Compat { + fn p(self: Pin<&mut Self>) -> Pin<&mut T> { + // SAFETY: The simplest of projections. This is just + // a wrapper, we don't do anything that would undo the projection. + unsafe { self.map_unchecked_mut(|me| &mut me.0) } + } +} + +impl tokio::io::AsyncRead for Compat +where + T: crate::rt::Read, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + tbuf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { + let init = tbuf.initialized().len(); + let filled = tbuf.filled().len(); + let (new_init, new_filled) = unsafe { + let mut buf = crate::rt::ReadBuf::uninit(tbuf.inner_mut()); + buf.set_init(init); + buf.set_filled(filled); + + match crate::rt::Read::poll_read(self.p(), cx, buf.unfilled()) { + Poll::Ready(Ok(())) => (buf.init_len(), buf.len()), + other => return other, + } + }; + + let n_init = new_init - init; + unsafe { + tbuf.assume_init(n_init); + tbuf.set_filled(new_filled); + } + + Poll::Ready(Ok(())) + } +} + +impl tokio::io::AsyncWrite for Compat +where + T: crate::rt::Write, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + crate::rt::Write::poll_write(self.p(), cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + crate::rt::Write::poll_flush(self.p(), cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + crate::rt::Write::poll_shutdown(self.p(), cx) + } + + fn is_write_vectored(&self) -> bool { + crate::rt::Write::is_write_vectored(&self.0) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + crate::rt::Write::poll_write_vectored(self.p(), cx, bufs) + } +} + +#[cfg(test)] +impl crate::rt::Read for Compat +where + T: tokio::io::AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut buf: crate::rt::ReadBufCursor<'_>, + ) -> Poll> { + let n = unsafe { + let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut()); + match tokio::io::AsyncRead::poll_read(self.p(), cx, &mut tbuf) { + Poll::Ready(Ok(())) => tbuf.filled().len(), + other => return other, + } + }; + + unsafe { + buf.advance(n); + } + Poll::Ready(Ok(())) + } +} + +#[cfg(test)] +impl crate::rt::Write for Compat +where + T: tokio::io::AsyncWrite, +{ + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write(self.p(), cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + tokio::io::AsyncWrite::poll_flush(self.p(), cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + tokio::io::AsyncWrite::poll_shutdown(self.p(), cx) + } + + fn is_write_vectored(&self) -> bool { + tokio::io::AsyncWrite::is_write_vectored(&self.0) + } + + fn poll_write_vectored( + self: 
Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + tokio::io::AsyncWrite::poll_write_vectored(self.p(), cx, bufs) + } +} diff --git a/src/common/io/mod.rs b/src/common/io/mod.rs index 2e6d506153..6ad07bb771 100644 --- a/src/common/io/mod.rs +++ b/src/common/io/mod.rs @@ -1,3 +1,7 @@ +#[cfg(any(feature = "http2", test))] +mod compat; mod rewind; +#[cfg(any(feature = "http2", test))] +pub(crate) use self::compat::{compat, Compat}; pub(crate) use self::rewind::Rewind; diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs index 5642d897d1..f6b6bab3c7 100644 --- a/src/common/io/rewind.rs +++ b/src/common/io/rewind.rs @@ -2,9 +2,9 @@ use std::marker::Unpin; use std::{cmp, io}; use bytes::{Buf, Bytes}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use crate::common::{task, Pin, Poll}; +use crate::rt::{Read, ReadBufCursor, Write}; /// Combine a buffer with an IO, rewinding reads to use the buffer. #[derive(Debug)] @@ -44,14 +44,14 @@ impl Rewind { // } } -impl AsyncRead for Rewind +impl Read for Rewind where - T: AsyncRead + Unpin, + T: Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, + mut buf: ReadBufCursor<'_>, ) -> Poll> { if let Some(mut prefix) = self.pre.take() { // If there are no remaining bytes, let the bytes get dropped. @@ -72,9 +72,9 @@ where } } -impl AsyncWrite for Rewind +impl Write for Rewind where - T: AsyncWrite + Unpin, + T: Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, @@ -109,6 +109,7 @@ where mod tests { // FIXME: re-implement tests with `async/await`, this import should // trigger a warning to remind us + use super::super::compat; use super::Rewind; use bytes::Bytes; use tokio::io::AsyncReadExt; @@ -120,14 +121,14 @@ mod tests { let mock = tokio_test::io::Builder::new().read(&underlying).build(); - let mut stream = Rewind::new(mock); + let mut stream = compat(Rewind::new(compat(mock))); // Read off some bytes, ensure we filled o1 let mut buf = [0; 2]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. - stream.rewind(Bytes::copy_from_slice(&buf[..])); + stream.0.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); @@ -143,13 +144,13 @@ mod tests { let mock = tokio_test::io::Builder::new().read(&underlying).build(); - let mut stream = Rewind::new(mock); + let mut stream = compat(Rewind::new(compat(mock))); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. - stream.rewind(Bytes::copy_from_slice(&buf[..])); + stream.0.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); diff --git a/src/common/mod.rs b/src/common/mod.rs index 67b2bbde59..632b363e2b 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -10,17 +10,14 @@ macro_rules! 
ready { pub(crate) mod buf; #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) mod date; -#[cfg(any(feature = "http1", feature = "http2", feature = "server"))] +#[cfg(not(feature = "http2"))] pub(crate) mod exec; pub(crate) mod io; -mod never; pub(crate) mod task; #[cfg(any(feature = "http1", feature = "http2", feature = "server"))] pub(crate) mod time; pub(crate) mod watch; -#[cfg(any(feature = "http1", feature = "http2"))] -pub(crate) use self::never::Never; pub(crate) use self::task::Poll; // group up types normally needed for `Future` diff --git a/src/common/never.rs b/src/common/never.rs deleted file mode 100644 index f143caf60f..0000000000 --- a/src/common/never.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! An uninhabitable type meaning it can never happen. -//! -//! To be replaced with `!` once it is stable. - -use std::error::Error; -use std::fmt; - -#[derive(Debug)] -pub(crate) enum Never {} - -impl fmt::Display for Never { - fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self {} - } -} - -impl Error for Never { - fn description(&self) -> &str { - match *self {} - } -} diff --git a/src/common/task.rs b/src/common/task.rs index ec70c957d6..10a7802bdd 100644 --- a/src/common/task.rs +++ b/src/common/task.rs @@ -1,12 +1,12 @@ #[cfg(feature = "http1")] -use super::Never; +use std::convert::Infallible; pub(crate) use std::task::{Context, Poll}; /// A function to help "yield" a future, such that it is re-scheduled immediately. /// /// Useful for spin counts, so a future doesn't hog too much time. #[cfg(feature = "http1")] -pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { +pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll { cx.waker().wake_by_ref(); Poll::Pending } diff --git a/src/error.rs b/src/error.rs index b07a22c409..a51916359d 100644 --- a/src/error.rs +++ b/src/error.rs @@ -8,6 +8,26 @@ pub type Result = std::result::Result; type Cause = Box; /// Represents errors that can occur handling HTTP streams. +/// +/// # Formatting +/// +/// The `Display` implementation of this type will only print the details of +/// this level of error, even though it may have been caused by another error +/// and contain that error in its source. To print all the relevant +/// information, including the source chain, using something like +/// `std::error::Report`, or equivalent 3rd party types. +/// +/// The contents of the formatted error message of this specific `Error` type +/// is unspecified. **You must not depend on it.** The wording and details may +/// change in any version, with the goal of improving error messages. +/// +/// # Source +/// +/// A `hyper::Error` may be caused by another error. To aid in debugging, +/// those are exposed in `Error::source()` as erased types. While it is +/// possible to check the exact type of the sources, they **can not be depended +/// on**. They may come from private internal dependencies, and are subject to +/// change at any moment. pub struct Error { inner: Box, } @@ -34,9 +54,6 @@ pub(super) enum Kind { /// An `io::Error` that occurred while trying to read or write to a network stream. #[cfg(any(feature = "http1", feature = "http2"))] Io, - /// Error creating a TcpListener. - #[cfg(all(feature = "tcp", feature = "server"))] - Listen, /// User took too long to send headers #[cfg(all(feature = "http1", feature = "server"))] HeaderTimeout, @@ -173,11 +190,6 @@ impl Error { self.find_source::().is_some() } - /// Consumes the error, returning its cause. 
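Aside (illustrative sketch): the new `Error` docs stress that `Display` prints only this level of the error and that callers should walk `Error::source()` themselves (or use a reporter type) to see the full chain. A small helper doing exactly that with plain `std`, plus a two-level error to demonstrate it (`Outer` is an illustrative type, not hyper's):

```rust
use std::error::Error;
use std::fmt;

// Walk an error and its sources, since Display no longer appends the cause.
fn print_error_chain(err: &(dyn Error + 'static)) {
    eprintln!("error: {}", err);
    let mut source = err.source();
    while let Some(cause) = source {
        eprintln!("  caused by: {}", cause);
        source = cause.source();
    }
}

#[derive(Debug)]
struct Outer(std::io::Error);

impl fmt::Display for Outer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("request failed")
    }
}

impl Error for Outer {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.0)
    }
}

fn main() {
    let err = Outer(std::io::Error::new(
        std::io::ErrorKind::Other,
        "connection reset",
    ));
    print_error_chain(&err);
}
```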
- pub fn into_cause(self) -> Option> { - self.inner.cause - } - pub(super) fn new(kind: Kind) -> Error { Error { inner: Box::new(ErrorImpl { kind, cause: None }), @@ -245,11 +257,6 @@ impl Error { Error::new(Kind::Io).with(cause) } - #[cfg(all(feature = "server", feature = "tcp"))] - pub(super) fn new_listen>(cause: E) -> Error { - Error::new(Kind::Listen).with(cause) - } - pub(super) fn new_closed() -> Error { Error::new(Kind::ChannelClosed) } @@ -332,11 +339,6 @@ impl Error { } } - /// The error's standalone message, without the message from the source. - pub fn message(&self) -> impl fmt::Display + '_ { - self.description() - } - fn description(&self) -> &str { match self.inner.kind { Kind::Parse(Parse::Method) => "invalid HTTP method parsed", @@ -368,8 +370,6 @@ impl Error { Kind::UnexpectedMessage => "received unexpected message from connection", Kind::ChannelClosed => "channel closed", Kind::Canceled => "operation was canceled", - #[cfg(all(feature = "server", feature = "tcp"))] - Kind::Listen => "error creating server listener", #[cfg(all(feature = "http1", feature = "server"))] Kind::HeaderTimeout => "read header from client timeout", #[cfg(any(feature = "http1", feature = "http2"))] @@ -420,11 +420,7 @@ impl fmt::Debug for Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ref cause) = self.inner.cause { - write!(f, "{}: {}", self.description(), cause) - } else { - f.write_str(self.description()) - } + f.write_str(self.description()) } } @@ -460,6 +456,7 @@ impl Parse { } } +#[cfg(feature = "http1")] impl From for Parse { fn from(err: httparse::Error) -> Parse { match err { diff --git a/src/ext/h1_reason_phrase.rs b/src/ext/h1_reason_phrase.rs index 021b632b6d..62173baa72 100644 --- a/src/ext/h1_reason_phrase.rs +++ b/src/ext/h1_reason_phrase.rs @@ -42,7 +42,7 @@ impl ReasonPhrase { } /// Converts a static byte slice to a reason phrase. - pub fn from_static(reason: &'static [u8]) -> Self { + pub const fn from_static(reason: &'static [u8]) -> Self { // TODO: this can be made const once MSRV is >= 1.57.0 if find_invalid_byte(reason).is_some() { panic!("invalid byte in static reason phrase"); @@ -50,11 +50,12 @@ impl ReasonPhrase { Self(Bytes::from_static(reason)) } + // Not public on purpose. /// Converts a `Bytes` directly into a `ReasonPhrase` without validating. /// /// Use with care; invalid bytes in a reason phrase can cause serious security problems if /// emitted in a response. - pub unsafe fn from_bytes_unchecked(reason: Bytes) -> Self { + pub(crate) fn from_bytes_unchecked(reason: Bytes) -> Self { Self(reason) } } diff --git a/src/ext.rs b/src/ext/mod.rs similarity index 100% rename from src/ext.rs rename to src/ext/mod.rs diff --git a/src/ffi/body.rs b/src/ffi/body.rs index d72223835c..89f1ad3dd3 100644 --- a/src/ffi/body.rs +++ b/src/ffi/body.rs @@ -33,13 +33,19 @@ ffi_fn! { /// Create a new "empty" body. /// /// If not configured, this body acts as an empty payload. + /// + /// To avoid a memory leak, the body must eventually be consumed by + /// `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`. fn hyper_body_new() -> *mut hyper_body { Box::into_raw(Box::new(hyper_body(IncomingBody::ffi()))) } ?= ptr::null_mut() } ffi_fn! { - /// Free a `hyper_body *`. + /// Free a body. + /// + /// This should only be used if the request isn't consumed by + /// `hyper_body_foreach` or `hyper_request_set_body`. 
fn hyper_body_free(body: *mut hyper_body) { drop(non_null!(Box::from_raw(body) ?= ())); } @@ -54,6 +60,10 @@ ffi_fn! { /// - `HYPER_TASK_ERROR`: An error retrieving the data. /// - `HYPER_TASK_EMPTY`: The body has finished streaming data. /// + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. + /// /// This does not consume the `hyper_body *`, so it may be used to again. /// However, it MUST NOT be used or freed until the related task completes. fn hyper_body_data(body: *mut hyper_body) -> *mut hyper_task { @@ -82,6 +92,10 @@ ffi_fn! { /// Return a task that will poll the body and execute the callback with each /// body chunk that is received. /// + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. + /// /// The `hyper_buf` pointer is only a borrowed reference, it cannot live outside /// the execution of the callback. You must make a copy to retain it. /// @@ -195,7 +209,10 @@ ffi_fn! { /// Create a new `hyper_buf *` by copying the provided bytes. /// /// This makes an owned copy of the bytes, so the `buf` argument can be - /// freed or changed afterwards. + /// freed (with `hyper_buf_free`) or changed afterwards. + /// + /// To avoid a memory leak, the copy must eventually be consumed by + /// `hyper_buf_free`. /// /// This returns `NULL` if allocating a new buffer fails. fn hyper_buf_copy(buf: *const u8, len: size_t) -> *mut hyper_buf { @@ -228,6 +245,8 @@ ffi_fn! { ffi_fn! { /// Free this buffer. + /// + /// This should be used for any buffer once it is no longer needed. fn hyper_buf_free(buf: *mut hyper_buf) { drop(unsafe { Box::from_raw(buf) }); } diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 0abb3ed0da..d1c078cadc 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -45,9 +45,14 @@ ffi_fn! { /// and options. /// /// Both the `io` and the `options` are consumed in this function call. + /// They should not be used or freed afterwards. /// - /// The returned `hyper_task *` must be polled with an executor until the - /// handshake completes, at which point the value can be taken. + /// The returned task must be polled with an executor until the handshake + /// completes, at which point the value can be taken. + /// + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. fn hyper_clientconn_handshake(io: *mut hyper_io, options: *mut hyper_clientconn_options) -> *mut hyper_task { let options = non_null! { Box::from_raw(options) ?= ptr::null_mut() }; let io = non_null! { Box::from_raw(io) ?= ptr::null_mut() }; @@ -87,8 +92,15 @@ ffi_fn! { ffi_fn! { /// Send a request on the client connection. /// + /// This consumes the request. You should not use or free the request + /// afterwards. + /// /// Returns a task that needs to be polled until it is ready. When ready, the /// task yields a `hyper_response *`. + /// + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. 
fn hyper_clientconn_send(conn: *mut hyper_clientconn, req: *mut hyper_request) -> *mut hyper_task { let req = non_null! { Box::from_raw(req) ?= ptr::null_mut() }; @@ -110,6 +122,8 @@ ffi_fn! { ffi_fn! { /// Free a `hyper_clientconn *`. + /// + /// This should be used for any connection once it is no longer needed. fn hyper_clientconn_free(conn: *mut hyper_clientconn) { drop(non_null! { Box::from_raw(conn) ?= () }); } @@ -125,6 +139,9 @@ unsafe impl AsTaskType for hyper_clientconn { ffi_fn! { /// Creates a new set of HTTP clientconn options to be used in a handshake. + /// + /// To avoid a memory leak, the options must eventually be consumed by + /// `hyper_clientconn_options_free` or `hyper_clientconn_handshake`. fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { Box::into_raw(Box::new(hyper_clientconn_options { http1_allow_obsolete_multiline_headers_in_responses: false, @@ -157,7 +174,10 @@ ffi_fn! { } ffi_fn! { - /// Free a `hyper_clientconn_options *`. + /// Free a set of HTTP clientconn options. + /// + /// This should only be used if the options aren't consumed by + /// `hyper_clientconn_handshake`. fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { drop(non_null! { Box::from_raw(opts) ?= () }); } diff --git a/src/ffi/error.rs b/src/ffi/error.rs index 541e49ac21..c4e93c5378 100644 --- a/src/ffi/error.rs +++ b/src/ffi/error.rs @@ -59,6 +59,8 @@ impl hyper_error { ffi_fn! { /// Frees a `hyper_error`. + /// + /// This should be used for any error once it is no longer needed. fn hyper_error_free(err: *mut hyper_error) { drop(non_null!(Box::from_raw(err) ?= ())); } diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index 2aba01681c..22f2cd700f 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -38,13 +38,19 @@ type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut h ffi_fn! { /// Construct a new HTTP request. + /// + /// To avoid a memory leak, the request must eventually be consumed by + /// `hyper_request_free` or `hyper_clientconn_send`. fn hyper_request_new() -> *mut hyper_request { Box::into_raw(Box::new(hyper_request::from(Request::new(IncomingBody::empty())))) } ?= std::ptr::null_mut() } ffi_fn! { - /// Free an HTTP request if not going to send it on a client. + /// Free an HTTP request. + /// + /// This should only be used if the request isn't consumed by + /// `hyper_clientconn_send`. fn hyper_request_free(req: *mut hyper_request) { drop(non_null!(Box::from_raw(req) ?= ())); } @@ -177,7 +183,7 @@ ffi_fn! { /// Get the URI of the request split into scheme, authority and path/query strings. /// /// Each of `scheme`, `authority` and `path_and_query` may be pointers to buffers that this - /// function will populate with the appopriate values from the request. If one of these + /// function will populate with the appropriate values from the request. If one of these /// pointers is non-NULL then the associated `_len` field must be a pointer to a `size_t` /// which, on call, is populated with the maximum length of the buffer and, on successful /// response, will be set to the actual length of the value written into the buffer. @@ -391,7 +397,9 @@ ffi_fn! { } ffi_fn! { - /// Free an HTTP response after using it. + /// Free an HTTP response. + /// + /// This should be used for any response once it is no longer needed. fn hyper_response_free(resp: *mut hyper_response) { drop(non_null!(Box::from_raw(resp) ?= ())); } @@ -513,6 +521,9 @@ ffi_fn! { /// Take ownership of the body of this response. 
/// /// It is safe to free the response even after taking ownership of its body. + /// + /// To avoid a memory leak, the body must eventually be consumed by + /// `hyper_body_free`, `hyper_body_foreach`, or `hyper_request_set_body`. fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body { let body = std::mem::replace(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut(), IncomingBody::empty()); Box::into_raw(Box::new(hyper_body(body))) diff --git a/src/ffi/io.rs b/src/ffi/io.rs index 2347c025c2..0d297f8c15 100644 --- a/src/ffi/io.rs +++ b/src/ffi/io.rs @@ -2,11 +2,11 @@ use std::ffi::c_void; use std::pin::Pin; use std::task::{Context, Poll}; +use crate::rt::{Read, Write}; use libc::size_t; -use tokio::io::{AsyncRead, AsyncWrite}; use super::task::hyper_context; -use super::userdata::Userdata; +use super::userdata::{Userdata, hyper_userdata_drop}; /// Sentinel value to return from a read or write callback that the operation /// is pending. @@ -32,6 +32,9 @@ ffi_fn! { /// /// The read and write functions of this transport should be set with /// `hyper_io_set_read` and `hyper_io_set_write`. + /// + /// To avoid a memory leak, the IO handle must eventually be consumed by + /// `hyper_io_free` or `hyper_clientconn_handshake`. fn hyper_io_new() -> *mut hyper_io { Box::into_raw(Box::new(hyper_io { read: read_noop, @@ -42,10 +45,10 @@ ffi_fn! { } ffi_fn! { - /// Free an unused `hyper_io *`. + /// Free an IO handle. /// - /// This is typically only useful if you aren't going to pass ownership - /// of the IO handle to hyper, such as with `hyper_clientconn_handshake()`. + /// This should only be used if the request isn't consumed by + /// `hyper_clientconn_handshake`. fn hyper_io_free(io: *mut hyper_io) { drop(non_null!(Box::from_raw(io) ?= ())); } @@ -57,12 +60,12 @@ ffi_fn! { /// This value is passed as an argument to the read and write callbacks. /// /// If passed, the `drop_func` will be called on the `userdata` when the - /// `hyper_io` is destroyed (either explicitely by `hyper_io_free` or - /// implicitely by an associated hyper task completing). + /// `hyper_io` is destroyed (either explicitly by `hyper_io_free` or + /// implicitly by an associated hyper task completing). fn hyper_io_set_userdata( io: *mut hyper_io, data: *mut c_void, - drop_func: super::userdata::hyper_userdata_drop, + drop_func: hyper_userdata_drop, ) { let io = non_null!(&mut *io? = ()); io.userdata = Userdata::new(data, drop_func); @@ -141,13 +144,13 @@ extern "C" fn write_noop( 0 } -impl AsyncRead for hyper_io { +impl Read for hyper_io { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, + mut buf: crate::rt::ReadBufCursor<'_>, ) -> Poll> { - let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8; + let buf_ptr = unsafe { buf.as_mut() }.as_mut_ptr() as *mut u8; let buf_len = buf.remaining(); match (self.read)(self.userdata.as_ptr(), hyper_context::wrap(cx), buf_ptr, buf_len) { @@ -159,15 +162,14 @@ impl AsyncRead for hyper_io { ok => { // We have to trust that the user's read callback actually // filled in that many bytes... :( - unsafe { buf.assume_init(ok) }; - buf.advance(ok); + unsafe { buf.advance(ok) }; Poll::Ready(Ok(())) } } } } -impl AsyncWrite for hyper_io { +impl Write for hyper_io { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs index 37183cac3c..6213b85e6f 100644 --- a/src/ffi/mod.rs +++ b/src/ffi/mod.rs @@ -23,7 +23,7 @@ //! 
`cargo`, staring with `1.64.0`, it can be compiled with the following command: //! //! ```notrust -//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo rustc --features client,http1,http2,ffi --crate-type cdylib +//! RUSTFLAGS="--cfg hyper_unstable_ffi" cargo rustc --crate-type cdylib --features client,http1,http2,ffi //! ``` #[cfg(not(hyper_unstable_ffi))] diff --git a/src/ffi/server.rs b/src/ffi/server.rs index 8a9afcb8c0..22e656e34d 100644 --- a/src/ffi/server.rs +++ b/src/ffi/server.rs @@ -161,7 +161,7 @@ ffi_fn! { /// Setting this to true will force hyper to use queued strategy which may eliminate /// unnecessary cloning on some TLS backends. /// - /// Default is to automatically guess which mode to use, this function overrides the huristic. + /// Default is to automatically guess which mode to use, this function overrides the heuristic. fn hyper_http1_serverconn_options_writev( opts: *mut hyper_http1_serverconn_options, enabled: bool, @@ -318,7 +318,7 @@ ffi_fn! { ffi_fn! { /// Sets an interval for HTTP/2 Ping frames should be sent to keep a connection alive. /// - /// Default is to not use keepalive pings. Passing `0` will use this default. + /// Default is to not use keep-alive pings. Passing `0` will use this default. fn hyper_http2_serverconn_options_keep_alive_interval( opts: *mut hyper_http2_serverconn_options, interval_seconds: u64, @@ -518,7 +518,7 @@ impl crate::service::Service> for hyper_service { Box> + Send>, >; - fn call(&mut self, req: crate::Request) -> Self::Future { + fn call(&self, req: crate::Request) -> Self::Future { let req_ptr = Box::into_raw(Box::new(hyper_request::from(req))); let (tx, rx) = futures_channel::oneshot::channel(); @@ -554,7 +554,7 @@ unsafe impl crate::ffi::task::AsTaskType for ServerConn { impl std::future::Future for AutoConnection where - IO: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + 'static, + IO: crate::rt::Read + crate::rt::Write + Unpin + 'static, Serv: crate::service::HttpService, Exec: crate::rt::Executor> + Unpin diff --git a/src/ffi/task.rs b/src/ffi/task.rs index d226b01bb7..3afdf419c8 100644 --- a/src/ffi/task.rs +++ b/src/ffi/task.rs @@ -14,8 +14,6 @@ use libc::c_int; use super::error::hyper_code; use super::userdata::{Userdata, hyper_userdata_drop}; -use crate::proto::h2::server::H2Stream; - type BoxFuture = Pin + Send>>; type BoxAny = Box; @@ -204,26 +202,23 @@ impl WeakExec { } } -impl crate::rt::Executor> for WeakExec { - fn execute(&self, fut: BoxFuture<()>) { +impl crate::rt::Executor for WeakExec +where + F: Future + Send + 'static, + F::Output: Send + Sync + AsTaskType, +{ + fn execute(&self, fut: F) { if let Some(exec) = self.0.upgrade() { exec.spawn(hyper_task::boxed(fut)); } } } -impl crate::rt::Executor> for WeakExec -where - H2Stream: Future + Send + 'static, - B: crate::body::Body, -{ - fn execute(&self, fut: H2Stream) { - >::execute(&self, Box::pin(fut) as Pin>) - } -} - ffi_fn! { /// Creates a new task executor. + /// + /// To avoid a memory leak, the executor must eventually be consumed by + /// `hyper_executor_free`. fn hyper_executor_new() -> *const hyper_executor { Arc::into_raw(hyper_executor::new()) } ?= ptr::null() @@ -231,6 +226,8 @@ ffi_fn! { ffi_fn! { /// Frees an executor and any incomplete tasks still part of it. + /// + /// This should be used for any executor once it is no longer needed. fn hyper_executor_free(exec: *const hyper_executor) { drop(non_null!(Arc::from_raw(exec) ?= ())); } @@ -239,7 +236,7 @@ ffi_fn! { ffi_fn! { /// Push a task onto the executor. 
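Aside (not patch content): both the FFI `hyper_service` above and the earlier service_struct_impl example now implement `Service::call` with `&self` rather than `&mut self`, so per-service state needs interior mutability. A minimal sketch of that pattern with plain `std` (the `Counter` type and `record` method are illustrative):

```rust
use std::sync::{Arc, Mutex};
use std::thread;

// A handler that mutates shared state through &self, the same shape the
// examples now use for their request counters.
struct Counter {
    hits: Mutex<i32>,
}

impl Counter {
    fn record(&self) -> i32 {
        let mut hits = self.hits.lock().expect("lock poisoned");
        *hits += 1;
        *hits
    }
}

fn main() {
    let counter = Arc::new(Counter { hits: Mutex::new(0) });
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || counter.record())
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(*counter.hits.lock().unwrap(), 4);
}
```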
/// - /// The executor takes ownership of the task, it should not be accessed + /// The executor takes ownership of the task, which should not be accessed /// again unless returned back to the user with `hyper_executor_poll`. fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code { let exec = non_null!(&*exec ?= hyper_code::HYPERE_INVALID_ARG); @@ -255,6 +252,10 @@ ffi_fn! { /// /// If ready, returns a task from the executor that has completed. /// + /// To avoid a memory leak, the task must eventually be consumed by + /// `hyper_task_free`, or taken ownership of by `hyper_executor_push` + /// without subsequently being given back by `hyper_executor_poll`. + /// /// If there are no ready tasks, this returns `NULL`. fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task { let exec = non_null!(&*exec ?= ptr::null_mut()); @@ -324,6 +325,10 @@ impl Future for TaskFuture { ffi_fn! { /// Free a task. + /// + /// This should only be used if the task isn't consumed by + /// `hyper_clientconn_handshake` or taken ownership of by + /// `hyper_executor_push`. fn hyper_task_free(task: *mut hyper_task) { drop(non_null!(Box::from_raw(task) ?= ())); } @@ -336,6 +341,11 @@ ffi_fn! { /// this task. /// /// Use `hyper_task_type` to determine the type of the `void *` return value. + /// + /// To avoid a memory leak, a non-empty return value must eventually be + /// consumed by a function appropriate for its type, one of + /// `hyper_error_free`, `hyper_clientconn_free`, `hyper_response_free`, or + /// `hyper_buf_free`. fn hyper_task_value(task: *mut hyper_task) -> *mut c_void { let task = non_null!(&mut *task ?= ptr::null_mut()); @@ -438,6 +448,9 @@ impl hyper_context<'_> { ffi_fn! { /// Copies a waker out of the task context. + /// + /// To avoid a memory leak, the waker must eventually be consumed by + /// `hyper_waker_free` or `hyper_waker_wake`. fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker { let waker = non_null!(&mut *cx ?= ptr::null_mut()).0.waker().clone(); Box::into_raw(Box::new(hyper_waker { waker })) @@ -447,7 +460,10 @@ ffi_fn! { // ===== impl hyper_waker ===== ffi_fn! { - /// Free a waker that hasn't been woken. + /// Free a waker. + /// + /// This should only be used if the request isn't consumed by + /// `hyper_waker_wake`. fn hyper_waker_free(waker: *mut hyper_waker) { drop(non_null!(Box::from_raw(waker) ?= ())); } diff --git a/src/ffi/userdata.rs b/src/ffi/userdata.rs index c0d73494ca..9197e407dd 100644 --- a/src/ffi/userdata.rs +++ b/src/ffi/userdata.rs @@ -1,33 +1,29 @@ use std::ffi::c_void; -/// Many hyper entites can be given userdata to allow user callbacks to corellate work together. -/// Since much of hyper is asychronous it's often useful to treat these userdata objects as "owned" -/// by the hyper entity (and hence to be cleaned up when that entity is dropped). +/// Many hyper entities can be given userdata to allow user callbacks to correlate work together. +/// Since much of hyper is asynchronous it's often useful to treat these userdata objects as +/// "owned" by the hyper entity (and hence to be cleaned up when that entity is dropped). /// -/// To acheive this a `hyepr_userdata_drop` callback is passed by calling code alongside the +/// To achieve this a `hyper_userdata_drop` callback is passed by calling code alongside the /// userdata to register a cleanup function. 
/// /// This function may be provided as NULL if the calling code wants to manage memory lifetimes /// itself, in which case the hyper object will logically consider the userdata "borrowed" until /// the hyper entity is dropped. -pub type hyper_userdata_drop = extern "C" fn(*mut c_void); +pub type hyper_userdata_drop = Option; /// A handle to a user-provided arbitrary object, along with an optional drop callback for the /// object. pub(crate) struct Userdata { data: *mut c_void, - drop: Option, + drop: hyper_userdata_drop, } impl Userdata { pub(crate) fn new(data: *mut c_void, drop: hyper_userdata_drop) -> Self { Self { data, - drop: if (drop as *const c_void).is_null() { - None - } else { - Some(drop) - } + drop, } } diff --git a/src/headers.rs b/src/headers.rs index 8407be185f..6fe672de01 100644 --- a/src/headers.rs +++ b/src/headers.rs @@ -53,15 +53,15 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue> return None; } } else { - return None + return None; } } } else { - return None + return None; } } - return content_length + return content_length; } fn from_digits(bytes: &[u8]) -> Option { @@ -80,7 +80,7 @@ fn from_digits(bytes: &[u8]) -> Option { b'0'..=b'9' => { result = result.checked_mul(RADIX)?; result = result.checked_add((b - b'0') as u64)?; - }, + } _ => { // not a DIGIT, get outta here! return None; diff --git a/src/lib.rs b/src/lib.rs index 91786b0e92..0d1db8320a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,7 +19,7 @@ //! - Extensive production use //! - [Client](client/index.html) and [Server](server/index.html) APIs //! -//! If just starting out, **check out the [Guides](https://hyper.rs/guides) +//! If just starting out, **check out the [Guides](https://hyper.rs/guides/1/) //! first.** //! //! ## "Low-level" @@ -51,7 +51,20 @@ //! - `server`: Enables the HTTP `server`. //! //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section - +//! +//! # Unstable Features +//! hyper includes a set of unstable optional features that can be enabled through the use of a +//! feature flag and a [configuration flag]. +//! +//! The following is a list of feature flags and their corresponding `RUSTFLAG`: +//! - `ffi`: Enables C API for hyper `hyper_unstable_ffi`. +//! - `tracing`: Enables debug logging with `hyper_unstable_tracing`. +//! +//! Enabling an unstable feature is possible with the following `cargo` command, as of version `1.64.0`: +//! ```notrust +//! RUSTFLAGS="--cfg hyper_unstable_tracing" cargo rustc --features client,http1,http2,tracing --crate-type cdylib +//!``` +//! [configuration flag]: https://doc.rust-lang.org/reference/conditional-compilation.html #[doc(hidden)] pub use http; @@ -67,6 +80,10 @@ pub use crate::error::{Error, Result}; #[macro_use] mod cfg; + +#[macro_use] +mod trace; + #[macro_use] mod common; pub mod body; @@ -78,10 +95,9 @@ pub mod rt; pub mod service; pub mod upgrade; -cfg_feature! { - #![feature = "ffi"] - pub mod ffi; -} +#[cfg(feature = "ffi")] +#[cfg_attr(docsrs, doc(cfg(all(feature = "ffi", hyper_unstable_ffi))))] +pub mod ffi; cfg_proto! 
{ mod headers; diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index b7c619683c..ce7c41a366 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -4,12 +4,11 @@ use std::marker::PhantomData; #[cfg(feature = "server")] use std::time::Duration; +use crate::rt::{Read, Write}; use bytes::{Buf, Bytes}; use http::header::{HeaderValue, CONNECTION}; use http::{HeaderMap, Method, Version}; use httparse::ParserConfig; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, error, trace}; use super::io::Buffered; use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants}; @@ -25,7 +24,7 @@ use crate::rt::Sleep; const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// This handles a connection, which will have been established over an -/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple +/// `Read + Write` (like a socket), and will likely include multiple /// `Transaction`s over HTTP. /// /// The connection will determine when a message begins and ends as well as @@ -39,7 +38,7 @@ pub(crate) struct Conn { impl Conn where - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, B: Buf, T: Http1Transaction, { @@ -175,6 +174,13 @@ where } } + #[cfg(feature = "server")] + pub(crate) fn has_initial_read_write_state(&self) -> bool { + matches!(self.state.reading, Reading::Init) + && matches!(self.state.writing, Writing::Init) + && self.io.read_buf().is_empty() + } + fn should_error_on_eof(&self) -> bool { // If we're idle, it's probably just the connection closing gracefully. T::should_error_on_parse_eof() && !self.state.is_idle() @@ -250,7 +256,7 @@ where if !T::should_read_first() { self.try_keep_alive(cx); } - } else if msg.expect_continue { + } else if msg.expect_continue && msg.head.version.gt(&Version::HTTP_10) { self.state.reading = Reading::Continue(Decoder::new(msg.decode)); wants = wants.add(Wants::EXPECT); } else { @@ -432,7 +438,7 @@ where let result = ready!(self.io.poll_read_from_io(cx)); Poll::Ready(result.map_err(|e| { - trace!("force_io_read; io error = {:?}", e); + trace!(error = %e, "force_io_read; io error"); self.state.close(); e })) @@ -742,7 +748,9 @@ where // If still in Reading::Body, just give up match self.state.reading { - Reading::Init | Reading::KeepAlive => trace!("body drained"), + Reading::Init | Reading::KeepAlive => { + trace!("body drained") + } _ => self.close_read(), } } @@ -1037,12 +1045,13 @@ mod tests { #[bench] fn bench_read_head_short(b: &mut ::test::Bencher) { use super::*; + use crate::common::io::Compat; let s = b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"; let len = s.len(); b.bytes = len as u64; // an empty IO, we'll be skipping and using the read buffer anyways - let io = tokio_test::io::Builder::new().build(); + let io = Compat(tokio_test::io::Builder::new().build()); let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io); *conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]); conn.state.cached_headers = Some(HeaderMap::with_capacity(2)); diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs index 4077b22062..81ac3a95c3 100644 --- a/src/proto/h1/decode.rs +++ b/src/proto/h1/decode.rs @@ -4,7 +4,6 @@ use std::io; use std::usize; use bytes::Bytes; -use tracing::{debug, trace}; use crate::common::{task, Poll}; @@ -428,9 +427,9 @@ impl StdError for IncompleteBody {} #[cfg(test)] mod tests { use super::*; + use crate::rt::{Read, ReadBuf}; use std::pin::Pin; use std::time::Duration; - use tokio::io::{AsyncRead, ReadBuf}; 
impl<'a> MemRead for &'a [u8] { fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll> { @@ -446,11 +445,11 @@ mod tests { } } - impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) { + impl<'a> MemRead for &'a mut (dyn Read + Unpin) { fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll> { let mut v = vec![0; len]; let mut buf = ReadBuf::new(&mut v); - ready!(Pin::new(self).poll_read(cx, &mut buf)?); + ready!(Pin::new(self).poll_read(cx, buf.unfilled())?); Poll::Ready(Ok(Bytes::copy_from_slice(&buf.filled()))) } } @@ -629,7 +628,7 @@ mod tests { async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String { let mut outs = Vec::new(); - let mut ins = if block_at == 0 { + let mut ins = crate::common::io::compat(if block_at == 0 { tokio_test::io::Builder::new() .wait(Duration::from_millis(10)) .read(content) @@ -640,9 +639,9 @@ mod tests { .wait(Duration::from_millis(10)) .read(&content[block_at..]) .build() - }; + }); - let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin); + let mut ins = &mut ins as &mut (dyn Read + Unpin); loop { let buf = decoder diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs index cd494581b9..705bc77a4a 100644 --- a/src/proto/h1/dispatch.rs +++ b/src/proto/h1/dispatch.rs @@ -1,9 +1,8 @@ use std::error::Error as StdError; +use crate::rt::{Read, Write}; use bytes::{Buf, Bytes}; use http::Request; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace}; use super::{Http1Transaction, Wants}; use crate::body::{Body, DecodedLength, Incoming as IncomingBody}; @@ -28,7 +27,8 @@ pub(crate) trait Dispatch { self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll>>; - fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()>; + fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) + -> crate::Result<()>; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; fn should_poll(&self) -> bool; } @@ -63,7 +63,7 @@ where RecvItem = MessageHead, > + Unpin, D::PollError: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, T: Http1Transaction + Unpin, Bs: Body + 'static, Bs::Error: Into>, @@ -81,7 +81,11 @@ where #[cfg(feature = "server")] pub(crate) fn disable_keep_alive(&mut self) { self.conn.disable_keep_alive(); - if self.conn.is_write_closed() { + + // If keep alive has been disabled and no read or write has been seen on + // the connection yet, we must be in a state where the server is being asked to + // shut down before any data has been seen on the connection + if self.conn.is_write_closed() || self.conn.has_initial_read_write_state() { self.close(); } } @@ -92,7 +96,7 @@ where } /// Run this dispatcher until HTTP says this connection is done, - /// but don't call `AsyncWrite::shutdown` on the underlying IO. + /// but don't call `Write::shutdown` on the underlying IO. /// /// This is useful for old-style HTTP upgrades, but ignores /// newer-style upgrade API. @@ -116,6 +120,10 @@ where should_shutdown: bool, ) -> Poll> { Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| { + // Be sure to alert a streaming body of the failure. + if let Some(mut body) = self.body_tx.take() { + body.send_error(crate::Error::new_body("connection error")); + } // An error means we're shutting down either way. // We just try to give the error to the user, // and close the connection with an Ok. 
If we @@ -249,7 +257,8 @@ where let body = match body_len { DecodedLength::ZERO => IncomingBody::empty(), other => { - let (tx, rx) = IncomingBody::new_channel(other, wants.contains(Wants::EXPECT)); + let (tx, rx) = + IncomingBody::new_channel(other, wants.contains(Wants::EXPECT)); self.body_tx = Some(tx); rx } @@ -366,7 +375,12 @@ where self.conn.end_body()?; } } else { - return Poll::Pending; + // If there's no body_rx, end the body + if self.conn.can_write_body() { + self.conn.end_body()?; + } else { + return Poll::Pending; + } } } } @@ -411,7 +425,7 @@ where RecvItem = MessageHead, > + Unpin, D::PollError: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, T: Http1Transaction + Unpin, Bs: Body + 'static, Bs::Error: Into>, @@ -535,6 +549,8 @@ cfg_server! { // ===== impl Client ===== cfg_client! { + use std::convert::Infallible; + impl Client { pub(crate) fn new(rx: ClientRx) -> Client { Client { @@ -551,13 +567,13 @@ cfg_client! { { type PollItem = RequestHead; type PollBody = B; - type PollError = crate::common::Never; + type PollError = Infallible; type RecvItem = crate::proto::ResponseHead; fn poll_msg( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, - ) -> Poll>> { + ) -> Poll>> { let mut this = self.as_mut(); debug_assert!(!this.rx_closed); match this.rx.poll_recv(cx) { @@ -649,6 +665,7 @@ cfg_client! { #[cfg(test)] mod tests { use super::*; + use crate::common::io::compat; use crate::proto::h1::ClientTransaction; use std::time::Duration; @@ -662,7 +679,7 @@ mod tests { // Block at 0 for now, but we will release this response before // the request is ready to write later... let (mut tx, rx) = crate::client::dispatch::channel(); - let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(compat(io)); let mut dispatcher = Dispatcher::new(Client::new(rx), conn); // First poll is needed to allow tx to send... @@ -699,7 +716,7 @@ mod tests { .build_with_handle(); let (mut tx, rx) = crate::client::dispatch::channel(); - let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(compat(io)); conn.set_write_strategy_queue(); let dispatcher = Dispatcher::new(Client::new(rx), conn); @@ -730,7 +747,7 @@ mod tests { .build(); let (mut tx, rx) = crate::client::dispatch::channel(); - let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io); + let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(compat(io)); let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn)); // First poll is needed to allow tx to send... 
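The `h1` connection and dispatcher hunks above switch the transport bound from tokio's `AsyncRead`/`AsyncWrite` to hyper's own `hyper::rt::{Read, Write}` traits, and the unit tests wrap tokio's mock IO in a crate-internal `compat` shim. A downstream caller driving these connections over a real tokio stream would need a similar adapter. One possible shape for such an adapter, written against the `Read`, `Write`, and `ReadBufCursor` definitions added later in this diff (`src/rt/io.rs`), is sketched below; the `TokioIo` name is an assumption for illustration, not something this change introduces.

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use hyper::rt::{Read, ReadBufCursor, Write};
use tokio::io::{AsyncRead, AsyncWrite};

/// Hypothetical adapter bridging a tokio IO type to hyper's `Read`/`Write`.
pub struct TokioIo<T>(pub T);

impl<T: AsyncRead + Unpin> Read for TokioIo<T> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        // Expose the unfilled bytes to tokio as a `ReadBuf`, then tell hyper's
        // cursor how many bytes tokio actually initialized.
        let n = unsafe {
            let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
            match Pin::new(&mut self.0).poll_read(cx, &mut tbuf) {
                Poll::Ready(Ok(())) => tbuf.filled().len(),
                other => return other,
            }
        };
        unsafe { buf.advance(n) };
        Poll::Ready(Ok(()))
    }
}

impl<T: AsyncWrite + Unpin> Write for TokioIo<T> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        Pin::new(&mut self.0).poll_write(cx, buf)
    }

    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        Pin::new(&mut self.0).poll_flush(cx)
    }

    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        Pin::new(&mut self.0).poll_shutdown(cx)
    }

    // Forward the vectored-write opt-in, one of the stated goals of the new
    // IO traits.
    fn is_write_vectored(&self) -> bool {
        self.0.is_write_vectored()
    }

    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
    }
}
```

The `unsafe` blocks follow the contract documented on `ReadBufCursor` further down: `as_mut` exposes the unfilled (possibly uninitialized) bytes, and `advance(n)` may only be called once `n` of them are known to be initialized, which tokio's `ReadBuf::filled()` guarantees.
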
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs index cb4a7841fe..c98c55d664 100644 --- a/src/proto/h1/encode.rs +++ b/src/proto/h1/encode.rs @@ -3,7 +3,6 @@ use std::io::IoSlice; use bytes::buf::{Chain, Take}; use bytes::Buf; -use tracing::trace; use super::io::WriteBuf; diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index da4101b6fb..785f6c0221 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -6,9 +6,8 @@ use std::io::{self, IoSlice}; use std::marker::Unpin; use std::mem::MaybeUninit; +use crate::rt::{Read, ReadBuf, Write}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tracing::{debug, trace}; use super::{Http1Transaction, ParseContext, ParsedMessage}; use crate::common::buf::BufList; @@ -55,7 +54,7 @@ where impl Buffered where - T: AsyncRead + AsyncWrite + Unpin, + T: Read + Write + Unpin, B: Buf, { pub(crate) fn new(io: T) -> Buffered { @@ -224,7 +223,7 @@ where if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() { *parse_ctx.h1_header_read_timeout_running = false; - tracing::warn!("read header from client timeout"); + warn!("read header from client timeout"); return Poll::Ready(Err(crate::Error::new_header_timeout())); } } @@ -251,7 +250,7 @@ where let dst = self.read_buf.chunk_mut(); let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit]) }; let mut buf = ReadBuf::uninit(dst); - match Pin::new(&mut self.io).poll_read(cx, &mut buf) { + match Pin::new(&mut self.io).poll_read(cx, buf.unfilled()) { Poll::Ready(Ok(_)) => { let n = buf.filled().len(); trace!("received {} bytes", n); @@ -359,7 +358,7 @@ pub(crate) trait MemRead { impl MemRead for Buffered where - T: AsyncRead + AsyncWrite + Unpin, + T: Read + Write + Unpin, B: Buf, { fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll> { @@ -662,6 +661,7 @@ enum WriteStrategy { #[cfg(test)] mod tests { + use crate::common::io::compat; use crate::common::time::Time; use super::*; @@ -717,7 +717,7 @@ mod tests { .wait(Duration::from_secs(1)) .build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(compat(mock)); // We expect a `parse` to be not ready, and so can't await it directly. // Rather, this `poll_fn` will wrap the `Poll` result. 
@@ -862,7 +862,7 @@ mod tests { #[cfg(debug_assertions)] // needs to trigger a debug_assert fn write_buf_requires_non_empty_bufs() { let mock = Mock::new().build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(compat(mock)); buffered.buffer(Cursor::new(Vec::new())); } @@ -897,7 +897,7 @@ mod tests { let mock = Mock::new().write(b"hello world, it's hyper!").build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(compat(mock)); buffered.write_buf.set_strategy(WriteStrategy::Flatten); buffered.headers_buf().extend(b"hello "); @@ -956,7 +956,7 @@ mod tests { .write(b"hyper!") .build(); - let mut buffered = Buffered::<_, Cursor>>::new(mock); + let mut buffered = Buffered::<_, Cursor>>::new(compat(mock)); buffered.write_buf.set_strategy(WriteStrategy::Queue); // we have 4 buffers, and vec IO disabled, but explicitly said diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 6a1c5a87d3..f964d2138f 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -9,7 +9,6 @@ use bytes::BytesMut; use http::header::ValueIter; use http::header::{self, Entry, HeaderName, HeaderValue}; use http::{HeaderMap, Method, StatusCode, Version}; -use tracing::{debug, error, trace, trace_span, warn}; use crate::body::DecodedLength; #[cfg(feature = "server")] @@ -72,8 +71,7 @@ where return Ok(None); } - let span = trace_span!("parse_headers"); - let _s = span.enter(); + let _entered = trace_span!("parse_headers"); #[cfg(feature = "server")] if !*ctx.h1_header_read_timeout_running { @@ -103,8 +101,7 @@ pub(super) fn encode_headers( where T: Http1Transaction, { - let span = trace_span!("encode_headers"); - let _s = span.enter(); + let _entered = trace_span!("encode_headers"); T::encode(enc, dst) } @@ -1059,7 +1056,7 @@ impl Http1Transaction for Client { if let Some(reason) = reason { // Safety: httparse ensures that only valid reason phrase bytes are present in this // field. 
- let reason = unsafe { crate::ext::ReasonPhrase::from_bytes_unchecked(reason) }; + let reason = crate::ext::ReasonPhrase::from_bytes_unchecked(reason); extensions.insert(reason); } diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 121e24dd84..c6b24212e8 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -1,25 +1,28 @@ -use std::error::Error as StdError; -use std::time::Duration; +use std::{convert::Infallible, marker::PhantomData, time::Duration}; +use crate::rt::{Read, Write}; use bytes::Bytes; +use futures_channel::mpsc::{Receiver, Sender}; use futures_channel::{mpsc, oneshot}; -use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; -use futures_util::stream::StreamExt as _; -use h2::client::{Builder, SendRequest}; +use futures_util::future::{Either, FusedFuture, FutureExt as _}; +use futures_util::stream::{StreamExt as _, StreamFuture}; +use h2::client::{Builder, Connection, SendRequest}; use h2::SendStream; use http::{Method, StatusCode}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace, warn}; +use pin_project_lite::pin_project; +use super::ping::{Ponger, Recorder}; use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; use crate::body::{Body, Incoming as IncomingBody}; +use crate::client::dispatch::{Callback, SendWhen}; +use crate::common::io::Compat; use crate::common::time::Time; -use crate::client::dispatch::Callback; -use crate::common::{exec::Exec, task, Future, Never, Pin, Poll}; +use crate::common::{task, Future, Pin, Poll}; use crate::ext::Protocol; use crate::headers; use crate::proto::h2::UpgradedSendStream; use crate::proto::Dispatched; +use crate::rt::bounds::ExecutorClient; use crate::upgrade::Upgraded; use crate::{Request, Response}; use h2::client::ResponseFuture; @@ -28,11 +31,11 @@ type ClientRx = crate::client::dispatch::Receiver, Response; +type ConnDropRef = mpsc::Sender; ///// A oneshot channel watches the `Connection` task, and when it completes, ///// the "dispatch" task will be notified and can shutdown sooner. 
-type ConnEof = oneshot::Receiver; +type ConnEof = oneshot::Receiver; // Our defaults are chosen for the "majority" case, which usually are not // resource constrained, and so the spec default of 64kb can be too limiting @@ -98,20 +101,22 @@ fn new_ping_config(config: &Config) -> ping::Config { } } -pub(crate) async fn handshake( +pub(crate) async fn handshake( io: T, req_rx: ClientRx, config: &Config, - exec: Exec, + mut exec: E, timer: Time, -) -> crate::Result> +) -> crate::Result> where - T: AsyncRead + AsyncWrite + Send + Unpin + 'static, - B: Body, + T: Read + Write + Unpin + 'static, + B: Body + 'static, B::Data: Send + 'static, + E: ExecutorClient + Unpin, + B::Error: Into>, { let (h2_tx, mut conn) = new_builder(config) - .handshake::<_, SendBuf>(io) + .handshake::<_, SendBuf>(crate::common::io::compat(io)) .await .map_err(crate::Error::new_h2)?; @@ -122,40 +127,27 @@ where let (conn_drop_ref, rx) = mpsc::channel(1); let (cancel_tx, conn_eof) = oneshot::channel(); - let conn_drop_rx = rx.into_future().map(|(item, _rx)| { - if let Some(never) = item { - match never {} - } - }); + let conn_drop_rx = rx.into_future(); let ping_config = new_ping_config(&config); let (conn, ping) = if ping_config.is_enabled() { let pp = conn.ping_pong().expect("conn.ping_pong"); - let (recorder, mut ponger) = ping::channel(pp, ping_config, timer); - - let conn = future::poll_fn(move |cx| { - match ponger.poll(cx) { - Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { - conn.set_target_window_size(wnd); - conn.set_initial_window_size(wnd)?; - } - Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { - debug!("connection keep-alive timed out"); - return Poll::Ready(Ok(())); - } - Poll::Pending => {} - } + let (recorder, ponger) = ping::channel(pp, ping_config, timer); - Pin::new(&mut conn).poll(cx) - }); + let conn: Conn<_, B> = Conn::new(ponger, conn); (Either::Left(conn), recorder) } else { (Either::Right(conn), ping::disabled()) }; - let conn = conn.map_err(|e| debug!("connection error: {}", e)); + let conn: ConnMapErr = ConnMapErr { + conn, + is_terminated: false, + }; - exec.execute(conn_task(conn, conn_drop_rx, cancel_tx)); + exec.execute_h2_future(H2ClientFuture::Task { + task: ConnTask::new(conn, conn_drop_rx, cancel_tx), + }); Ok(ClientTask { ping, @@ -165,25 +157,215 @@ where h2_tx, req_rx, fut_ctx: None, + marker: PhantomData, }) } -async fn conn_task(conn: C, drop_rx: D, cancel_tx: oneshot::Sender) +pin_project! 
{ + struct Conn + where + B: Body, + { + #[pin] + ponger: Ponger, + #[pin] + conn: Connection, SendBuf<::Data>>, + } +} + +impl Conn where - C: Future + Unpin, - D: Future + Unpin, + B: Body, + T: Read + Write + Unpin, +{ + fn new(ponger: Ponger, conn: Connection, SendBuf<::Data>>) -> Self { + Conn { ponger, conn } + } +} + +impl Future for Conn +where + B: Body, + T: Read + Write + Unpin, { - match future::select(conn, drop_rx).await { - Either::Left(_) => { - // ok or err, the `conn` has finished + type Output = Result<(), h2::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + match this.ponger.poll(cx) { + Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => { + this.conn.set_target_window_size(wnd); + this.conn.set_initial_window_size(wnd)?; + } + Poll::Ready(ping::Ponged::KeepAliveTimedOut) => { + debug!("connection keep-alive timed out"); + return Poll::Ready(Ok(())); + } + Poll::Pending => {} } - Either::Right(((), conn)) => { - // mpsc has been dropped, hopefully polling - // the connection some more should start shutdown - // and then close - trace!("send_request dropped, starting conn shutdown"); - drop(cancel_tx); - let _ = conn.await; + + Pin::new(&mut this.conn).poll(cx) + } +} + +pin_project! { + struct ConnMapErr + where + B: Body, + T: Read, + T: Write, + T: Unpin, + { + #[pin] + conn: Either, Connection, SendBuf<::Data>>>, + #[pin] + is_terminated: bool, + } +} + +impl Future for ConnMapErr +where + B: Body, + T: Read + Write + Unpin, +{ + type Output = Result<(), ()>; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + + if *this.is_terminated { + return Poll::Pending; + } + let polled = this.conn.poll(cx); + if polled.is_ready() { + *this.is_terminated = true; + } + polled.map_err(|_e| { + debug!(error = %_e, "connection error"); + }) + } +} + +impl FusedFuture for ConnMapErr +where + B: Body, + T: Read + Write + Unpin, +{ + fn is_terminated(&self) -> bool { + self.is_terminated + } +} + +pin_project! { + pub struct ConnTask + where + B: Body, + T: Read, + T: Write, + T: Unpin, + { + #[pin] + drop_rx: StreamFuture>, + #[pin] + cancel_tx: Option>, + #[pin] + conn: ConnMapErr, + } +} + +impl ConnTask +where + B: Body, + T: Read + Write + Unpin, +{ + fn new( + conn: ConnMapErr, + drop_rx: StreamFuture>, + cancel_tx: oneshot::Sender, + ) -> Self { + Self { + drop_rx, + cancel_tx: Some(cancel_tx), + conn, + } + } +} + +impl Future for ConnTask +where + B: Body, + T: Read + Write + Unpin, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + + if !this.conn.is_terminated() { + if let Poll::Ready(_) = this.conn.poll_unpin(cx) { + // ok or err, the `conn` has finished. + return Poll::Ready(()); + }; + } + + if !this.drop_rx.is_terminated() { + if let Poll::Ready(_) = this.drop_rx.poll_unpin(cx) { + // mpsc has been dropped, hopefully polling + // the connection some more should start shutdown + // and then close. + trace!("send_request dropped, starting conn shutdown"); + drop(this.cancel_tx.take().expect("ConnTask Future polled twice")); + } + }; + + Poll::Pending + } +} + +pin_project! 
{ + #[project = H2ClientFutureProject] + pub enum H2ClientFuture + where + B: http_body::Body, + B: 'static, + B::Error: Into>, + T: Read, + T: Write, + T: Unpin, + { + Pipe { + #[pin] + pipe: PipeMap, + }, + Send { + #[pin] + send_when: SendWhen, + }, + Task { + #[pin] + task: ConnTask, + }, + } +} + +impl Future for H2ClientFuture +where + B: http_body::Body + 'static, + B::Error: Into>, + T: Read + Write + Unpin, +{ + type Output = (); + + fn poll( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let this = self.project(); + + match this { + H2ClientFutureProject::Pipe { pipe } => pipe.poll(cx), + H2ClientFutureProject::Send { send_when } => send_when.poll(cx), + H2ClientFutureProject::Task { task } => task.poll(cx), } } } @@ -202,43 +384,89 @@ where impl Unpin for FutCtx {} -pub(crate) struct ClientTask +pub(crate) struct ClientTask where B: Body, + E: Unpin, { ping: ping::Recorder, conn_drop_ref: ConnDropRef, conn_eof: ConnEof, - executor: Exec, + executor: E, h2_tx: SendRequest>, req_rx: ClientRx, fut_ctx: Option>, + marker: PhantomData, } -impl ClientTask +impl ClientTask where B: Body + 'static, + E: ExecutorClient + Unpin, + B::Error: Into>, + T: Read + Write + Unpin, { pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { self.h2_tx.is_extended_connect_protocol_enabled() } } -impl ClientTask +pin_project! { + pub struct PipeMap + where + S: Body, + { + #[pin] + pipe: PipeToSendStream, + #[pin] + conn_drop_ref: Option>, + #[pin] + ping: Option, + } +} + +impl Future for PipeMap +where + B: http_body::Body, + B::Error: Into>, +{ + type Output = (); + + fn poll( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let mut this = self.project(); + + match this.pipe.poll_unpin(cx) { + Poll::Ready(result) => { + if let Err(_e) = result { + debug!("client request body error: {}", _e); + } + drop(this.conn_drop_ref.take().expect("Future polled twice")); + drop(this.ping.take().expect("Future polled twice")); + return Poll::Ready(()); + } + Poll::Pending => (), + }; + Poll::Pending + } +} + +impl ClientTask where - B: Body + Send + 'static, + B: Body + 'static + Unpin, B::Data: Send, - B::Error: Into>, + E: ExecutorClient + Unpin, + B::Error: Into>, + T: Read + Write + Unpin, { fn poll_pipe(&mut self, f: FutCtx, cx: &mut task::Context<'_>) { let ping = self.ping.clone(); + let send_stream = if !f.is_connect { if !f.eos { - let mut pipe = Box::pin(PipeToSendStream::new(f.body, f.body_tx)).map(|res| { - if let Err(e) = res { - debug!("client request body error: {}", e); - } - }); + let mut pipe = PipeToSendStream::new(f.body, f.body_tx); // eagerly see if the body pipe is ready and // can thus skip allocating in the executor @@ -250,13 +478,15 @@ where // "open stream" alive while this body is // still sending... let ping = ping.clone(); - let pipe = pipe.map(move |x| { - drop(conn_drop_ref); - drop(ping); - x - }); + + let pipe = PipeMap { + pipe, + conn_drop_ref: Some(conn_drop_ref), + ping: Some(ping), + }; // Clear send task - self.executor.execute(pipe); + self.executor + .execute_h2_future(H2ClientFuture::Pipe { pipe: pipe }); } } } @@ -266,7 +496,49 @@ where Some(f.body_tx) }; - let fut = f.fut.map(move |result| match result { + self.executor.execute_h2_future(H2ClientFuture::Send { + send_when: SendWhen { + when: ResponseFutMap { + fut: f.fut, + ping: Some(ping), + send_stream: Some(send_stream), + }, + call_back: Some(f.cb), + }, + }); + } +} + +pin_project! 
{ + pub(crate) struct ResponseFutMap + where + B: Body, + B: 'static, + { + #[pin] + fut: ResponseFuture, + #[pin] + ping: Option, + #[pin] + send_stream: Option::Data>>>>, + } +} + +impl Future for ResponseFutMap +where + B: Body + 'static, +{ + type Output = Result, (crate::Error, Option>)>; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + + let result = ready!(this.fut.poll(cx)); + + let ping = this.ping.take().expect("Future polled twice"); + let send_stream = this.send_stream.take().expect("Future polled twice"); + + match result { Ok(res) => { // record that we got the response headers ping.record_non_data(); @@ -277,17 +549,17 @@ where warn!("h2 connect response with non-zero body not supported"); send_stream.send_reset(h2::Reason::INTERNAL_ERROR); - return Err(( + return Poll::Ready(Err(( crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), - None, - )); + None::>, + ))); } let (parts, recv_stream) = res.into_parts(); let mut res = Response::from_parts(parts, IncomingBody::empty()); let (pending, on_upgrade) = crate::upgrade::pending(); let io = H2Upgraded { - ping, + ping: ping, send_stream: unsafe { UpgradedSendStream::new(send_stream) }, recv_stream, buf: Bytes::new(), @@ -297,31 +569,32 @@ where pending.fulfill(upgraded); res.extensions_mut().insert(on_upgrade); - Ok(res) + Poll::Ready(Ok(res)) } else { let res = res.map(|stream| { let ping = ping.for_stream(&stream); IncomingBody::h2(stream, content_length.into(), ping) }); - Ok(res) + Poll::Ready(Ok(res)) } } Err(err) => { ping.ensure_not_timed_out().map_err(|e| (e, None))?; debug!("client response error: {}", err); - Err((crate::Error::new_h2(err), None)) + Poll::Ready(Err((crate::Error::new_h2(err), None::>))) } - }); - self.executor.execute(f.cb.send_when(fut)); + } } } -impl Future for ClientTask +impl Future for ClientTask where - B: Body + Send + 'static, + B: Body + 'static + Unpin, B::Data: Send, - B::Error: Into>, + B::Error: Into>, + E: ExecutorClient + 'static + Send + Sync + Unpin, + T: Read + Write + Unpin, { type Output = crate::Result; diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs index a1cbd25813..defc2512d6 100644 --- a/src/proto/h2/mod.rs +++ b/src/proto/h2/mod.rs @@ -1,14 +1,13 @@ +use crate::rt::{Read, ReadBufCursor, Write}; use bytes::{Buf, Bytes}; use h2::{Reason, RecvStream, SendStream}; use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE}; use http::HeaderMap; use pin_project_lite::pin_project; use std::error::Error as StdError; -use std::io::{self, Cursor, IoSlice}; +use std::io::{Cursor, IoSlice}; use std::mem; use std::task::Context; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tracing::{debug, trace, warn}; use crate::body::Body; use crate::common::{task, Future, Pin, Poll}; @@ -29,21 +28,21 @@ cfg_server! { /// Default initial stream window size defined in HTTP2 spec. pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535; +// List of connection headers from: +// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection +// +// TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're +// tested separately. 
+const CONNECTION_HEADERS: [HeaderName; 5] = [ + HeaderName::from_static("keep-alive"), + HeaderName::from_static("proxy-connection"), + TRAILER, + TRANSFER_ENCODING, + UPGRADE, +]; + fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { - // List of connection headers from: - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection - // - // TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're - // tested separately. - let connection_headers = [ - HeaderName::from_lowercase(b"keep-alive").unwrap(), - HeaderName::from_lowercase(b"proxy-connection").unwrap(), - TRAILER, - TRANSFER_ENCODING, - UPGRADE, - ]; - - for header in connection_headers.iter() { + for header in &CONNECTION_HEADERS { if headers.remove(header).is_some() { warn!("Connection header illegal in HTTP/2: {}", header.as_str()); } @@ -85,7 +84,7 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) { // body adapters used by both Client and Server pin_project! { - struct PipeToSendStream + pub(crate) struct PipeToSendStream where S: Body, { @@ -129,9 +128,7 @@ where match ready!(me.body_tx.poll_capacity(cx)) { Some(Ok(0)) => {} Some(Ok(_)) => break, - Some(Err(e)) => { - return Poll::Ready(Err(crate::Error::new_body_write(e))) - } + Some(Err(e)) => return Poll::Ready(Err(crate::Error::new_body_write(e))), None => { // None means the stream is no longer in a // streaming state, we either finished it @@ -148,9 +145,7 @@ where .map_err(crate::Error::new_body_write)? { debug!("stream received RST_STREAM: {:?}", reason); - return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from( - reason, - )))); + return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason)))); } match ready!(me.stream.as_mut().poll_frame(cx)) { @@ -275,15 +270,15 @@ where buf: Bytes, } -impl AsyncRead for H2Upgraded +impl Read for H2Upgraded where B: Buf, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - read_buf: &mut ReadBuf<'_>, - ) -> Poll> { + mut read_buf: ReadBufCursor<'_>, + ) -> Poll> { if self.buf.is_empty() { self.buf = loop { match ready!(self.recv_stream.poll_data(cx)) { @@ -299,7 +294,7 @@ where return Poll::Ready(match e.reason() { Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()), Some(Reason::STREAM_CLOSED) => { - Err(io::Error::new(io::ErrorKind::BrokenPipe, e)) + Err(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)) } _ => Err(h2_to_io_error(e)), }) @@ -315,7 +310,7 @@ where } } -impl AsyncWrite for H2Upgraded +impl Write for H2Upgraded where B: Buf, { @@ -323,7 +318,7 @@ where mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], - ) -> Poll> { + ) -> Poll> { if buf.is_empty() { return Poll::Ready(Ok(0)); } @@ -348,7 +343,7 @@ where Poll::Ready(Err(h2_to_io_error( match ready!(self.send_stream.poll_reset(cx)) { Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { - return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())) } Ok(reason) => reason.into(), Err(e) => e, @@ -356,25 +351,23 @@ where ))) } - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll> { + ) -> Poll> { if self.send_stream.write(&[], true).is_ok() { - return Poll::Ready(Ok(())) + return Poll::Ready(Ok(())); } Poll::Ready(Err(h2_to_io_error( match 
ready!(self.send_stream.poll_reset(cx)) { - Ok(Reason::NO_ERROR) => { - return Poll::Ready(Ok(())) - } + Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())), Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => { - return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into())) } Ok(reason) => reason.into(), Err(e) => e, @@ -383,11 +376,11 @@ where } } -fn h2_to_io_error(e: h2::Error) -> io::Error { +fn h2_to_io_error(e: h2::Error) -> std::io::Error { if e.is_io() { e.into_io().unwrap() } else { - io::Error::new(io::ErrorKind::Other, e) + std::io::Error::new(std::io::ErrorKind::Other, e) } } @@ -414,7 +407,7 @@ where unsafe { self.as_inner_unchecked().poll_reset(cx) } } - fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> { + fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), std::io::Error> { let send_buf = SendBuf::Cursor(Cursor::new(buf.into())); unsafe { self.as_inner_unchecked() diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs index 297e0c7876..61d24112f6 100644 --- a/src/proto/h2/ping.rs +++ b/src/proto/h2/ping.rs @@ -26,7 +26,6 @@ use std::task::{self, Poll}; use std::time::{Duration, Instant}; use h2::{Ping, PingPong}; -use tracing::{debug, trace}; use crate::common::time::Time; use crate::rt::Sleep; @@ -300,8 +299,8 @@ impl Ponger { } } } - Poll::Ready(Err(e)) => { - debug!("pong error: {}", e); + Poll::Ready(Err(_e)) => { + debug!("pong error: {}", _e); } Poll::Pending => { if let Some(ref mut ka) = self.keep_alive { @@ -332,8 +331,8 @@ impl Shared { self.ping_sent_at = Some(Instant::now()); trace!("sent ping"); } - Err(err) => { - debug!("error sending ping: {}", err); + Err(_err) => { + debug!("error sending ping: {}", _err); } } } diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index a5bd75f92c..0d830ad017 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -3,17 +3,15 @@ use std::marker::Unpin; use std::time::Duration; +use crate::rt::{Read, Write}; use bytes::Bytes; use h2::server::{Connection, Handshake, SendResponse}; use h2::{Reason, RecvStream}; use http::{Method, Request}; use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; -use tracing::{debug, trace, warn}; use super::{ping, PipeToSendStream, SendBuf}; use crate::body::{Body, Incoming as IncomingBody}; -use crate::rt::bounds::Http2ConnExec; use crate::common::time::Time; use crate::common::{date, task, Future, Pin, Poll}; use crate::ext::Protocol; @@ -21,10 +19,11 @@ use crate::headers; use crate::proto::h2::ping::Recorder; use crate::proto::h2::{H2Upgraded, UpgradedSendStream}; use crate::proto::Dispatched; +use crate::rt::bounds::Http2ConnExec; use crate::service::HttpService; use crate::upgrade::{OnUpgrade, Pending, Upgraded}; -use crate::{Response}; +use crate::Response; // Our defaults are chosen for the "majority" case, which usually are not // resource constrained, and so the spec default of 64kb can be too limiting @@ -89,7 +88,7 @@ where { Handshaking { ping_config: ping::Config, - hs: Handshake>, + hs: Handshake, SendBuf>, }, Serving(Serving), Closed, @@ -100,13 +99,13 @@ where B: Body, { ping: Option<(ping::Recorder, ping::Ponger)>, - conn: Connection>, + conn: Connection, SendBuf>, closing: Option, } impl Server where - T: AsyncRead + AsyncWrite + Unpin, + T: Read + Write + Unpin, S: HttpService, S::Error: Into>, B: Body + 'static, @@ -132,7 +131,7 @@ where if config.enable_connect_protocol { builder.enable_connect_protocol(); } - let handshake = 
builder.handshake(io); + let handshake = builder.handshake(crate::common::io::compat(io)); let bdp = if config.adaptive_window { Some(config.initial_stream_window_size) @@ -182,7 +181,7 @@ where impl Future for Server where - T: AsyncRead + AsyncWrite + Unpin, + T: Read + Write + Unpin, S: HttpService, S::Error: Into>, B: Body + 'static, @@ -228,7 +227,7 @@ where impl Serving where - T: AsyncRead + AsyncWrite + Unpin, + T: Read + Write + Unpin, B: Body + 'static, { fn poll_server( @@ -508,8 +507,8 @@ where fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { self.poll2(cx).map(|res| { - if let Err(e) = res { - debug!("stream error: {}", e); + if let Err(_e) = res { + debug!("stream error: {}", _e); } }) } diff --git a/src/proto/mod.rs b/src/proto/mod.rs index f938bf532b..3628576dc1 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -50,7 +50,7 @@ pub(crate) enum BodyLength { Unknown, } -/// Status of when a Disaptcher future completes. +/// Status of when a Dispatcher future completes. pub(crate) enum Dispatched { /// Dispatcher completely shutdown connection. Shutdown, diff --git a/src/rt/bounds.rs b/src/rt/bounds.rs index 69115ef2ca..36f3683ead 100644 --- a/src/rt/bounds.rs +++ b/src/rt/bounds.rs @@ -6,14 +6,18 @@ #[cfg(all(feature = "server", feature = "http2"))] pub use self::h2::Http2ConnExec; -#[cfg(all(feature = "server", feature = "http2"))] +#[cfg(all(feature = "client", feature = "http2"))] +pub use self::h2_client::ExecutorClient; + +#[cfg(all(feature = "client", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(all(feature = "server", feature = "http2"))))] -mod h2 { - use crate::{common::exec::Exec, proto::h2::server::H2Stream, rt::Executor}; - use http_body::Body; - use std::future::Future; +mod h2_client { + use std::{error::Error, future::Future}; - /// An executor to spawn http2 connections. + use crate::rt::{Read, Write}; + use crate::{proto::h2::client::H2ClientFuture, rt::Executor}; + + /// An executor to spawn http2 futures for the client. /// /// This trait is implemented for any type that implements [`Executor`] /// trait for any future. @@ -21,28 +25,64 @@ mod h2 { /// This trait is sealed and cannot be implemented for types outside this crate. 
/// /// [`Executor`]: crate::rt::Executor - pub trait Http2ConnExec: sealed::Sealed<(F, B)> + Clone { + pub trait ExecutorClient: sealed_client::Sealed<(B, T)> + where + B: http_body::Body, + B::Error: Into>, + T: Read + Write + Unpin, + { #[doc(hidden)] - fn execute_h2stream(&mut self, fut: H2Stream); + fn execute_h2_future(&mut self, future: H2ClientFuture); } - impl Http2ConnExec for Exec + impl ExecutorClient for E where - H2Stream: Future + Send + 'static, - B: Body, + E: Executor>, + B: http_body::Body + 'static, + B::Error: Into>, + H2ClientFuture: Future, + T: Read + Write + Unpin, { - fn execute_h2stream(&mut self, fut: H2Stream) { - self.execute(fut) + fn execute_h2_future(&mut self, future: H2ClientFuture) { + self.execute(future) } } - impl sealed::Sealed<(F, B)> for Exec + impl sealed_client::Sealed<(B, T)> for E where - H2Stream: Future + Send + 'static, - B: Body, + E: Executor>, + B: http_body::Body + 'static, + B::Error: Into>, + H2ClientFuture: Future, + T: Read + Write + Unpin, { } + mod sealed_client { + pub trait Sealed {} + } +} + +#[cfg(all(feature = "server", feature = "http2"))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "server", feature = "http2"))))] +mod h2 { + use crate::{proto::h2::server::H2Stream, rt::Executor}; + use http_body::Body; + use std::future::Future; + + /// An executor to spawn http2 connections. + /// + /// This trait is implemented for any type that implements [`Executor`] + /// trait for any future. + /// + /// This trait is sealed and cannot be implemented for types outside this crate. + /// + /// [`Executor`]: crate::rt::Executor + pub trait Http2ConnExec: sealed::Sealed<(F, B)> + Clone { + #[doc(hidden)] + fn execute_h2stream(&mut self, fut: H2Stream); + } + #[doc(hidden)] impl Http2ConnExec for E where diff --git a/src/rt/io.rs b/src/rt/io.rs new file mode 100644 index 0000000000..c39e1e098d --- /dev/null +++ b/src/rt/io.rs @@ -0,0 +1,334 @@ +use std::fmt; +use std::mem::MaybeUninit; +use std::pin::Pin; +use std::task::{Context, Poll}; + +// New IO traits? What?! Why, are you bonkers? +// +// I mean, yes, probably. But, here's the goals: +// +// 1. Supports poll-based IO operations. +// 2. Opt-in vectored IO. +// 3. Can use an optional buffer pool. +// 4. Able to add completion-based (uring) IO eventually. +// +// Frankly, the last point is the entire reason we're doing this. We want to +// have forwards-compatibility with an eventually stable io-uring runtime. We +// don't need that to work right away. But it must be possible to add in here +// without breaking hyper 1.0. +// +// While in here, if there's small tweaks to poll_read or poll_write that would +// allow even the "slow" path to be faster, such as if someone didn't remember +// to forward along an `is_completion` call. + +/// Reads bytes from a source. +/// +/// This trait is similar to `std::io::Read`, but supports asynchronous reads. +pub trait Read { + /// Attempts to read bytes into the `buf`. + /// + /// On success, returns `Poll::Ready(Ok(()))` and places data in the + /// unfilled portion of `buf`. If no data was read (`buf.remaining()` is + /// unchanged), it implies that EOF has been reached. + /// + /// If no data is available for reading, the method returns `Poll::Pending` + /// and arranges for the current task (via `cx.waker()`) to receive a + /// notification when the object becomes readable or is closed. + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: ReadBufCursor<'_>, + ) -> Poll>; +} + +/// Write bytes asynchronously. 
+/// +/// This trait is similar to `std::io::Write`, but for asynchronous writes. +pub trait Write { + /// Attempt to write bytes from `buf` into the destination. + /// + /// On success, returns `Poll::Ready(Ok(num_bytes_written)))`. If + /// successful, it must be guaranteed that `n <= buf.len()`. A return value + /// of `0` means that the underlying object is no longer able to accept + /// bytes, or that the provided buffer is empty. + /// + /// If the object is not ready for writing, the method returns + /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to + /// receive a notification when the object becomes writable or is closed. + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll>; + + /// Attempts to flush the object. + /// + /// On success, returns `Poll::Ready(Ok(()))`. + /// + /// If flushing cannot immediately complete, this method returns + /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to + /// receive a notification when the object can make progress. + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + + /// Attempts to shut down this writer. + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>; + + /// Returns whether this writer has an efficient `poll_write_vectored` + /// implementation. + /// + /// The default implementation returns `false`. + fn is_write_vectored(&self) -> bool { + false + } + + /// Like `poll_write`, except that it writes from a slice of buffers. + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + let buf = bufs + .iter() + .find(|b| !b.is_empty()) + .map_or(&[][..], |b| &**b); + self.poll_write(cx, buf) + } +} + +/// A wrapper around a byte buffer that is incrementally filled and initialized. +/// +/// This type is a sort of "double cursor". It tracks three regions in the +/// buffer: a region at the beginning of the buffer that has been logically +/// filled with data, a region that has been initialized at some point but not +/// yet logically filled, and a region at the end that may be uninitialized. +/// The filled region is guaranteed to be a subset of the initialized region. +/// +/// In summary, the contents of the buffer can be visualized as: +/// +/// ```not_rust +/// [ capacity ] +/// [ filled | unfilled ] +/// [ initialized | uninitialized ] +/// ``` +/// +/// It is undefined behavior to de-initialize any bytes from the uninitialized +/// region, since it is merely unknown whether this region is uninitialized or +/// not, and if part of it turns out to be initialized, it must stay initialized. +pub struct ReadBuf<'a> { + raw: &'a mut [MaybeUninit], + filled: usize, + init: usize, +} + +/// The cursor part of a [`ReadBuf`]. +/// +/// This is created by calling `ReadBuf::unfilled()`. +#[derive(Debug)] +pub struct ReadBufCursor<'a> { + buf: &'a mut ReadBuf<'a>, +} + +impl<'data> ReadBuf<'data> { + #[inline] + #[cfg(test)] + pub(crate) fn new(raw: &'data mut [u8]) -> Self { + let len = raw.len(); + Self { + // SAFETY: We never de-init the bytes ourselves. + raw: unsafe { &mut *(raw as *mut [u8] as *mut [MaybeUninit]) }, + filled: 0, + init: len, + } + } + + /// Create a new `ReadBuf` with a slice of uninitialized bytes. + #[inline] + pub fn uninit(raw: &'data mut [MaybeUninit]) -> Self { + Self { + raw, + filled: 0, + init: 0, + } + } + + /// Get a slice of the buffer that has been filled in with bytes. 
+ #[inline] + pub fn filled(&self) -> &[u8] { + // SAFETY: We only slice the filled part of the buffer, which is always valid + unsafe { &*(&self.raw[0..self.filled] as *const [MaybeUninit] as *const [u8]) } + } + + /// Get a cursor to the unfilled portion of the buffer. + #[inline] + pub fn unfilled<'cursor>(&'cursor mut self) -> ReadBufCursor<'cursor> { + ReadBufCursor { + // SAFETY: self.buf is never re-assigned, so its safe to narrow + // the lifetime. + buf: unsafe { + std::mem::transmute::<&'cursor mut ReadBuf<'data>, &'cursor mut ReadBuf<'cursor>>( + self, + ) + }, + } + } + + #[inline] + pub(crate) unsafe fn set_init(&mut self, n: usize) { + self.init = self.init.max(n); + } + + #[inline] + pub(crate) unsafe fn set_filled(&mut self, n: usize) { + self.filled = self.filled.max(n); + } + + #[inline] + pub(crate) fn len(&self) -> usize { + self.filled + } + + #[inline] + pub(crate) fn init_len(&self) -> usize { + self.init + } + + #[inline] + fn remaining(&self) -> usize { + self.capacity() - self.filled + } + + #[inline] + fn capacity(&self) -> usize { + self.raw.len() + } +} + +impl<'data> fmt::Debug for ReadBuf<'data> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ReadBuf") + .field("filled", &self.filled) + .field("init", &self.init) + .field("capacity", &self.capacity()) + .finish() + } +} + +impl<'data> ReadBufCursor<'data> { + /// Access the unfilled part of the buffer. + /// + /// # Safety + /// + /// The caller must not uninitialize any bytes that may have been + /// initialized before. + #[inline] + pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit] { + &mut self.buf.raw[self.buf.filled..] + } + + /// Advance the `filled` cursor by `n` bytes. + /// + /// # Safety + /// + /// The caller must take care that `n` more bytes have been initialized. + #[inline] + pub unsafe fn advance(&mut self, n: usize) { + self.buf.filled = self.buf.filled.checked_add(n).expect("overflow"); + self.buf.init = self.buf.filled.max(self.buf.init); + } + + #[inline] + pub(crate) fn remaining(&self) -> usize { + self.buf.remaining() + } + + #[inline] + pub(crate) fn put_slice(&mut self, buf: &[u8]) { + assert!( + self.buf.remaining() >= buf.len(), + "buf.len() must fit in remaining()" + ); + + let amt = buf.len(); + // Cannot overflow, asserted above + let end = self.buf.filled + amt; + + // Safety: the length is asserted above + unsafe { + self.buf.raw[self.buf.filled..end] + .as_mut_ptr() + .cast::() + .copy_from_nonoverlapping(buf.as_ptr(), amt); + } + + if self.buf.init < end { + self.buf.init = end; + } + self.buf.filled = end; + } +} + +macro_rules! deref_async_read { + () => { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: ReadBufCursor<'_>, + ) -> Poll> { + Pin::new(&mut **self).poll_read(cx, buf) + } + }; +} + +impl Read for Box { + deref_async_read!(); +} + +impl Read for &mut T { + deref_async_read!(); +} + +macro_rules! 
deref_async_write { + () => { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut **self).poll_write(cx, buf) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut **self).poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + (**self).is_write_vectored() + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self).poll_flush(cx) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut **self).poll_shutdown(cx) + } + }; +} + +impl Write for Box { + deref_async_write!(); +} + +impl Write for &mut T { + deref_async_write!(); +} diff --git a/src/rt/mod.rs b/src/rt/mod.rs index cf6473df1f..de67c3fc89 100644 --- a/src/rt/mod.rs +++ b/src/rt/mod.rs @@ -1,27 +1,29 @@ //! Runtime components //! -//! By default, hyper includes the [tokio](https://tokio.rs) runtime. +//! The traits and types within this module are used to allow plugging in +//! runtime types. These include: //! -//! If the `runtime` feature is disabled, the types in this module can be used -//! to plug in other runtimes. +//! - Executors +//! - Timers +//! - IO transports pub mod bounds; +mod io; +mod timer; -use std::{ - future::Future, - pin::Pin, - time::{Duration, Instant}, -}; +pub use self::io::{Read, ReadBuf, ReadBufCursor, Write}; +pub use self::timer::{Sleep, Timer}; /// An executor of futures. /// -/// This trait should be implemented for any future. +/// This trait allows Hyper to abstract over async runtimes. Implement this trait for your own type. /// /// # Example /// /// ``` /// # use hyper::rt::Executor; /// # use std::future::Future; +/// #[derive(Clone)] /// struct TokioExecutor; /// /// impl Executor for TokioExecutor @@ -38,21 +40,3 @@ pub trait Executor { /// Place the future into the executor to be run. fn execute(&self, fut: Fut); } - -/// A timer which provides timer-like functions. -pub trait Timer { - /// Return a future that resolves in `duration` time. - fn sleep(&self, duration: Duration) -> Pin>; - - /// Return a future that resolves at `deadline`. - fn sleep_until(&self, deadline: Instant) -> Pin>; - - /// Reset a future to resolve at `new_deadline` instead. - fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { - *sleep = self.sleep_until(new_deadline); - } -} - -/// A future returned by a `Timer`. -pub trait Sleep: Send + Sync + Future {} -impl Sleep for T where T: Send + Sync + Future {} diff --git a/src/rt/timer.rs b/src/rt/timer.rs new file mode 100644 index 0000000000..c79f3bfc15 --- /dev/null +++ b/src/rt/timer.rs @@ -0,0 +1,128 @@ +//! Provides a timer trait with timer-like functions +//! +//! Example using tokio timer: +//! ```rust +//! use std::{ +//! future::Future, +//! pin::Pin, +//! task::{Context, Poll}, +//! time::{Duration, Instant}, +//! }; +//! +//! use pin_project_lite::pin_project; +//! use hyper::rt::{Timer, Sleep}; +//! +//! #[derive(Clone, Debug)] +//! pub struct TokioTimer; +//! +//! impl Timer for TokioTimer { +//! fn sleep(&self, duration: Duration) -> Pin> { +//! Box::pin(TokioSleep { +//! inner: tokio::time::sleep(duration), +//! }) +//! } +//! +//! fn sleep_until(&self, deadline: Instant) -> Pin> { +//! Box::pin(TokioSleep { +//! inner: tokio::time::sleep_until(deadline.into()), +//! }) +//! } +//! +//! fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { +//! 
+//!         if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
+//!             sleep.reset(new_deadline.into())
+//!         }
+//!     }
+//! }
+//!
+//! pin_project! {
+//!     pub(crate) struct TokioSleep {
+//!         #[pin]
+//!         pub(crate) inner: tokio::time::Sleep,
+//!     }
+//! }
+//!
+//! impl Future for TokioSleep {
+//!     type Output = ();
+//!
+//!     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+//!         self.project().inner.poll(cx)
+//!     }
+//! }
+//!
+//! impl Sleep for TokioSleep {}
+//!
+//! impl TokioSleep {
+//!     pub fn reset(self: Pin<&mut Self>, deadline: Instant) {
+//!         self.project().inner.as_mut().reset(deadline.into());
+//!     }
+//! }
+//! ```
+
+use std::{
+    any::TypeId,
+    future::Future,
+    pin::Pin,
+    time::{Duration, Instant},
+};
+
+/// A timer which provides timer-like functions.
+pub trait Timer {
+    /// Return a future that resolves in `duration` time.
+    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>>;
+
+    /// Return a future that resolves at `deadline`.
+    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>>;
+
+    /// Reset a future to resolve at `new_deadline` instead.
+    fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+        *sleep = self.sleep_until(new_deadline);
+    }
+}
+
+/// A future returned by a `Timer`.
+pub trait Sleep: Send + Sync + Future<Output = ()> {
+    #[doc(hidden)]
+    /// This method is private and cannot be implemented by downstream crates.
+    fn __type_id(&self, _: private::Sealed) -> TypeId
+    where
+        Self: 'static,
+    {
+        TypeId::of::<Self>()
+    }
+}
+impl<T> Sleep for T where T: Send + Sync + Future<Output = ()> {}
+
+impl dyn Sleep {
+    //! This is a re-implementation of downcast methods from std::any::Any
+
+    /// Check whether the type is the same as `T`
+    pub fn is<T>(&self) -> bool
+    where
+        T: Sleep + 'static,
+    {
+        self.__type_id(private::Sealed {}) == TypeId::of::<T>()
+    }
+
+    /// Downcast a pinned &mut Sleep object to its original type
+    pub fn downcast_mut_pin<T>(self: Pin<&mut Self>) -> Option<Pin<&mut T>>
+    where
+        T: Sleep + 'static,
+    {
+        if self.is::<T>() {
+            unsafe {
+                let inner = Pin::into_inner_unchecked(self);
+                Some(Pin::new_unchecked(
+                    &mut *(&mut *inner as *mut dyn Sleep as *mut T),
+                ))
+            }
+        } else {
+            None
+        }
+    }
+}
+
+mod private {
+    #![allow(missing_debug_implementations)]
+    pub struct Sealed {}
+}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
index 9ee2fe159f..50629cf4fe 100644
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -5,23 +5,29 @@ use std::fmt;
 use std::sync::Arc;
 use std::time::Duration;
 
+use crate::rt::{Read, Write};
 use bytes::Bytes;
-use tokio::io::{AsyncRead, AsyncWrite};
 
 use crate::body::{Body, Incoming as IncomingBody};
 use crate::common::{task, Future, Pin, Poll, Unpin};
-use crate::{common::time::Time, rt::Timer};
 use crate::proto;
 use crate::service::HttpService;
+use crate::{common::time::Time, rt::Timer};
 
-type Http1Dispatcher<T, B, S> =
-    proto::h1::Dispatcher<proto::h1::dispatch::Server<S, IncomingBody>, B, T, proto::ServerTransaction>;
-
+type Http1Dispatcher<T, B, S> = proto::h1::Dispatcher<
+    proto::h1::dispatch::Server<S, IncomingBody>,
+    B,
+    T,
+    proto::ServerTransaction,
+>;
 
 pin_project_lite::pin_project! {
-    /// A future binding an http1 connection with a Service.
+    /// A [`Future`](core::future::Future) representing an HTTP/1 connection, bound to a
+    /// [`Service`](crate::service::Service), returned from
+    /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection).
    ///
-    /// Polling this future will drive HTTP forward.
+    /// To drive HTTP on this connection this future **must be polled**, typically with
+    /// `.await`. If it isn't polled, no progress will be made on this connection.
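As a quick illustration of how the new `Timer`/`Sleep` machinery in `src/rt/timer.rs` above is meant to be driven (a hedged sketch; `extend_timeout` is an illustrative helper and not part of hyper's API):

```rust
use std::pin::Pin;
use std::time::{Duration, Instant};

use hyper::rt::{Sleep, Timer};

// Push an existing timeout further into the future. `Timer::reset` defaults
// to replacing the boxed sleep, but implementations that override it (like
// the `TokioTimer` doc example above) can use `downcast_mut_pin` to reset
// the existing `Sleep` in place without reallocating.
fn extend_timeout<T: Timer>(timer: &T, sleep: &mut Pin<Box<dyn Sleep>>, extra: Duration) {
    timer.reset(sleep, Instant::now() + extra);
}
```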
#[must_use = "futures do nothing unless polled"] pub struct Connection where @@ -31,7 +37,6 @@ pin_project_lite::pin_project! { } } - /// A configuration builder for HTTP/1 server connections. #[derive(Clone, Debug)] pub struct Builder { @@ -83,7 +88,7 @@ impl Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, { @@ -151,9 +156,7 @@ where let mut zelf = Some(self); futures_util::future::poll_fn(move |cx| { ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?; - Poll::Ready( - Ok(zelf.take().unwrap().into_parts()) - ) + Poll::Ready(Ok(zelf.take().unwrap().into_parts())) }) } @@ -168,12 +171,11 @@ where } } - impl Future for Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, + I: Read + Write + Unpin + 'static, B: Body + 'static, B::Error: Into>, { @@ -194,7 +196,7 @@ where }; return Poll::Ready(Ok(())); } - Err(e) => Poll::Ready(Err(e)), + Err(e) => Poll::Ready(Err(e)), } } } @@ -334,10 +336,10 @@ impl Builder { /// # use hyper::{body::Incoming, Request, Response}; /// # use hyper::service::Service; /// # use hyper::server::conn::http1::Builder; - /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use hyper::rt::{Read, Write}; /// # async fn run(some_io: I, some_service: S) /// # where - /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + /// # I: Read + Write + Unpin + Send + 'static, /// # S: Service, Response=hyper::Response> + Send + 'static, /// # S::Error: Into>, /// # S::Future: Send, @@ -357,7 +359,7 @@ impl Builder { S::Error: Into>, S::ResBody: 'static, ::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, { let mut conn = proto::Conn::new(io); conn.set_timer(self.timer.clone()); @@ -389,9 +391,7 @@ impl Builder { } let sd = proto::h1::dispatch::Server::new(service); let proto = proto::h1::Dispatcher::new(sd, conn); - Connection { - conn: proto, - } + Connection { conn: proto } } } @@ -416,7 +416,7 @@ mod upgrades { where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, { @@ -433,7 +433,7 @@ mod upgrades { where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + I: Read + Write + Unpin + Send + 'static, B: Body + 'static, B::Error: Into>, { diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs index 45e0760956..1cc351134b 100644 --- a/src/server/conn/http2.rs +++ b/src/server/conn/http2.rs @@ -5,8 +5,8 @@ use std::fmt; use std::sync::Arc; use std::time::Duration; +use crate::rt::{Read, Write}; use pin_project_lite::pin_project; -use tokio::io::{AsyncRead, AsyncWrite}; use crate::body::{Body, Incoming as IncomingBody}; use crate::common::{task, Future, Pin, Poll, Unpin}; @@ -16,9 +16,12 @@ use crate::service::HttpService; use crate::{common::time::Time, rt::Timer}; pin_project! { - /// A future binding an HTTP/2 connection with a Service. + /// A [`Future`](core::future::Future) representing an HTTP/2 connection, bound to a + /// [`Service`](crate::service::Service), returned from + /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection). /// - /// Polling this future will drive HTTP forward. + /// To drive HTTP on this connection this future **must be polled**, typically with + /// `.await`. If it isn't polled, no progress will be made on this connection. 
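Putting the HTTP/1 builder together with the graceful-shutdown behavior exercised later in this diff, a hedged sketch of serving a single connection might look like the following (assumes hyper's `server` and `http1` features; `serve_one` is illustrative, and `io` is any `hyper::rt::Read + Write` transport, e.g. a Tokio stream behind an adapter like the `TokioIo` sketch at the end of this section):

```rust
use std::convert::Infallible;

use bytes::Bytes;
use http_body_util::Full;
use hyper::{body::Incoming, server::conn::http1, service::service_fn, Request, Response};
use tokio::pin;

async fn serve_one<I>(io: I) -> hyper::Result<()>
where
    I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
{
    let conn = http1::Builder::new().serve_connection(
        io,
        service_fn(|_req: Request<Incoming>| async {
            Ok::<_, Infallible>(Response::new(Full::new(Bytes::from("hello"))))
        }),
    );
    pin!(conn);

    // Ask the connection to finish in-flight work and then close. With the
    // fix in this release the future resolves even if no request ever
    // arrives (see `graceful_shutdown_before_first_request_no_block` below).
    conn.as_mut().graceful_shutdown();
    conn.await
}
```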
#[must_use = "futures do nothing unless polled"] pub struct Connection where @@ -51,7 +54,7 @@ impl Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, E: Http2ConnExec, @@ -75,7 +78,7 @@ impl Future for Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, + I: Read + Write + Unpin + 'static, B: Body + 'static, B::Error: Into>, E: Http2ConnExec, @@ -118,7 +121,7 @@ impl Builder { /// /// If not set, hyper will use a default. /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_INITIAL_WINDOW_SIZE pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; @@ -132,10 +135,7 @@ impl Builder { /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. - pub fn initial_connection_window_size( - &mut self, - sz: impl Into>, - ) -> &mut Self { + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; self.h2_builder.initial_conn_window_size = sz; @@ -176,7 +176,7 @@ impl Builder { /// /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. /// - /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + /// [spec]: https://httpwg.org/specs/rfc9113.html#SETTINGS_MAX_CONCURRENT_STREAMS pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { self.h2_builder.max_concurrent_streams = max.into(); self @@ -191,10 +191,7 @@ impl Builder { /// /// # Cargo Feature /// - pub fn keep_alive_interval( - &mut self, - interval: impl Into>, - ) -> &mut Self { + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { self.h2_builder.keep_alive_interval = interval.into(); self } @@ -261,7 +258,7 @@ impl Builder { S::Error: Into>, Bd: Body + 'static, Bd::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin, + I: Read + Write + Unpin, E: Http2ConnExec, { let proto = proto::h2::Server::new( diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs index 2e7157c5b8..54b309e88e 100644 --- a/src/server/conn/mod.rs +++ b/src/server/conn/mod.rs @@ -5,48 +5,16 @@ //! are not handled at this level. This module provides the building blocks to //! customize those things externally. //! -//! This module is split by HTTP version. Both work similarly, but do have -//! specific options on each builder. -//! -//! ## Example -//! -//! A simple example that prepares an HTTP/1 connection over a Tokio TCP stream. -//! -//! ```no_run -//! # #[cfg(feature = "http1")] -//! # mod rt { -//! use http::{Request, Response, StatusCode}; -//! use http_body_util::Full; -//! use hyper::{server::conn::http1, service::service_fn, body, body::Bytes}; -//! use std::{net::SocketAddr, convert::Infallible}; -//! use tokio::net::TcpListener; -//! -//! #[tokio::main] -//! async fn main() -> Result<(), Box> { -//! let addr: SocketAddr = ([127, 0, 0, 1], 8080).into(); -//! -//! let mut tcp_listener = TcpListener::bind(addr).await?; -//! loop { -//! let (tcp_stream, _) = tcp_listener.accept().await?; -//! tokio::task::spawn(async move { -//! if let Err(http_err) = http1::Builder::new() -//! .keep_alive(true) -//! .serve_connection(tcp_stream, service_fn(hello)) -//! .await { -//! eprintln!("Error while serving HTTP connection: {}", http_err); -//! 
} -//! }); -//! } -//! } -//! -//! async fn hello(_req: Request) -> Result>, Infallible> { -//! Ok(Response::new(Full::new(Bytes::from("Hello World!")))) -//! } -//! # } -//! ``` +//! This module is split by HTTP version, providing a connection builder for +//! each. They work similarly, but they each have specific options. +//! +//! If your server needs to support both versions, an auto-connection builder is +//! provided in the [`hyper-util`](https://github.com/hyperium/hyper-util/tree/master) +//! crate. This builder wraps the HTTP/1 and HTTP/2 connection builders from this +//! module, allowing you to set configuration for both. The builder will then check +//! the version of the incoming connection and serve it accordingly. #[cfg(feature = "http1")] pub mod http1; #[cfg(feature = "http2")] pub mod http2; - diff --git a/src/server/mod.rs b/src/server/mod.rs index 46d6bf51a7..afc704cd98 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -7,4 +7,3 @@ //! concerns itself with. After you have a connection, you can handle HTTP over //! it with the types in the [`conn`](conn) module. pub mod conn; - diff --git a/src/service/service.rs b/src/service/service.rs index b5de9bec20..95024bee44 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -28,5 +28,13 @@ pub trait Service { type Future: Future>; /// Process the request and return the response asynchronously. - fn call(&mut self, req: Request) -> Self::Future; + /// call takes a &self instead of a mut &self because: + /// - It prepares the way for async fn, + /// since then the future only borrows &self, and thus a Service can concurrently handle + /// multiple outstanding requests at once. + /// - It's clearer that Services can likely be cloned + /// - To share state across clones you generally need Arc> + /// that means you're not really using the &mut self and could do with a &self + /// To see the discussion on this see: + fn call(&self, req: Request) -> Self::Future; } diff --git a/src/service/util.rs b/src/service/util.rs index 1d8587fe82..710ba53543 100644 --- a/src/service/util.rs +++ b/src/service/util.rs @@ -29,7 +29,7 @@ use crate::{Request, Response}; /// ``` pub fn service_fn(f: F) -> ServiceFn where - F: FnMut(Request) -> S, + F: Fn(Request) -> S, S: Future, { ServiceFn { @@ -46,7 +46,7 @@ pub struct ServiceFn { impl Service> for ServiceFn where - F: FnMut(Request) -> Ret, + F: Fn(Request) -> Ret, ReqBody: Body, Ret: Future, E>>, E: Into>, @@ -56,7 +56,7 @@ where type Error = E; type Future = Ret; - fn call(&mut self, req: Request) -> Self::Future { + fn call(&self, req: Request) -> Self::Future { (self.f)(req) } } diff --git a/src/trace.rs b/src/trace.rs new file mode 100644 index 0000000000..88f9a243a0 --- /dev/null +++ b/src/trace.rs @@ -0,0 +1,128 @@ +// For completeness, wrappers around all of tracing's public logging and span macros are provided, +// even if they are not used at the present time. +#![allow(unused_macros)] + +#[cfg(all(not(hyper_unstable_tracing), feature = "tracing"))] +compile_error!( + "\ + The `tracing` feature is unstable, and requires the \ + `RUSTFLAGS='--cfg hyper_unstable_tracing'` environment variable to be set.\ +" +); + +macro_rules! debug { + ($($arg:tt)+) => { + #[cfg(feature = "tracing")] + { + tracing::debug!($($arg)+); + } + } +} + +macro_rules! debug_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::debug_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! 
error { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::error!($($arg)+); + } + } +} + +macro_rules! error_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::error_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! info { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::info!($($arg)+); + } + } +} + +macro_rules! info_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::info_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! trace { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::trace!($($arg)+); + } + } +} + +macro_rules! trace_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::trace_span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::span!($($arg)+); + _span.entered() + } + } + } +} + +macro_rules! warn { + ($($arg:tt)*) => { + #[cfg(feature = "tracing")] + { + tracing::warn!($($arg)+); + } + } +} + +macro_rules! warn_span { + ($($arg:tt)*) => { + { + #[cfg(feature = "tracing")] + { + let _span = tracing::warn_span!($($arg)+); + _span.entered() + } + } + } +} diff --git a/src/upgrade.rs b/src/upgrade.rs index 1c7b5b01cd..03a56af6c5 100644 --- a/src/upgrade.rs +++ b/src/upgrade.rs @@ -45,11 +45,9 @@ use std::fmt; use std::io; use std::marker::Unpin; +use crate::rt::{Read, ReadBufCursor, Write}; use bytes::Bytes; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::sync::oneshot; -#[cfg(any(feature = "http1", feature = "http2"))] -use tracing::trace; use crate::common::io::Rewind; use crate::common::{task, Future, Pin, Poll}; @@ -122,7 +120,7 @@ impl Upgraded { #[cfg(any(feature = "http1", feature = "http2", test))] pub(super) fn new(io: T, read_buf: Bytes) -> Self where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: Read + Write + Unpin + Send + 'static, { Upgraded { io: Rewind::new_buffered(Box::new(io), read_buf), @@ -133,7 +131,7 @@ impl Upgraded { /// /// On success, returns the downcasted parts. On error, returns the /// `Upgraded` back. - pub fn downcast(self) -> Result, Self> { + pub fn downcast(self) -> Result, Self> { let (io, buf) = self.io.into_inner(); match io.__hyper_downcast() { Ok(t) => Ok(Parts { @@ -148,17 +146,17 @@ impl Upgraded { } } -impl AsyncRead for Upgraded { +impl Read for Upgraded { fn poll_read( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, - buf: &mut ReadBuf<'_>, + buf: ReadBufCursor<'_>, ) -> Poll> { Pin::new(&mut self.io).poll_read(cx, buf) } } -impl AsyncWrite for Upgraded { +impl Write for Upgraded { fn poll_write( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, @@ -241,6 +239,7 @@ impl Pending { /// Don't fulfill the pending Upgrade, but instead signal that /// upgrades are handled manually. 
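The `Service::call(&self, ...)` change in `src/service/service.rs` above is what lets a service hold shared state without exclusive access. A hedged sketch of the pattern the new signature enables (the `Counter` type is illustrative, not from this diff):

```rust
use std::future::{ready, Ready};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

use bytes::Bytes;
use http_body_util::Full;
use hyper::body::Incoming;
use hyper::service::Service;
use hyper::{Request, Response};

// Because `call` now takes `&self`, shared state only needs `Arc`, not
// `Arc<Mutex<..>>`, and the service can be cloned per connection.
#[derive(Clone)]
struct Counter {
    hits: Arc<AtomicUsize>,
}

impl Service<Request<Incoming>> for Counter {
    type Response = Response<Full<Bytes>>;
    type Error = std::convert::Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn call(&self, _req: Request<Incoming>) -> Self::Future {
        let n = self.hits.fetch_add(1, Ordering::Relaxed) + 1;
        ready(Ok(Response::new(Full::new(Bytes::from(format!("hit #{n}"))))))
    }
}
```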
pub(super) fn manual(self) { + #[cfg(any(feature = "http1", feature = "http2"))] trace!("pending upgrade handled manually"); let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade())); } @@ -265,13 +264,13 @@ impl StdError for UpgradeExpected {} // ===== impl Io ===== -pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static { +pub(super) trait Io: Read + Write + Unpin + 'static { fn __hyper_type_id(&self) -> TypeId { TypeId::of::() } } -impl Io for T {} +impl Io for T {} impl dyn Io + Send { fn __hyper_is(&self) -> bool { @@ -340,7 +339,9 @@ mod tests { fn upgraded_downcast() { let upgraded = Upgraded::new(Mock, Bytes::new()); - let upgraded = upgraded.downcast::>>().unwrap_err(); + let upgraded = upgraded + .downcast::>>>() + .unwrap_err(); upgraded.downcast::().unwrap(); } @@ -348,17 +349,17 @@ mod tests { // TODO: replace with tokio_test::io when it can test write_buf struct Mock; - impl AsyncRead for Mock { + impl Read for Mock { fn poll_read( self: Pin<&mut Self>, _cx: &mut task::Context<'_>, - _buf: &mut ReadBuf<'_>, + _buf: ReadBufCursor<'_>, ) -> Poll> { unreachable!("Mock::poll_read") } } - impl AsyncWrite for Mock { + impl Write for Mock { fn poll_write( self: Pin<&mut Self>, _: &mut task::Context<'_>, diff --git a/tests/client.rs b/tests/client.rs index 739f223b16..b306016eea 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -1,11 +1,9 @@ #![deny(warnings)] #![warn(rust_2018_idioms)] -#[macro_use] -extern crate matches; - use std::convert::Infallible; use std::fmt; +use std::future::Future; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener}; use std::pin::Pin; @@ -20,8 +18,8 @@ use hyper::{Method, Request, StatusCode, Uri, Version}; use bytes::Bytes; use futures_channel::oneshot; -use futures_core::{Future, TryFuture}; -use futures_util::future::{self, FutureExt, TryFutureExt}; +use futures_util::future::{self, FutureExt, TryFuture, TryFutureExt}; +use support::TokioIo; use tokio::net::TcpStream; mod support; @@ -36,8 +34,8 @@ where b.collect().await.map(|c| c.to_bytes()) } -fn tcp_connect(addr: &SocketAddr) -> impl Future> { - TcpStream::connect(*addr) +async fn tcp_connect(addr: &SocketAddr) -> std::io::Result> { + TcpStream::connect(*addr).await.map(TokioIo::new) } struct HttpInfo { @@ -312,7 +310,7 @@ macro_rules! 
test { req.headers_mut().append("Host", HeaderValue::from_str(&host).unwrap()); } - let (mut sender, conn) = builder.handshake(stream).await?; + let (mut sender, conn) = builder.handshake(TokioIo::new(stream)).await?; tokio::task::spawn(async move { if let Err(err) = conn.await { @@ -1337,9 +1335,9 @@ mod conn { use bytes::{Buf, Bytes}; use futures_channel::{mpsc, oneshot}; use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt}; - use http_body_util::{BodyExt, Empty, StreamBody}; + use http_body_util::{BodyExt, Empty, Full, StreamBody}; use hyper::rt::Timer; - use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf}; + use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _}; use tokio::net::{TcpListener as TkTcpListener, TcpStream}; use hyper::body::{Body, Frame}; @@ -1349,7 +1347,7 @@ mod conn { use super::{concat, s, support, tcp_connect, FutureHyperExt}; - use support::{TokioExecutor, TokioTimer}; + use support::{TokioExecutor, TokioIo, TokioTimer}; fn setup_logger() { let _ = pretty_env_logger::try_init(); @@ -1773,18 +1771,19 @@ mod conn { } let parts = conn.into_parts(); - let mut io = parts.io; + let io = parts.io; let buf = parts.read_buf; assert_eq!(buf, b"foobar=ready"[..]); assert!(!io.shutdown_called, "upgrade shouldn't shutdown AsyncWrite"); rt.block_on(poll_fn(|ctx| { let ready = client.poll_ready(ctx); - assert_matches!(ready, Poll::Ready(Err(_))); + assert!(matches!(ready, Poll::Ready(Err(_)))); ready })) .unwrap_err(); + let mut io = io.tcp.inner(); let mut vec = vec![]; rt.block_on(io.write_all(b"foo=bar")).unwrap(); rt.block_on(io.read_to_end(&mut vec)).unwrap(); @@ -1861,7 +1860,7 @@ mod conn { } let parts = conn.into_parts(); - let mut io = parts.io; + let io = parts.io; let buf = parts.read_buf; assert_eq!(buf, b"foobar=ready"[..]); @@ -1869,11 +1868,12 @@ mod conn { rt.block_on(poll_fn(|ctx| { let ready = client.poll_ready(ctx); - assert_matches!(ready, Poll::Ready(Err(_))); + assert!(matches!(ready, Poll::Ready(Err(_)))); ready })) .unwrap_err(); + let mut io = io.tcp.inner(); let mut vec = vec![]; rt.block_on(io.write_all(b"foo=bar")).unwrap(); rt.block_on(io.read_to_end(&mut vec)).unwrap(); @@ -1895,6 +1895,7 @@ mod conn { tokio::select! { res = listener.accept() => { let (stream, _) = res.unwrap(); + let stream = TokioIo::new(stream); let service = service_fn(|_:Request| future::ok::<_, hyper::Error>(Response::new(Empty::::new()))); @@ -2077,7 +2078,7 @@ mod conn { // Spawn an HTTP2 server that reads the whole body and responds tokio::spawn(async move { - let sock = listener.accept().await.unwrap().0; + let sock = TokioIo::new(listener.accept().await.unwrap().0); hyper::server::conn::http2::Builder::new(TokioExecutor) .timer(TokioTimer) .serve_connection( @@ -2122,6 +2123,62 @@ mod conn { .expect("client should be open"); } + #[tokio::test] + async fn http2_responds_before_consuming_request_body() { + // Test that a early-response from server works correctly (request body wasn't fully consumed). + // https://github.com/hyperium/hyper/issues/2872 + use hyper::service::service_fn; + + let _ = pretty_env_logger::try_init(); + + let (listener, addr) = setup_tk_test_server().await; + + // Spawn an HTTP2 server that responds before reading the whole request body. + // It's normal case to decline the request due to headers or size of the body. 
+ tokio::spawn(async move { + let sock = TokioIo::new(listener.accept().await.unwrap().0); + hyper::server::conn::http2::Builder::new(TokioExecutor) + .timer(TokioTimer) + .serve_connection( + sock, + service_fn(|_req| async move { + Ok::<_, hyper::Error>(Response::new(Full::new(Bytes::from( + "No bread for you!", + )))) + }), + ) + .await + .expect("serve_connection"); + }); + + let io = tcp_connect(&addr).await.expect("tcp connect"); + let (mut client, conn) = conn::http2::Builder::new(TokioExecutor) + .timer(TokioTimer) + .handshake(io) + .await + .expect("http handshake"); + + tokio::spawn(async move { + conn.await.expect("client conn shouldn't error"); + }); + + // Use a channel to keep request stream open + let (_tx, recv) = mpsc::channel::, Box>>(0); + let req = Request::post("/a").body(StreamBody::new(recv)).unwrap(); + let resp = client.send_request(req).await.expect("send_request"); + assert!(resp.status().is_success()); + + let mut body = String::new(); + concat(resp.into_body()) + .await + .unwrap() + .reader() + .read_to_string(&mut body) + .unwrap(); + + assert_eq!(&body, "No bread for you!"); + } + #[tokio::test] async fn h2_connect() { let (listener, addr) = setup_tk_test_server().await; @@ -2166,7 +2223,7 @@ mod conn { let res = client.send_request(req).await.expect("send_request"); assert_eq!(res.status(), StatusCode::OK); - let mut upgraded = hyper::upgrade::on(res).await.unwrap(); + let mut upgraded = TokioIo::new(hyper::upgrade::on(res).await.unwrap()); let mut vec = vec![]; upgraded.read_to_end(&mut vec).await.unwrap(); @@ -2258,13 +2315,9 @@ mod conn { let error = client.send_request(req).await.unwrap_err(); assert!(error.is_user()); - assert_eq!( - error.to_string(), - "dispatch task is gone: user code panicked" - ); } - async fn drain_til_eof(mut sock: T) -> io::Result<()> { + async fn drain_til_eof(mut sock: T) -> io::Result<()> { let mut buf = [0u8; 1024]; loop { let n = sock.read(&mut buf).await?; @@ -2276,11 +2329,11 @@ mod conn { } struct DebugStream { - tcp: TcpStream, + tcp: TokioIo, shutdown_called: bool, } - impl AsyncWrite for DebugStream { + impl hyper::rt::Write for DebugStream { fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -2305,11 +2358,11 @@ mod conn { } } - impl AsyncRead for DebugStream { + impl hyper::rt::Read for DebugStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, + buf: hyper::rt::ReadBufCursor<'_>, ) -> Poll> { Pin::new(&mut self.tcp).poll_read(cx, buf) } diff --git a/tests/server.rs b/tests/server.rs index 632ce4839a..16a5a9afbe 100644 --- a/tests/server.rs +++ b/tests/server.rs @@ -22,8 +22,8 @@ use h2::{RecvStream, SendStream}; use http::header::{HeaderName, HeaderValue}; use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full, StreamBody}; use hyper::rt::Timer; -use support::{TokioExecutor, TokioTimer}; -use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use hyper::rt::{Read as AsyncRead, Write as AsyncWrite}; +use support::{TokioExecutor, TokioIo, TokioTimer}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream}; @@ -31,6 +31,7 @@ use hyper::body::{Body, Incoming as IncomingBody}; use hyper::server::conn::{http1, http2}; use hyper::service::{service_fn, Service}; use hyper::{Method, Request, Response, StatusCode, Uri, Version}; +use tokio::pin; mod support; @@ -92,6 +93,7 @@ mod response_body_lengths { } fn run_test(case: TestCase) { + let _ = pretty_env_logger::try_init(); assert!( 
case.version == 0 || case.version == 1, "TestCase.version must 0 or 1" @@ -155,18 +157,22 @@ mod response_body_lengths { let n = body.find("\r\n\r\n").unwrap() + 4; if case.expects_chunked { - let len = body.len(); - assert_eq!( - &body[n + 1..n + 3], - "\r\n", - "expected body chunk size header" - ); - assert_eq!(&body[n + 3..len - 7], body_str, "expected body"); - assert_eq!( - &body[len - 7..], - "\r\n0\r\n\r\n", - "expected body final chunk size header" - ); + if body_str.len() > 0 { + let len = body.len(); + assert_eq!( + &body[n + 1..n + 3], + "\r\n", + "expected body chunk size header" + ); + assert_eq!(&body[n + 3..len - 7], body_str, "expected body"); + assert_eq!( + &body[len - 7..], + "\r\n0\r\n\r\n", + "expected body final chunk size header" + ); + } else { + assert_eq!(&body[n..], "0\r\n\r\n"); + } } else { assert_eq!(&body[n..], body_str, "expected body"); } @@ -217,6 +223,17 @@ mod response_body_lengths { }); } + #[test] + fn chunked_response_known_empty() { + run_test(TestCase { + version: 1, + headers: &[("transfer-encoding", "chunked")], + body: Bd::Known(""), + expects_chunked: true, // should still send chunked, and 0\r\n\r\n + expects_con_len: false, + }); + } + #[test] fn chunked_response_unknown() { run_test(TestCase { @@ -506,6 +523,7 @@ fn post_with_chunked_body() { #[test] fn post_with_chunked_overflow() { + use std::error::Error as _; let server = serve(); let mut req = connect(server.addr()); req.write_all( @@ -525,7 +543,7 @@ fn post_with_chunked_overflow() { .unwrap(); req.read(&mut [0; 256]).unwrap(); - let err = server.body_err().to_string(); + let err = server.body_err().source().unwrap().to_string(); assert!( err.contains("overflow"), "error should be overflow: {:?}", @@ -899,6 +917,39 @@ fn expect_continue_accepts_upper_cased_expectation() { assert_eq!(body, msg); } +#[test] +fn expect_continue_but_http_10_is_ignored() { + let server = serve(); + let mut req = connect(server.addr()); + server.reply(); + + req.write_all( + b"\ + POST /foo HTTP/1.0\r\n\ + Host: example.domain\r\n\ + Expect: 100-Continue\r\n\ + Content-Length: 5\r\n\ + Connection: Close\r\n\ + \r\n\ + ", + ) + .expect("write 1"); + + let msg = b"hello"; + req.write_all(msg).expect("write 2"); + + let s_line = b"HTTP/1.0 200 OK\r\n"; + let mut buf = vec![0; s_line.len()]; + req.read_exact(&mut buf).expect("read 1"); + assert_eq!(buf, s_line); + + let mut body = String::new(); + req.read_to_string(&mut body).expect("read 2"); + + let body = server.body(); + assert_eq!(body, msg); +} + #[test] fn expect_continue_but_no_body_is_ignored() { let server = serve(); @@ -958,6 +1009,7 @@ async fn expect_continue_waits_for_body_poll() { }); let (socket, _) = listener.accept().await.expect("accept"); + let socket = TokioIo::new(socket); http1::Builder::new() .serve_connection( @@ -1123,14 +1175,21 @@ async fn disable_keep_alive_mid_request() { let child = thread::spawn(move || { let mut req = connect(&addr); req.write_all(b"GET / HTTP/1.1\r\n").unwrap(); + thread::sleep(Duration::from_millis(10)); tx1.send(()).unwrap(); rx2.recv().unwrap(); req.write_all(b"Host: localhost\r\n\r\n").unwrap(); let mut buf = vec![]; req.read_to_end(&mut buf).unwrap(); + assert!( + buf.starts_with(b"HTTP/1.1 200 OK\r\n"), + "should receive OK response, but buf: {:?}", + buf, + ); }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); let srv = http1::Builder::new().serve_connection(socket, HelloWorld); future::try_select(srv, rx1) .then(|r| match r { @@ -1178,7 +1237,7 @@ async fn 
disable_keep_alive_post_request() { let dropped2 = dropped.clone(); let (socket, _) = listener.accept().await.unwrap(); let transport = DebugStream { - stream: socket, + stream: TokioIo::new(socket), _debug: dropped2, }; let server = http1::Builder::new().serve_connection(transport, HelloWorld); @@ -1206,6 +1265,7 @@ async fn empty_parse_eof_does_not_return_error() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .serve_connection(socket, HelloWorld) .await @@ -1222,6 +1282,7 @@ async fn nonempty_parse_eof_returns_error() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .serve_connection(socket, HelloWorld) .await @@ -1245,6 +1306,7 @@ async fn http1_allow_half_close() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .half_close(true) .serve_connection( @@ -1272,6 +1334,7 @@ async fn disconnect_after_reading_request_before_responding() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .half_close(false) .serve_connection( @@ -1303,6 +1366,7 @@ async fn returning_1xx_response_is_error() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .serve_connection( socket, @@ -1367,6 +1431,7 @@ async fn header_read_timeout_slow_writes() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); let conn = http1::Builder::new() .timer(TokioTimer) .header_read_timeout(Duration::from_secs(5)) @@ -1442,6 +1507,7 @@ async fn header_read_timeout_slow_writes_multiple_requests() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); let conn = http1::Builder::new() .timer(TokioTimer) .header_read_timeout(Duration::from_secs(5)) @@ -1488,6 +1554,7 @@ async fn upgrades() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); let conn = http1::Builder::new().serve_connection( socket, service_fn(|_| { @@ -1506,7 +1573,7 @@ async fn upgrades() { // wait so that we don't write until other side saw 101 response rx.await.unwrap(); - let mut io = parts.io; + let mut io = parts.io.inner(); io.write_all(b"foo=bar").await.unwrap(); let mut vec = vec![]; io.read_to_end(&mut vec).await.unwrap(); @@ -1541,6 +1608,7 @@ async fn http_connect() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); let conn = http1::Builder::new().serve_connection( socket, service_fn(|_| { @@ -1558,7 +1626,7 @@ async fn http_connect() { // wait so that we don't write until other side saw 101 response rx.await.unwrap(); - let mut io = parts.io; + let mut io = parts.io.inner(); io.write_all(b"foo=bar").await.unwrap(); let mut vec = vec![]; io.read_to_end(&mut vec).await.unwrap(); @@ -1611,6 +1679,7 @@ async fn upgrades_new() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .serve_connection(socket, svc) .with_upgrades() @@ -1623,10 +1692,10 @@ async fn upgrades_new() { read_101_rx.await.unwrap(); let upgraded = on_upgrade.await.expect("on_upgrade"); - let parts = upgraded.downcast::().unwrap(); + let parts = upgraded.downcast::>().unwrap(); assert_eq!(parts.read_buf, "eagerly optimistic"); - let mut io = parts.io; + let mut io = parts.io.inner(); io.write_all(b"foo=bar").await.unwrap(); let mut 
vec = vec![]; io.read_to_end(&mut vec).await.unwrap(); @@ -1645,6 +1714,7 @@ async fn upgrades_ignored() { loop { let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); tokio::task::spawn(async move { http1::Builder::new() .serve_connection(socket, svc) @@ -1715,6 +1785,7 @@ async fn http_connect_new() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .serve_connection(socket, svc) .with_upgrades() @@ -1727,10 +1798,10 @@ async fn http_connect_new() { read_200_rx.await.unwrap(); let upgraded = on_upgrade.await.expect("on_upgrade"); - let parts = upgraded.downcast::().unwrap(); + let parts = upgraded.downcast::>().unwrap(); assert_eq!(parts.read_buf, "eagerly optimistic"); - let mut io = parts.io; + let mut io = parts.io.inner(); io.write_all(b"foo=bar").await.unwrap(); let mut vec = vec![]; io.read_to_end(&mut vec).await.unwrap(); @@ -1776,7 +1847,7 @@ async fn h2_connect() { let on_upgrade = hyper::upgrade::on(req); tokio::spawn(async move { - let mut upgraded = on_upgrade.await.expect("on_upgrade"); + let mut upgraded = TokioIo::new(on_upgrade.await.expect("on_upgrade")); upgraded.write_all(b"Bread?").await.unwrap(); let mut vec = vec![]; @@ -1795,6 +1866,7 @@ async fn h2_connect() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http2::Builder::new(TokioExecutor) .serve_connection(socket, svc) //.with_upgrades() @@ -1868,7 +1940,7 @@ async fn h2_connect_multiplex() { assert!(upgrade_res.expect_err("upgrade cancelled").is_canceled()); return; } - let mut upgraded = upgrade_res.expect("upgrade successful"); + let mut upgraded = TokioIo::new(upgrade_res.expect("upgrade successful")); upgraded.write_all(b"Bread?").await.unwrap(); @@ -1904,6 +1976,7 @@ async fn h2_connect_multiplex() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http2::Builder::new(TokioExecutor) .serve_connection(socket, svc) //.with_upgrades() @@ -1955,7 +2028,7 @@ async fn h2_connect_large_body() { let on_upgrade = hyper::upgrade::on(req); tokio::spawn(async move { - let mut upgraded = on_upgrade.await.expect("on_upgrade"); + let mut upgraded = TokioIo::new(on_upgrade.await.expect("on_upgrade")); upgraded.write_all(b"Bread?").await.unwrap(); let mut vec = vec![]; @@ -1976,6 +2049,7 @@ async fn h2_connect_large_body() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http2::Builder::new(TokioExecutor) .serve_connection(socket, svc) //.with_upgrades() @@ -2026,7 +2100,7 @@ async fn h2_connect_empty_frames() { let on_upgrade = hyper::upgrade::on(req); tokio::spawn(async move { - let mut upgraded = on_upgrade.await.expect("on_upgrade"); + let mut upgraded = TokioIo::new(on_upgrade.await.expect("on_upgrade")); upgraded.write_all(b"Bread?").await.unwrap(); let mut vec = vec![]; @@ -2045,6 +2119,7 @@ async fn h2_connect_empty_frames() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http2::Builder::new(TokioExecutor) .serve_connection(socket, svc) //.with_upgrades() @@ -2067,6 +2142,7 @@ async fn parse_errors_send_4xx_response() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .serve_connection(socket, HelloWorld) .await @@ -2089,6 +2165,7 @@ async fn illegal_request_length_returns_400_response() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = 
TokioIo::new(socket); http1::Builder::new() .serve_connection(socket, HelloWorld) .await @@ -2129,6 +2206,7 @@ async fn max_buf_size() { }); let (socket, _) = listener.accept().await.unwrap(); + let socket = TokioIo::new(socket); http1::Builder::new() .max_buf_size(MAX) .serve_connection(socket, HelloWorld) @@ -2136,6 +2214,32 @@ async fn max_buf_size() { .expect_err("should TooLarge error"); } +#[cfg(feature = "http1")] +#[tokio::test] +async fn graceful_shutdown_before_first_request_no_block() { + let (listener, addr) = setup_tcp_listener(); + + tokio::spawn(async move { + let socket = listener.accept().await.unwrap().0; + let socket = TokioIo::new(socket); + + let future = http1::Builder::new().serve_connection(socket, HelloWorld); + pin!(future); + future.as_mut().graceful_shutdown(); + + future.await.unwrap(); + }); + + let mut stream = TkTcpStream::connect(addr).await.unwrap(); + + let mut buf = vec![]; + + tokio::time::timeout(Duration::from_secs(5), stream.read_to_end(&mut buf)) + .await + .expect("timed out waiting for graceful shutdown") + .expect("error receiving response"); +} + #[test] fn streaming_body() { use futures_util::StreamExt; @@ -2359,6 +2463,7 @@ async fn http2_keep_alive_detects_unresponsive_client() { }); let (socket, _) = listener.accept().await.expect("accept"); + let socket = TokioIo::new(socket); let err = http2::Builder::new(TokioExecutor) .timer(TokioTimer) @@ -2377,6 +2482,7 @@ async fn http2_keep_alive_with_responsive_client() { tokio::spawn(async move { let (socket, _) = listener.accept().await.expect("accept"); + let socket = TokioIo::new(socket); http2::Builder::new(TokioExecutor) .timer(TokioTimer) @@ -2387,7 +2493,7 @@ async fn http2_keep_alive_with_responsive_client() { .expect("serve_connection"); }); - let tcp = connect_async(addr).await; + let tcp = TokioIo::new(connect_async(addr).await); let (mut client, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor) .handshake(tcp) .await @@ -2440,6 +2546,7 @@ async fn http2_keep_alive_count_server_pings() { tokio::spawn(async move { let (socket, _) = listener.accept().await.expect("accept"); + let socket = TokioIo::new(socket); http2::Builder::new(TokioExecutor) .timer(TokioTimer) @@ -2656,7 +2763,7 @@ impl Service> for TestService { type Error = BoxError; type Future = BoxFuture; - fn call(&mut self, mut req: Request) -> Self::Future { + fn call(&self, mut req: Request) -> Self::Future { let tx = self.tx.clone(); let replies = self.reply.clone(); @@ -2722,7 +2829,7 @@ impl Service> for HelloWorld { type Error = hyper::Error; type Future = future::Ready>; - fn call(&mut self, _req: Request) -> Self::Future { + fn call(&self, _req: Request) -> Self::Future { let response = Response::new(Full::new(HELLO.into())); future::ok(response) } @@ -2823,6 +2930,7 @@ impl ServeOptions { tokio::select! 
{ res = listener.accept() => { let (stream, _) = res.unwrap(); + let stream = TokioIo::new(stream); tokio::task::spawn(async move { let msg_tx = msg_tx.clone(); @@ -2874,7 +2982,7 @@ fn has_header(msg: &str, name: &str) -> bool { msg[..n].contains(name) } -fn tcp_bind(addr: &SocketAddr) -> ::tokio::io::Result { +fn tcp_bind(addr: &SocketAddr) -> std::io::Result { let std_listener = StdTcpListener::bind(addr).unwrap(); std_listener.set_nonblocking(true).unwrap(); TcpListener::from_std(std_listener) @@ -2953,7 +3061,7 @@ impl AsyncRead for DebugStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, + buf: hyper::rt::ReadBufCursor<'_>, ) -> Poll> { Pin::new(&mut self.stream).poll_read(cx, buf) } @@ -3010,9 +3118,11 @@ impl TestClient { let host = req.uri().host().expect("uri has no host"); let port = req.uri().port_u16().expect("uri has no port"); - let stream = TkTcpStream::connect(format!("{}:{}", host, port)) - .await - .unwrap(); + let stream = TokioIo::new( + TkTcpStream::connect(format!("{}:{}", host, port)) + .await + .unwrap(), + ); if self.http2_only { let (mut sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor) diff --git a/tests/support/mod.rs b/tests/support/mod.rs index e7e1e8c6bd..c46eff89ea 100644 --- a/tests/support/mod.rs +++ b/tests/support/mod.rs @@ -21,7 +21,7 @@ pub use hyper::{HeaderMap, StatusCode}; pub use std::net::SocketAddr; mod tokiort; -pub use tokiort::{TokioExecutor, TokioTimer}; +pub use tokiort::{TokioExecutor, TokioIo, TokioTimer}; #[allow(unused_macros)] macro_rules! t { @@ -357,6 +357,7 @@ async fn async_test(cfg: __TestConfig) { loop { let (stream, _) = listener.accept().await.expect("server error"); + let io = TokioIo::new(stream); // Move a clone into the service_fn let serve_handles = serve_handles.clone(); @@ -386,12 +387,12 @@ async fn async_test(cfg: __TestConfig) { tokio::task::spawn(async move { if http2_only { server::conn::http2::Builder::new(TokioExecutor) - .serve_connection(stream, service) + .serve_connection(io, service) .await .expect("server error"); } else { server::conn::http1::Builder::new() - .serve_connection(stream, service) + .serve_connection(io, service) .await .expect("server error"); } @@ -425,10 +426,11 @@ async fn async_test(cfg: __TestConfig) { async move { let stream = TcpStream::connect(addr).await.unwrap(); + let io = TokioIo::new(stream); let res = if http2_only { let (mut sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor) - .handshake(stream) + .handshake(io) .await .unwrap(); @@ -440,7 +442,7 @@ async fn async_test(cfg: __TestConfig) { sender.send_request(req).await.unwrap() } else { let (mut sender, conn) = hyper::client::conn::http1::Builder::new() - .handshake(stream) + .handshake(io) .await .unwrap(); @@ -508,6 +510,7 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future) loop { let (stream, _) = listener.accept().await.unwrap(); + let io = TokioIo::new(stream); let service = service_fn(move |mut req| { async move { @@ -523,11 +526,12 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future) let stream = TcpStream::connect(format!("{}:{}", uri, port)) .await .unwrap(); + let io = TokioIo::new(stream); let resp = if http2_only { let (mut sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor) - .handshake(stream) + .handshake(io) .await .unwrap(); @@ -540,7 +544,7 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future) sender.send_request(req).await? 
} else { let builder = hyper::client::conn::http1::Builder::new(); - let (mut sender, conn) = builder.handshake(stream).await.unwrap(); + let (mut sender, conn) = builder.handshake(io).await.unwrap(); tokio::task::spawn(async move { if let Err(err) = conn.await { @@ -569,12 +573,12 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future) if http2_only { server::conn::http2::Builder::new(TokioExecutor) - .serve_connection(stream, service) + .serve_connection(io, service) .await .unwrap(); } else { server::conn::http1::Builder::new() - .serve_connection(stream, service) + .serve_connection(io, service) .await .unwrap(); }
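The test changes above lean on a `support::TokioIo` adapter (re-exported from `tests/support/tokiort.rs`, which is not shown in this diff) to bridge Tokio's `AsyncRead`/`AsyncWrite` to the new `hyper::rt::Read`/`Write` traits. A hedged sketch of such an adapter, assuming it behaves like the version later published in `hyper-util`:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use hyper::rt::{Read, ReadBufCursor, Write};
use tokio::io::{AsyncRead, AsyncWrite};

/// Illustrative adapter only; the real tests and hyper-util ship their own.
pub struct TokioIo<T> {
    inner: T,
}

impl<T> TokioIo<T> {
    pub fn new(inner: T) -> Self {
        Self { inner }
    }

    pub fn inner(self) -> T {
        self.inner
    }
}

impl<T: AsyncRead + Unpin> Read for TokioIo<T> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        // Expose the unfilled portion of hyper's buffer to tokio, then tell
        // hyper how many bytes tokio actually initialized.
        let n = unsafe {
            let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
            match Pin::new(&mut self.inner).poll_read(cx, &mut tbuf) {
                Poll::Ready(Ok(())) => tbuf.filled().len(),
                other => return other,
            }
        };
        // SAFETY: tokio initialized and filled `n` bytes of the cursor.
        unsafe { buf.advance(n) };
        Poll::Ready(Ok(()))
    }
}

impl<T: AsyncWrite + Unpin> Write for TokioIo<T> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}
```

With an adapter of this shape, `TokioIo::new(TcpStream::connect(addr).await?)` can be handed directly to the `http1`/`http2` client and server builders, which is the pattern the updated tests follow.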