commit 189f24e53b (parent 7af1274587)
nora, 2023-03-07 14:08:47 +01:00
58 changed files with 1489 additions and 12529 deletions

View file

@ -119,35 +119,20 @@ impl Body {
/// ```
#[inline]
pub fn empty() -> Body {
Body::new(Kind::Once(None))
loop {}
}
/// Create a `Body` stream with an associated sender half.
///
/// Useful when wanting to stream chunks from another thread.
#[inline]
pub(crate) fn channel() -> (Sender, Body) {
Self::new_channel(DecodedLength::CHUNKED, false)
loop {}
}
pub(crate) fn new_channel(
content_length: DecodedLength,
wanter: bool,
) -> (Sender, Body) {
let (data_tx, data_rx) = mpsc::channel(0);
let (trailers_tx, trailers_rx) = oneshot::channel();
let want = if wanter { WANT_PENDING } else { WANT_READY };
let (want_tx, want_rx) = watch::channel(want);
let tx = Sender {
want_rx,
data_tx,
trailers_tx: Some(trailers_tx),
};
let rx = Body::new(Kind::Chan {
content_length,
want_tx,
data_rx,
trailers_rx,
});
(tx, rx)
loop {}
}
/// Wrap a futures `Stream` in a box inside `Body`.
///
@ -178,11 +163,10 @@ impl Body {
O: Into<Bytes> + 'static,
E: Into<Box<dyn StdError + Send + Sync>> + 'static,
{
let mapped = stream.map_ok(Into::into).map_err(Into::into);
Body::new(Kind::Wrapped(SyncWrapper::new(Box::pin(mapped))))
loop {}
}
fn new(kind: Kind) -> Body {
Body { kind, extra: None }
loop {}
}
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
pub(crate) fn h2(
@ -190,144 +174,46 @@ impl Body {
mut content_length: DecodedLength,
ping: ping::Recorder,
) -> Self {
if !content_length.is_exact() && recv.is_end_stream() {
content_length = DecodedLength::ZERO;
}
let body = Body::new(Kind::H2 {
ping,
content_length,
recv,
});
body
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(crate) fn delayed_eof(&mut self, fut: DelayEofUntil) {
self.extra_mut().delayed_eof = Some(DelayEof::NotEof(fut));
loop {}
}
fn take_delayed_eof(&mut self) -> Option<DelayEof> {
self.extra.as_mut().and_then(|extra| extra.delayed_eof.take())
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
fn extra_mut(&mut self) -> &mut Extra {
self.extra.get_or_insert_with(|| Box::new(Extra { delayed_eof: None }))
loop {}
}
fn poll_eof(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Option<crate::Result<Bytes>>> {
match self.take_delayed_eof() {
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
Some(DelayEof::NotEof(mut delay)) => {
match self.poll_inner(cx) {
ok @ Poll::Ready(Some(Ok(..))) | ok @ Poll::Pending => {
self.extra_mut().delayed_eof = Some(DelayEof::NotEof(delay));
ok
}
Poll::Ready(None) => {
match Pin::new(&mut delay).poll(cx) {
Poll::Ready(Ok(never)) => match never {}
Poll::Pending => {
self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay));
Poll::Pending
}
Poll::Ready(Err(_done)) => Poll::Ready(None),
}
}
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
Some(DelayEof::Eof(mut delay)) => {
match Pin::new(&mut delay).poll(cx) {
Poll::Ready(Ok(never)) => match never {}
Poll::Pending => {
self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay));
Poll::Pending
}
Poll::Ready(Err(_done)) => Poll::Ready(None),
}
}
#[cfg(
any(
not(any(feature = "http1", feature = "http2")),
not(feature = "client")
)
)]
Some(delay_eof) => match delay_eof {}
None => self.poll_inner(cx),
}
loop {}
}
#[cfg(feature = "ffi")]
pub(crate) fn as_ffi_mut(&mut self) -> &mut crate::ffi::UserBody {
match self.kind {
Kind::Ffi(ref mut body) => return body,
_ => {
self.kind = Kind::Ffi(crate::ffi::UserBody::new());
}
}
match self.kind {
Kind::Ffi(ref mut body) => body,
_ => unreachable!(),
}
loop {}
}
fn poll_inner(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Option<crate::Result<Bytes>>> {
match self.kind {
Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)),
Kind::Chan {
content_length: ref mut len,
ref mut data_rx,
ref mut want_tx,
..
} => {
want_tx.send(WANT_READY);
match ready!(Pin::new(data_rx).poll_next(cx) ?) {
Some(chunk) => {
len.sub_if(chunk.len() as u64);
Poll::Ready(Some(Ok(chunk)))
}
None => Poll::Ready(None),
}
}
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 { ref ping, recv: ref mut h2, content_length: ref mut len } => {
match ready!(h2.poll_data(cx)) {
Some(Ok(bytes)) => {
let _ = h2.flow_control().release_capacity(bytes.len());
len.sub_if(bytes.len() as u64);
ping.record_data(bytes.len());
Poll::Ready(Some(Ok(bytes)))
}
Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
None => Poll::Ready(None),
}
}
#[cfg(feature = "ffi")]
Kind::Ffi(ref mut body) => body.poll_data(cx),
#[cfg(feature = "stream")]
Kind::Wrapped(ref mut s) => {
match ready!(s.get_mut().as_mut().poll_next(cx)) {
Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))),
None => Poll::Ready(None),
}
}
}
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn take_full_data(&mut self) -> Option<Bytes> {
if let Kind::Once(ref mut chunk) = self.kind { chunk.take() } else { None }
loop {}
}
}
impl Default for Body {
/// Returns `Body::empty()`.
#[inline]
fn default() -> Body {
Body::empty()
loop {}
}
}
impl HttpBody for Body {
@ -337,7 +223,7 @@ impl HttpBody for Body {
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Data, Self::Error>>> {
self.poll_eof(cx)
loop {}
}
fn poll_trailers(
#[cfg_attr(not(feature = "http2"), allow(unused_mut))]
@ -345,75 +231,18 @@ impl HttpBody for Body {
#[cfg_attr(not(feature = "http2"), allow(unused))]
cx: &mut task::Context<'_>,
) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
match self.kind {
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 { recv: ref mut h2, ref ping, .. } => {
match ready!(h2.poll_trailers(cx)) {
Ok(t) => {
ping.record_non_data();
Poll::Ready(Ok(t))
}
Err(e) => Poll::Ready(Err(crate::Error::new_h2(e))),
}
}
Kind::Chan { ref mut trailers_rx, .. } => {
match ready!(Pin::new(trailers_rx).poll(cx)) {
Ok(t) => Poll::Ready(Ok(Some(t))),
Err(_) => Poll::Ready(Ok(None)),
}
}
#[cfg(feature = "ffi")]
Kind::Ffi(ref mut body) => body.poll_trailers(cx),
_ => Poll::Ready(Ok(None)),
}
loop {}
}
fn is_end_stream(&self) -> bool {
match self.kind {
Kind::Once(ref val) => val.is_none(),
Kind::Chan { content_length, .. } => content_length == DecodedLength::ZERO,
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(),
#[cfg(feature = "ffi")]
Kind::Ffi(..) => false,
#[cfg(feature = "stream")]
Kind::Wrapped(..) => false,
}
loop {}
}
fn size_hint(&self) -> SizeHint {
macro_rules! opt_len {
($content_length:expr) => {
{ let mut hint = SizeHint::default(); if let Some(content_length) =
$content_length .into_opt() { hint.set_exact(content_length); } hint }
};
}
match self.kind {
Kind::Once(Some(ref val)) => SizeHint::with_exact(val.len() as u64),
Kind::Once(None) => SizeHint::with_exact(0),
#[cfg(feature = "stream")]
Kind::Wrapped(..) => SizeHint::default(),
Kind::Chan { content_length, .. } => opt_len!(content_length),
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 { content_length, .. } => opt_len!(content_length),
#[cfg(feature = "ffi")]
Kind::Ffi(..) => SizeHint::default(),
}
loop {}
}
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[derive(Debug)]
struct Streaming;
#[derive(Debug)]
struct Empty;
#[derive(Debug)]
struct Full<'a>(&'a Bytes);
let mut builder = f.debug_tuple("Body");
match self.kind {
Kind::Once(None) => builder.field(&Empty),
Kind::Once(Some(ref chunk)) => builder.field(&Full(chunk)),
_ => builder.field(&Streaming),
};
builder.finish()
loop {}
}
}
/// # Optional
@ -427,7 +256,7 @@ impl Stream for Body {
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Self::Item>> {
HttpBody::poll_data(self, cx)
loop {}
}
}
/// # Optional
@ -443,55 +272,49 @@ for Body {
dyn Stream<Item = Result<Bytes, Box<dyn StdError + Send + Sync>>> + Send,
>,
) -> Body {
Body::new(Kind::Wrapped(SyncWrapper::new(stream.into())))
loop {}
}
}
impl From<Bytes> for Body {
#[inline]
fn from(chunk: Bytes) -> Body {
if chunk.is_empty() { Body::empty() } else { Body::new(Kind::Once(Some(chunk))) }
loop {}
}
}
impl From<Vec<u8>> for Body {
#[inline]
fn from(vec: Vec<u8>) -> Body {
Body::from(Bytes::from(vec))
loop {}
}
}
impl From<&'static [u8]> for Body {
#[inline]
fn from(slice: &'static [u8]) -> Body {
Body::from(Bytes::from(slice))
loop {}
}
}
impl From<Cow<'static, [u8]>> for Body {
#[inline]
fn from(cow: Cow<'static, [u8]>) -> Body {
match cow {
Cow::Borrowed(b) => Body::from(b),
Cow::Owned(o) => Body::from(o),
}
loop {}
}
}
impl From<String> for Body {
#[inline]
fn from(s: String) -> Body {
Body::from(Bytes::from(s.into_bytes()))
loop {}
}
}
impl From<&'static str> for Body {
#[inline]
fn from(slice: &'static str) -> Body {
Body::from(Bytes::from(slice.as_bytes()))
loop {}
}
}
impl From<Cow<'static, str>> for Body {
#[inline]
fn from(cow: Cow<'static, str>) -> Body {
match cow {
Cow::Borrowed(b) => Body::from(b),
Cow::Owned(o) => Body::from(o),
}
loop {}
}
}
impl Sender {
@ -500,35 +323,24 @@ impl Sender {
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<crate::Result<()>> {
ready!(self.poll_want(cx) ?);
self.data_tx.poll_ready(cx).map_err(|_| crate::Error::new_closed())
loop {}
}
fn poll_want(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
match self.want_rx.load(cx) {
WANT_READY => Poll::Ready(Ok(())),
WANT_PENDING => Poll::Pending,
watch::CLOSED => Poll::Ready(Err(crate::Error::new_closed())),
unexpected => unreachable!("want_rx value: {}", unexpected),
}
loop {}
}
async fn ready(&mut self) -> crate::Result<()> {
futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
loop {}
}
/// Send data on data channel when it is ready.
pub(crate) async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> {
self.ready().await?;
self.data_tx.try_send(Ok(chunk)).map_err(|_| crate::Error::new_closed())
loop {}
}
/// Send trailers on trailers channel.
pub(crate) async fn send_trailers(
&mut self,
trailers: HeaderMap,
) -> crate::Result<()> {
let tx = match self.trailers_tx.take() {
Some(tx) => tx,
None => return Err(crate::Error::new_closed()),
};
tx.send(trailers).map_err(|_| crate::Error::new_closed())
loop {}
}
/// Try to send data on this channel.
///
@ -543,34 +355,20 @@ impl Sender {
/// that doesn't have an async context. If in an async context, prefer
/// `send_data()` instead.
pub(crate) fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> {
self.data_tx
.try_send(Ok(chunk))
.map_err(|err| err.into_inner().expect("just sent Ok"))
loop {}
}
/// Aborts the body in an abnormal fashion.
pub(crate) fn abort(self) {
let _ = self
.data_tx
.clone()
.try_send(Err(crate::Error::new_body_write_aborted()));
loop {}
}
#[cfg(feature = "http1")]
pub(crate) fn send_error(&mut self, err: crate::Error) {
let _ = self.data_tx.try_send(Err(err));
loop {}
}
}
impl fmt::Debug for Sender {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[derive(Debug)]
struct Open;
#[derive(Debug)]
struct Closed;
let mut builder = f.debug_tuple("Sender");
match self.want_rx.peek() {
watch::CLOSED => builder.field(&Closed),
_ => builder.field(&Open),
};
builder.finish()
loop {}
}
}
#[cfg(test)]
@ -580,96 +378,38 @@ mod tests {
use super::{Body, DecodedLength, HttpBody, Sender, SizeHint};
#[test]
fn test_size_of() {
let body_size = mem::size_of::<Body>();
let body_expected_size = mem::size_of::<u64>() * 6;
assert!(
body_size <= body_expected_size, "Body size = {} <= {}", body_size,
body_expected_size,
);
assert_eq!(body_size, mem::size_of::< Option < Body >> (), "Option<Body>");
assert_eq!(
mem::size_of::< Sender > (), mem::size_of::< usize > () * 5, "Sender"
);
assert_eq!(
mem::size_of::< Sender > (), mem::size_of::< Option < Sender >> (),
"Option<Sender>"
);
loop {}
}
#[test]
fn size_hint() {
fn eq(body: Body, b: SizeHint, note: &str) {
let a = body.size_hint();
assert_eq!(a.lower(), b.lower(), "lower for {:?}", note);
assert_eq!(a.upper(), b.upper(), "upper for {:?}", note);
}
eq(Body::from("Hello"), SizeHint::with_exact(5), "from str");
eq(Body::empty(), SizeHint::with_exact(0), "empty");
eq(Body::channel().1, SizeHint::new(), "channel");
eq(
Body::new_channel(DecodedLength::new(4), false).1,
SizeHint::with_exact(4),
"channel with length",
);
loop {}
}
#[tokio::test]
async fn channel_abort() {
let (tx, mut rx) = Body::channel();
tx.abort();
let err = rx.data().await.unwrap().unwrap_err();
assert!(err.is_body_write_aborted(), "{:?}", err);
loop {}
}
#[tokio::test]
async fn channel_abort_when_buffer_is_full() {
let (mut tx, mut rx) = Body::channel();
tx.try_send_data("chunk 1".into()).expect("send 1");
tx.abort();
let chunk1 = rx.data().await.expect("item 1").expect("chunk 1");
assert_eq!(chunk1, "chunk 1");
let err = rx.data().await.unwrap().unwrap_err();
assert!(err.is_body_write_aborted(), "{:?}", err);
loop {}
}
#[test]
fn channel_buffers_one() {
let (mut tx, _rx) = Body::channel();
tx.try_send_data("chunk 1".into()).expect("send 1");
let chunk2 = tx.try_send_data("chunk 2".into()).expect_err("send 2");
assert_eq!(chunk2, "chunk 2");
loop {}
}
#[tokio::test]
async fn channel_empty() {
let (_, mut rx) = Body::channel();
assert!(rx.data().await.is_none());
loop {}
}
#[test]
fn channel_ready() {
let (mut tx, _rx) = Body::new_channel(DecodedLength::CHUNKED, false);
let mut tx_ready = tokio_test::task::spawn(tx.ready());
assert!(tx_ready.poll().is_ready(), "tx is ready immediately");
loop {}
}
#[test]
fn channel_wanter() {
let (mut tx, mut rx) = Body::new_channel(DecodedLength::CHUNKED, true);
let mut tx_ready = tokio_test::task::spawn(tx.ready());
let mut rx_data = tokio_test::task::spawn(rx.data());
assert!(
tx_ready.poll().is_pending(), "tx isn't ready before rx has been polled"
);
assert!(rx_data.poll().is_pending(), "poll rx.data");
assert!(tx_ready.is_woken(), "rx poll wakes tx");
assert!(tx_ready.poll().is_ready(), "tx is ready after rx has been polled");
loop {}
}
#[test]
fn channel_notices_closure() {
let (mut tx, rx) = Body::new_channel(DecodedLength::CHUNKED, true);
let mut tx_ready = tokio_test::task::spawn(tx.ready());
assert!(
tx_ready.poll().is_pending(), "tx isn't ready before rx has been polled"
);
drop(rx);
assert!(tx_ready.is_woken(), "dropping rx wakes tx");
match tx_ready.poll() {
Poll::Ready(Err(ref e)) if e.is_closed() => {}
unexpected => panic!("tx poll ready unexpected: {:?}", unexpected),
}
loop {}
}
}
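
For orientation, the channel API whose bodies are stubbed out above is public in upstream hyper 0.14 (`Body::channel`, `Sender::send_data`); in this minimized crate the same items are `pub(crate)`. A hedged usage sketch against the upstream API, assuming a tokio runtime and illustrative chunk contents:

// Sketch against upstream hyper 0.14; feature flags and chunk contents are
// illustrative, not taken from this commit.
use hyper::body::{Body, HttpBody};

#[tokio::main]
async fn main() {
    let (mut tx, mut body) = Body::channel();

    // Push chunks from another task; the receiving `Body` yields them in order.
    tokio::spawn(async move {
        tx.send_data("hello ".into()).await.expect("receiver still alive");
        tx.send_data("world".into()).await.expect("receiver still alive");
        // Dropping the Sender ends the stream.
    });

    let mut collected = Vec::new();
    while let Some(chunk) = body.data().await {
        collected.extend_from_slice(&chunk.expect("no send error"));
    }
    assert_eq!(collected, b"hello world".to_vec());
}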

View file

@ -1,33 +1,22 @@
use std::fmt;
#[derive(Clone, Copy, PartialEq, Eq)]
pub(crate) struct DecodedLength(u64);
#[cfg(any(feature = "http1", feature = "http2"))]
impl From<Option<u64>> for DecodedLength {
fn from(len: Option<u64>) -> Self {
len.and_then(|len| {
// If the length is u64::MAX, oh well, just report it as chunked.
Self::checked_new(len).ok()
})
.unwrap_or(DecodedLength::CHUNKED)
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2", test))]
const MAX_LEN: u64 = std::u64::MAX - 2;
impl DecodedLength {
pub(crate) const CLOSE_DELIMITED: DecodedLength = DecodedLength(::std::u64::MAX);
pub(crate) const CHUNKED: DecodedLength = DecodedLength(::std::u64::MAX - 1);
pub(crate) const ZERO: DecodedLength = DecodedLength(0);
#[cfg(test)]
pub(crate) fn new(len: u64) -> Self {
debug_assert!(len <= MAX_LEN);
DecodedLength(len)
loop {}
}
/// Takes the length as a content-length without other checks.
///
/// Should only be called if previously confirmed this isn't
@ -35,40 +24,20 @@ impl DecodedLength {
#[inline]
#[cfg(feature = "http1")]
pub(crate) fn danger_len(self) -> u64 {
debug_assert!(self.0 < Self::CHUNKED.0);
self.0
loop {}
}
/// Converts to an Option<u64> representing a Known or Unknown length.
pub(crate) fn into_opt(self) -> Option<u64> {
match self {
DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => None,
DecodedLength(known) => Some(known),
}
loop {}
}
/// Checks that the `u64` is within the maximum allowed for content-length.
#[cfg(any(feature = "http1", feature = "http2"))]
pub(crate) fn checked_new(len: u64) -> Result<Self, crate::error::Parse> {
use tracing::warn;
if len <= MAX_LEN {
Ok(DecodedLength(len))
} else {
warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN);
Err(crate::error::Parse::TooLarge)
}
loop {}
}
pub(crate) fn sub_if(&mut self, amt: u64) {
match *self {
DecodedLength::CHUNKED | DecodedLength::CLOSE_DELIMITED => (),
DecodedLength(ref mut known) => {
*known -= amt;
}
}
loop {}
}
/// Returns whether this represents an exact length.
///
/// This includes 0, which of course is an exact known length.
@ -76,48 +45,28 @@ impl DecodedLength {
/// It would return false if "chunked" or otherwise size-unknown.
#[cfg(feature = "http2")]
pub(crate) fn is_exact(&self) -> bool {
self.0 <= MAX_LEN
loop {}
}
}
impl fmt::Debug for DecodedLength {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"),
DecodedLength::CHUNKED => f.write_str("CHUNKED"),
DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(),
}
loop {}
}
}
impl fmt::Display for DecodedLength {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DecodedLength::CLOSE_DELIMITED => f.write_str("close-delimited"),
DecodedLength::CHUNKED => f.write_str("chunked encoding"),
DecodedLength::ZERO => f.write_str("empty"),
DecodedLength(n) => write!(f, "content-length ({} bytes)", n),
}
loop {}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn sub_if_known() {
let mut len = DecodedLength::new(30);
len.sub_if(20);
assert_eq!(len.0, 10);
loop {}
}
#[test]
fn sub_if_chunked() {
let mut len = DecodedLength::CHUNKED;
len.sub_if(20);
assert_eq!(len, DecodedLength::CHUNKED);
loop {}
}
}
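
The `DecodedLength` type above packs two sentinels into the top of the `u64` range (`CLOSE_DELIMITED = u64::MAX`, `CHUNKED = u64::MAX - 1`) so every remaining value is an exact byte count. A self-contained sketch of that scheme (the `Len` name is illustrative, not part of the crate):

// Standalone illustration of the sentinel-value scheme; exact lengths stop at
// u64::MAX - 2, and the top two values mark "close-delimited" and "chunked".
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Len(u64);

impl Len {
    const CLOSE_DELIMITED: Len = Len(u64::MAX);
    const CHUNKED: Len = Len(u64::MAX - 1);
    const MAX_EXACT: u64 = u64::MAX - 2;

    // Known length, or None for the two sentinels, mirroring into_opt().
    fn into_opt(self) -> Option<u64> {
        match self {
            Len::CHUNKED | Len::CLOSE_DELIMITED => None,
            Len(n) => Some(n),
        }
    }

    // Subtract only from exact lengths, mirroring sub_if(); sentinels stay put.
    fn sub_if(&mut self, amt: u64) {
        if self.0 <= Len::MAX_EXACT {
            self.0 -= amt;
        }
    }
}

fn main() {
    let mut len = Len(30);
    len.sub_if(20);
    assert_eq!(len.into_opt(), Some(10));

    let mut chunked = Len::CHUNKED;
    chunked.sub_if(20);
    assert_eq!(chunked, Len::CHUNKED);
}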

View file

@ -14,52 +14,24 @@
//! `HttpBody`, and returned by hyper as a "receive stream" (so, for server
//! requests and client responses). It is also a decent default implementation
//! if you don't have very custom needs of your send streams.
pub use bytes::{Buf, Bytes};
pub use http_body::Body as HttpBody;
pub use http_body::SizeHint;
pub use self::aggregate::aggregate;
pub use self::body::{Body, Sender};
pub(crate) use self::length::DecodedLength;
pub use self::to_bytes::to_bytes;
mod aggregate;
mod body;
mod length;
mod to_bytes;
/// An optimization to try to take a full body if immediately available.
///
/// This is currently limited to *only* `hyper::Body`s.
#[cfg(feature = "http1")]
pub(crate) fn take_full_data<T: HttpBody + 'static>(body: &mut T) -> Option<T::Data> {
use std::any::{Any, TypeId};
// This static type check can be optimized at compile-time.
if TypeId::of::<T>() == TypeId::of::<Body>() {
let mut full = (body as &mut dyn Any)
.downcast_mut::<Body>()
.expect("must be Body")
.take_full_data();
// This second cast is required to make the type system happy.
// Without it, the compiler cannot reason that the type is actually
// `T::Data`. Oh wells.
//
// It's still a measurable win!
(&mut full as &mut dyn Any)
.downcast_mut::<Option<T::Data>>()
.expect("must be T::Data")
.take()
} else {
None
}
loop {}
}
fn _assert_send_sync() {
fn _assert_send<T: Send>() {}
fn _assert_sync<T: Sync>() {}
_assert_send::<Body>();
_assert_sync::<Body>();
loop {}
}
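
`take_full_data` above relies on a `TypeId` comparison plus two `dyn Any` downcasts to specialize a generic function for one concrete type without true specialization. A self-contained sketch of that trick (the `take_if_string` name and `String` target are illustrative):

use std::any::{Any, TypeId};

// "Poor man's specialization": take a fast path only when the generic type
// is the one concrete type we know how to optimize for.
fn take_if_string<T: Any>(value: &mut T) -> Option<String> {
    if TypeId::of::<T>() == TypeId::of::<String>() {
        // First downcast: &mut T -> &mut String.
        let s = (value as &mut dyn Any)
            .downcast_mut::<String>()
            .expect("TypeId matched");
        let mut taken = Some(std::mem::take(s));
        // Second downcast mirrors the one in take_full_data: it convinces the
        // compiler the result has the expected (associated) type. Here both
        // sides are Option<String>, so it is effectively a no-op.
        (&mut taken as &mut dyn Any)
            .downcast_mut::<Option<String>>()
            .expect("same type")
            .take()
    } else {
        None
    }
}

fn main() {
    let mut s = String::from("full body");
    assert_eq!(take_if_string(&mut s), Some("full body".to_owned()));

    let mut n = 42u32;
    assert_eq!(take_if_string(&mut n), None);
}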

View file

@ -1,7 +1,5 @@
use bytes::{Buf, BufMut, Bytes};
use super::HttpBody;
/// Concatenate the buffers from a body into a single `Bytes` asynchronously.
///
/// This may require copying the data into a single buffer. If you don't need
@ -48,35 +46,5 @@ pub async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
T: HttpBody,
{
futures_util::pin_mut!(body);
// If there's only 1 chunk, we can just return Buf::to_bytes()
let mut first = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(Bytes::new());
};
let second = if let Some(buf) = body.data().await {
buf?
} else {
return Ok(first.copy_to_bytes(first.remaining()));
};
// Don't pre-emptively reserve *too* much.
let rest = (body.size_hint().lower() as usize).min(1024 * 16);
let cap = first
.remaining()
.saturating_add(second.remaining())
.saturating_add(rest);
// With more than 1 buf, we gotta flatten into a Vec first.
let mut vec = Vec::with_capacity(cap);
vec.put(first);
vec.put(second);
while let Some(buf) = body.data().await {
vec.put(buf?);
}
Ok(vec.into())
loop {}
}
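
In upstream hyper 0.14 the helper above is exposed publicly as `hyper::body::to_bytes`. A hedged usage sketch (the client setup and URL are illustrative, assuming the "client", "http1" and "tcp" features):

use hyper::{body, Client, Uri};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let client = Client::new();
    let uri: Uri = "http://httpbin.org/ip".parse()?;
    let res = client.get(uri).await?;

    // Drains however many data chunks the body yields and returns one `Bytes`.
    let bytes = body::to_bytes(res.into_body()).await?;
    println!("read {} bytes", bytes.len());
    Ok(())
}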

View file

@ -59,13 +59,13 @@ impl Client<HttpConnector, Body> {
#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
#[inline]
pub(crate) fn new() -> Client<HttpConnector, Body> {
Builder::default().build_http()
loop {}
}
}
#[cfg(feature = "tcp")]
impl Default for Client<HttpConnector, Body> {
fn default() -> Client<HttpConnector, Body> {
Client::new()
loop {}
}
}
impl Client<(), Body> {
@ -90,7 +90,7 @@ impl Client<(), Body> {
/// ```
#[inline]
pub(crate) fn builder() -> Builder {
Builder::default()
loop {}
}
}
impl<C, B> Client<C, B>
@ -125,15 +125,7 @@ where
where
B: Default,
{
let body = B::default();
if !body.is_end_stream() {
warn!(
"default HttpBody used for get() does not return true for is_end_stream"
);
}
let mut req = Request::new(body);
*req.uri_mut() = uri;
self.request(req)
loop {}
}
/// Send a constructed `Request` using this `Client`.
///
@ -157,175 +149,27 @@ where
/// # fn main() {}
/// ```
pub(crate) fn request(&self, mut req: Request<B>) -> ResponseFuture {
let is_http_connect = req.method() == Method::CONNECT;
match req.version() {
Version::HTTP_11 => {}
Version::HTTP_10 => {
if is_http_connect {
warn!("CONNECT is not allowed for HTTP/1.0");
return ResponseFuture::new(
future::err(crate::Error::new_user_unsupported_request_method()),
);
}
}
Version::HTTP_2 => {}
other => return ResponseFuture::error_version(other),
};
let pool_key = match extract_domain(req.uri_mut(), is_http_connect) {
Ok(s) => s,
Err(err) => {
return ResponseFuture::new(future::err(err));
}
};
ResponseFuture::new(self.clone().retryably_send_request(req, pool_key))
loop {}
}
async fn retryably_send_request(
self,
mut req: Request<B>,
pool_key: PoolKey,
) -> crate::Result<Response<Body>> {
let uri = req.uri().clone();
loop {
req = match self.send_request(req, pool_key.clone()).await {
Ok(resp) => return Ok(resp),
Err(ClientError::Normal(err)) => return Err(err),
Err(ClientError::Canceled { connection_reused, mut req, reason }) => {
if !self.config.retry_canceled_requests || !connection_reused {
return Err(reason);
}
trace!(
"unstarted request canceled, trying again (reason={:?})", reason
);
*req.uri_mut() = uri.clone();
req
}
};
}
loop {}
}
async fn send_request(
&self,
mut req: Request<B>,
pool_key: PoolKey,
) -> Result<Response<Body>, ClientError<B>> {
let mut pooled = match self.connection_for(pool_key).await {
Ok(pooled) => pooled,
Err(ClientConnectError::Normal(err)) => return Err(ClientError::Normal(err)),
Err(ClientConnectError::H2CheckoutIsClosed(reason)) => {
return Err(ClientError::Canceled {
connection_reused: true,
req,
reason,
});
}
};
if pooled.is_http1() {
if req.version() == Version::HTTP_2 {
warn!("Connection is HTTP/1, but request requires HTTP/2");
return Err(
ClientError::Normal(crate::Error::new_user_unsupported_version()),
);
}
if self.config.set_host {
let uri = req.uri().clone();
req.headers_mut()
.entry(HOST)
.or_insert_with(|| {
let hostname = uri.host().expect("authority implies host");
if let Some(port) = get_non_default_port(&uri) {
let s = format!("{}:{}", hostname, port);
HeaderValue::from_str(&s)
} else {
HeaderValue::from_str(hostname)
}
.expect("uri host is valid header value")
});
}
if req.method() == Method::CONNECT {
authority_form(req.uri_mut());
} else if pooled.conn_info.is_proxied {
absolute_form(req.uri_mut());
} else {
origin_form(req.uri_mut());
}
} else if req.method() == Method::CONNECT {
authority_form(req.uri_mut());
}
let fut = pooled
.send_request_retryable(req)
.map_err(ClientError::map_with_reused(pooled.is_reused()));
let extra_info = pooled.conn_info.extra.clone();
let fut = fut
.map_ok(move |mut res| {
if let Some(extra) = extra_info {
extra.set(res.extensions_mut());
}
res
});
if pooled.is_closed() {
return fut.await;
}
let mut res = fut.await?;
if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
drop(pooled);
} else if !res.body().is_end_stream() {
let (delayed_tx, delayed_rx) = oneshot::channel();
res.body_mut().delayed_eof(delayed_rx);
let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx))
.map(move |_| {
drop(delayed_tx);
});
self.conn_builder.exec.execute(on_idle);
} else {
let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ());
self.conn_builder.exec.execute(on_idle);
}
Ok(res)
loop {}
}
async fn connection_for(
&self,
pool_key: PoolKey,
) -> Result<Pooled<PoolClient<B>>, ClientConnectError> {
let checkout = self.pool.checkout(pool_key.clone());
let connect = self.connect_to(pool_key);
let is_ver_h2 = self.config.ver == Ver::Http2;
match future::select(checkout, connect).await {
Either::Left((Ok(checked_out), connecting)) => {
if connecting.started() {
let bg = connecting
.map_err(|err| {
trace!("background connect error: {}", err);
})
.map(|_pooled| {});
self.conn_builder.exec.execute(bg);
}
Ok(checked_out)
}
Either::Right((Ok(connected), _checkout)) => Ok(connected),
Either::Left((Err(err), connecting)) => {
if err.is_canceled() {
connecting.await.map_err(ClientConnectError::Normal)
} else {
Err(ClientConnectError::Normal(err))
}
}
Either::Right((Err(err), checkout)) => {
if err.is_canceled() {
checkout
.await
.map_err(move |err| {
if is_ver_h2 && err.is_canceled()
&& err.find_source::<CheckoutIsClosedError>().is_some()
{
ClientConnectError::H2CheckoutIsClosed(err)
} else {
ClientConnectError::Normal(err)
}
})
} else {
Err(ClientConnectError::Normal(err))
}
}
}
loop {}
}
fn connect_to(
&self,
@ -432,10 +276,10 @@ where
&mut self,
_: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
loop {}
}
fn call(&mut self, req: Request<B>) -> Self::Future {
self.request(req)
loop {}
}
}
impl<C, B> tower_service::Service<Request<B>> for &'_ Client<C, B>
@ -452,25 +296,20 @@ where
&mut self,
_: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
loop {}
}
fn call(&mut self, req: Request<B>) -> Self::Future {
self.request(req)
loop {}
}
}
impl<C: Clone, B> Clone for Client<C, B> {
fn clone(&self) -> Client<C, B> {
Client {
config: self.config.clone(),
conn_builder: self.conn_builder.clone(),
connector: self.connector.clone(),
pool: self.pool.clone(),
}
loop {}
}
}
impl<C, B> fmt::Debug for Client<C, B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Client").finish()
loop {}
}
}
impl ResponseFuture {
@ -478,26 +317,21 @@ impl ResponseFuture {
where
F: Future<Output = crate::Result<Response<Body>>> + Send + 'static,
{
Self {
inner: SyncWrapper::new(Box::pin(value)),
}
loop {}
}
fn error_version(ver: Version) -> Self {
warn!("Request has unsupported version \"{:?}\"", ver);
ResponseFuture::new(
Box::pin(future::err(crate::Error::new_user_unsupported_version())),
)
loop {}
}
}
impl fmt::Debug for ResponseFuture {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Future<Response>")
loop {}
}
}
impl Future for ResponseFuture {
type Output = crate::Result<Response<Body>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
self.inner.get_mut().as_mut().poll(cx)
loop {}
}
}
#[allow(missing_debug_implementations)]
@ -512,35 +346,19 @@ enum PoolTx<B> {
}
impl<B> PoolClient<B> {
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
match self.tx {
PoolTx::Http1(ref mut tx) => tx.poll_ready(cx),
#[cfg(feature = "http2")]
PoolTx::Http2(_) => Poll::Ready(Ok(())),
}
loop {}
}
fn is_http1(&self) -> bool {
!self.is_http2()
loop {}
}
fn is_http2(&self) -> bool {
match self.tx {
PoolTx::Http1(_) => false,
#[cfg(feature = "http2")]
PoolTx::Http2(_) => true,
}
loop {}
}
fn is_ready(&self) -> bool {
match self.tx {
PoolTx::Http1(ref tx) => tx.is_ready(),
#[cfg(feature = "http2")]
PoolTx::Http2(ref tx) => tx.is_ready(),
}
loop {}
}
fn is_closed(&self) -> bool {
match self.tx {
PoolTx::Http1(ref tx) => tx.is_closed(),
#[cfg(feature = "http2")]
PoolTx::Http2(ref tx) => tx.is_closed(),
}
loop {}
}
}
impl<B: HttpBody + 'static> PoolClient<B> {
@ -566,36 +384,13 @@ where
B: Send + 'static,
{
fn is_open(&self) -> bool {
match self.tx {
PoolTx::Http1(ref tx) => tx.is_ready(),
#[cfg(feature = "http2")]
PoolTx::Http2(ref tx) => tx.is_ready(),
}
loop {}
}
fn reserve(self) -> Reservation<Self> {
match self.tx {
PoolTx::Http1(tx) => {
Reservation::Unique(PoolClient {
conn_info: self.conn_info,
tx: PoolTx::Http1(tx),
})
}
#[cfg(feature = "http2")]
PoolTx::Http2(tx) => {
let b = PoolClient {
conn_info: self.conn_info.clone(),
tx: PoolTx::Http2(tx.clone()),
};
let a = PoolClient {
conn_info: self.conn_info,
tx: PoolTx::Http2(tx),
};
Reservation::Shared(a, b)
}
}
loop {}
}
fn can_share(&self) -> bool {
self.is_http2()
loop {}
}
}
#[allow(missing_debug_implementations)]
@ -631,93 +426,28 @@ pub(super) enum Ver {
Http2,
}
fn origin_form(uri: &mut Uri) {
let path = match uri.path_and_query() {
Some(path) if path.as_str() != "/" => {
let mut parts = ::http::uri::Parts::default();
parts.path_and_query = Some(path.clone());
Uri::from_parts(parts).expect("path is valid uri")
}
_none_or_just_slash => {
debug_assert!(Uri::default() == "/");
Uri::default()
}
};
*uri = path;
loop {}
}
fn absolute_form(uri: &mut Uri) {
debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme");
debug_assert!(uri.authority().is_some(), "absolute_form needs an authority");
if uri.scheme() == Some(&Scheme::HTTPS) {
origin_form(uri);
}
loop {}
}
fn authority_form(uri: &mut Uri) {
if let Some(path) = uri.path_and_query() {
if path != "/" {
warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path);
}
}
*uri = match uri.authority() {
Some(auth) => {
let mut parts = ::http::uri::Parts::default();
parts.authority = Some(auth.clone());
Uri::from_parts(parts).expect("authority is valid")
}
None => {
unreachable!("authority_form with relative uri");
}
};
loop {}
}
fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result<PoolKey> {
let uri_clone = uri.clone();
match (uri_clone.scheme(), uri_clone.authority()) {
(Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())),
(None, Some(auth)) if is_http_connect => {
let scheme = match auth.port_u16() {
Some(443) => {
set_scheme(uri, Scheme::HTTPS);
Scheme::HTTPS
}
_ => {
set_scheme(uri, Scheme::HTTP);
Scheme::HTTP
}
};
Ok((scheme, auth.clone()))
}
_ => {
debug!("Client requires absolute-form URIs, received: {:?}", uri);
Err(crate::Error::new_user_absolute_uri_required())
}
}
loop {}
}
fn domain_as_uri((scheme, auth): PoolKey) -> Uri {
http::uri::Builder::new()
.scheme(scheme)
.authority(auth)
.path_and_query("/")
.build()
.expect("domain is valid Uri")
loop {}
}
fn set_scheme(uri: &mut Uri, scheme: Scheme) {
debug_assert!(uri.scheme().is_none(), "set_scheme expects no existing scheme");
let old = mem::replace(uri, Uri::default());
let mut parts: ::http::uri::Parts = old.into();
parts.scheme = Some(scheme);
parts.path_and_query = Some("/".parse().expect("slash is a valid path"));
*uri = Uri::from_parts(parts).expect("scheme is valid");
loop {}
}
fn get_non_default_port(uri: &Uri) -> Option<Port<&str>> {
match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) {
(Some(443), true) => None,
(Some(80), false) => None,
_ => uri.port(),
}
loop {}
}
fn is_schema_secure(uri: &Uri) -> bool {
uri.scheme_str()
.map(|scheme_str| matches!(scheme_str, "wss" | "https"))
.unwrap_or_default()
loop {}
}
/// A builder to configure a new [`Client`](Client).
///
@ -747,18 +477,7 @@ pub struct Builder {
}
impl Default for Builder {
fn default() -> Self {
Self {
client_config: Config {
retry_canceled_requests: true,
set_host: true,
ver: Ver::Auto,
},
conn_builder: conn::Builder::new(),
pool_config: pool::Config {
idle_timeout: Some(Duration::from_secs(90)),
max_idle_per_host: std::usize::MAX,
},
}
loop {}
}
}
impl Builder {
@ -767,13 +486,7 @@ impl Builder {
note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)"
)]
pub(crate) fn keep_alive(&mut self, val: bool) -> &mut Self {
if !val {
self.pool_max_idle_per_host(0)
} else if self.pool_config.max_idle_per_host == 0 {
self.pool_max_idle_per_host(std::usize::MAX)
} else {
self
}
loop {}
}
#[doc(hidden)]
#[deprecated(note = "renamed to `pool_idle_timeout`")]
@ -781,7 +494,7 @@ impl Builder {
where
D: Into<Option<Duration>>,
{
self.pool_idle_timeout(val)
loop {}
}
/// Set an optional timeout for idle sockets being kept-alive.
///
@ -792,21 +505,18 @@ impl Builder {
where
D: Into<Option<Duration>>,
{
self.pool_config.idle_timeout = val.into();
self
loop {}
}
#[doc(hidden)]
#[deprecated(note = "renamed to `pool_max_idle_per_host`")]
pub(crate) fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
self.pool_config.max_idle_per_host = max_idle;
self
loop {}
}
/// Sets the maximum number of idle connections per host allowed in the pool.
///
/// Default is `usize::MAX` (no limit).
pub(crate) fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
self.pool_config.max_idle_per_host = max_idle;
self
loop {}
}
/// Sets the exact size of the read buffer to *always* use.
///
@ -814,8 +524,7 @@ impl Builder {
///
/// Default is an adaptive read buffer.
pub(crate) fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self {
self.conn_builder.http1_read_buf_exact_size(Some(sz));
self
loop {}
}
/// Set the maximum buffer size for the connection.
///
@ -829,8 +538,7 @@ impl Builder {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
self.conn_builder.http1_max_buf_size(max);
self
loop {}
}
/// Set whether HTTP/1 connections will accept spaces between header names
/// and the colon that follows them in responses.
@ -858,8 +566,7 @@ impl Builder {
&mut self,
val: bool,
) -> &mut Self {
self.conn_builder.http1_allow_spaces_after_header_name_in_responses(val);
self
loop {}
}
/// Set whether HTTP/1 connections will accept obsolete line folding for
/// header values.
@ -896,8 +603,7 @@ impl Builder {
&mut self,
val: bool,
) -> &mut Self {
self.conn_builder.http1_allow_obsolete_multiline_headers_in_responses(val);
self
loop {}
}
/// Sets whether invalid header lines should be silently ignored in HTTP/1 responses.
///
@ -927,8 +633,7 @@ impl Builder {
&mut self,
val: bool,
) -> &mut Builder {
self.conn_builder.http1_ignore_invalid_headers_in_responses(val);
self
loop {}
}
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
@ -943,8 +648,7 @@ impl Builder {
/// Default is `auto`. In this mode hyper will try to guess which
/// mode to use.
pub(crate) fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
self.conn_builder.http1_writev(enabled);
self
loop {}
}
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
@ -953,8 +657,7 @@ impl Builder {
///
/// Default is false.
pub(crate) fn http1_title_case_headers(&mut self, val: bool) -> &mut Self {
self.conn_builder.http1_title_case_headers(val);
self
loop {}
}
/// Set whether to support preserving original header cases.
///
@ -970,15 +673,13 @@ impl Builder {
///
/// Default is false.
pub(crate) fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self {
self.conn_builder.http1_preserve_header_case(val);
self
loop {}
}
/// Set whether HTTP/0.9 responses should be tolerated.
///
/// Default is false.
pub(crate) fn http09_responses(&mut self, val: bool) -> &mut Self {
self.conn_builder.http09_responses(val);
self
loop {}
}
/// Set whether the connection **must** use HTTP/2.
///
@ -993,8 +694,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_only(&mut self, val: bool) -> &mut Self {
self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto };
self
loop {}
}
/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
@ -1010,8 +710,7 @@ impl Builder {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
self.conn_builder.http2_initial_stream_window_size(sz.into());
self
loop {}
}
/// Sets the max connection-level flow control for HTTP2
///
@ -1024,8 +723,7 @@ impl Builder {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
self.conn_builder.http2_initial_connection_window_size(sz.into());
self
loop {}
}
/// Sets whether to use an adaptive flow control.
///
@ -1035,8 +733,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
self.conn_builder.http2_adaptive_window(enabled);
self
loop {}
}
/// Sets the maximum frame size to use for HTTP2.
///
@ -1049,8 +746,7 @@ impl Builder {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
self.conn_builder.http2_max_frame_size(sz);
self
loop {}
}
/// Sets an interval at which HTTP2 Ping frames should be sent to keep a
/// connection alive.
@ -1069,8 +765,7 @@ impl Builder {
&mut self,
interval: impl Into<Option<Duration>>,
) -> &mut Self {
self.conn_builder.http2_keep_alive_interval(interval);
self
loop {}
}
/// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
///
@ -1086,8 +781,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
self.conn_builder.http2_keep_alive_timeout(timeout);
self
loop {}
}
/// Sets whether HTTP2 keep-alive should apply while the connection is idle.
///
@ -1105,8 +799,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
self.conn_builder.http2_keep_alive_while_idle(enabled);
self
loop {}
}
/// Sets the maximum number of HTTP2 concurrent locally reset streams.
///
@ -1122,8 +815,7 @@ impl Builder {
&mut self,
max: usize,
) -> &mut Self {
self.conn_builder.http2_max_concurrent_reset_streams(max);
self
loop {}
}
/// Set the maximum write buffer size for each HTTP/2 stream.
///
@ -1135,8 +827,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
self.conn_builder.http2_max_send_buf_size(max);
self
loop {}
}
/// Set whether to retry requests that get disrupted before ever starting
/// to write.
@ -1151,8 +842,7 @@ impl Builder {
/// Default is `true`.
#[inline]
pub(crate) fn retry_canceled_requests(&mut self, val: bool) -> &mut Self {
self.client_config.retry_canceled_requests = val;
self
loop {}
}
/// Set whether to automatically add the `Host` header to requests.
///
@ -1162,16 +852,14 @@ impl Builder {
/// Default is `true`.
#[inline]
pub(crate) fn set_host(&mut self, val: bool) -> &mut Self {
self.client_config.set_host = val;
self
loop {}
}
/// Provide an executor to execute background `Connection` tasks.
pub(crate) fn executor<E>(&mut self, exec: E) -> &mut Self
where
E: Executor<BoxSendFuture> + Send + Sync + 'static,
{
self.conn_builder.executor(exec);
self
loop {}
}
/// Build a client with this configuration and the default `HttpConnector`.
#[cfg(feature = "tcp")]
@ -1180,11 +868,7 @@ impl Builder {
B: HttpBody + Send,
B::Data: Send,
{
let mut connector = HttpConnector::new();
if self.pool_config.is_enabled() {
connector.set_keepalive(self.pool_config.idle_timeout);
}
self.build(connector)
loop {}
}
/// Combine the configuration of this builder with a connector to create a `Client`.
pub(crate) fn build<C, B>(&self, connector: C) -> Client<C, B>
@ -1193,21 +877,12 @@ impl Builder {
B: HttpBody + Send,
B::Data: Send,
{
Client {
config: self.client_config,
conn_builder: self.conn_builder.clone(),
connector,
pool: Pool::new(self.pool_config, &self.conn_builder.exec),
}
loop {}
}
}
impl fmt::Debug for Builder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Builder")
.field("client_config", &self.client_config)
.field("conn_builder", &self.conn_builder)
.field("pool_config", &self.pool_config)
.finish()
loop {}
}
}
#[cfg(test)]
@ -1215,91 +890,34 @@ mod unit_tests {
use super::*;
#[test]
fn response_future_is_sync() {
fn assert_sync<T: Sync>() {}
assert_sync::<ResponseFuture>();
loop {}
}
#[test]
fn set_relative_uri_with_implicit_path() {
let mut uri = "http://hyper.rs".parse().unwrap();
origin_form(&mut uri);
assert_eq!(uri.to_string(), "/");
loop {}
}
#[test]
fn test_origin_form() {
let mut uri = "http://hyper.rs/guides".parse().unwrap();
origin_form(&mut uri);
assert_eq!(uri.to_string(), "/guides");
let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap();
origin_form(&mut uri);
assert_eq!(uri.to_string(), "/guides?foo=bar");
loop {}
}
#[test]
fn test_absolute_form() {
let mut uri = "http://hyper.rs/guides".parse().unwrap();
absolute_form(&mut uri);
assert_eq!(uri.to_string(), "http://hyper.rs/guides");
let mut uri = "https://hyper.rs/guides".parse().unwrap();
absolute_form(&mut uri);
assert_eq!(uri.to_string(), "/guides");
loop {}
}
#[test]
fn test_authority_form() {
let _ = pretty_env_logger::try_init();
let mut uri = "http://hyper.rs".parse().unwrap();
authority_form(&mut uri);
assert_eq!(uri.to_string(), "hyper.rs");
let mut uri = "hyper.rs".parse().unwrap();
authority_form(&mut uri);
assert_eq!(uri.to_string(), "hyper.rs");
loop {}
}
#[test]
fn test_extract_domain_connect_no_port() {
let mut uri = "hyper.rs".parse().unwrap();
let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain");
assert_eq!(scheme, * "http");
assert_eq!(host, "hyper.rs");
loop {}
}
#[test]
fn test_is_secure() {
assert_eq!(
is_schema_secure(& "http://hyper.rs".parse::< Uri > ().unwrap()), false
);
assert_eq!(is_schema_secure(& "hyper.rs".parse::< Uri > ().unwrap()), false);
assert_eq!(
is_schema_secure(& "wss://hyper.rs".parse::< Uri > ().unwrap()), true
);
assert_eq!(
is_schema_secure(& "ws://hyper.rs".parse::< Uri > ().unwrap()), false
);
loop {}
}
#[test]
fn test_get_non_default_port() {
assert!(
get_non_default_port(& "http://hyper.rs".parse::< Uri > ().unwrap())
.is_none()
);
assert!(
get_non_default_port(& "http://hyper.rs:80".parse::< Uri > ().unwrap())
.is_none()
);
assert!(
get_non_default_port(& "https://hyper.rs:443".parse::< Uri > ().unwrap())
.is_none()
);
assert!(
get_non_default_port(& "hyper.rs:80".parse::< Uri > ().unwrap()).is_none()
);
assert_eq!(
get_non_default_port(& "http://hyper.rs:123".parse::< Uri > ().unwrap())
.unwrap().as_u16(), 123
);
assert_eq!(
get_non_default_port(& "https://hyper.rs:80".parse::< Uri > ().unwrap())
.unwrap().as_u16(), 80
);
assert_eq!(
get_non_default_port(& "hyper.rs:123".parse::< Uri > ().unwrap()).unwrap()
.as_u16(), 123
);
loop {}
}
}
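
The builder knobs stubbed out above mirror hyper 0.14's public `Client::builder()` API. A hedged configuration sketch (the chosen values are only examples; the noted defaults come from the `Default` impl and doc comments above):

// Sketch against upstream hyper 0.14 with the "client", "http1", "http2"
// and "tcp" features; in this minimized crate the builder methods are pub(crate).
use std::time::Duration;
use hyper::{client::HttpConnector, Body, Client};

fn build_client() -> Client<HttpConnector, Body> {
    Client::builder()
        .pool_idle_timeout(Duration::from_secs(30)) // default: 90s
        .pool_max_idle_per_host(8)                  // default: usize::MAX (no limit)
        .retry_canceled_requests(true)              // default: true
        .set_host(true)                             // default: true
        .http2_only(false)                          // default: Ver::Auto
        .build_http()
}

The resulting `Client` is then driven through `client.get(uri)` or `client.request(req)` as in the `request` path shown above.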

View file

@ -107,7 +107,7 @@ pub(crate) async fn handshake<T>(
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
Builder::new().handshake(io).await
loop {}
}
/// The sender side of an established connection.
pub struct SendRequest<B> {
@ -196,27 +196,20 @@ impl<B> SendRequest<B> {
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<crate::Result<()>> {
self.dispatch.poll_ready(cx)
loop {}
}
pub(super) async fn when_ready(self) -> crate::Result<Self> {
let mut me = Some(self);
future::poll_fn(move |cx| {
ready!(me.as_mut().unwrap().poll_ready(cx))?;
Poll::Ready(Ok(me.take().unwrap()))
})
.await
loop {}
}
pub(super) fn is_ready(&self) -> bool {
self.dispatch.is_ready()
loop {}
}
pub(super) fn is_closed(&self) -> bool {
self.dispatch.is_closed()
loop {}
}
#[cfg(feature = "http2")]
pub(super) fn into_http2(self) -> Http2SendRequest<B> {
Http2SendRequest {
dispatch: self.dispatch.unbound(),
}
loop {}
}
}
impl<B> SendRequest<B>
@ -265,15 +258,7 @@ where
/// # fn main() {}
/// ```
pub(crate) fn send_request(&mut self, req: Request<B>) -> ResponseFuture {
let inner = match self.dispatch.send(req) {
Ok(rx) => ResponseFutureState::Waiting(rx),
Err(_req) => {
debug!("connection was not ready");
let err = crate::Error::new_canceled().with("connection was not ready");
ResponseFutureState::Error(Some(err))
}
};
ResponseFuture { inner }
loop {}
}
pub(super) fn send_request_retryable(
&mut self,
@ -316,24 +301,24 @@ where
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
self.poll_ready(cx)
loop {}
}
fn call(&mut self, req: Request<B>) -> Self::Future {
self.send_request(req)
loop {}
}
}
impl<B> fmt::Debug for SendRequest<B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SendRequest").finish()
loop {}
}
}
#[cfg(feature = "http2")]
impl<B> Http2SendRequest<B> {
pub(super) fn is_ready(&self) -> bool {
self.dispatch.is_ready()
loop {}
}
pub(super) fn is_closed(&self) -> bool {
self.dispatch.is_closed()
loop {}
}
}
#[cfg(feature = "http2")]
@ -372,15 +357,13 @@ where
#[cfg(feature = "http2")]
impl<B> fmt::Debug for Http2SendRequest<B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Http2SendRequest").finish()
loop {}
}
}
#[cfg(feature = "http2")]
impl<B> Clone for Http2SendRequest<B> {
fn clone(&self) -> Self {
Http2SendRequest {
dispatch: self.dispatch.clone(),
}
loop {}
}
}
impl<T, B> Connection<T, B>
@ -394,18 +377,7 @@ where
///
/// Only works for HTTP/1 connections. HTTP/2 connections will panic.
pub(crate) fn into_parts(self) -> Parts<T> {
match self.inner.expect("already upgraded") {
#[cfg(feature = "http1")]
ProtoClient::H1 { h1 } => {
let (io, read_buf, _) = h1.into_inner();
Parts { io, read_buf, _inner: () }
}
ProtoClient::H2 { .. } => {
panic!("http2 cannot into_inner");
}
#[cfg(not(feature = "http1"))]
ProtoClient::H1 { h1 } => match h1.0 {}
}
loop {}
}
/// Poll the connection for completion, but without calling `shutdown`
/// on the underlying IO.
@ -422,16 +394,7 @@ where
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<crate::Result<()>> {
match *self.inner.as_mut().expect("already upgraded") {
#[cfg(feature = "http1")]
ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx),
#[cfg(feature = "http2")]
ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()),
#[cfg(not(feature = "http1"))]
ProtoClient::H1 { ref mut h1 } => match h1.0 {}
#[cfg(not(feature = "http2"))]
ProtoClient::H2 { ref mut h2, .. } => match h2.0 {}
}
loop {}
}
/// Prevent shutdown of the underlying IO object at the end of servicing the request;
/// instead, run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
@ -455,10 +418,7 @@ where
/// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
#[cfg(feature = "http2")]
pub(crate) fn http2_is_extended_connect_protocol_enabled(&self) -> bool {
match self.inner.as_ref().unwrap() {
ProtoClient::H1 { .. } => false,
ProtoClient::H2 { h2 } => h2.is_extended_connect_protocol_enabled(),
}
loop {}
}
}
impl<T, B> Future for Connection<T, B>
@ -470,23 +430,7 @@ where
{
type Output = crate::Result<()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? {
proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
#[cfg(feature = "http1")]
proto::Dispatched::Upgrade(pending) => {
match self.inner.take() {
Some(ProtoClient::H1 { h1 }) => {
let (io, buf, _) = h1.into_inner();
pending.fulfill(Upgraded::new(io, buf));
Poll::Ready(Ok(()))
}
_ => {
drop(pending);
unreachable!("Upgrade expects h1");
}
}
}
}
loop {}
}
}
impl<T, B> fmt::Debug for Connection<T, B>
@ -495,48 +439,27 @@ where
B: HttpBody + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
loop {}
}
}
impl Builder {
/// Creates a new connection builder.
#[inline]
pub(crate) fn new() -> Builder {
Builder {
exec: Exec::Default,
h09_responses: false,
h1_writev: None,
h1_read_buf_exact_size: None,
h1_parser_config: Default::default(),
h1_title_case_headers: false,
h1_preserve_header_case: false,
#[cfg(feature = "ffi")]
h1_preserve_header_order: false,
h1_max_buf_size: None,
#[cfg(feature = "ffi")]
h1_headers_raw: false,
#[cfg(feature = "http2")]
h2_builder: Default::default(),
#[cfg(feature = "http1")]
version: Proto::Http1,
#[cfg(not(feature = "http1"))]
version: Proto::Http2,
}
loop {}
}
/// Provide an executor to execute background HTTP2 tasks.
pub(crate) fn executor<E>(&mut self, exec: E) -> &mut Builder
where
E: Executor<BoxSendFuture> + Send + Sync + 'static,
{
self.exec = Exec::Executor(Arc::new(exec));
self
loop {}
}
/// Set whether HTTP/0.9 responses should be tolerated.
///
/// Default is false.
pub(crate) fn http09_responses(&mut self, enabled: bool) -> &mut Builder {
self.h09_responses = enabled;
self
loop {}
}
/// Set whether HTTP/1 connections will accept spaces between header names
/// and the colon that follows them in responses.
@ -561,8 +484,7 @@ impl Builder {
&mut self,
enabled: bool,
) -> &mut Builder {
self.h1_parser_config.allow_spaces_after_header_name_in_responses(enabled);
self
loop {}
}
/// Set whether HTTP/1 connections will accept obsolete line folding for
/// header values.
@ -602,8 +524,7 @@ impl Builder {
&mut self,
enabled: bool,
) -> &mut Builder {
self.h1_parser_config.allow_obsolete_multiline_headers_in_responses(enabled);
self
loop {}
}
/// Set whether HTTP/1 connections will silently ignore malformed header lines.
///
@ -618,8 +539,7 @@ impl Builder {
&mut self,
enabled: bool,
) -> &mut Builder {
self.h1_parser_config.ignore_invalid_headers_in_responses(enabled);
self
loop {}
}
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
@ -634,8 +554,7 @@ impl Builder {
/// Default is `auto`. In this mode hyper will try to guess which
/// mode to use.
pub(crate) fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
self.h1_writev = Some(enabled);
self
loop {}
}
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
@ -644,8 +563,7 @@ impl Builder {
///
/// Default is false.
pub(crate) fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Builder {
self.h1_title_case_headers = enabled;
self
loop {}
}
/// Set whether to support preserving original header cases.
///
@ -661,8 +579,7 @@ impl Builder {
///
/// Default is false.
pub(crate) fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
self.h1_preserve_header_case = enabled;
self
loop {}
}
/// Set whether to support preserving original header order.
///
@ -675,8 +592,7 @@ impl Builder {
/// Default is false.
#[cfg(feature = "ffi")]
pub(crate) fn http1_preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
self.h1_preserve_header_order = enabled;
self
loop {}
}
/// Sets the exact size of the read buffer to *always* use.
///
@ -687,9 +603,7 @@ impl Builder {
&mut self,
sz: Option<usize>,
) -> &mut Builder {
self.h1_read_buf_exact_size = sz;
self.h1_max_buf_size = None;
self
loop {}
}
/// Set the maximum buffer size for the connection.
///
@ -703,18 +617,11 @@ impl Builder {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
assert!(
max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
"the max_buf_size cannot be smaller than the minimum that h1 specifies."
);
self.h1_max_buf_size = Some(max);
self.h1_read_buf_exact_size = None;
self
loop {}
}
#[cfg(feature = "ffi")]
pub(crate) fn http1_headers_raw(&mut self, enabled: bool) -> &mut Self {
self.h1_headers_raw = enabled;
self
loop {}
}
/// Sets whether HTTP2 is required.
///
@ -722,10 +629,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_only(&mut self, enabled: bool) -> &mut Builder {
if enabled {
self.version = Proto::Http2;
}
self
loop {}
}
/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
@ -741,11 +645,7 @@ impl Builder {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_stream_window_size = sz;
}
self
loop {}
}
/// Sets the max connection-level flow control for HTTP2
///
@ -758,11 +658,7 @@ impl Builder {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_conn_window_size = sz;
}
self
loop {}
}
/// Sets whether to use an adaptive flow control.
///
@ -772,13 +668,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
use proto::h2::SPEC_WINDOW_SIZE;
self.h2_builder.adaptive_window = enabled;
if enabled {
self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
}
self
loop {}
}
/// Sets the maximum frame size to use for HTTP2.
///
@ -791,10 +681,7 @@ impl Builder {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.max_frame_size = sz;
}
self
loop {}
}
/// Sets an interval at which HTTP2 Ping frames should be sent to keep a
/// connection alive.
@ -813,8 +700,7 @@ impl Builder {
&mut self,
interval: impl Into<Option<Duration>>,
) -> &mut Self {
self.h2_builder.keep_alive_interval = interval.into();
self
loop {}
}
/// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
///
@ -830,8 +716,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
self.h2_builder.keep_alive_timeout = timeout;
self
loop {}
}
/// Sets whether HTTP2 keep-alive should apply while the connection is idle.
///
@ -849,8 +734,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
self.h2_builder.keep_alive_while_idle = enabled;
self
loop {}
}
/// Sets the maximum number of HTTP2 concurrent locally reset streams.
///
@ -866,8 +750,7 @@ impl Builder {
&mut self,
max: usize,
) -> &mut Self {
self.h2_builder.max_concurrent_reset_streams = Some(max);
self
loop {}
}
/// Set the maximum write buffer size for each HTTP/2 stream.
///
@ -879,9 +762,7 @@ impl Builder {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
assert!(max <= std::u32::MAX as usize);
self.h2_builder.max_send_buffer_size = max;
self
loop {}
}
/// Constructs a connection with the configured options and IO.
/// See [`client::conn`](crate::client::conn) for more.
@ -957,27 +838,12 @@ impl Builder {
impl Future for ResponseFuture {
type Output = crate::Result<Response<Body>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.inner {
ResponseFutureState::Waiting(ref mut rx) => {
Pin::new(rx)
.poll(cx)
.map(|res| match res {
Ok(Ok(resp)) => Ok(resp),
Ok(Err(err)) => Err(err),
Err(_canceled) => {
panic!("dispatch dropped without returning error")
}
})
}
ResponseFutureState::Error(ref mut err) => {
Poll::Ready(Err(err.take().expect("polled after ready")))
}
}
loop {}
}
}
impl fmt::Debug for ResponseFuture {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ResponseFuture").finish()
loop {}
}
}
impl<T, B> Future for ProtoClient<T, B>
@ -989,16 +855,7 @@ where
{
type Output = crate::Result<proto::Dispatched>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.project() {
#[cfg(feature = "http1")]
ProtoClientProj::H1 { h1 } => h1.poll(cx),
#[cfg(feature = "http2")]
ProtoClientProj::H2 { h2, .. } => h2.poll(cx),
#[cfg(not(feature = "http1"))]
ProtoClientProj::H1 { h1 } => match h1.0 {}
#[cfg(not(feature = "http2"))]
ProtoClientProj::H2 { h2, .. } => match h2.0 {}
}
loop {}
}
}
trait AssertSend: Send {}
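
For the lower-level path, upstream hyper 0.14 exposes the `handshake` function and the `SendRequest`/`Connection` pair shown above under `hyper::client::conn`. A hedged sketch of driving them by hand (host, path, and the "client"/"http1" features are illustrative assumptions):

use hyper::{body::HttpBody, client::conn, Body, Request};
use tokio::net::TcpStream;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let stream = TcpStream::connect("httpbin.org:80").await?;

    // handshake() is the free function whose body is stubbed to `loop {}` above.
    let (mut sender, connection) = conn::handshake(stream).await?;

    // The Connection future drives the protocol and must be polled to completion.
    tokio::spawn(async move {
        if let Err(err) = connection.await {
            eprintln!("connection error: {}", err);
        }
    });

    let req = Request::builder()
        .uri("/ip")
        .header("Host", "httpbin.org")
        .body(Body::empty())?;

    let mut res = sender.send_request(req).await?;
    println!("status: {}", res.status());
    while let Some(chunk) = res.body_mut().data().await {
        println!("read {} bytes", chunk?.len());
    }
    Ok(())
}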

View file

@ -54,27 +54,27 @@ pub struct GaiFuture {
}
impl Name {
pub(super) fn new(host: Box<str>) -> Name {
Name { host }
loop {}
}
/// View the hostname as a string slice.
pub(crate) fn as_str(&self) -> &str {
&self.host
loop {}
}
}
impl fmt::Debug for Name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.host, f)
loop {}
}
}
impl fmt::Display for Name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.host, f)
loop {}
}
}
impl FromStr for Name {
type Err = InvalidNameError;
fn from_str(host: &str) -> Result<Self, Self::Err> {
Ok(Name::new(host.into()))
loop {}
}
}
/// Error indicating a given string was not a valid domain name.
@ -82,14 +82,14 @@ impl FromStr for Name {
pub struct InvalidNameError(());
impl fmt::Display for InvalidNameError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Not a valid domain name")
loop {}
}
}
impl Error for InvalidNameError {}
impl GaiResolver {
/// Construct a new `GaiResolver`.
pub(crate) fn new() -> Self {
GaiResolver { _priv: () }
loop {}
}
}
impl Service<Name> for GaiResolver {
@@ -100,58 +100,42 @@ impl Service<Name> for GaiResolver {
&mut self,
_cx: &mut task::Context<'_>,
) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
loop {}
}
fn call(&mut self, name: Name) -> Self::Future {
let blocking = tokio::task::spawn_blocking(move || {
debug!("resolving host={:?}", name.host);
(&*name.host, 0).to_socket_addrs().map(|i| SocketAddrs { iter: i })
});
GaiFuture { inner: blocking }
loop {}
}
}
impl fmt::Debug for GaiResolver {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("GaiResolver")
loop {}
}
}
impl Future for GaiFuture {
type Output = Result<GaiAddrs, io::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner)
.poll(cx)
.map(|res| match res {
Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }),
Ok(Err(err)) => Err(err),
Err(join_err) => {
if join_err.is_cancelled() {
Err(io::Error::new(io::ErrorKind::Interrupted, join_err))
} else {
panic!("gai background task failed: {:?}", join_err)
}
}
})
loop {}
}
}
impl fmt::Debug for GaiFuture {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("GaiFuture")
loop {}
}
}
impl Drop for GaiFuture {
fn drop(&mut self) {
self.inner.abort();
loop {}
}
}
impl Iterator for GaiAddrs {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
loop {}
}
}
impl fmt::Debug for GaiAddrs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("GaiAddrs")
loop {}
}
}
pub(super) struct SocketAddrs {
@@ -159,67 +143,34 @@ pub(super) struct SocketAddrs {
}
impl SocketAddrs {
pub(super) fn new(addrs: Vec<SocketAddr>) -> Self {
SocketAddrs {
iter: addrs.into_iter(),
}
loop {}
}
pub(super) fn try_parse(host: &str, port: u16) -> Option<SocketAddrs> {
if let Ok(addr) = host.parse::<Ipv4Addr>() {
let addr = SocketAddrV4::new(addr, port);
return Some(SocketAddrs {
iter: vec![SocketAddr::V4(addr)].into_iter(),
});
}
if let Ok(addr) = host.parse::<Ipv6Addr>() {
let addr = SocketAddrV6::new(addr, port, 0, 0);
return Some(SocketAddrs {
iter: vec![SocketAddr::V6(addr)].into_iter(),
});
}
None
loop {}
}
#[inline]
fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs {
SocketAddrs::new(self.iter.filter(predicate).collect())
loop {}
}
pub(super) fn split_by_preference(
self,
local_addr_ipv4: Option<Ipv4Addr>,
local_addr_ipv6: Option<Ipv6Addr>,
) -> (SocketAddrs, SocketAddrs) {
match (local_addr_ipv4, local_addr_ipv6) {
(Some(_), None) => {
(self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![]))
}
(None, Some(_)) => {
(self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![]))
}
_ => {
let preferring_v6 = self
.iter
.as_slice()
.first()
.map(SocketAddr::is_ipv6)
.unwrap_or(false);
let (preferred, fallback) = self
.iter
.partition::<Vec<_>, _>(|addr| addr.is_ipv6() == preferring_v6);
(SocketAddrs::new(preferred), SocketAddrs::new(fallback))
}
}
loop {}
}
pub(super) fn is_empty(&self) -> bool {
self.iter.as_slice().is_empty()
loop {}
}
pub(super) fn len(&self) -> usize {
self.iter.as_slice().len()
loop {}
}
}
impl Iterator for SocketAddrs {
type Item = SocketAddr;
#[inline]
fn next(&mut self) -> Option<SocketAddr> {
self.iter.next()
loop {}
}
}
mod sealed {
@@ -249,10 +200,10 @@ mod sealed {
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
Service::poll_ready(self, cx)
loop {}
}
fn resolve(&mut self, name: Name) -> Self::Future {
Service::call(self, name)
loop {}
}
}
}
@@ -263,8 +214,7 @@ pub(super) async fn resolve<R>(
where
R: Resolve,
{
futures_util::future::poll_fn(|cx| resolver.poll_ready(cx)).await?;
resolver.resolve(name).await
loop {}
}
#[cfg(test)]
mod tests {
@@ -272,52 +222,10 @@ mod tests {
use std::net::{Ipv4Addr, Ipv6Addr};
#[test]
fn test_ip_addrs_split_by_preference() {
let ip_v4 = Ipv4Addr::new(127, 0, 0, 1);
let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
let v4_addr = (ip_v4, 80).into();
let v6_addr = (ip_v6, 80).into();
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(None, None);
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.next().unwrap().is_ipv6());
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v6_addr, v4_addr].into_iter(),
}
.split_by_preference(None, None);
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.next().unwrap().is_ipv6());
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v6_addr, v4_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
let (mut preferred, fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), None);
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.is_empty());
let (mut preferred, fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(None, Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.is_empty());
loop {}
}
#[test]
fn test_name_from_str() {
const DOMAIN: &str = "test.example.com";
let name = Name::from_str(DOMAIN).expect("Should be a valid domain");
assert_eq!(name.as_str(), DOMAIN);
assert_eq!(name.to_string(), DOMAIN);
loop {}
}
}
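
For context on the `split_by_preference` body removed above: it partitions resolved addresses into a preferred and a fallback list based on the address family that came back first, unless a single local bind address forces one family. A minimal standalone sketch of that first-family split, using only `std::net` (the name `split_by_first_family` is invented here):

use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};

fn split_by_first_family(addrs: Vec<SocketAddr>) -> (Vec<SocketAddr>, Vec<SocketAddr>) {
    // Prefer whichever family the resolver returned first; everything else is fallback.
    let prefer_v6 = addrs.first().map(SocketAddr::is_ipv6).unwrap_or(false);
    addrs.into_iter().partition(|addr| addr.is_ipv6() == prefer_v6)
}

fn main() {
    let v4: SocketAddr = (Ipv4Addr::LOCALHOST, 80).into();
    let v6: SocketAddr = (Ipv6Addr::LOCALHOST, 80).into();
    let (preferred, fallback) = split_by_first_family(vec![v6, v4]);
    assert!(preferred[0].is_ipv6());
    assert!(fallback[0].is_ipv4());
}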

View file

@@ -79,7 +79,7 @@ struct Config {
impl HttpConnector {
/// Construct a new HttpConnector.
pub(crate) fn new() -> HttpConnector {
HttpConnector::new_with_resolver(GaiResolver::new())
loop {}
}
}
impl<R> HttpConnector<R> {
@@ -87,28 +87,14 @@ impl<R> HttpConnector<R> {
///
/// Takes a [`Resolver`](crate::client::connect::dns#resolvers-are-services) to handle DNS lookups.
pub(crate) fn new_with_resolver(resolver: R) -> HttpConnector<R> {
HttpConnector {
config: Arc::new(Config {
connect_timeout: None,
enforce_http: true,
happy_eyeballs_timeout: Some(Duration::from_millis(300)),
keep_alive_timeout: None,
local_address_ipv4: None,
local_address_ipv6: None,
nodelay: false,
reuse_address: false,
send_buffer_size: None,
recv_buffer_size: None,
}),
resolver,
}
loop {}
}
/// Option to enforce all `Uri`s have the `http` scheme.
///
/// Enabled by default.
#[inline]
pub(crate) fn enforce_http(&mut self, is_enforced: bool) {
self.config_mut().enforce_http = is_enforced;
loop {}
}
/// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration.
///
@@ -117,24 +103,24 @@ impl<R> HttpConnector<R> {
/// Default is `None`.
#[inline]
pub(crate) fn set_keepalive(&mut self, dur: Option<Duration>) {
self.config_mut().keep_alive_timeout = dur;
loop {}
}
/// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`.
///
/// Default is `false`.
#[inline]
pub(crate) fn set_nodelay(&mut self, nodelay: bool) {
self.config_mut().nodelay = nodelay;
loop {}
}
/// Sets the value of the SO_SNDBUF option on the socket.
#[inline]
pub(crate) fn set_send_buffer_size(&mut self, size: Option<usize>) {
self.config_mut().send_buffer_size = size;
loop {}
}
/// Sets the value of the SO_RCVBUF option on the socket.
#[inline]
pub(crate) fn set_recv_buffer_size(&mut self, size: Option<usize>) {
self.config_mut().recv_buffer_size = size;
loop {}
}
/// Set that all sockets are bound to the configured address before connection.
///
@@ -143,14 +129,7 @@ impl<R> HttpConnector<R> {
/// Default is `None`.
#[inline]
pub(crate) fn set_local_address(&mut self, addr: Option<IpAddr>) {
let (v4, v6) = match addr {
Some(IpAddr::V4(a)) => (Some(a), None),
Some(IpAddr::V6(a)) => (None, Some(a)),
_ => (None, None),
};
let cfg = self.config_mut();
cfg.local_address_ipv4 = v4;
cfg.local_address_ipv6 = v6;
loop {}
}
/// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's
/// preferences) before connection.
@@ -160,9 +139,7 @@ impl<R> HttpConnector<R> {
addr_ipv4: Ipv4Addr,
addr_ipv6: Ipv6Addr,
) {
let cfg = self.config_mut();
cfg.local_address_ipv4 = Some(addr_ipv4);
cfg.local_address_ipv6 = Some(addr_ipv6);
loop {}
}
/// Set the connect timeout.
///
@@ -172,7 +149,7 @@ impl<R> HttpConnector<R> {
/// Default is `None`.
#[inline]
pub(crate) fn set_connect_timeout(&mut self, dur: Option<Duration>) {
self.config_mut().connect_timeout = dur;
loop {}
}
/// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.
///
@@ -188,18 +165,17 @@ impl<R> HttpConnector<R> {
/// [RFC 6555]: https://tools.ietf.org/html/rfc6555
#[inline]
pub(crate) fn set_happy_eyeballs_timeout(&mut self, dur: Option<Duration>) {
self.config_mut().happy_eyeballs_timeout = dur;
loop {}
}
/// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`.
///
/// Default is `false`.
#[inline]
pub(crate) fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self {
self.config_mut().reuse_address = reuse_address;
self
loop {}
}
fn config_mut(&mut self) -> &mut Config {
Arc::make_mut(&mut self.config)
loop {}
}
}
static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http";
@@ -207,7 +183,7 @@ static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing";
static INVALID_MISSING_HOST: &str = "invalid URL, host is missing";
impl<R: fmt::Debug> fmt::Debug for HttpConnector<R> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("HttpConnector").finish()
loop {}
}
}
impl<R> tower_service::Service<Uri> for HttpConnector<R>
@@ -222,106 +198,39 @@ where
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
Poll::Ready(Ok(()))
loop {}
}
fn call(&mut self, dst: Uri) -> Self::Future {
let mut self_ = self.clone();
HttpConnecting {
fut: Box::pin(async move { self_.call_async(dst).await }),
_marker: PhantomData,
}
loop {}
}
}
fn get_host_port<'u>(
config: &Config,
dst: &'u Uri,
) -> Result<(&'u str, u16), ConnectError> {
trace!(
"Http::connect; scheme={:?}, host={:?}, port={:?}", dst.scheme(), dst.host(), dst
.port(),
);
if config.enforce_http {
if dst.scheme() != Some(&Scheme::HTTP) {
return Err(ConnectError {
msg: INVALID_NOT_HTTP.into(),
cause: None,
});
}
} else if dst.scheme().is_none() {
return Err(ConnectError {
msg: INVALID_MISSING_SCHEME.into(),
cause: None,
});
}
let host = match dst.host() {
Some(s) => s,
None => {
return Err(ConnectError {
msg: INVALID_MISSING_HOST.into(),
cause: None,
});
}
};
let port = match dst.port() {
Some(port) => port.as_u16(),
None => if dst.scheme() == Some(&Scheme::HTTPS) { 443 } else { 80 }
};
Ok((host, port))
loop {}
}
impl<R> HttpConnector<R>
where
R: Resolve,
{
async fn call_async(&mut self, dst: Uri) -> Result<TcpStream, ConnectError> {
let config = &self.config;
let (host, port) = get_host_port(config, &dst)?;
let host = host.trim_start_matches('[').trim_end_matches(']');
let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) {
addrs
} else {
let addrs = resolve(&mut self.resolver, dns::Name::new(host.into()))
.await
.map_err(ConnectError::dns)?;
let addrs = addrs
.map(|mut addr| {
addr.set_port(port);
addr
})
.collect();
dns::SocketAddrs::new(addrs)
};
let c = ConnectingTcp::new(addrs, config);
let sock = c.connect().await?;
if let Err(e) = sock.set_nodelay(config.nodelay) {
warn!("tcp set_nodelay error: {}", e);
}
Ok(sock)
loop {}
}
}
impl Connection for TcpStream {
fn connected(&self) -> Connected {
let connected = Connected::new();
if let (Ok(remote_addr), Ok(local_addr))
= (self.peer_addr(), self.local_addr()) {
connected
.extra(HttpInfo {
remote_addr,
local_addr,
})
} else {
connected
}
loop {}
}
}
impl HttpInfo {
/// Get the remote address of the transport used.
pub(crate) fn remote_addr(&self) -> SocketAddr {
self.remote_addr
loop {}
}
/// Get the local address of the transport used.
pub(crate) fn local_addr(&self) -> SocketAddr {
self.local_addr
loop {}
}
}
pin_project! {
@@ -334,7 +243,7 @@ type BoxConnecting = Pin<Box<dyn Future<Output = ConnectResult> + Send>>;
impl<R: Resolve> Future for HttpConnecting<R> {
type Output = ConnectResult;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
self.project().fut.poll(cx)
loop {}
}
}
pub struct ConnectError {
@@ -347,16 +256,13 @@ impl ConnectError {
S: Into<Box<str>>,
E: Into<Box<dyn StdError + Send + Sync>>,
{
ConnectError {
msg: msg.into(),
cause: Some(cause.into()),
}
loop {}
}
fn dns<E>(cause: E) -> ConnectError
where
E: Into<Box<dyn StdError + Send + Sync>>,
{
ConnectError::new("dns error", cause)
loop {}
}
fn m<S, E>(msg: S) -> impl FnOnce(E) -> ConnectError
where
@@ -368,25 +274,17 @@ impl ConnectError {
}
impl fmt::Debug for ConnectError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(ref cause) = self.cause {
f.debug_tuple("ConnectError").field(&self.msg).field(cause).finish()
} else {
self.msg.fmt(f)
}
loop {}
}
}
impl fmt::Display for ConnectError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.msg)?;
if let Some(ref cause) = self.cause {
write!(f, ": {}", cause)?;
}
Ok(())
loop {}
}
}
impl StdError for ConnectError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self.cause.as_ref().map(|e| &**e as _)
loop {}
}
}
struct ConnectingTcp<'a> {
@@ -396,46 +294,7 @@ struct ConnectingTcp<'a> {
}
impl<'a> ConnectingTcp<'a> {
fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self {
if let Some(fallback_timeout) = config.happy_eyeballs_timeout {
let (preferred_addrs, fallback_addrs) = remote_addrs
.split_by_preference(
config.local_address_ipv4,
config.local_address_ipv6,
);
if fallback_addrs.is_empty() {
return ConnectingTcp {
preferred: ConnectingTcpRemote::new(
preferred_addrs,
config.connect_timeout,
),
fallback: None,
config,
};
}
ConnectingTcp {
preferred: ConnectingTcpRemote::new(
preferred_addrs,
config.connect_timeout,
),
fallback: Some(ConnectingTcpFallback {
delay: tokio::time::sleep(fallback_timeout),
remote: ConnectingTcpRemote::new(
fallback_addrs,
config.connect_timeout,
),
}),
config,
}
} else {
ConnectingTcp {
preferred: ConnectingTcpRemote::new(
remote_addrs,
config.connect_timeout,
),
fallback: None,
config,
}
}
loop {}
}
}
struct ConnectingTcpFallback {
@@ -448,40 +307,12 @@ struct ConnectingTcpRemote {
}
impl ConnectingTcpRemote {
fn new(addrs: dns::SocketAddrs, connect_timeout: Option<Duration>) -> Self {
let connect_timeout = connect_timeout.map(|t| t / (addrs.len() as u32));
Self { addrs, connect_timeout }
loop {}
}
}
impl ConnectingTcpRemote {
async fn connect(&mut self, config: &Config) -> Result<TcpStream, ConnectError> {
let mut err = None;
for addr in &mut self.addrs {
debug!("connecting to {}", addr);
match connect(&addr, config, self.connect_timeout)?.await {
Ok(tcp) => {
debug!("connected to {}", addr);
return Ok(tcp);
}
Err(e) => {
trace!("connect error for {}: {:?}", addr, e);
err = Some(e);
}
}
}
match err {
Some(e) => Err(e),
None => {
Err(
ConnectError::new(
"tcp connect error",
std::io::Error::new(
std::io::ErrorKind::NotConnected,
"Network unreachable",
),
),
)
}
}
loop {}
}
}
fn bind_local_address(
@@ -490,24 +321,7 @@ fn bind_local_address(
local_addr_ipv4: &Option<Ipv4Addr>,
local_addr_ipv6: &Option<Ipv6Addr>,
) -> io::Result<()> {
match (*dst_addr, local_addr_ipv4, local_addr_ipv6) {
(SocketAddr::V4(_), Some(addr), _) => {
socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?;
}
(SocketAddr::V6(_), _, Some(addr)) => {
socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?;
}
_ => {
if cfg!(windows) {
let any: SocketAddr = match *dst_addr {
SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(),
SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(),
};
socket.bind(&any.into())?;
}
}
}
Ok(())
loop {}
}
fn connect(
addr: &SocketAddr,
@@ -579,33 +393,7 @@ fn connect(
}
impl ConnectingTcp<'_> {
async fn connect(mut self) -> Result<TcpStream, ConnectError> {
match self.fallback {
None => self.preferred.connect(self.config).await,
Some(mut fallback) => {
let preferred_fut = self.preferred.connect(self.config);
futures_util::pin_mut!(preferred_fut);
let fallback_fut = fallback.remote.connect(self.config);
futures_util::pin_mut!(fallback_fut);
let fallback_delay = fallback.delay;
futures_util::pin_mut!(fallback_delay);
let (result, future) = match futures_util::future::select(
preferred_fut,
fallback_delay,
)
.await
{
Either::Left((result, _fallback_delay)) => {
(result, Either::Right(fallback_fut))
}
Either::Right(((), preferred_fut)) => {
futures_util::future::select(preferred_fut, fallback_fut)
.await
.factor_first()
}
};
if result.is_err() { future.await } else { result }
}
}
loop {}
}
}
#[cfg(test)]
@@ -621,222 +409,28 @@ mod tests {
where
C: Connect,
{
connector.connect(super::super::sealed::Internal, dst).await
loop {}
}
#[tokio::test]
async fn test_errors_enforce_http() {
let dst = "https://example.domain/foo/bar?baz".parse().unwrap();
let connector = HttpConnector::new();
let err = connect(connector, dst).await.unwrap_err();
assert_eq!(&* err.msg, super::INVALID_NOT_HTTP);
loop {}
}
#[cfg(any(target_os = "linux", target_os = "macos"))]
fn get_local_ips() -> (Option<std::net::Ipv4Addr>, Option<std::net::Ipv6Addr>) {
use std::net::{IpAddr, TcpListener};
let mut ip_v4 = None;
let mut ip_v6 = None;
let ips = pnet_datalink::interfaces()
.into_iter()
.flat_map(|i| i.ips.into_iter().map(|n| n.ip()));
for ip in ips {
match ip {
IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip),
IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip),
_ => {}
}
if ip_v4.is_some() && ip_v6.is_some() {
break;
}
}
(ip_v4, ip_v6)
loop {}
}
#[tokio::test]
async fn test_errors_missing_scheme() {
let dst = "example.domain".parse().unwrap();
let mut connector = HttpConnector::new();
connector.enforce_http(false);
let err = connect(connector, dst).await.unwrap_err();
assert_eq!(&* err.msg, super::INVALID_MISSING_SCHEME);
loop {}
}
#[cfg(any(target_os = "linux", target_os = "macos"))]
#[tokio::test]
async fn local_address() {
use std::net::{IpAddr, TcpListener};
let _ = pretty_env_logger::try_init();
let (bind_ip_v4, bind_ip_v6) = get_local_ips();
let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
let port = server4.local_addr().unwrap().port();
let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap();
let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move {
let mut connector = HttpConnector::new();
match (bind_ip_v4, bind_ip_v6) {
(Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6),
(Some(v4), None) => connector.set_local_address(Some(v4.into())),
(None, Some(v6)) => connector.set_local_address(Some(v6.into())),
_ => unreachable!(),
}
connect(connector, dst.parse().unwrap()).await.unwrap();
let (_, client_addr) = server.accept().unwrap();
assert_eq!(client_addr.ip(), expected_ip);
};
if let Some(ip) = bind_ip_v4 {
assert_client_ip(format!("http://127.0.0.1:{}", port), server4, ip.into())
.await;
}
if let Some(ip) = bind_ip_v6 {
assert_client_ip(format!("http://[::1]:{}", port), server6, ip.into()).await;
}
loop {}
}
#[test]
#[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)]
fn client_happy_eyeballs() {
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener};
use std::time::{Duration, Instant};
use super::dns;
use super::ConnectingTcp;
let _ = pretty_env_logger::try_init();
let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server4.local_addr().unwrap();
let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap();
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
let local_timeout = Duration::default();
let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1;
let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1;
let fallback_timeout = std::cmp::max(
unreachable_v4_timeout,
unreachable_v6_timeout,
) + Duration::from_millis(250);
let scenarios = &[
(&[local_ipv4_addr()][..], 4, local_timeout, false),
(&[local_ipv6_addr()][..], 6, local_timeout, false),
(&[local_ipv4_addr(), local_ipv6_addr()][..], 4, local_timeout, false),
(&[local_ipv6_addr(), local_ipv4_addr()][..], 6, local_timeout, false),
(
&[unreachable_ipv4_addr(), local_ipv4_addr()][..],
4,
unreachable_v4_timeout,
false,
),
(
&[unreachable_ipv6_addr(), local_ipv6_addr()][..],
6,
unreachable_v6_timeout,
false,
),
(
&[unreachable_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..],
4,
unreachable_v4_timeout,
false,
),
(
&[unreachable_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..],
6,
unreachable_v6_timeout,
true,
),
(
&[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..],
6,
fallback_timeout,
false,
),
(
&[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..],
4,
fallback_timeout,
true,
),
(
&[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..],
6,
fallback_timeout + unreachable_v6_timeout,
false,
),
(
&[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..],
4,
fallback_timeout + unreachable_v4_timeout,
true,
),
];
let ipv6_accessible = measure_connect(slow_ipv6_addr()).0;
for &(hosts, family, timeout, needs_ipv6_access) in scenarios {
if needs_ipv6_access && !ipv6_accessible {
continue;
}
let (start, stream) = rt
.block_on(async move {
let addrs = hosts
.iter()
.map(|host| (host.clone(), addr.port()).into())
.collect();
let cfg = Config {
local_address_ipv4: None,
local_address_ipv6: None,
connect_timeout: None,
keep_alive_timeout: None,
happy_eyeballs_timeout: Some(fallback_timeout),
nodelay: false,
reuse_address: false,
enforce_http: false,
send_buffer_size: None,
recv_buffer_size: None,
};
let connecting_tcp = ConnectingTcp::new(
dns::SocketAddrs::new(addrs),
&cfg,
);
let start = Instant::now();
Ok::<
_,
ConnectError,
>((start, ConnectingTcp::connect(connecting_tcp).await?))
})
.unwrap();
let res = if stream.peer_addr().unwrap().is_ipv4() { 4 } else { 6 };
let duration = start.elapsed();
let min_duration = if timeout >= Duration::from_millis(150) {
timeout - Duration::from_millis(150)
} else {
Duration::default()
};
let max_duration = timeout + Duration::from_millis(150);
assert_eq!(res, family);
assert!(duration >= min_duration);
assert!(duration <= max_duration);
}
fn local_ipv4_addr() -> IpAddr {
Ipv4Addr::new(127, 0, 0, 1).into()
}
fn local_ipv6_addr() -> IpAddr {
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into()
}
fn unreachable_ipv4_addr() -> IpAddr {
Ipv4Addr::new(127, 0, 0, 2).into()
}
fn unreachable_ipv6_addr() -> IpAddr {
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into()
}
fn slow_ipv4_addr() -> IpAddr {
Ipv4Addr::new(198, 18, 0, 25).into()
}
fn slow_ipv6_addr() -> IpAddr {
Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into()
}
fn measure_connect(addr: IpAddr) -> (bool, Duration) {
let start = Instant::now();
let result = std::net::TcpStream::connect_timeout(
&(addr, 80).into(),
Duration::from_secs(1),
);
let reachable = result.is_ok()
|| result.unwrap_err().kind() == io::ErrorKind::TimedOut;
let duration = start.elapsed();
(reachable, duration)
}
loop {}
}
}
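
The removed `ConnectingTcp::connect` implements the RFC 6555 Happy Eyeballs race: the preferred family is dialled immediately, and the fallback family is only started once a short delay has elapsed. A simplified sketch of that racing shape with `tokio::select!` is below; `connect_preferred` and `connect_fallback` are placeholders, the retry-on-error handling of the real code is elided, and tokio's `macros`, `time`, and `rt-multi-thread` features are assumed:

use std::time::Duration;

async fn connect_preferred() -> Result<&'static str, ()> {
    // stand-in for dialing the first address family returned by DNS
    tokio::time::sleep(Duration::from_millis(500)).await;
    Ok("preferred")
}

async fn connect_fallback() -> Result<&'static str, ()> {
    Ok("fallback")
}

#[tokio::main]
async fn main() {
    let preferred = connect_preferred();
    tokio::pin!(preferred);

    let delay = tokio::time::sleep(Duration::from_millis(300));
    tokio::pin!(delay);

    // Poll the preferred attempt alone until the fallback delay fires, then race
    // it against the fallback attempt as well.
    let winner = tokio::select! {
        res = &mut preferred => res,
        _ = &mut delay => {
            tokio::select! {
                res = &mut preferred => res,
                res = connect_fallback() => res,
            }
        }
    };
    println!("connected via {:?}", winner);
}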

View file

@@ -112,11 +112,7 @@ pub(super) enum Alpn {
impl Connected {
/// Create new `Connected` type with empty metadata.
pub(crate) fn new() -> Connected {
Connected {
alpn: Alpn::None,
is_proxied: false,
extra: None,
}
loop {}
}
/// Set whether the connected transport is to an HTTP proxy.
///
@@ -137,62 +133,49 @@ impl Connected {
///
/// Default is `false`.
pub(crate) fn proxy(mut self, is_proxied: bool) -> Connected {
self.is_proxied = is_proxied;
self
loop {}
}
/// Determines if the connected transport is to an HTTP proxy.
pub(crate) fn is_proxied(&self) -> bool {
self.is_proxied
loop {}
}
/// Set extra connection information to be set in the extensions of every `Response`.
pub(crate) fn extra<T: Clone + Send + Sync + 'static>(
mut self,
extra: T,
) -> Connected {
if let Some(prev) = self.extra {
self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra))));
} else {
self.extra = Some(Extra(Box::new(ExtraEnvelope(extra))));
}
self
loop {}
}
/// Copies the extra connection information into an `Extensions` map.
pub(crate) fn get_extras(&self, extensions: &mut Extensions) {
if let Some(extra) = &self.extra {
extra.set(extensions);
}
loop {}
}
/// Set that the connected transport negotiated HTTP/2 as its next protocol.
pub(crate) fn negotiated_h2(mut self) -> Connected {
self.alpn = Alpn::H2;
self
loop {}
}
/// Determines if the connected transport negotiated HTTP/2 as its next protocol.
pub(crate) fn is_negotiated_h2(&self) -> bool {
self.alpn == Alpn::H2
loop {}
}
#[cfg(feature = "http2")]
pub(super) fn clone(&self) -> Connected {
Connected {
alpn: self.alpn.clone(),
is_proxied: self.is_proxied,
extra: self.extra.clone(),
}
loop {}
}
}
impl Extra {
pub(super) fn set(&self, res: &mut Extensions) {
self.0.set(res);
loop {}
}
}
impl Clone for Extra {
fn clone(&self) -> Extra {
Extra(self.0.clone_box())
loop {}
}
}
impl fmt::Debug for Extra {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Extra").finish()
loop {}
}
}
trait ExtraInner: Send + Sync {
@@ -206,16 +189,16 @@ where
T: Clone + Send + Sync + 'static,
{
fn clone_box(&self) -> Box<dyn ExtraInner> {
Box::new(self.clone())
loop {}
}
fn set(&self, res: &mut Extensions) {
res.insert(self.0.clone());
loop {}
}
}
struct ExtraChain<T>(Box<dyn ExtraInner>, T);
impl<T: Clone> Clone for ExtraChain<T> {
fn clone(&self) -> Self {
ExtraChain(self.0.clone_box(), self.1.clone())
loop {}
}
}
impl<T> ExtraInner for ExtraChain<T>
@@ -223,11 +206,10 @@ where
T: Clone + Send + Sync + 'static,
{
fn clone_box(&self) -> Box<dyn ExtraInner> {
Box::new(self.clone())
loop {}
}
fn set(&self, res: &mut Extensions) {
self.0.set(res);
res.insert(self.1.clone());
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
@@ -275,7 +257,7 @@ pub(super) mod sealed {
{
type _Svc = S;
fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot<S, Uri> {
crate::service::oneshot(self, dst)
loop {}
}
}
impl<S, T> ConnectSvc for S
@@ -289,7 +271,7 @@ pub(super) mod sealed {
type Error = S::Error;
type Future = crate::service::Oneshot<S, Uri>;
fn connect(self, _: Internal, dst: Uri) -> Self::Future {
crate::service::oneshot(self, dst)
loop {}
}
}
impl<S, T> Sealed for S
@@ -314,30 +296,10 @@ mod tests {
struct Ex3(&'static str);
#[test]
fn test_connected_extra() {
let c1 = Connected::new().extra(Ex1(41));
let mut ex = ::http::Extensions::new();
assert_eq!(ex.get::< Ex1 > (), None);
c1.extra.as_ref().expect("c1 extra").set(&mut ex);
assert_eq!(ex.get::< Ex1 > (), Some(& Ex1(41)));
loop {}
}
#[test]
fn test_connected_extra_chain() {
let c1 = Connected::new()
.extra(Ex1(45))
.extra(Ex2("zoom"))
.extra(Ex3("pew pew"));
let mut ex1 = ::http::Extensions::new();
assert_eq!(ex1.get::< Ex1 > (), None);
assert_eq!(ex1.get::< Ex2 > (), None);
assert_eq!(ex1.get::< Ex3 > (), None);
c1.extra.as_ref().expect("c1 extra").set(&mut ex1);
assert_eq!(ex1.get::< Ex1 > (), Some(& Ex1(45)));
assert_eq!(ex1.get::< Ex2 > (), Some(& Ex2("zoom")));
assert_eq!(ex1.get::< Ex3 > (), Some(& Ex3("pew pew")));
let c2 = Connected::new().extra(Ex1(33)).extra(Ex2("hiccup")).extra(Ex1(99));
let mut ex2 = ::http::Extensions::new();
c2.extra.as_ref().expect("c2 extra").set(&mut ex2);
assert_eq!(ex2.get::< Ex1 > (), Some(& Ex1(99)));
assert_eq!(ex2.get::< Ex2 > (), Some(& Ex2("hiccup")));
loop {}
}
}
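
The `Connected::extra` chain removed above ends up copying typed values into an `http::Extensions` map, where a later insert of the same type overwrites an earlier one; that is why `test_connected_extra_chain` expects `Ex1(99)` rather than `Ex1(33)`. A small sketch of that `Extensions` behaviour, assuming a direct dependency on the `http` crate (the `Ex1`/`Ex2` types mirror the test's):

#[derive(Clone, Debug, PartialEq)]
struct Ex1(i32);
#[derive(Clone, Debug, PartialEq)]
struct Ex2(&'static str);

fn main() {
    let mut ext = http::Extensions::new();
    // Extensions are keyed by type, so a later insert of the same type replaces
    // the earlier value.
    ext.insert(Ex1(33));
    ext.insert(Ex2("hiccup"));
    ext.insert(Ex1(99));
    assert_eq!(ext.get::<Ex1>(), Some(&Ex1(99)));
    assert_eq!(ext.get::<Ex2>(), Some(&Ex2("hiccup")));
}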

View file

@@ -1,28 +1,17 @@
#[cfg(feature = "http2")]
use std::future::Future;
use futures_util::FutureExt;
use tokio::sync::{mpsc, oneshot};
#[cfg(feature = "http2")]
use crate::common::Pin;
use crate::common::{task, Poll};
pub(crate) type RetryPromise<T, U> = oneshot::Receiver<Result<U, (crate::Error, Option<T>)>>;
pub(crate) type RetryPromise<T, U> = oneshot::Receiver<
Result<U, (crate::Error, Option<T>)>,
>;
pub(crate) type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
pub(crate) fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {
let (tx, rx) = mpsc::unbounded_channel();
let (giver, taker) = want::new();
let tx = Sender {
buffered_once: false,
giver,
inner: tx,
};
let rx = Receiver { inner: rx, taker };
(tx, rx)
loop {}
}
/// A bounded sender of requests and callbacks for when responses are ready.
///
/// While the inner sender is unbounded, the Giver is used to determine
@@ -40,7 +29,6 @@ pub(crate) struct Sender<T, U> {
/// Actually bounded by the Giver, plus `buffered_once`.
inner: mpsc::UnboundedSender<Envelope<T, U>>,
}
/// An unbounded version.
///
/// Cannot poll the Giver, but can still use it to determine if the Receiver
@@ -51,386 +39,163 @@ pub(crate) struct UnboundedSender<T, U> {
giver: want::SharedGiver,
inner: mpsc::UnboundedSender<Envelope<T, U>>,
}
impl<T, U> Sender<T, U> {
pub(crate) fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
self.giver
.poll_want(cx)
.map_err(|_| crate::Error::new_closed())
pub(crate) fn poll_ready(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<crate::Result<()>> {
loop {}
}
pub(crate) fn is_ready(&self) -> bool {
self.giver.is_wanting()
loop {}
}
pub(crate) fn is_closed(&self) -> bool {
self.giver.is_canceled()
loop {}
}
fn can_send(&mut self) -> bool {
if self.giver.give() || !self.buffered_once {
// If the receiver is ready *now*, then of course we can send.
//
// If the receiver isn't ready yet, but we don't have anything
// in the channel yet, then allow one message.
self.buffered_once = true;
true
} else {
false
}
loop {}
}
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
if !self.can_send() {
return Err(val);
}
let (tx, rx) = oneshot::channel();
self.inner
.send(Envelope(Some((val, Callback::Retry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
loop {}
}
pub(crate) fn send(&mut self, val: T) -> Result<Promise<U>, T> {
if !self.can_send() {
return Err(val);
}
let (tx, rx) = oneshot::channel();
self.inner
.send(Envelope(Some((val, Callback::NoRetry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
loop {}
}
#[cfg(feature = "http2")]
pub(crate) fn unbound(self) -> UnboundedSender<T, U> {
UnboundedSender {
giver: self.giver.shared(),
inner: self.inner,
}
loop {}
}
}
#[cfg(feature = "http2")]
impl<T, U> UnboundedSender<T, U> {
pub(crate) fn is_ready(&self) -> bool {
!self.giver.is_canceled()
loop {}
}
pub(crate) fn is_closed(&self) -> bool {
self.giver.is_canceled()
loop {}
}
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
let (tx, rx) = oneshot::channel();
self.inner
.send(Envelope(Some((val, Callback::Retry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
loop {}
}
}
#[cfg(feature = "http2")]
impl<T, U> Clone for UnboundedSender<T, U> {
fn clone(&self) -> Self {
UnboundedSender {
giver: self.giver.clone(),
inner: self.inner.clone(),
}
loop {}
}
}
pub(crate) struct Receiver<T, U> {
inner: mpsc::UnboundedReceiver<Envelope<T, U>>,
taker: want::Taker,
}
impl<T, U> Receiver<T, U> {
pub(crate) fn poll_recv(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Option<(T, Callback<T, U>)>> {
match self.inner.poll_recv(cx) {
Poll::Ready(item) => {
Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped")))
}
Poll::Pending => {
self.taker.want();
Poll::Pending
}
}
loop {}
}
#[cfg(feature = "http1")]
pub(crate) fn close(&mut self) {
self.taker.cancel();
self.inner.close();
loop {}
}
#[cfg(feature = "http1")]
pub(crate) fn try_recv(&mut self) -> Option<(T, Callback<T, U>)> {
match self.inner.recv().now_or_never() {
Some(Some(mut env)) => env.0.take(),
_ => None,
}
loop {}
}
}
impl<T, U> Drop for Receiver<T, U> {
fn drop(&mut self) {
// Notify the giver about the closure first, before dropping
// the mpsc::Receiver.
self.taker.cancel();
loop {}
}
}
struct Envelope<T, U>(Option<(T, Callback<T, U>)>);
impl<T, U> Drop for Envelope<T, U> {
fn drop(&mut self) {
if let Some((val, cb)) = self.0.take() {
cb.send(Err((
crate::Error::new_canceled().with("connection closed"),
Some(val),
)));
}
loop {}
}
}
pub(crate) enum Callback<T, U> {
Retry(Option<oneshot::Sender<Result<U, (crate::Error, Option<T>)>>>),
NoRetry(Option<oneshot::Sender<Result<U, crate::Error>>>),
}
impl<T, U> Drop for Callback<T, U> {
fn drop(&mut self) {
// FIXME(nox): What errors do we want here?
let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() {
"user code panicked"
} else {
"runtime dropped the dispatch task"
});
match self {
Callback::Retry(tx) => {
if let Some(tx) = tx.take() {
let _ = tx.send(Err((error, None)));
}
}
Callback::NoRetry(tx) => {
if let Some(tx) = tx.take() {
let _ = tx.send(Err(error));
}
}
}
loop {}
}
}
impl<T, U> Callback<T, U> {
#[cfg(feature = "http2")]
pub(crate) fn is_canceled(&self) -> bool {
match *self {
Callback::Retry(Some(ref tx)) => tx.is_closed(),
Callback::NoRetry(Some(ref tx)) => tx.is_closed(),
_ => unreachable!(),
}
loop {}
}
pub(crate) fn poll_canceled(&mut self, cx: &mut task::Context<'_>) -> Poll<()> {
match *self {
Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx),
Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx),
_ => unreachable!(),
}
loop {}
}
pub(crate) fn send(mut self, val: Result<U, (crate::Error, Option<T>)>) {
match self {
Callback::Retry(ref mut tx) => {
let _ = tx.take().unwrap().send(val);
}
Callback::NoRetry(ref mut tx) => {
let _ = tx.take().unwrap().send(val.map_err(|e| e.0));
}
}
loop {}
}
#[cfg(feature = "http2")]
pub(crate) async fn send_when(
self,
mut when: impl Future<Output = Result<U, (crate::Error, Option<T>)>> + Unpin,
) {
use futures_util::future;
use tracing::trace;
let mut cb = Some(self);
// "select" on this callback being canceled, and the future completing
future::poll_fn(move |cx| {
match Pin::new(&mut when).poll(cx) {
Poll::Ready(Ok(res)) => {
cb.take().expect("polled after complete").send(Ok(res));
Poll::Ready(())
}
Poll::Pending => {
// check if the callback is canceled
ready!(cb.as_mut().unwrap().poll_canceled(cx));
trace!("send_when canceled");
Poll::Ready(())
}
Poll::Ready(Err(err)) => {
cb.take().expect("polled after complete").send(Err(err));
Poll::Ready(())
}
}
})
.await
loop {}
}
}
#[cfg(test)]
mod tests {
#[cfg(feature = "nightly")]
extern crate test;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use super::{channel, Callback, Receiver};
#[derive(Debug)]
struct Custom(i32);
impl<T, U> Future for Receiver<T, U> {
type Output = Option<(T, Callback<T, U>)>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.poll_recv(cx)
loop {}
}
}
/// Helper to check if the future is ready after polling once.
struct PollOnce<'a, F>(&'a mut F);
impl<F, T> Future for PollOnce<'_, F>
where
F: Future<Output = T> + Unpin,
{
type Output = Option<()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.0).poll(cx) {
Poll::Ready(_) => Poll::Ready(Some(())),
Poll::Pending => Poll::Ready(None),
}
loop {}
}
}
#[tokio::test]
async fn drop_receiver_sends_cancel_errors() {
let _ = pretty_env_logger::try_init();
let (mut tx, mut rx) = channel::<Custom, ()>();
// must poll once for try_send to succeed
assert!(PollOnce(&mut rx).await.is_none(), "rx empty");
let promise = tx.try_send(Custom(43)).unwrap();
drop(rx);
let fulfilled = promise.await;
let err = fulfilled
.expect("fulfilled")
.expect_err("promise should error");
match (err.0.kind(), err.1) {
(&crate::error::Kind::Canceled, Some(_)) => (),
e => panic!("expected Error::Cancel(_), found {:?}", e),
}
loop {}
}
#[tokio::test]
async fn sender_checks_for_want_on_send() {
let (mut tx, mut rx) = channel::<Custom, ()>();
// one is allowed to buffer, second is rejected
let _ = tx.try_send(Custom(1)).expect("1 buffered");
tx.try_send(Custom(2)).expect_err("2 not ready");
assert!(PollOnce(&mut rx).await.is_some(), "rx once");
// Even though 1 has been popped, only 1 could be buffered for the
// lifetime of the channel.
tx.try_send(Custom(2)).expect_err("2 still not ready");
assert!(PollOnce(&mut rx).await.is_none(), "rx empty");
let _ = tx.try_send(Custom(2)).expect("2 ready");
loop {}
}
#[cfg(feature = "http2")]
#[test]
fn unbounded_sender_doesnt_bound_on_want() {
let (tx, rx) = channel::<Custom, ()>();
let mut tx = tx.unbound();
let _ = tx.try_send(Custom(1)).unwrap();
let _ = tx.try_send(Custom(2)).unwrap();
let _ = tx.try_send(Custom(3)).unwrap();
drop(rx);
let _ = tx.try_send(Custom(4)).unwrap_err();
loop {}
}
#[cfg(feature = "nightly")]
#[bench]
fn giver_queue_throughput(b: &mut test::Bencher) {
use crate::{Body, Request, Response};
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
let (mut tx, mut rx) = channel::<Request<Body>, Response<Body>>();
b.iter(move || {
let _ = tx.send(Request::default()).unwrap();
rt.block_on(async {
loop {
let poll_once = PollOnce(&mut rx);
let opt = poll_once.await;
if opt.is_none() {
break;
}
}
});
})
loop {}
}
#[cfg(feature = "nightly")]
#[bench]
fn giver_queue_not_ready(b: &mut test::Bencher) {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
let (_tx, mut rx) = channel::<i32, ()>();
b.iter(move || {
rt.block_on(async {
let poll_once = PollOnce(&mut rx);
assert!(poll_once.await.is_none());
});
})
loop {}
}
#[cfg(feature = "nightly")]
#[bench]
fn giver_queue_cancel(b: &mut test::Bencher) {
let (_tx, mut rx) = channel::<i32, ()>();
b.iter(move || {
rx.taker.cancel();
})
loop {}
}
}
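
The dispatch channel stripped above pairs an unbounded `mpsc` queue with a per-request `oneshot` callback (the `Envelope` and `Callback` types), gated by the `want` crate for readiness. A reduced sketch of just the envelope-plus-callback half, with placeholder names and without the `want` gate, assuming tokio's `sync`, `macros`, and `rt-multi-thread` features:

use tokio::sync::{mpsc, oneshot};

// Each queued request carries a oneshot sender so the dispatcher can hand the
// response (or an error) back to exactly the caller that queued it.
type Callback = oneshot::Sender<Result<String, String>>;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<(String, Callback)>();

    // Dispatcher side: receive envelopes and fulfil their callbacks.
    tokio::spawn(async move {
        while let Some((req, cb)) = rx.recv().await {
            let _ = cb.send(Ok(format!("response to {}", req)));
        }
    });

    // Caller side: send the request plus its callback, then await the promise.
    let (cb_tx, cb_rx) = oneshot::channel();
    tx.send(("GET /".to_string(), cb_tx)).expect("dispatcher alive");
    println!("{:?}", cb_rx.await);
}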

File diff suppressed because it is too large

View file

@@ -25,11 +25,7 @@ impl<C, B, T> Connect<C, B, T> {
/// Create a new `Connect` with some inner connector `C` and a connection
/// builder.
pub(crate) fn new(inner: C, builder: Builder) -> Self {
Self {
inner,
builder,
_pd: PhantomData,
}
loop {}
}
}
impl<C, B, T> Service<T> for Connect<C, B, T>
@@ -51,37 +47,9 @@ where
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
self.inner
.poll_ready(cx)
.map_err(|e| crate::Error::new(crate::error::Kind::Connect).with(e.into()))
loop {}
}
fn call(&mut self, req: T) -> Self::Future {
let builder = self.builder.clone();
let io = self.inner.make_connection(req);
let fut = async move {
match io.await {
Ok(io) => {
match builder.handshake(io).await {
Ok((sr, conn)) => {
builder
.exec
.execute(async move {
if let Err(e) = conn.await {
debug!("connection error: {:?}", e);
}
});
Ok(sr)
}
Err(e) => Err(e),
}
}
Err(e) => {
let err = crate::Error::new(crate::error::Kind::Connect)
.with(e.into());
Err(err)
}
}
};
Box::pin(fut)
loop {}
}
}
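
The removed `call` body performs the handshake and then spawns the connection future so it keeps being driven while the caller only holds the request half. A hedged usage sketch of that split, assuming hyper 0.14's public `client::conn::handshake` (with the `client`, `http1`, and `tcp` features enabled) and a placeholder host:

use hyper::{Body, Request};
use tokio::net::TcpStream;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let io = TcpStream::connect("example.com:80").await?;
    // The handshake splits into a request handle and a connection-driver future.
    let (mut send_request, connection) = hyper::client::conn::handshake(io).await?;
    // The driver must be polled for requests to make progress, so spawn it.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {:?}", e);
        }
    });
    let req = Request::builder()
        .uri("/")
        .header("host", "example.com")
        .body(Body::empty())?;
    let response = send_request.send_request(req).await?;
    println!("status: {}", response.status());
    Ok(())
}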

View file

@@ -1,286 +1,8 @@
use std::io;
use futures_util::future;
use tokio::net::TcpStream;
use super::Client;
#[tokio::test]
async fn client_connect_uri_argument() {
let connector = tower::service_fn(|dst: http::Uri| {
assert_eq!(dst.scheme(), Some(&http::uri::Scheme::HTTP));
assert_eq!(dst.host(), Some("example.local"));
assert_eq!(dst.port(), None);
assert_eq!(dst.path(), "/", "path should be removed");
future::err::<TcpStream, _>(io::Error::new(io::ErrorKind::Other, "expect me"))
});
let client = Client::builder().build::<_, crate::Body>(connector);
let _ = client
.get("http://example.local/and/a/path".parse().unwrap())
.await
.expect_err("response should fail");
loop {}
}
/*
// FIXME: re-implement tests with `async/await`
#[test]
fn retryable_request() {
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().expect("new rt");
let mut connector = MockConnector::new();
let sock1 = connector.mock("http://mock.local");
let sock2 = connector.mock("http://mock.local");
let client = Client::builder()
.build::<_, crate::Body>(connector);
client.pool.no_timer();
{
let req = Request::builder()
.uri("http://mock.local/a")
.body(Default::default())
.unwrap();
let res1 = client.request(req);
let srv1 = poll_fn(|| {
try_ready!(sock1.read(&mut [0u8; 512]));
try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
rt.block_on(res1.join(srv1)).expect("res1");
}
drop(sock1);
let req = Request::builder()
.uri("http://mock.local/b")
.body(Default::default())
.unwrap();
let res2 = client.request(req)
.map(|res| {
assert_eq!(res.status().as_u16(), 222);
});
let srv2 = poll_fn(|| {
try_ready!(sock2.read(&mut [0u8; 512]));
try_ready!(sock2.write(b"HTTP/1.1 222 OK\r\nContent-Length: 0\r\n\r\n"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e));
rt.block_on(res2.join(srv2)).expect("res2");
}
#[test]
fn conn_reset_after_write() {
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().expect("new rt");
let mut connector = MockConnector::new();
let sock1 = connector.mock("http://mock.local");
let client = Client::builder()
.build::<_, crate::Body>(connector);
client.pool.no_timer();
{
let req = Request::builder()
.uri("http://mock.local/a")
.body(Default::default())
.unwrap();
let res1 = client.request(req);
let srv1 = poll_fn(|| {
try_ready!(sock1.read(&mut [0u8; 512]));
try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
rt.block_on(res1.join(srv1)).expect("res1");
}
let req = Request::builder()
.uri("http://mock.local/a")
.body(Default::default())
.unwrap();
let res2 = client.request(req);
let mut sock1 = Some(sock1);
let srv2 = poll_fn(|| {
// We purposefully keep the socket open until the client
// has written the second request, and THEN disconnect.
//
// Not because we expect servers to be jerks, but to trigger
// state where we write on an assumedly good connection, and
// only reset the close AFTER we wrote bytes.
try_ready!(sock1.as_mut().unwrap().read(&mut [0u8; 512]));
sock1.take();
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e));
let err = rt.block_on(res2.join(srv2)).expect_err("res2");
assert!(err.is_incomplete_message(), "{:?}", err);
}
#[test]
fn checkout_win_allows_connect_future_to_be_pooled() {
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().expect("new rt");
let mut connector = MockConnector::new();
let (tx, rx) = oneshot::channel::<()>();
let sock1 = connector.mock("http://mock.local");
let sock2 = connector.mock_fut("http://mock.local", rx);
let client = Client::builder()
.build::<_, crate::Body>(connector);
client.pool.no_timer();
let uri = "http://mock.local/a".parse::<crate::Uri>().expect("uri parse");
// First request just sets us up to have a connection able to be put
// back in the pool. *However*, it doesn't insert immediately. The
// body has 1 pending byte, and we will only drain in request 2, once
// the connect future has been started.
let mut body = {
let res1 = client.get(uri.clone())
.map(|res| res.into_body().concat2());
let srv1 = poll_fn(|| {
try_ready!(sock1.read(&mut [0u8; 512]));
// Chunked is used so as to force 2 body reads.
try_ready!(sock1.write(b"\
HTTP/1.1 200 OK\r\n\
transfer-encoding: chunked\r\n\
\r\n\
1\r\nx\r\n\
0\r\n\r\n\
"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
rt.block_on(res1.join(srv1)).expect("res1").0
};
// The second request triggers the only mocked connect future, but then
// the drained body allows the first socket to go back to the pool,
// "winning" the checkout race.
{
let res2 = client.get(uri.clone());
let drain = poll_fn(move || {
body.poll()
});
let srv2 = poll_fn(|| {
try_ready!(sock1.read(&mut [0u8; 512]));
try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nx"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv2 poll_fn error: {}", e));
rt.block_on(res2.join(drain).join(srv2)).expect("res2");
}
// "Release" the mocked connect future, and let the runtime spin once so
// it's all setup...
{
let mut tx = Some(tx);
let client = &client;
let key = client.pool.h1_key("http://mock.local");
let mut tick_cnt = 0;
let fut = poll_fn(move || {
tx.take();
if client.pool.idle_count(&key) == 0 {
tick_cnt += 1;
assert!(tick_cnt < 10, "ticked too many times waiting for idle");
trace!("no idle yet; tick count: {}", tick_cnt);
::futures::task::current().notify();
Ok(Async::NotReady)
} else {
Ok::<_, ()>(Async::Ready(()))
}
});
rt.block_on(fut).unwrap();
}
// Third request just tests out that the "loser" connection was pooled. If
// it isn't, this will panic since the MockConnector doesn't have any more
// mocks to give out.
{
let res3 = client.get(uri);
let srv3 = poll_fn(|| {
try_ready!(sock2.read(&mut [0u8; 512]));
try_ready!(sock2.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv3 poll_fn error: {}", e));
rt.block_on(res3.join(srv3)).expect("res3");
}
}
#[cfg(feature = "nightly")]
#[bench]
fn bench_http1_get_0b(b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().expect("new rt");
let mut connector = MockConnector::new();
let client = Client::builder()
.build::<_, crate::Body>(connector.clone());
client.pool.no_timer();
let uri = Uri::from_static("http://mock.local/a");
b.iter(move || {
let sock1 = connector.mock("http://mock.local");
let res1 = client
.get(uri.clone())
.and_then(|res| {
res.into_body().for_each(|_| Ok(()))
});
let srv1 = poll_fn(|| {
try_ready!(sock1.read(&mut [0u8; 512]));
try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
rt.block_on(res1.join(srv1)).expect("res1");
});
}
#[cfg(feature = "nightly")]
#[bench]
fn bench_http1_get_10b(b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().expect("new rt");
let mut connector = MockConnector::new();
let client = Client::builder()
.build::<_, crate::Body>(connector.clone());
client.pool.no_timer();
let uri = Uri::from_static("http://mock.local/a");
b.iter(move || {
let sock1 = connector.mock("http://mock.local");
let res1 = client
.get(uri.clone())
.and_then(|res| {
res.into_body().for_each(|_| Ok(()))
});
let srv1 = poll_fn(|| {
try_ready!(sock1.read(&mut [0u8; 512]));
try_ready!(sock1.write(b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n0123456789"));
Ok(Async::Ready(()))
}).map_err(|e: std::io::Error| panic!("srv1 poll_fn error: {}", e));
rt.block_on(res1.join(srv1)).expect("res1");
});
}
*/

View file

@@ -1,151 +1,71 @@
use std::collections::VecDeque;
use std::io::IoSlice;
use bytes::{Buf, BufMut, Bytes, BytesMut};
pub(crate) struct BufList<T> {
bufs: VecDeque<T>,
}
impl<T: Buf> BufList<T> {
pub(crate) fn new() -> BufList<T> {
BufList {
bufs: VecDeque::new(),
}
loop {}
}
#[inline]
pub(crate) fn push(&mut self, buf: T) {
debug_assert!(buf.has_remaining());
self.bufs.push_back(buf);
loop {}
}
#[inline]
#[cfg(feature = "http1")]
pub(crate) fn bufs_cnt(&self) -> usize {
self.bufs.len()
loop {}
}
}
impl<T: Buf> Buf for BufList<T> {
#[inline]
fn remaining(&self) -> usize {
self.bufs.iter().map(|buf| buf.remaining()).sum()
loop {}
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bufs.front().map(Buf::chunk).unwrap_or_default()
loop {}
}
#[inline]
fn advance(&mut self, mut cnt: usize) {
while cnt > 0 {
{
let front = &mut self.bufs[0];
let rem = front.remaining();
if rem > cnt {
front.advance(cnt);
return;
} else {
front.advance(rem);
cnt -= rem;
}
}
self.bufs.pop_front();
}
loop {}
}
#[inline]
fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
if dst.is_empty() {
return 0;
}
let mut vecs = 0;
for buf in &self.bufs {
vecs += buf.chunks_vectored(&mut dst[vecs..]);
if vecs == dst.len() {
break;
}
}
vecs
loop {}
}
#[inline]
fn copy_to_bytes(&mut self, len: usize) -> Bytes {
// Our inner buffer may have an optimized version of copy_to_bytes, and if the whole
// request can be fulfilled by the front buffer, we can take advantage.
match self.bufs.front_mut() {
Some(front) if front.remaining() == len => {
let b = front.copy_to_bytes(len);
self.bufs.pop_front();
b
}
Some(front) if front.remaining() > len => front.copy_to_bytes(len),
_ => {
assert!(len <= self.remaining(), "`len` greater than remaining");
let mut bm = BytesMut::with_capacity(len);
bm.put(self.take(len));
bm.freeze()
}
}
loop {}
}
}
#[cfg(test)]
mod tests {
use std::ptr;
use super::*;
fn hello_world_buf() -> BufList<Bytes> {
BufList {
bufs: vec![Bytes::from("Hello"), Bytes::from(" "), Bytes::from("World")].into(),
}
loop {}
}
#[test]
fn to_bytes_shorter() {
let mut bufs = hello_world_buf();
let old_ptr = bufs.chunk().as_ptr();
let start = bufs.copy_to_bytes(4);
assert_eq!(start, "Hell");
assert!(ptr::eq(old_ptr, start.as_ptr()));
assert_eq!(bufs.chunk(), b"o");
assert!(ptr::eq(old_ptr.wrapping_add(4), bufs.chunk().as_ptr()));
assert_eq!(bufs.remaining(), 7);
loop {}
}
#[test]
fn to_bytes_eq() {
let mut bufs = hello_world_buf();
let old_ptr = bufs.chunk().as_ptr();
let start = bufs.copy_to_bytes(5);
assert_eq!(start, "Hello");
assert!(ptr::eq(old_ptr, start.as_ptr()));
assert_eq!(bufs.chunk(), b" ");
assert_eq!(bufs.remaining(), 6);
loop {}
}
#[test]
fn to_bytes_longer() {
let mut bufs = hello_world_buf();
let start = bufs.copy_to_bytes(7);
assert_eq!(start, "Hello W");
assert_eq!(bufs.remaining(), 4);
loop {}
}
#[test]
fn one_long_buf_to_bytes() {
let mut buf = BufList::new();
buf.push(b"Hello World" as &[_]);
assert_eq!(buf.copy_to_bytes(5), "Hello");
assert_eq!(buf.chunk(), b" World");
loop {}
}
#[test]
#[should_panic(expected = "`len` greater than remaining")]
fn buf_to_bytes_too_many() {
hello_world_buf().copy_to_bytes(42);
loop {}
}
}
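
The `copy_to_bytes` body removed above has a fast path: if the front buffer alone can satisfy the request, `Bytes::copy_to_bytes` hands back a slice of the same allocation, which is what the pointer-equality assertions in `to_bytes_shorter` check. A small sketch of both paths using the `bytes` crate directly:

use bytes::{Buf, BufMut, Bytes, BytesMut};

fn main() {
    // Fast path: the whole request fits in one Bytes, so this is a cheap slice
    // of the same allocation rather than a copy.
    let mut front = Bytes::from("Hello");
    let old_ptr = front.as_ptr();
    let hell = front.copy_to_bytes(4);
    assert_eq!(hell, "Hell");
    assert!(std::ptr::eq(old_ptr, hell.as_ptr()));

    // Slow path: spanning multiple buffers means assembling into a fresh BytesMut.
    let mut out = BytesMut::with_capacity(7);
    out.put(front); // remaining "o"
    out.put(&b" World"[..]);
    assert_eq!(out.freeze(), "o World");
}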

View file

@@ -2,123 +2,67 @@ use std::cell::RefCell;
use std::fmt::{self, Write};
use std::str;
use std::time::{Duration, SystemTime};
#[cfg(feature = "http2")]
use http::header::HeaderValue;
use httpdate::HttpDate;
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[cfg(feature = "http1")]
pub(crate) fn extend(dst: &mut Vec<u8>) {
CACHED.with(|cache| {
dst.extend_from_slice(cache.borrow().buffer());
})
loop {}
}
#[cfg(feature = "http1")]
pub(crate) fn update() {
CACHED.with(|cache| {
cache.borrow_mut().check();
})
loop {}
}
#[cfg(feature = "http2")]
pub(crate) fn update_and_header_value() -> HeaderValue {
CACHED.with(|cache| {
let mut cache = cache.borrow_mut();
cache.check();
HeaderValue::from_bytes(cache.buffer()).expect("Date format should be valid HeaderValue")
})
loop {}
}
struct CachedDate {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
next_update: SystemTime,
}
thread_local!(static CACHED: RefCell<CachedDate> = RefCell::new(CachedDate::new()));
thread_local!(static CACHED : RefCell < CachedDate > = RefCell::new(CachedDate::new()));
impl CachedDate {
fn new() -> Self {
let mut cache = CachedDate {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
next_update: SystemTime::now(),
};
cache.update(cache.next_update);
cache
loop {}
}
fn buffer(&self) -> &[u8] {
&self.bytes[..]
loop {}
}
fn check(&mut self) {
let now = SystemTime::now();
if now > self.next_update {
self.update(now);
}
loop {}
}
fn update(&mut self, now: SystemTime) {
self.render(now);
self.next_update = now + Duration::new(1, 0);
loop {}
}
fn render(&mut self, now: SystemTime) {
self.pos = 0;
let _ = write!(self, "{}", HttpDate::from(now));
debug_assert!(self.pos == DATE_VALUE_LENGTH);
loop {}
}
}
impl fmt::Write for CachedDate {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
loop {}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "nightly")]
use test::Bencher;
#[test]
fn test_date_len() {
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
loop {}
}
#[cfg(feature = "nightly")]
#[bench]
fn bench_date_check(b: &mut Bencher) {
let mut date = CachedDate::new();
// cache the first update
date.check();
b.iter(|| {
date.check();
});
loop {}
}
#[cfg(feature = "nightly")]
#[bench]
fn bench_date_render(b: &mut Bencher) {
let mut date = CachedDate::new();
let now = SystemTime::now();
date.render(now);
b.bytes = date.buffer().len() as u64;
b.iter(|| {
date.render(now);
test::black_box(&date);
});
loop {}
}
}
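
The removed bodies implement a per-thread, once-per-second cache of the `Date` header value. A simplified standalone sketch of that idea, assuming the `httpdate` crate (the `DateCache` type here is illustrative, not the module's `CachedDate`):

use std::time::{Duration, SystemTime};

// Re-render the Date value only when the clock has moved past `next_update`.
struct DateCache {
    value: String,
    next_update: SystemTime,
}

impl DateCache {
    fn new() -> Self {
        let now = SystemTime::now();
        DateCache {
            value: httpdate::fmt_http_date(now),
            next_update: now + Duration::from_secs(1),
        }
    }

    fn get(&mut self) -> &str {
        let now = SystemTime::now();
        if now > self.next_update {
            self.value = httpdate::fmt_http_date(now);
            self.next_update = now + Duration::from_secs(1);
        }
        &self.value
    }
}

fn main() {
    let mut cache = DateCache::new();
    // Same length as "Sun, 06 Nov 1994 08:49:37 GMT".
    assert_eq!(cache.get().len(), 29);
}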

View file

@@ -1,217 +1,80 @@
use std::mem;
use pin_project_lite::pin_project;
use tokio::sync::watch;
use super::{task, Future, Pin, Poll};
pub(crate) fn channel() -> (Signal, Watch) {
let (tx, rx) = watch::channel(());
(Signal { tx }, Watch { rx })
loop {}
}
pub(crate) struct Signal {
tx: watch::Sender<()>,
}
pub(crate) struct Draining(Pin<Box<dyn Future<Output = ()> + Send + Sync>>);
#[derive(Clone)]
pub(crate) struct Watch {
rx: watch::Receiver<()>,
}
pin_project! {
#[allow(missing_debug_implementations)]
pub struct Watching<F, FN> {
#[pin]
future: F,
state: State<FN>,
watch: Pin<Box<dyn Future<Output = ()> + Send + Sync>>,
_rx: watch::Receiver<()>,
}
#[allow(missing_debug_implementations)] pub struct Watching < F, FN > { #[pin] future
: F, state : State < FN >, watch : Pin < Box < dyn Future < Output = () > + Send +
Sync >>, _rx : watch::Receiver < () >, }
}
enum State<F> {
Watch(F),
Draining,
}
impl Signal {
pub(crate) fn drain(self) -> Draining {
let _ = self.tx.send(());
Draining(Box::pin(async move { self.tx.closed().await }))
loop {}
}
}
impl Future for Draining {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.as_mut().0).poll(cx)
loop {}
}
}
impl Watch {
pub(crate) fn watch<F, FN>(self, future: F, on_drain: FN) -> Watching<F, FN>
where
F: Future,
FN: FnOnce(Pin<&mut F>),
{
let Self { mut rx } = self;
let _rx = rx.clone();
Watching {
future,
state: State::Watch(on_drain),
watch: Box::pin(async move {
let _ = rx.changed().await;
}),
// Keep the receiver alive until the future completes, so that
// dropping it can signal that draining has completed.
_rx,
}
loop {}
}
}
impl<F, FN> Future for Watching<F, FN>
where
F: Future,
FN: FnOnce(Pin<&mut F>),
{
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
loop {
match mem::replace(me.state, State::Draining) {
State::Watch(on_drain) => {
match Pin::new(&mut me.watch).poll(cx) {
Poll::Ready(()) => {
// Drain has been triggered!
on_drain(me.future.as_mut());
}
Poll::Pending => {
*me.state = State::Watch(on_drain);
return me.future.poll(cx);
}
}
}
State::Draining => return me.future.poll(cx),
}
}
loop {}
}
}
#[cfg(test)]
mod tests {
use super::*;
struct TestMe {
draining: bool,
finished: bool,
poll_cnt: usize,
}
impl Future for TestMe {
type Output = ();
fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<Self::Output> {
self.poll_cnt += 1;
if self.finished {
Poll::Ready(())
} else {
Poll::Pending
}
fn poll(
mut self: Pin<&mut Self>,
_: &mut task::Context<'_>,
) -> Poll<Self::Output> {
loop {}
}
}
#[test]
fn watch() {
let mut mock = tokio_test::task::spawn(());
mock.enter(|cx, _| {
let (tx, rx) = channel();
let fut = TestMe {
draining: false,
finished: false,
poll_cnt: 0,
};
let mut watch = rx.watch(fut, |mut fut| {
fut.draining = true;
});
assert_eq!(watch.future.poll_cnt, 0);
// First poll should poll the inner future
assert!(Pin::new(&mut watch).poll(cx).is_pending());
assert_eq!(watch.future.poll_cnt, 1);
// Second poll should poll the inner future again
assert!(Pin::new(&mut watch).poll(cx).is_pending());
assert_eq!(watch.future.poll_cnt, 2);
let mut draining = tx.drain();
// Drain signaled, but needs another poll to be noticed.
assert!(!watch.future.draining);
assert_eq!(watch.future.poll_cnt, 2);
// Now, poll after drain has been signaled.
assert!(Pin::new(&mut watch).poll(cx).is_pending());
assert_eq!(watch.future.poll_cnt, 3);
assert!(watch.future.draining);
// Draining is not ready until watcher completes
assert!(Pin::new(&mut draining).poll(cx).is_pending());
// Finishing up the watch future
watch.future.finished = true;
assert!(Pin::new(&mut watch).poll(cx).is_ready());
assert_eq!(watch.future.poll_cnt, 4);
drop(watch);
assert!(Pin::new(&mut draining).poll(cx).is_ready());
})
loop {}
}
#[test]
fn watch_clones() {
let mut mock = tokio_test::task::spawn(());
mock.enter(|cx, _| {
let (tx, rx) = channel();
let fut1 = TestMe {
draining: false,
finished: false,
poll_cnt: 0,
};
let fut2 = TestMe {
draining: false,
finished: false,
poll_cnt: 0,
};
let watch1 = rx.clone().watch(fut1, |mut fut| {
fut.draining = true;
});
let watch2 = rx.watch(fut2, |mut fut| {
fut.draining = true;
});
let mut draining = tx.drain();
// Still 2 outstanding watchers
assert!(Pin::new(&mut draining).poll(cx).is_pending());
// drop 1 for whatever reason
drop(watch1);
// Still not ready, 1 other watcher still pending
assert!(Pin::new(&mut draining).poll(cx).is_pending());
drop(watch2);
// Now all watchers are gone, draining is complete
assert!(Pin::new(&mut draining).poll(cx).is_ready());
});
loop {}
}
}
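// A minimal sketch of the drain handshake above (illustrative, not code from this
// diff), assuming the `send`/`changed`/`closed` calls come from tokio's `sync::watch`
// channel: `drain()` broadcasts a value so every `Watching` future runs its `on_drain`
// callback, and `Draining` resolves once all receivers have been dropped.
#[allow(dead_code)]
async fn drain_demo() {
    use tokio::sync::watch;

    let (tx, mut rx) = watch::channel(());
    let watcher = tokio::spawn(async move {
        // A watcher runs until the drain signal arrives, then finishes up and drops
        // its receiver, which is what lets `closed()` below resolve.
        let _ = rx.changed().await;
    });
    let _ = tx.send(()); // signal drain to every watcher
    tx.closed().await;   // resolves once every receiver is gone
    let _ = watcher.await;
}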

@@ -32,23 +32,12 @@ impl Exec {
where
F: Future<Output = ()> + Send + 'static,
{
match *self {
Exec::Default => {
#[cfg(feature = "tcp")]
{
tokio::task::spawn(fut);
}
#[cfg(not(feature = "tcp"))] { panic!("executor must be set") }
}
Exec::Executor(ref e) => {
e.execute(Box::pin(fut));
}
}
loop {}
}
}
impl fmt::Debug for Exec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Exec").finish()
loop {}
}
}
#[cfg(feature = "server")]
@@ -58,7 +47,7 @@ where
B: HttpBody,
{
fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
self.execute(fut)
loop {}
}
}
#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
@@ -80,7 +69,7 @@ where
B: HttpBody,
{
fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
self.execute(fut)
loop {}
}
}
#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
@@ -111,6 +100,6 @@ where
self: Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
unreachable!()
loop {}
}
}

@@ -1,49 +1,30 @@
use std::marker::Unpin;
use std::{cmp, io};
use bytes::{Buf, Bytes};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use crate::common::{task, Pin, Poll};
/// Combine a buffer with an IO, rewinding reads to use the buffer.
#[derive(Debug)]
pub(crate) struct Rewind<T> {
pre: Option<Bytes>,
inner: T,
}
impl<T> Rewind<T> {
#[cfg(any(all(feature = "http2", feature = "server"), test))]
pub(crate) fn new(io: T) -> Self {
Rewind {
pre: None,
inner: io,
}
loop {}
}
pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self {
Rewind {
pre: Some(buf),
inner: io,
}
loop {}
}
#[cfg(any(all(feature = "http1", feature = "http2", feature = "server"), test))]
pub(crate) fn rewind(&mut self, bs: Bytes) {
debug_assert!(self.pre.is_none());
self.pre = Some(bs);
loop {}
}
pub(crate) fn into_inner(self) -> (T, Bytes) {
(self.inner, self.pre.unwrap_or_else(Bytes::new))
loop {}
}
// pub(crate) fn get_mut(&mut self) -> &mut T {
// &mut self.inner
// }
}
impl<T> AsyncRead for Rewind<T>
where
T: AsyncRead + Unpin,
@@ -53,25 +34,9 @@ where
cx: &mut task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
if let Some(mut prefix) = self.pre.take() {
// If there are no remaining bytes, let the bytes get dropped.
if !prefix.is_empty() {
let copy_len = cmp::min(prefix.len(), buf.remaining());
// TODO: There should be a cleaner way to do the following two lines...
buf.put_slice(&prefix[..copy_len]);
prefix.advance(copy_len);
// Put back what's left
if !prefix.is_empty() {
self.pre = Some(prefix);
}
return Poll::Ready(Ok(()));
}
}
Pin::new(&mut self.inner).poll_read(cx, buf)
loop {}
}
}
impl<T> AsyncWrite for Rewind<T>
where
T: AsyncWrite + Unpin,
@@ -81,75 +46,42 @@ where
cx: &mut task::Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_write(cx, buf)
loop {}
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)
loop {}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_flush(cx)
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
loop {}
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_shutdown(cx)
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
loop {}
}
fn is_write_vectored(&self) -> bool {
self.inner.is_write_vectored()
loop {}
}
}
#[cfg(test)]
mod tests {
// FIXME: re-implement tests with `async/await`, this import should
// trigger a warning to remind us
use super::Rewind;
use bytes::Bytes;
use tokio::io::AsyncReadExt;
#[tokio::test]
async fn partial_rewind() {
let underlying = [104, 101, 108, 108, 111];
let mock = tokio_test::io::Builder::new().read(&underlying).build();
let mut stream = Rewind::new(mock);
// Read off some bytes, ensure we filled o1
let mut buf = [0; 2];
stream.read_exact(&mut buf).await.expect("read1");
// Rewind the stream so that it is as if we never read in the first place.
stream.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
// At this point we should have read everything that was in the MockStream
assert_eq!(&buf, &underlying);
loop {}
}
#[tokio::test]
async fn full_rewind() {
let underlying = [104, 101, 108, 108, 111];
let mock = tokio_test::io::Builder::new().read(&underlying).build();
let mut stream = Rewind::new(mock);
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
// Rewind the stream so that it is as if we never read in the first place.
stream.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
loop {}
}
}
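// A sketch of the buffer-then-rewind idea the tests above exercise: peek at the first
// bytes of a connection (for example to sniff the HTTP/2 connection preface, which is
// what the `http2`/`server` cfg on `new`/`rewind` hints at), then push them back so a
// later reader sees the stream from the beginning. Illustrative only; the preface
// handling here is an assumption, not code from this diff.
#[cfg(any(all(feature = "http1", feature = "http2", feature = "server"), test))]
#[allow(dead_code)]
async fn sniff_h2_preface<T>(io: T) -> std::io::Result<(Rewind<T>, bool)>
where
    T: AsyncRead + Unpin,
{
    use tokio::io::AsyncReadExt;

    const PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
    let mut io = Rewind::new(io);
    let mut buf = [0u8; 24];
    let n = io.read(&mut buf).await?;
    let looks_like_h2 = n == PREFACE.len() && buf[..n] == PREFACE[..];
    if n > 0 {
        // Put whatever we consumed back in front of the stream.
        io.rewind(Bytes::copy_from_slice(&buf[..n]));
    }
    Ok((io, looks_like_h2))
}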

@@ -1,76 +1,39 @@
use pin_project_lite::pin_project;
use super::{task, Future, Pin, Poll};
pub(crate) trait Started: Future {
fn started(&self) -> bool;
}
pub(crate) fn lazy<F, R>(func: F) -> Lazy<F, R>
where
F: FnOnce() -> R,
R: Future + Unpin,
{
Lazy {
inner: Inner::Init { func },
}
loop {}
}
// FIXME: allow() required due to `impl Trait` leaking types to this lint
pin_project! {
#[allow(missing_debug_implementations)]
pub(crate) struct Lazy<F, R> {
#[pin]
inner: Inner<F, R>,
}
#[allow(missing_debug_implementations)] pub (crate) struct Lazy < F, R > { #[pin]
inner : Inner < F, R >, }
}
pin_project! {
#[project = InnerProj]
#[project_replace = InnerProjReplace]
enum Inner<F, R> {
Init { func: F },
Fut { #[pin] fut: R },
Empty,
}
#[project = InnerProj] #[project_replace = InnerProjReplace] enum Inner < F, R > {
Init { func : F }, Fut { #[pin] fut : R }, Empty, }
}
impl<F, R> Started for Lazy<F, R>
where
F: FnOnce() -> R,
R: Future,
{
fn started(&self) -> bool {
match self.inner {
Inner::Init { .. } => false,
Inner::Fut { .. } | Inner::Empty => true,
}
loop {}
}
}
impl<F, R> Future for Lazy<F, R>
where
F: FnOnce() -> R,
R: Future,
{
type Output = R::Output;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
if let InnerProj::Fut { fut } = this.inner.as_mut().project() {
return fut.poll(cx);
}
match this.inner.as_mut().project_replace(Inner::Empty) {
InnerProjReplace::Init { func } => {
this.inner.set(Inner::Fut { fut: func() });
if let InnerProj::Fut { fut } = this.inner.project() {
return fut.poll(cx);
}
unreachable!()
}
_ => unreachable!("lazy state wrong"),
}
loop {}
}
}

@@ -1,21 +1,17 @@
//! An uninhabitable type meaning it can never happen.
//!
//! To be replaced with `!` once it is stable.
use std::error::Error;
use std::fmt;
#[derive(Debug)]
pub(crate) enum Never {}
impl fmt::Display for Never {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {}
loop {}
}
}
impl Error for Never {
fn description(&self) -> &str {
match *self {}
loop {}
}
}

@@ -1,7 +1,3 @@
/*
* This is a copy of the sync_wrapper crate.
*/
/// A mutual exclusion primitive that relies on static type information only
///
/// In some cases synchronization can be proven statically: whenever you hold an exclusive `&mut`
@@ -42,7 +38,6 @@
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
#[repr(transparent)]
pub(crate) struct SyncWrapper<T>(T);
impl<T> SyncWrapper<T> {
/// Creates a new SyncWrapper containing the given value.
///
@@ -54,9 +49,8 @@ impl<T> SyncWrapper<T> {
/// let wrapped = SyncWrapper::new(42);
/// ```
pub(crate) fn new(value: T) -> Self {
Self(value)
loop {}
}
/// Acquires a reference to the protected value.
///
/// This is safe because it requires an exclusive reference to the wrapper. Therefore this method
@@ -78,9 +72,8 @@ impl<T> SyncWrapper<T> {
/// assert_eq!(*wrapped.get_mut(), 0);
/// ```
pub(crate) fn get_mut(&mut self) -> &mut T {
&mut self.0
loop {}
}
/// Consumes this wrapper, returning the underlying data.
///
/// This is safe because it requires ownership of the wrapper, therefore this method will neither
@@ -101,10 +94,7 @@ impl<T> SyncWrapper<T> {
/// ```
#[allow(dead_code)]
pub(crate) fn into_inner(self) -> T {
self.0
loop {}
}
}
// this is safe because the only operations permitted on this data structure require exclusive
// access or ownership
unsafe impl<T: Send> Sync for SyncWrapper<T> {}
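// A small sketch of what the wrapper buys (illustrative, not part of this diff):
// `Cell<u32>` is `Send` but not `Sync`, yet `SyncWrapper<Cell<u32>>` is `Sync`,
// because every way to reach the inner value goes through `&mut self` or ownership.
#[allow(dead_code)]
fn assert_sync<T: Sync>() {}

#[allow(dead_code)]
fn sync_wrapper_demo() {
    use std::cell::Cell;

    assert_sync::<SyncWrapper<Cell<u32>>>();
    let mut wrapped = SyncWrapper::new(Cell::new(1));
    wrapped.get_mut().set(2);
    assert_eq!(wrapped.into_inner().get(), 2);
}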

@@ -1,12 +1,10 @@
#[cfg(feature = "http1")]
use super::Never;
pub(crate) use std::task::{Context, Poll};
/// A function to help "yield" a future, such that it is re-scheduled immediately.
///
/// Useful for spin counts, so a future doesn't hog too much time.
#[cfg(feature = "http1")]
pub(crate) fn yield_now(cx: &mut Context<'_>) -> Poll<Never> {
cx.waker().wake_by_ref();
Poll::Pending
loop {}
}
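// A sketch of how a yield helper like this is typically used (the dispatcher's
// `poll_loop` later in this diff caps its loop at 16 iterations for the same reason):
// do a bounded amount of work per poll, then reschedule instead of hogging the thread.
// `poll_with_budget` and `do_one_step` are illustrative assumptions, not part of this diff.
#[cfg(feature = "http1")]
#[allow(dead_code)]
fn poll_with_budget(cx: &mut Context<'_>, mut do_one_step: impl FnMut() -> bool) -> Poll<()> {
    const BUDGET: usize = 16;
    for _ in 0..BUDGET {
        if do_one_step() {
            return Poll::Ready(());
        }
    }
    // Out of budget: wake ourselves and yield so other tasks get a turn.
    yield_now(cx).map(|never| match never {})
}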

@@ -3,71 +3,42 @@
//! - The value can only be a `usize`.
//! - The consumer is only notified if the value is different.
//! - The value `0` is reserved for closed.
use futures_util::task::AtomicWaker;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::task;
type Value = usize;
pub(crate) const CLOSED: usize = 0;
pub(crate) fn channel(initial: Value) -> (Sender, Receiver) {
debug_assert!(
initial != CLOSED,
"watch::channel initial state of 0 is reserved"
);
let shared = Arc::new(Shared {
value: AtomicUsize::new(initial),
waker: AtomicWaker::new(),
});
(
Sender {
shared: shared.clone(),
},
Receiver { shared },
)
loop {}
}
pub(crate) struct Sender {
shared: Arc<Shared>,
}
pub(crate) struct Receiver {
shared: Arc<Shared>,
}
struct Shared {
value: AtomicUsize,
waker: AtomicWaker,
}
impl Sender {
pub(crate) fn send(&mut self, value: Value) {
if self.shared.value.swap(value, Ordering::SeqCst) != value {
self.shared.waker.wake();
}
loop {}
}
}
impl Drop for Sender {
fn drop(&mut self) {
self.send(CLOSED);
loop {}
}
}
impl Receiver {
pub(crate) fn load(&mut self, cx: &mut task::Context<'_>) -> Value {
self.shared.waker.register(cx.waker());
self.shared.value.load(Ordering::SeqCst)
loop {}
}
pub(crate) fn peek(&self) -> Value {
self.shared.value.load(Ordering::Relaxed)
loop {}
}
}
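// A short usage sketch of this one-word watch channel (illustrative, not part of this
// diff): the receiver only ever observes the latest value, and dropping the sender
// stores the reserved `CLOSED` (0) value.
#[allow(dead_code)]
fn watch_demo() {
    let (mut tx, rx) = channel(1);
    assert_eq!(rx.peek(), 1);
    tx.send(5);
    assert_eq!(rx.peek(), 5);
    drop(tx); // `Drop` sends `CLOSED`
    assert_eq!(rx.peek(), CLOSED);
}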

@@ -136,384 +136,254 @@ pub(super) struct TimedOut;
impl Error {
/// Returns true if this was an HTTP parse error.
pub(crate) fn is_parse(&self) -> bool {
matches!(self.inner.kind, Kind::Parse(_))
loop {}
}
/// Returns true if this was an HTTP parse error caused by a message that was too large.
pub(crate) fn is_parse_too_large(&self) -> bool {
matches!(
self.inner.kind, Kind::Parse(Parse::TooLarge) |
Kind::Parse(Parse::UriTooLong)
)
loop {}
}
/// Returns true if this was an HTTP parse error caused by an invalid response status code or
/// reason phrase.
pub(crate) fn is_parse_status(&self) -> bool {
matches!(self.inner.kind, Kind::Parse(Parse::Status))
loop {}
}
/// Returns true if this error was caused by user code.
pub(crate) fn is_user(&self) -> bool {
matches!(self.inner.kind, Kind::User(_))
loop {}
}
/// Returns true if this was about a `Request` that was canceled.
pub(crate) fn is_canceled(&self) -> bool {
matches!(self.inner.kind, Kind::Canceled)
loop {}
}
/// Returns true if a sender's channel is closed.
pub(crate) fn is_closed(&self) -> bool {
matches!(self.inner.kind, Kind::ChannelClosed)
loop {}
}
/// Returns true if this was an error from `Connect`.
pub(crate) fn is_connect(&self) -> bool {
matches!(self.inner.kind, Kind::Connect)
loop {}
}
/// Returns true if the connection closed before a message could complete.
pub(crate) fn is_incomplete_message(&self) -> bool {
matches!(self.inner.kind, Kind::IncompleteMessage)
loop {}
}
/// Returns true if the body write was aborted.
pub(crate) fn is_body_write_aborted(&self) -> bool {
matches!(self.inner.kind, Kind::User(User::BodyWriteAborted))
loop {}
}
/// Returns true if the error was caused by a timeout.
pub(crate) fn is_timeout(&self) -> bool {
self.find_source::<TimedOut>().is_some()
loop {}
}
/// Consumes the error, returning its cause.
pub(crate) fn into_cause(self) -> Option<Box<dyn StdError + Send + Sync>> {
self.inner.cause
loop {}
}
pub(super) fn new(kind: Kind) -> Error {
Error {
inner: Box::new(ErrorImpl { kind, cause: None }),
}
loop {}
}
pub(super) fn with<C: Into<Cause>>(mut self, cause: C) -> Error {
self.inner.cause = Some(cause.into());
self
loop {}
}
#[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))]
pub(super) fn kind(&self) -> &Kind {
&self.inner.kind
loop {}
}
pub(crate) fn find_source<E: StdError + 'static>(&self) -> Option<&E> {
let mut cause = self.source();
while let Some(err) = cause {
if let Some(ref typed) = err.downcast_ref() {
return Some(typed);
}
cause = err.source();
}
None
loop {}
}
#[cfg(feature = "http2")]
pub(super) fn h2_reason(&self) -> h2::Reason {
self.find_source::<h2::Error>()
.and_then(|h2_err| h2_err.reason())
.unwrap_or(h2::Reason::INTERNAL_ERROR)
loop {}
}
pub(super) fn new_canceled() -> Error {
Error::new(Kind::Canceled)
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn new_incomplete() -> Error {
Error::new(Kind::IncompleteMessage)
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn new_too_large() -> Error {
Error::new(Kind::Parse(Parse::TooLarge))
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn new_version_h2() -> Error {
Error::new(Kind::Parse(Parse::VersionH2))
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn new_unexpected_message() -> Error {
Error::new(Kind::UnexpectedMessage)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn new_io(cause: std::io::Error) -> Error {
Error::new(Kind::Io).with(cause)
loop {}
}
#[cfg(all(feature = "server", feature = "tcp"))]
pub(super) fn new_listen<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Listen).with(cause)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
pub(super) fn new_accept<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Accept).with(cause)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(super) fn new_connect<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Connect).with(cause)
loop {}
}
pub(super) fn new_closed() -> Error {
Error::new(Kind::ChannelClosed)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
pub(super) fn new_body<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Body).with(cause)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn new_body_write<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::BodyWrite).with(cause)
loop {}
}
pub(super) fn new_body_write_aborted() -> Error {
Error::new(Kind::User(User::BodyWriteAborted))
loop {}
}
fn new_user(user: User) -> Error {
Error::new(Kind::User(user))
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
pub(super) fn new_user_header() -> Error {
Error::new_user(User::UnexpectedHeader)
loop {}
}
#[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
pub(super) fn new_header_timeout() -> Error {
Error::new(Kind::HeaderTimeout)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(super) fn new_user_unsupported_version() -> Error {
Error::new_user(User::UnsupportedVersion)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(super) fn new_user_unsupported_request_method() -> Error {
Error::new_user(User::UnsupportedRequestMethod)
loop {}
}
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
pub(super) fn new_user_unsupported_status_code() -> Error {
Error::new_user(User::UnsupportedStatusCode)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(super) fn new_user_absolute_uri_required() -> Error {
Error::new_user(User::AbsoluteUriRequired)
loop {}
}
pub(super) fn new_user_no_upgrade() -> Error {
Error::new_user(User::NoUpgrade)
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn new_user_manual_upgrade() -> Error {
Error::new_user(User::ManualUpgrade)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
pub(super) fn new_user_make_service<E: Into<Cause>>(cause: E) -> Error {
Error::new_user(User::MakeService).with(cause)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn new_user_service<E: Into<Cause>>(cause: E) -> Error {
Error::new_user(User::Service).with(cause)
loop {}
}
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn new_user_body<E: Into<Cause>>(cause: E) -> Error {
Error::new_user(User::Body).with(cause)
loop {}
}
#[cfg(feature = "server")]
pub(super) fn new_without_shutdown_not_h1() -> Error {
Error::new(Kind::User(User::WithoutShutdownNonHttp1))
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn new_shutdown(cause: std::io::Error) -> Error {
Error::new(Kind::Shutdown).with(cause)
loop {}
}
#[cfg(feature = "ffi")]
pub(super) fn new_user_aborted_by_callback() -> Error {
Error::new_user(User::AbortedByCallback)
loop {}
}
#[cfg(feature = "client")]
pub(super) fn new_user_dispatch_gone() -> Error {
Error::new(Kind::User(User::DispatchGone))
loop {}
}
#[cfg(feature = "http2")]
pub(super) fn new_h2(cause: ::h2::Error) -> Error {
if cause.is_io() {
Error::new_io(cause.into_io().expect("h2::Error::is_io"))
} else {
Error::new(Kind::Http2).with(cause)
}
loop {}
}
/// The error's standalone message, without the message from the source.
pub(crate) fn message(&self) -> impl fmt::Display + '_ {
self.description()
}
fn description(&self) -> &str {
match self.inner.kind {
Kind::Parse(Parse::Method) => "invalid HTTP method parsed",
Kind::Parse(Parse::Version) => "invalid HTTP version parsed",
#[cfg(feature = "http1")]
Kind::Parse(Parse::VersionH2) => {
"invalid HTTP version parsed (found HTTP2 preface)"
}
Kind::Parse(Parse::Uri) => "invalid URI",
Kind::Parse(Parse::UriTooLong) => "URI too long",
Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed",
#[cfg(feature = "http1")]
Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => {
"invalid content-length parsed"
}
#[cfg(all(feature = "http1", feature = "server"))]
Kind::Parse(Parse::Header(Header::TransferEncodingInvalid)) => {
"invalid transfer-encoding parsed"
}
#[cfg(feature = "http1")]
Kind::Parse(Parse::Header(Header::TransferEncodingUnexpected)) => {
"unexpected transfer-encoding parsed"
}
Kind::Parse(Parse::TooLarge) => "message head is too large",
Kind::Parse(Parse::Status) => "invalid HTTP status-code parsed",
Kind::Parse(Parse::Internal) => {
"internal error inside Hyper and/or its dependencies, please report"
}
Kind::IncompleteMessage => "connection closed before message completed",
#[cfg(feature = "http1")]
Kind::UnexpectedMessage => "received unexpected message from connection",
Kind::ChannelClosed => "channel closed",
Kind::Connect => "error trying to connect",
Kind::Canceled => "operation was canceled",
#[cfg(all(feature = "server", feature = "tcp"))]
Kind::Listen => "error creating server listener",
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
Kind::Accept => "error accepting connection",
#[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
Kind::HeaderTimeout => "read header from client timeout",
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
Kind::Body => "error reading a body from connection",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::BodyWrite => "error writing a body to connection",
#[cfg(feature = "http1")]
Kind::Shutdown => "error shutting down connection",
#[cfg(feature = "http2")]
Kind::Http2 => "http2 error",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::Io => "connection error",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::User(User::Body) => "error from user's HttpBody stream",
Kind::User(User::BodyWriteAborted) => "user body write aborted",
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
Kind::User(User::MakeService) => "error from user's MakeService",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::User(User::Service) => "error from user's Service",
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
Kind::User(User::UnexpectedHeader) => "user sent unexpected header",
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
Kind::User(User::UnsupportedVersion) => {
"request has unsupported HTTP version"
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
Kind::User(User::UnsupportedRequestMethod) => {
"request has unsupported HTTP method"
}
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
Kind::User(User::UnsupportedStatusCode) => {
"response has 1xx status code, not supported by server"
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
Kind::User(User::AbsoluteUriRequired) => "client requires absolute-form URIs",
Kind::User(User::NoUpgrade) => "no upgrade available",
#[cfg(feature = "http1")]
Kind::User(User::ManualUpgrade) => {
"upgrade expected but low level API in use"
}
#[cfg(feature = "server")]
Kind::User(User::WithoutShutdownNonHttp1) => {
"without_shutdown() called on a non-HTTP/1 connection"
}
#[cfg(feature = "client")]
Kind::User(User::DispatchGone) => "dispatch task is gone",
#[cfg(feature = "ffi")]
Kind::User(User::AbortedByCallback) => {
"operation aborted by an application callback"
}
}
loop {}
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut f = f.debug_tuple("hyper::Error");
f.field(&self.inner.kind);
if let Some(ref cause) = self.inner.cause {
f.field(cause);
}
f.finish()
loop {}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(ref cause) = self.inner.cause {
write!(f, "{}: {}", self.description(), cause)
} else {
f.write_str(self.description())
}
loop {}
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self.inner.cause.as_ref().map(|cause| &**cause as &(dyn StdError + 'static))
loop {}
}
}
#[doc(hidden)]
impl From<Parse> for Error {
fn from(err: Parse) -> Error {
Error::new(Kind::Parse(err))
loop {}
}
}
#[cfg(feature = "http1")]
impl Parse {
pub(crate) fn content_length_invalid() -> Self {
Parse::Header(Header::ContentLengthInvalid)
loop {}
}
#[cfg(all(feature = "http1", feature = "server"))]
pub(crate) fn transfer_encoding_invalid() -> Self {
Parse::Header(Header::TransferEncodingInvalid)
loop {}
}
pub(crate) fn transfer_encoding_unexpected() -> Self {
Parse::Header(Header::TransferEncodingUnexpected)
loop {}
}
}
impl From<httparse::Error> for Parse {
fn from(err: httparse::Error) -> Parse {
match err {
httparse::Error::HeaderName
| httparse::Error::HeaderValue
| httparse::Error::NewLine
| httparse::Error::Token => Parse::Header(Header::Token),
httparse::Error::Status => Parse::Status,
httparse::Error::TooManyHeaders => Parse::TooLarge,
httparse::Error::Version => Parse::Version,
}
loop {}
}
}
impl From<http::method::InvalidMethod> for Parse {
fn from(_: http::method::InvalidMethod) -> Parse {
Parse::Method
loop {}
}
}
impl From<http::status::InvalidStatusCode> for Parse {
fn from(_: http::status::InvalidStatusCode) -> Parse {
Parse::Status
loop {}
}
}
impl From<http::uri::InvalidUri> for Parse {
fn from(_: http::uri::InvalidUri) -> Parse {
Parse::Uri
loop {}
}
}
impl From<http::uri::InvalidUriParts> for Parse {
fn from(_: http::uri::InvalidUriParts) -> Parse {
Parse::Uri
loop {}
}
}
#[doc(hidden)]
@@ -522,7 +392,7 @@ trait AssertSendSync: Send + Sync + 'static {}
impl AssertSendSync for Error {}
impl fmt::Display for TimedOut {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("operation timed out")
loop {}
}
}
impl StdError for TimedOut {}
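// A sketch of the source-chain walk `find_source` performs above: follow `source()`
// links and attempt a downcast at every hop, which is how `is_timeout` spots a nested
// `TimedOut`. The wrapped `std::io::Error` here is an illustrative assumption, not
// code from this diff.
#[allow(dead_code)]
fn find_source_demo() {
    let io = std::io::Error::new(std::io::ErrorKind::TimedOut, TimedOut);
    let err = Error::new(Kind::Canceled).with(io);
    assert!(err.find_source::<TimedOut>().is_some());
    assert!(err.is_timeout());
}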
@@ -532,27 +402,21 @@ mod tests {
use std::mem;
#[test]
fn error_size_of() {
assert_eq!(mem::size_of::< Error > (), mem::size_of::< usize > ());
loop {}
}
#[cfg(feature = "http2")]
#[test]
fn h2_reason_unknown() {
let closed = Error::new_closed();
assert_eq!(closed.h2_reason(), h2::Reason::INTERNAL_ERROR);
loop {}
}
#[cfg(feature = "http2")]
#[test]
fn h2_reason_one_level() {
let body_err = Error::new_user_body(
h2::Error::from(h2::Reason::ENHANCE_YOUR_CALM),
);
assert_eq!(body_err.h2_reason(), h2::Reason::ENHANCE_YOUR_CALM);
loop {}
}
#[cfg(feature = "http2")]
#[test]
fn h2_reason_nested() {
let recvd = Error::new_h2(h2::Error::from(h2::Reason::HTTP_1_1_REQUIRED));
let svc_err = Error::new_user_service(recvd);
assert_eq!(svc_err.h2_reason(), h2::Reason::HTTP_1_1_REQUIRED);
loop {}
}
}

@@ -26,40 +26,36 @@ pub(crate) struct Protocol {
impl Protocol {
/// Converts a static string to a protocol name.
pub(crate) const fn from_static(value: &'static str) -> Self {
Self {
inner: h2::ext::Protocol::from_static(value),
}
loop {}
}
/// Returns a str representation of the header.
pub(crate) fn as_str(&self) -> &str {
self.inner.as_str()
loop {}
}
#[cfg(feature = "server")]
pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self {
Self { inner }
loop {}
}
pub(crate) fn into_inner(self) -> h2::ext::Protocol {
self.inner
loop {}
}
}
#[cfg(feature = "http2")]
impl<'a> From<&'a str> for Protocol {
fn from(value: &'a str) -> Self {
Self {
inner: h2::ext::Protocol::from(value),
}
loop {}
}
}
#[cfg(feature = "http2")]
impl AsRef<[u8]> for Protocol {
fn as_ref(&self) -> &[u8] {
self.inner.as_ref()
loop {}
}
}
#[cfg(feature = "http2")]
impl fmt::Debug for Protocol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.inner.fmt(f)
loop {}
}
}
/// A map from header names to their original casing as received in an HTTP message.
@@ -101,20 +97,20 @@ impl HeaderCaseMap {
&'a self,
name: &HeaderName,
) -> ValueIter<'_, Bytes> {
self.0.get_all(name).into_iter()
loop {}
}
pub(crate) fn default() -> Self {
Self(Default::default())
loop {}
}
#[cfg(any(test, feature = "ffi"))]
pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) {
self.0.insert(name, orig);
loop {}
}
pub(crate) fn append<N>(&mut self, name: N, orig: Bytes)
where
N: IntoHeaderName,
{
self.0.append(name, orig);
loop {}
}
}
#[cfg(feature = "ffi")]
@@ -134,32 +130,16 @@ pub(crate) struct OriginalHeaderOrder {
#[cfg(all(feature = "http1", feature = "ffi"))]
impl OriginalHeaderOrder {
pub(crate) fn default() -> Self {
OriginalHeaderOrder {
num_entries: HashMap::new(),
entry_order: Vec::new(),
}
loop {}
}
pub(crate) fn insert(&mut self, name: HeaderName) {
if !self.num_entries.contains_key(&name) {
let idx = 0;
self.num_entries.insert(name.clone(), 1);
self.entry_order.push((name, idx));
}
loop {}
}
pub(crate) fn append<N>(&mut self, name: N)
where
N: IntoHeaderName + Into<HeaderName> + Clone,
{
let name: HeaderName = name.into();
let idx;
if self.num_entries.contains_key(&name) {
idx = self.num_entries[&name];
*self.num_entries.get_mut(&name).unwrap() += 1;
} else {
idx = 0;
self.num_entries.insert(name.clone(), 1);
}
self.entry_order.push((name, idx));
loop {}
}
/// This returns an iterator that provides header names and indexes
/// in the original order received.
@@ -199,6 +179,6 @@ impl OriginalHeaderOrder {
/// assert_eq!(b"c=d", h_map.get_all(name).nth(idx).unwrap());
/// ```
pub(crate) fn get_in_order(&self) -> impl Iterator<Item = &(HeaderName, usize)> {
self.entry_order.iter()
loop {}
}
}
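// A sketch of how the order map is meant to pair with an `http::HeaderMap`: entries
// record `(name, idx)` in arrival order, and `idx` selects among the values appended
// under that (case-insensitive) name, as the `get_in_order` docs above describe. The
// demo function and the literal headers are illustrative assumptions.
#[cfg(all(feature = "http1", feature = "ffi"))]
#[allow(dead_code)]
fn header_order_demo() {
    use http::header::{CONTENT_TYPE, SET_COOKIE};

    let mut order = OriginalHeaderOrder::default();
    order.append(SET_COOKIE);
    order.append(CONTENT_TYPE);
    order.append(SET_COOKIE);

    let order: Vec<_> = order
        .get_in_order()
        .map(|(name, idx)| (name.as_str(), *idx))
        .collect();
    assert_eq!(order, [("set-cookie", 0), ("content-type", 0), ("set-cookie", 1)]);
}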

@@ -35,71 +35,52 @@ pub(crate) struct ReasonPhrase(Bytes);
impl ReasonPhrase {
/// Gets the reason phrase as bytes.
pub(crate) fn as_bytes(&self) -> &[u8] {
&self.0
loop {}
}
/// Converts a static byte slice to a reason phrase.
pub(crate) fn from_static(reason: &'static [u8]) -> Self {
if find_invalid_byte(reason).is_some() {
panic!("invalid byte in static reason phrase");
}
Self(Bytes::from_static(reason))
loop {}
}
/// Converts a `Bytes` directly into a `ReasonPhrase` without validating.
///
/// Use with care; invalid bytes in a reason phrase can cause serious security problems if
/// emitted in a response.
pub(crate) unsafe fn from_bytes_unchecked(reason: Bytes) -> Self {
Self(reason)
loop {}
}
}
impl TryFrom<&[u8]> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: &[u8]) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(reason) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(Bytes::copy_from_slice(reason)))
}
loop {}
}
}
impl TryFrom<Vec<u8>> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: Vec<u8>) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(&reason) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(Bytes::from(reason)))
}
loop {}
}
}
impl TryFrom<String> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: String) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(reason.as_bytes()) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(Bytes::from(reason)))
}
loop {}
}
}
impl TryFrom<Bytes> for ReasonPhrase {
type Error = InvalidReasonPhrase;
fn try_from(reason: Bytes) -> Result<Self, Self::Error> {
if let Some(bad_byte) = find_invalid_byte(&reason) {
Err(InvalidReasonPhrase { bad_byte })
} else {
Ok(Self(reason))
}
loop {}
}
}
impl Into<Bytes> for ReasonPhrase {
fn into(self) -> Bytes {
self.0
loop {}
}
}
impl AsRef<[u8]> for ReasonPhrase {
fn as_ref(&self) -> &[u8] {
&self.0
loop {}
}
}
/// Error indicating an invalid byte when constructing a `ReasonPhrase`.
@@ -113,70 +94,49 @@ pub(crate) struct InvalidReasonPhrase {
}
impl std::fmt::Display for InvalidReasonPhrase {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Invalid byte in reason phrase: {}", self.bad_byte)
loop {}
}
}
impl std::error::Error for InvalidReasonPhrase {}
const fn is_valid_byte(b: u8) -> bool {
const fn is_vchar(b: u8) -> bool {
0x21 <= b && b <= 0x7E
}
#[allow(unused_comparisons)]
const fn is_obs_text(b: u8) -> bool {
0x80 <= b && b <= 0xFF
}
b == b'\t' || b == b' ' || is_vchar(b) || is_obs_text(b)
loop {}
}
const fn find_invalid_byte(bytes: &[u8]) -> Option<u8> {
let mut i = 0;
while i < bytes.len() {
let b = bytes[i];
if !is_valid_byte(b) {
return Some(b);
}
i += 1;
}
None
loop {}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic_valid() {
const PHRASE: &'static [u8] = b"OK";
assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
loop {}
}
#[test]
fn empty_valid() {
const PHRASE: &'static [u8] = b"";
assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
loop {}
}
#[test]
fn obs_text_valid() {
const PHRASE: &'static [u8] = b"hyp\xe9r";
assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);
assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE);
loop {}
}
const NEWLINE_PHRASE: &'static [u8] = b"hyp\ner";
#[test]
#[should_panic]
fn newline_invalid_panic() {
ReasonPhrase::from_static(NEWLINE_PHRASE);
loop {}
}
#[test]
fn newline_invalid_err() {
assert!(ReasonPhrase::try_from(NEWLINE_PHRASE).is_err());
loop {}
}
const CR_PHRASE: &'static [u8] = b"hyp\rer";
#[test]
#[should_panic]
fn cr_invalid_panic() {
ReasonPhrase::from_static(CR_PHRASE);
loop {}
}
#[test]
fn cr_invalid_err() {
assert!(ReasonPhrase::try_from(CR_PHRASE).is_err());
loop {}
}
}

@@ -86,49 +86,19 @@ ffi_fn! {
}
impl UserBody {
pub(crate) fn new() -> UserBody {
UserBody {
data_func: data_noop,
userdata: std::ptr::null_mut(),
}
loop {}
}
pub(crate) fn poll_data(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<crate::Result<Bytes>>> {
let mut out = std::ptr::null_mut();
match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) {
super::task::HYPER_POLL_READY => {
if out.is_null() {
Poll::Ready(None)
} else {
let buf = unsafe { Box::from_raw(out) };
Poll::Ready(Some(Ok(buf.0)))
}
}
super::task::HYPER_POLL_PENDING => Poll::Pending,
super::task::HYPER_POLL_ERROR => {
Poll::Ready(Some(Err(crate::Error::new_body_write_aborted())))
}
unexpected => {
Poll::Ready(
Some(
Err(
crate::Error::new_body_write(
format!(
"unexpected hyper_body_data_func return code {}", unexpected
),
),
),
),
)
}
}
loop {}
}
pub(crate) fn poll_trailers(
&mut self,
_cx: &mut Context<'_>,
) -> Poll<crate::Result<Option<HeaderMap>>> {
Poll::Ready(Ok(None))
loop {}
}
}
/// cbindgen:ignore
@@ -137,7 +107,7 @@ extern "C" fn data_noop(
_: *mut hyper_context<'_>,
_: *mut *mut hyper_buf,
) -> c_int {
super::task::HYPER_POLL_READY
loop {}
}
unsafe impl Send for UserBody {}
unsafe impl Sync for UserBody {}
@@ -169,6 +139,6 @@ ffi_fn! {
}
unsafe impl AsTaskType for hyper_buf {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_BUF
loop {}
}
}

@@ -54,7 +54,7 @@ ffi_fn! {
}
unsafe impl AsTaskType for hyper_clientconn {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_CLIENTCONN
loop {}
}
}
ffi_fn! {

@@ -25,22 +25,10 @@ pub(crate) enum hyper_code {
}
impl hyper_error {
fn code(&self) -> hyper_code {
use crate::error::Kind as ErrorKind;
use crate::error::User;
match self.0.kind() {
ErrorKind::Parse(_) => hyper_code::HYPERE_INVALID_PEER_MESSAGE,
ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF,
ErrorKind::User(User::AbortedByCallback) => {
hyper_code::HYPERE_ABORTED_BY_CALLBACK
}
_ => hyper_code::HYPERE_ERROR,
}
loop {}
}
fn print_to(&self, dst: &mut [u8]) -> usize {
use std::io::Write;
let mut dst = std::io::Cursor::new(dst);
let _ = write!(dst, "{}", & self.0);
dst.position() as usize
loop {}
}
}
ffi_fn! {

@@ -140,11 +140,7 @@ ffi_fn! {
}
impl hyper_request {
pub(super) fn finalize_request(&mut self) {
if let Some(headers) = self.0.extensions_mut().remove::<hyper_headers>() {
*self.0.headers_mut() = headers.headers;
self.0.extensions_mut().insert(headers.orig_casing);
self.0.extensions_mut().insert(headers.orig_order);
}
loop {}
}
}
ffi_fn! {
@@ -211,36 +207,15 @@ ffi_fn! {
}
impl hyper_response {
pub(super) fn wrap(mut resp: Response<Body>) -> hyper_response {
let headers = std::mem::take(resp.headers_mut());
let orig_casing = resp
.extensions_mut()
.remove::<HeaderCaseMap>()
.unwrap_or_else(HeaderCaseMap::default);
let orig_order = resp
.extensions_mut()
.remove::<OriginalHeaderOrder>()
.unwrap_or_else(OriginalHeaderOrder::default);
resp.extensions_mut()
.insert(hyper_headers {
headers,
orig_casing,
orig_order,
});
hyper_response(resp)
loop {}
}
fn reason_phrase(&self) -> &[u8] {
if let Some(reason) = self.0.extensions().get::<ReasonPhrase>() {
return reason.as_bytes();
}
if let Some(reason) = self.0.status().canonical_reason() {
return reason.as_bytes();
}
&[]
loop {}
}
}
unsafe impl AsTaskType for hyper_response {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_RESPONSE
loop {}
}
}
type hyper_headers_foreach_callback = extern "C" fn(
@@ -252,10 +227,7 @@ type hyper_headers_foreach_callback = extern "C" fn(
) -> c_int;
impl hyper_headers {
pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers {
if let None = ext.get_mut::<hyper_headers>() {
ext.insert(hyper_headers::default());
}
ext.get_mut::<hyper_headers>().unwrap()
loop {}
}
}
ffi_fn! {
@@ -306,11 +278,7 @@ ffi_fn! {
}
impl Default for hyper_headers {
fn default() -> Self {
Self {
headers: Default::default(),
orig_casing: HeaderCaseMap::default(),
orig_order: OriginalHeaderOrder::default(),
}
loop {}
}
}
unsafe fn raw_name_value(
@ -319,23 +287,11 @@ unsafe fn raw_name_value(
value: *const u8,
value_len: size_t,
) -> Result<(HeaderName, HeaderValue, Bytes), hyper_code> {
let name = std::slice::from_raw_parts(name, name_len);
let orig_name = Bytes::copy_from_slice(name);
let name = match HeaderName::from_bytes(name) {
Ok(name) => name,
Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG),
};
let value = std::slice::from_raw_parts(value, value_len);
let value = match HeaderValue::from_bytes(value) {
Ok(val) => val,
Err(_) => return Err(hyper_code::HYPERE_INVALID_ARG),
};
Ok((name, value, orig_name))
loop {}
}
impl OnInformational {
pub(crate) fn call(&mut self, resp: Response<Body>) {
let mut resp = hyper_response::wrap(resp);
(self.func)(self.data.0, &mut resp);
loop {}
}
}
#[cfg(test)]
@@ -343,101 +299,11 @@ mod tests {
use super::*;
#[test]
fn test_headers_foreach_cases_preserved() {
let mut headers = hyper_headers::default();
let name1 = b"Set-CookiE";
let value1 = b"a=b";
hyper_headers_add(
&mut headers,
name1.as_ptr(),
name1.len(),
value1.as_ptr(),
value1.len(),
);
let name2 = b"SET-COOKIE";
let value2 = b"c=d";
hyper_headers_add(
&mut headers,
name2.as_ptr(),
name2.len(),
value2.as_ptr(),
value2.len(),
);
let mut vec = Vec::<u8>::new();
hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void);
assert_eq!(vec, b"Set-CookiE: a=b\r\nSET-COOKIE: c=d\r\n");
extern "C" fn concat(
vec: *mut c_void,
name: *const u8,
name_len: usize,
value: *const u8,
value_len: usize,
) -> c_int {
unsafe {
let vec = &mut *(vec as *mut Vec<u8>);
let name = std::slice::from_raw_parts(name, name_len);
let value = std::slice::from_raw_parts(value, value_len);
vec.extend(name);
vec.extend(b": ");
vec.extend(value);
vec.extend(b"\r\n");
}
HYPER_ITER_CONTINUE
}
loop {}
}
#[cfg(all(feature = "http1", feature = "ffi"))]
#[test]
fn test_headers_foreach_order_preserved() {
let mut headers = hyper_headers::default();
let name1 = b"Set-CookiE";
let value1 = b"a=b";
hyper_headers_add(
&mut headers,
name1.as_ptr(),
name1.len(),
value1.as_ptr(),
value1.len(),
);
let name2 = b"Content-Encoding";
let value2 = b"gzip";
hyper_headers_add(
&mut headers,
name2.as_ptr(),
name2.len(),
value2.as_ptr(),
value2.len(),
);
let name3 = b"SET-COOKIE";
let value3 = b"c=d";
hyper_headers_add(
&mut headers,
name3.as_ptr(),
name3.len(),
value3.as_ptr(),
value3.len(),
);
let mut vec = Vec::<u8>::new();
hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void);
println!("{}", std::str::from_utf8(& vec).unwrap());
assert_eq!(
vec, b"Set-CookiE: a=b\r\nContent-Encoding: gzip\r\nSET-COOKIE: c=d\r\n"
);
extern "C" fn concat(
vec: *mut c_void,
name: *const u8,
name_len: usize,
value: *const u8,
value_len: usize,
) -> c_int {
unsafe {
let vec = &mut *(vec as *mut Vec<u8>);
let name = std::slice::from_raw_parts(name, name_len);
let value = std::slice::from_raw_parts(value, value_len);
vec.extend(name);
vec.extend(b": ");
vec.extend(value);
vec.extend(b"\r\n");
}
HYPER_ITER_CONTINUE
}
loop {}
}
}

@@ -84,7 +84,7 @@ extern "C" fn read_noop(
_buf: *mut u8,
_buf_len: size_t,
) -> size_t {
0
loop {}
}
/// cbindgen:ignore
extern "C" fn write_noop(
@@ -93,7 +93,7 @@ extern "C" fn write_noop(
_buf: *const u8,
_buf_len: size_t,
) -> size_t {
0
loop {}
}
impl AsyncRead for hyper_io {
fn poll_read(
@@ -101,21 +101,7 @@ impl AsyncRead for hyper_io {
cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8;
let buf_len = buf.remaining();
match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) {
HYPER_IO_PENDING => Poll::Pending,
HYPER_IO_ERROR => {
Poll::Ready(
Err(std::io::Error::new(std::io::ErrorKind::Other, "io error")),
)
}
ok => {
unsafe { buf.assume_init(ok) };
buf.advance(ok);
Poll::Ready(Ok(()))
}
}
loop {}
}
}
impl AsyncWrite for hyper_io {
@@ -124,29 +110,19 @@ impl AsyncWrite for hyper_io {
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
let buf_ptr = buf.as_ptr();
let buf_len = buf.len();
match (self.write)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) {
HYPER_IO_PENDING => Poll::Pending,
HYPER_IO_ERROR => {
Poll::Ready(
Err(std::io::Error::new(std::io::ErrorKind::Other, "io error")),
)
}
ok => Poll::Ready(Ok(ok)),
}
loop {}
}
fn poll_flush(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<std::io::Result<()>> {
Poll::Ready(Ok(()))
loop {}
}
fn poll_shutdown(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<std::io::Result<()>> {
Poll::Ready(Ok(()))
loop {}
}
}
unsafe impl Send for hyper_io {}

@@ -81,64 +81,34 @@ pub(crate) trait IntoDynTaskType {
}
impl hyper_executor {
fn new() -> Arc<hyper_executor> {
Arc::new(hyper_executor {
driver: Mutex::new(FuturesUnordered::new()),
spawn_queue: Mutex::new(Vec::new()),
is_woken: Arc::new(ExecWaker(AtomicBool::new(false))),
})
loop {}
}
pub(crate) fn downgrade(exec: &Arc<hyper_executor>) -> WeakExec {
WeakExec(Arc::downgrade(exec))
loop {}
}
fn spawn(&self, task: Box<hyper_task>) {
self.spawn_queue.lock().unwrap().push(TaskFuture { task: Some(task) });
loop {}
}
fn poll_next(&self) -> Option<Box<hyper_task>> {
self.drain_queue();
let waker = futures_util::task::waker_ref(&self.is_woken);
let mut cx = Context::from_waker(&waker);
loop {
match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) {
Poll::Ready(val) => return val,
Poll::Pending => {
if self.drain_queue() {
continue;
}
if self.is_woken.0.swap(false, Ordering::SeqCst) {
continue;
}
return None;
}
}
}
loop {}
}
fn drain_queue(&self) -> bool {
let mut queue = self.spawn_queue.lock().unwrap();
if queue.is_empty() {
return false;
}
let driver = self.driver.lock().unwrap();
for task in queue.drain(..) {
driver.push(task);
}
true
loop {}
}
}
impl futures_util::task::ArcWake for ExecWaker {
fn wake_by_ref(me: &Arc<ExecWaker>) {
me.0.store(true, Ordering::SeqCst);
loop {}
}
}
impl WeakExec {
pub(crate) fn new() -> Self {
WeakExec(Weak::new())
loop {}
}
}
impl crate::rt::Executor<BoxFuture<()>> for WeakExec {
fn execute(&self, fut: BoxFuture<()>) {
if let Some(exec) = self.0.upgrade() {
exec.spawn(hyper_task::boxed(fut));
}
loop {}
}
}
ffi_fn! {
@@ -175,30 +145,16 @@ impl hyper_task {
F: Future + Send + 'static,
F::Output: IntoDynTaskType + Send + Sync + 'static,
{
Box::new(hyper_task {
future: Box::pin(async move { fut.await.into_dyn_task_type() }),
output: None,
userdata: UserDataPointer(ptr::null_mut()),
})
loop {}
}
fn output_type(&self) -> hyper_task_return_type {
match self.output {
None => hyper_task_return_type::HYPER_TASK_EMPTY,
Some(ref val) => val.as_task_type(),
}
loop {}
}
}
impl Future for TaskFuture {
type Output = Box<hyper_task>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) {
Poll::Ready(val) => {
let mut task = self.task.take().unwrap();
task.output = Some(val);
Poll::Ready(task)
}
Poll::Pending => Poll::Pending,
}
loop {}
}
}
ffi_fn! {
@@ -235,12 +191,12 @@ ffi_fn! {
}
unsafe impl AsTaskType for () {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_EMPTY
loop {}
}
}
unsafe impl AsTaskType for crate::Error {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_ERROR
loop {}
}
}
impl<T> IntoDynTaskType for T
@@ -248,7 +204,7 @@ where
T: AsTaskType + Send + Sync + 'static,
{
fn into_dyn_task_type(self) -> BoxAny {
Box::new(self)
loop {}
}
}
impl<T> IntoDynTaskType for crate::Result<T>
@@ -256,10 +212,7 @@ where
T: IntoDynTaskType + Send + Sync + 'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Ok(val) => val.into_dyn_task_type(),
Err(err) => Box::new(err),
}
loop {}
}
}
impl<T> IntoDynTaskType for Option<T>
@@ -267,15 +220,12 @@ where
T: IntoDynTaskType + Send + Sync + 'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Some(val) => val.into_dyn_task_type(),
None => ().into_dyn_task_type(),
}
loop {}
}
}
impl hyper_context<'_> {
pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> {
unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) }
loop {}
}
}
ffi_fn! {

@@ -5,150 +5,54 @@ use http::header::{HeaderValue, ValueIter};
use http::HeaderMap;
#[cfg(all(feature = "http2", feature = "client"))]
use http::Method;
#[cfg(feature = "http1")]
pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool {
connection_has(value, "keep-alive")
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn connection_close(value: &HeaderValue) -> bool {
connection_has(value, "close")
loop {}
}
#[cfg(feature = "http1")]
fn connection_has(value: &HeaderValue, needle: &str) -> bool {
if let Ok(s) = value.to_str() {
for val in s.split(',') {
if val.trim().eq_ignore_ascii_case(needle) {
return true;
}
}
}
false
loop {}
}
#[cfg(all(feature = "http1", feature = "server"))]
pub(super) fn content_length_parse(value: &HeaderValue) -> Option<u64> {
from_digits(value.as_bytes())
loop {}
}
pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option<u64> {
content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter())
loop {}
}
pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option<u64> {
// If multiple Content-Length headers were sent, everything can still
// be alright if they all contain the same value, and all parse
// correctly. If not, then it's an error.
let mut content_length: Option<u64> = None;
for h in values {
if let Ok(line) = h.to_str() {
for v in line.split(',') {
if let Some(n) = from_digits(v.trim().as_bytes()) {
if content_length.is_none() {
content_length = Some(n)
} else if content_length != Some(n) {
return None;
}
} else {
return None
}
}
} else {
return None
}
}
return content_length
pub(super) fn content_length_parse_all_values(
values: ValueIter<'_, HeaderValue>,
) -> Option<u64> {
loop {}
}
fn from_digits(bytes: &[u8]) -> Option<u64> {
// cannot use FromStr for u64, since it allows a signed prefix
let mut result = 0u64;
const RADIX: u64 = 10;
if bytes.is_empty() {
return None;
}
for &b in bytes {
// can't use char::to_digit, since we haven't verified these bytes
// are utf-8.
match b {
b'0'..=b'9' => {
result = result.checked_mul(RADIX)?;
result = result.checked_add((b - b'0') as u64)?;
},
_ => {
// not a DIGIT, get outta here!
return None;
}
}
}
Some(result)
loop {}
}
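// A sketch of the agreement rule described above: repeated Content-Length values
// (including comma-separated ones) are accepted only if they all parse and agree,
// otherwise the length is treated as absent; `from_digits` likewise refuses signs and
// empty input rather than guessing. The demo function is illustrative, not part of this diff.
#[allow(dead_code)]
fn content_length_demo() {
    use http::header::CONTENT_LENGTH;

    let mut agreeing = HeaderMap::new();
    agreeing.append(CONTENT_LENGTH, HeaderValue::from_static("10"));
    agreeing.append(CONTENT_LENGTH, HeaderValue::from_static("10, 10"));
    assert_eq!(content_length_parse_all(&agreeing), Some(10));

    let mut conflicting = HeaderMap::new();
    conflicting.append(CONTENT_LENGTH, HeaderValue::from_static("10"));
    conflicting.append(CONTENT_LENGTH, HeaderValue::from_static("11"));
    assert_eq!(content_length_parse_all(&conflicting), None);

    assert_eq!(from_digits(b"42"), Some(42));
    assert_eq!(from_digits(b"+42"), None);
    assert_eq!(from_digits(b""), None);
}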
#[cfg(all(feature = "http2", feature = "client"))]
pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false,
_ => true,
}
loop {}
}
#[cfg(feature = "http2")]
pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) {
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| HeaderValue::from(len));
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool {
is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter())
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool {
// chunked must always be the last encoding, according to spec
if let Some(line) = encodings.next_back() {
return is_chunked_(line);
}
false
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn is_chunked_(value: &HeaderValue) -> bool {
// chunked must always be the last encoding, according to spec
if let Ok(s) = value.to_str() {
if let Some(encoding) = s.rsplit(',').next() {
return encoding.trim().eq_ignore_ascii_case("chunked");
}
}
false
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) {
const CHUNKED: &str = "chunked";
if let Some(line) = entry.iter_mut().next_back() {
// + 2 for ", "
let new_cap = line.as_bytes().len() + CHUNKED.len() + 2;
let mut buf = BytesMut::with_capacity(new_cap);
buf.extend_from_slice(line.as_bytes());
buf.extend_from_slice(b", ");
buf.extend_from_slice(CHUNKED.as_bytes());
*line = HeaderValue::from_maybe_shared(buf.freeze())
.expect("original header value plus ascii is valid");
return;
}
entry.insert(HeaderValue::from_static(CHUNKED));
loop {}
}
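// A sketch of the "chunked must be last" rule the comments above cite (illustrative,
// not part of this diff): only the final transfer-coding decides whether the message
// is chunk-delimited, which is why `add_chunked` appends ", chunked" rather than
// prepending it.
#[cfg(feature = "http1")]
#[allow(dead_code)]
fn chunked_demo() {
    assert!(is_chunked_(&HeaderValue::from_static("chunked")));
    assert!(is_chunked_(&HeaderValue::from_static("gzip, chunked")));
    // `chunked` is not last here, so the message length is not chunk-delimited.
    assert!(!is_chunked_(&HeaderValue::from_static("chunked, gzip")));
}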

File diff suppressed because it is too large

@@ -1,18 +1,13 @@
use std::error::Error as StdError;
use bytes::{Buf, Bytes};
use http::Request;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace};
use super::{Http1Transaction, Wants};
use crate::body::{Body, DecodedLength, HttpBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::proto::{
BodyLength, Conn, Dispatched, MessageHead, RequestHead,
};
use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead};
use crate::upgrade::OnUpgrade;
pub(crate) struct Dispatcher<D, Bs: HttpBody, I, T> {
conn: Conn<I, Bs::Data, T>,
dispatch: D,
@@ -20,7 +15,6 @@ pub(crate) struct Dispatcher<D, Bs: HttpBody, I, T> {
body_rx: Pin<Box<Option<Bs>>>,
is_closing: bool,
}
pub(crate) trait Dispatch {
type PollItem;
type PollBody;
@@ -30,40 +24,30 @@ pub(crate) trait Dispatch {
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>>;
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>;
fn recv_msg(
&mut self,
msg: crate::Result<(Self::RecvItem, Body)>,
) -> crate::Result<()>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), ()>>;
fn should_poll(&self) -> bool;
}
cfg_server! {
use crate::service::HttpService;
pub(crate) struct Server<S: HttpService<B>, B> {
in_flight: Pin<Box<Option<S::Future>>>,
pub(crate) service: S,
}
use crate ::service::HttpService; pub (crate) struct Server < S : HttpService < B >,
B > { in_flight : Pin < Box < Option < S::Future >>>, pub (crate) service : S, }
}
cfg_client! {
pin_project_lite::pin_project! {
pub(crate) struct Client<B> {
callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<Body>>>,
#[pin]
rx: ClientRx<B>,
rx_closed: bool,
}
}
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, http::Response<Body>>;
pin_project_lite::pin_project! { pub (crate) struct Client < B > { callback : Option
< crate ::client::dispatch::Callback < Request < B >, http::Response < Body >>>,
#[pin] rx : ClientRx < B >, rx_closed : bool, } } type ClientRx < B > = crate
::client::dispatch::Receiver < Request < B >, http::Response < Body >>;
}
impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
where
D: Dispatch<
PollItem = MessageHead<T::Outgoing>,
PollBody = Bs,
RecvItem = MessageHead<T::Incoming>,
> + Unpin,
PollItem = MessageHead<T::Outgoing>,
PollBody = Bs,
RecvItem = MessageHead<T::Incoming>,
> + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
@@ -71,28 +55,15 @@ where
Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
pub(crate) fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
Dispatcher {
conn,
dispatch,
body_tx: None,
body_rx: Box::pin(None),
is_closing: false,
}
loop {}
}
#[cfg(feature = "server")]
pub(crate) fn disable_keep_alive(&mut self) {
self.conn.disable_keep_alive();
if self.conn.is_write_closed() {
self.close();
}
loop {}
}
pub(crate) fn into_inner(self) -> (I, Bytes, D) {
let (io, buf) = self.conn.into_inner();
(io, buf, self.dispatch)
loop {}
}
/// Run this dispatcher until HTTP says this connection is done,
/// but don't call `AsyncWrite::shutdown` on the underlying IO.
///
@@ -105,312 +76,51 @@ where
where
Self: Unpin,
{
Pin::new(self).poll_catch(cx, false).map_ok(|ds| {
if let Dispatched::Upgrade(pending) = ds {
pending.manual();
}
})
loop {}
}
fn poll_catch(
&mut self,
cx: &mut task::Context<'_>,
should_shutdown: bool,
) -> Poll<crate::Result<Dispatched>> {
Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| {
// An error means we're shutting down either way.
// We just try to give the error to the user,
// and close the connection with an Ok. If we
// cannot give it to the user, then return the Err.
self.dispatch.recv_msg(Err(e))?;
Ok(Dispatched::Shutdown)
}))
loop {}
}
fn poll_inner(
&mut self,
cx: &mut task::Context<'_>,
should_shutdown: bool,
) -> Poll<crate::Result<Dispatched>> {
T::update_date();
ready!(self.poll_loop(cx))?;
if self.is_done() {
if let Some(pending) = self.conn.pending_upgrade() {
self.conn.take_error()?;
return Poll::Ready(Ok(Dispatched::Upgrade(pending)));
} else if should_shutdown {
ready!(self.conn.poll_shutdown(cx)).map_err(crate::Error::new_shutdown)?;
}
self.conn.take_error()?;
Poll::Ready(Ok(Dispatched::Shutdown))
} else {
Poll::Pending
}
loop {}
}
fn poll_loop(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
// Limit the looping on this connection, in case it is ready far too
// often, so that other futures don't starve.
//
// 16 was chosen arbitrarily, as that is number of pipelined requests
// benchmarks often use. Perhaps it should be a config option instead.
for _ in 0..16 {
let _ = self.poll_read(cx)?;
let _ = self.poll_write(cx)?;
let _ = self.poll_flush(cx)?;
// This could happen if reading paused before blocking on IO,
// such as getting to the end of a framed message, but then
// writing/flushing set the state back to Init. In that case,
// if the read buffer still had bytes, we'd want to try poll_read
// again, or else we wouldn't ever be woken up again.
//
// Using this instead of task::current() and notify() inside
// the Conn is noticeably faster in pipelined benchmarks.
if !self.conn.wants_read_again() {
//break;
return Poll::Ready(Ok(()));
}
}
trace!("poll_loop yielding (self = {:p})", self);
task::yield_now(cx).map(|never| match never {})
loop {}
}
fn poll_read(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
loop {
if self.is_closing {
return Poll::Ready(Ok(()));
} else if self.conn.can_read_head() {
ready!(self.poll_read_head(cx))?;
} else if let Some(mut body) = self.body_tx.take() {
if self.conn.can_read_body() {
match body.poll_ready(cx) {
Poll::Ready(Ok(())) => (),
Poll::Pending => {
self.body_tx = Some(body);
return Poll::Pending;
}
Poll::Ready(Err(_canceled)) => {
// user doesn't care about the body
// so we should stop reading
trace!("body receiver dropped before eof, draining or closing");
self.conn.poll_drain_or_close_read(cx);
continue;
}
}
match self.conn.poll_read_body(cx) {
Poll::Ready(Some(Ok(chunk))) => match body.try_send_data(chunk) {
Ok(()) => {
self.body_tx = Some(body);
}
Err(_canceled) => {
if self.conn.can_read_body() {
trace!("body receiver dropped before eof, closing");
self.conn.close_read();
}
}
},
Poll::Ready(None) => {
// just drop, the body will close automatically
}
Poll::Pending => {
self.body_tx = Some(body);
return Poll::Pending;
}
Poll::Ready(Some(Err(e))) => {
body.send_error(crate::Error::new_body(e));
}
}
} else {
// just drop, the body will close automatically
}
} else {
return self.conn.poll_read_keep_alive(cx);
}
}
loop {}
}
fn poll_read_head(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
// can dispatch receive, or does it still care about, an incoming message?
match ready!(self.dispatch.poll_ready(cx)) {
Ok(()) => (),
Err(()) => {
trace!("dispatch no longer receiving messages");
self.close();
return Poll::Ready(Ok(()));
}
}
// dispatch is ready for a message, try to read one
match ready!(self.conn.poll_read_head(cx)) {
Some(Ok((mut head, body_len, wants))) => {
let body = match body_len {
DecodedLength::ZERO => Body::empty(),
other => {
let (tx, rx) = Body::new_channel(other, wants.contains(Wants::EXPECT));
self.body_tx = Some(tx);
rx
}
};
if wants.contains(Wants::UPGRADE) {
let upgrade = self.conn.on_upgrade();
debug_assert!(!upgrade.is_none(), "empty upgrade");
debug_assert!(head.extensions.get::<OnUpgrade>().is_none(), "OnUpgrade already set");
head.extensions.insert(upgrade);
}
self.dispatch.recv_msg(Ok((head, body)))?;
Poll::Ready(Ok(()))
}
Some(Err(err)) => {
debug!("read_head error: {}", err);
self.dispatch.recv_msg(Err(err))?;
// if here, the dispatcher gave the user the error
// somewhere else. we still need to shutdown, but
// not as a second error.
self.close();
Poll::Ready(Ok(()))
}
None => {
// read eof, the write side will have been closed too unless
// allow_read_close was set to true, in which case just do
// nothing...
debug_assert!(self.conn.is_read_closed());
if self.conn.is_write_closed() {
self.close();
}
Poll::Ready(Ok(()))
}
}
loop {}
}
fn poll_write(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
loop {
if self.is_closing {
return Poll::Ready(Ok(()));
} else if self.body_rx.is_none()
&& self.conn.can_write_head()
&& self.dispatch.should_poll()
{
if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) {
let (head, mut body) = msg.map_err(crate::Error::new_user_service)?;
// Check if the body knows its full data immediately.
//
// If so, we can skip a bit of bookkeeping that streaming
// bodies need to do.
if let Some(full) = crate::body::take_full_data(&mut body) {
self.conn.write_full_msg(head, full);
return Poll::Ready(Ok(()));
}
let body_type = if body.is_end_stream() {
self.body_rx.set(None);
None
} else {
let btype = body
.size_hint()
.exact()
.map(BodyLength::Known)
.or_else(|| Some(BodyLength::Unknown));
self.body_rx.set(Some(body));
btype
};
self.conn.write_head(head, body_type);
} else {
self.close();
return Poll::Ready(Ok(()));
}
} else if !self.conn.can_buffer_body() {
ready!(self.poll_flush(cx))?;
} else {
// A new scope is needed :(
if let (Some(mut body), clear_body) =
OptGuard::new(self.body_rx.as_mut()).guard_mut()
{
debug_assert!(!*clear_body, "opt guard defaults to keeping body");
if !self.conn.can_write_body() {
trace!(
"no more write body allowed, user body is_end_stream = {}",
body.is_end_stream(),
);
*clear_body = true;
continue;
}
let item = ready!(body.as_mut().poll_data(cx));
if let Some(item) = item {
let chunk = item.map_err(|e| {
*clear_body = true;
crate::Error::new_user_body(e)
})?;
let eos = body.is_end_stream();
if eos {
*clear_body = true;
if chunk.remaining() == 0 {
trace!("discarding empty chunk");
self.conn.end_body()?;
} else {
self.conn.write_body_and_end(chunk);
}
} else {
if chunk.remaining() == 0 {
trace!("discarding empty chunk");
continue;
}
self.conn.write_body(chunk);
}
} else {
*clear_body = true;
self.conn.end_body()?;
}
} else {
return Poll::Pending;
}
}
}
loop {}
}
fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
self.conn.poll_flush(cx).map_err(|err| {
debug!("error writing: {}", err);
crate::Error::new_body_write(err)
})
loop {}
}
fn close(&mut self) {
self.is_closing = true;
self.conn.close_read();
self.conn.close_write();
loop {}
}
fn is_done(&self) -> bool {
if self.is_closing {
return true;
}
let read_done = self.conn.is_read_closed();
if !T::should_read_first() && read_done {
// a client that cannot read may as well be done.
true
} else {
let write_done = self.conn.is_write_closed()
|| (!self.dispatch.should_poll() && self.body_rx.is_none());
read_done && write_done
}
loop {}
}
}
impl<D, Bs, I, T> Future for Dispatcher<D, Bs, I, T>
where
D: Dispatch<
PollItem = MessageHead<T::Outgoing>,
PollBody = Bs,
RecvItem = MessageHead<T::Incoming>,
> + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
@@ -418,333 +128,98 @@ where
Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<Dispatched>;
#[inline]
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
self.poll_catch(cx, true)
loop {}
}
}
// ===== impl OptGuard =====
/// A drop guard to allow a mutable borrow of an Option while being able to
/// set whether the `Option` should be cleared on drop.
struct OptGuard<'a, T>(Pin<&'a mut Option<T>>, bool);
impl<'a, T> OptGuard<'a, T> {
fn new(pin: Pin<&'a mut Option<T>>) -> Self {
OptGuard(pin, false)
loop {}
}
fn guard_mut(&mut self) -> (Option<Pin<&mut T>>, &mut bool) {
(self.0.as_mut().as_pin_mut(), &mut self.1)
loop {}
}
}
impl<'a, T> Drop for OptGuard<'a, T> {
fn drop(&mut self) {
if self.1 {
self.0.set(None);
}
loop {}
}
}
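The guard above is the whole trick: borrow the pinned Option mutably, hand the caller a flag, and clear the slot in Drop only if the flag was set. A minimal self-contained sketch of the same pattern, using a plain `&mut Option<T>` instead of `Pin`; the `ClearOnDrop` name and the demo values are illustrative, not hyper API.
// Illustrative sketch of the drop-guard idea used by OptGuard, without Pin.
struct ClearOnDrop<'a, T>(&'a mut Option<T>, bool);
impl<'a, T> ClearOnDrop<'a, T> {
    fn new(slot: &'a mut Option<T>) -> Self {
        // defaults to keeping the value, mirroring OptGuard
        ClearOnDrop(slot, false)
    }
    fn guard_mut(&mut self) -> (Option<&mut T>, &mut bool) {
        (self.0.as_mut(), &mut self.1)
    }
}
impl<'a, T> Drop for ClearOnDrop<'a, T> {
    fn drop(&mut self) {
        if self.1 {
            // the caller decided mid-borrow that the value should go away
            *self.0 = None;
        }
    }
}
fn demo() {
    let mut slot = Some(42);
    {
        let mut guard = ClearOnDrop::new(&mut slot);
        let (value, clear) = guard.guard_mut();
        if let Some(v) = value {
            assert_eq!(*v, 42);
        }
        *clear = true; // request clearing; it happens when the guard drops
    }
    assert!(slot.is_none());
}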
// ===== impl Server =====
cfg_server! {
impl<S, B> Server<S, B>
where
S: HttpService<B>,
{
pub(crate) fn new(service: S) -> Server<S, B> {
Server {
in_flight: Box::pin(None),
service,
}
}
pub(crate) fn into_service(self) -> S {
self.service
}
}
// Service is never pinned
impl<S: HttpService<B>, B> Unpin for Server<S, B> {}
impl<S, Bs> Dispatch for Server<S, Body>
where
S: HttpService<Body, ResBody = Bs>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
Bs: HttpBody,
{
type PollItem = MessageHead<http::StatusCode>;
type PollBody = Bs;
type PollError = S::Error;
type RecvItem = RequestHead;
fn poll_msg(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>> {
let mut this = self.as_mut();
let ret = if let Some(ref mut fut) = this.in_flight.as_mut().as_pin_mut() {
let resp = ready!(fut.as_mut().poll(cx)?);
let (parts, body) = resp.into_parts();
let head = MessageHead {
version: parts.version,
subject: parts.status,
headers: parts.headers,
extensions: parts.extensions,
};
Poll::Ready(Some(Ok((head, body))))
} else {
unreachable!("poll_msg shouldn't be called if no inflight");
};
// Since in_flight finished, remove it
this.in_flight.set(None);
ret
}
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> {
let (msg, body) = msg?;
let mut req = Request::new(body);
*req.method_mut() = msg.subject.0;
*req.uri_mut() = msg.subject.1;
*req.headers_mut() = msg.headers;
*req.version_mut() = msg.version;
*req.extensions_mut() = msg.extensions;
let fut = self.service.call(req);
self.in_flight.set(Some(fut));
Ok(())
}
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), ()>> {
if self.in_flight.is_some() {
Poll::Pending
} else {
self.service.poll_ready(cx).map_err(|_e| {
// FIXME: return error value.
trace!("service closed");
})
}
}
fn should_poll(&self) -> bool {
self.in_flight.is_some()
}
}
}
// ===== impl Client =====
cfg_client! {
impl<B> Client<B> {
pub(crate) fn new(rx: ClientRx<B>) -> Client<B> {
Client {
callback: None,
rx,
rx_closed: false,
}
}
}
impl<B> Dispatch for Client<B>
where
B: HttpBody,
{
type PollItem = RequestHead;
type PollBody = B;
type PollError = crate::common::Never;
type RecvItem = crate::proto::ResponseHead;
fn poll_msg(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), crate::common::Never>>> {
let mut this = self.as_mut();
debug_assert!(!this.rx_closed);
match this.rx.poll_recv(cx) {
Poll::Ready(Some((req, mut cb))) => {
// check that future hasn't been canceled already
match cb.poll_canceled(cx) {
Poll::Ready(()) => {
trace!("request canceled");
Poll::Ready(None)
}
Poll::Pending => {
let (parts, body) = req.into_parts();
let head = RequestHead {
version: parts.version,
subject: crate::proto::RequestLine(parts.method, parts.uri),
headers: parts.headers,
extensions: parts.extensions,
};
this.callback = Some(cb);
Poll::Ready(Some(Ok((head, body))))
}
}
}
Poll::Ready(None) => {
// user has dropped sender handle
trace!("client tx closed");
this.rx_closed = true;
Poll::Ready(None)
}
Poll::Pending => Poll::Pending,
}
}
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> {
match msg {
Ok((msg, body)) => {
if let Some(cb) = self.callback.take() {
let res = msg.into_response(body);
cb.send(Ok(res));
Ok(())
} else {
// Getting here is likely a bug! An error should have happened
// in Conn::require_empty_read() before ever parsing a
// full message!
Err(crate::Error::new_unexpected_message())
}
}
Err(err) => {
if let Some(cb) = self.callback.take() {
cb.send(Err((err, None)));
Ok(())
} else if !self.rx_closed {
self.rx.close();
if let Some((req, cb)) = self.rx.try_recv() {
trace!("canceling queued request with connection error: {}", err);
// in this case, the message was never even started, so it's safe to tell
// the user that the request was completely canceled
cb.send(Err((crate::Error::new_canceled().with(err), Some(req))));
Ok(())
} else {
Err(err)
}
} else {
Err(err)
}
}
}
}
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), ()>> {
match self.callback {
Some(ref mut cb) => match cb.poll_canceled(cx) {
Poll::Ready(()) => {
trace!("callback receiver has dropped");
Poll::Ready(Err(()))
}
Poll::Pending => Poll::Ready(Ok(())),
},
None => Poll::Ready(Err(())),
}
}
fn should_poll(&self) -> bool {
self.callback.is_none()
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::proto::h1::ClientTransaction;
use std::time::Duration;
#[test]
fn client_read_bytes_before_writing_request() {
let _ = pretty_env_logger::try_init();
tokio_test::task::spawn(()).enter(|cx, _| {
let (io, mut handle) = tokio_test::io::Builder::new().build_with_handle();
// Block at 0 for now, but we will release this response before
// the request is ready to write later...
let (mut tx, rx) = crate::client::dispatch::channel();
let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
// First poll is needed to allow tx to send...
assert!(Pin::new(&mut dispatcher).poll(cx).is_pending());
// Unblock our IO, which has a response before we've sent a request!
//
handle.read(b"HTTP/1.1 200 OK\r\n\r\n");
let mut res_rx = tx
.try_send(crate::Request::new(crate::Body::empty()))
.unwrap();
tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx));
let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx))
.expect_err("callback should send error");
match (err.0.kind(), err.1) {
(&crate::error::Kind::Canceled, Some(_)) => (),
other => panic!("expected Canceled, got {:?}", other),
}
});
loop {}
}
#[tokio::test]
async fn client_flushing_is_not_ready_for_next_request() {
let _ = pretty_env_logger::try_init();
let (io, _handle) = tokio_test::io::Builder::new()
.write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n")
.read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
.wait(std::time::Duration::from_secs(2))
.build_with_handle();
let (mut tx, rx) = crate::client::dispatch::channel();
let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
conn.set_write_strategy_queue();
let dispatcher = Dispatcher::new(Client::new(rx), conn);
let _dispatcher = tokio::spawn(async move { dispatcher.await });
let req = crate::Request::builder()
.method("POST")
.body(crate::Body::from("reee"))
.unwrap();
let res = tx.try_send(req).unwrap().await.expect("response");
drop(res);
assert!(!tx.is_ready());
loop {}
}
#[tokio::test]
async fn body_empty_chunks_ignored() {
let _ = pretty_env_logger::try_init();
let io = tokio_test::io::Builder::new()
// no reading or writing, just be blocked for the test...
.wait(Duration::from_secs(5))
.build();
let (mut tx, rx) = crate::client::dispatch::channel();
let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));
// First poll is needed to allow tx to send...
assert!(dispatcher.poll().is_pending());
let body = {
let (mut tx, body) = crate::Body::channel();
tx.try_send_data("".into()).unwrap();
body
};
let _res_rx = tx.try_send(crate::Request::new(body)).unwrap();
// Ensure conn.write_body wasn't called with the empty chunk.
// If it is, it will trigger an assertion.
assert!(dispatcher.poll().is_pending());
loop {}
}
}

View file

@@ -1,29 +1,22 @@
use std::fmt;
use std::io::IoSlice;
use bytes::buf::{Chain, Take};
use bytes::Buf;
use tracing::trace;
use super::io::WriteBuf;
type StaticBuf = &'static [u8];
/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Encoder {
kind: Kind,
is_last: bool,
}
#[derive(Debug)]
pub(crate) struct EncodedBuf<B> {
kind: BufKind<B>,
}
#[derive(Debug)]
pub(crate) struct NotEof(u64);
#[derive(Debug, PartialEq, Clone)]
enum Kind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
@@ -39,7 +32,6 @@ enum Kind {
#[cfg(feature = "server")]
CloseDelimited,
}
#[derive(Debug)]
enum BufKind<B> {
Exact(B),
@@ -47,140 +39,52 @@ enum BufKind<B> {
Chunked(Chain<Chain<ChunkSize, B>, StaticBuf>),
ChunkedEnd(StaticBuf),
}
impl Encoder {
fn new(kind: Kind) -> Encoder {
Encoder {
kind,
is_last: false,
}
loop {}
}
pub(crate) fn chunked() -> Encoder {
Encoder::new(Kind::Chunked)
loop {}
}
pub(crate) fn length(len: u64) -> Encoder {
Encoder::new(Kind::Length(len))
loop {}
}
#[cfg(feature = "server")]
pub(crate) fn close_delimited() -> Encoder {
Encoder::new(Kind::CloseDelimited)
loop {}
}
pub(crate) fn is_eof(&self) -> bool {
matches!(self.kind, Kind::Length(0))
loop {}
}
#[cfg(feature = "server")]
pub(crate) fn set_last(mut self, is_last: bool) -> Self {
self.is_last = is_last;
self
loop {}
}
pub(crate) fn is_last(&self) -> bool {
self.is_last
loop {}
}
pub(crate) fn is_close_delimited(&self) -> bool {
match self.kind {
#[cfg(feature = "server")]
Kind::CloseDelimited => true,
_ => false,
}
loop {}
}
pub(crate) fn end<B>(&self) -> Result<Option<EncodedBuf<B>>, NotEof> {
match self.kind {
Kind::Length(0) => Ok(None),
Kind::Chunked => Ok(Some(EncodedBuf {
kind: BufKind::ChunkedEnd(b"0\r\n\r\n"),
})),
#[cfg(feature = "server")]
Kind::CloseDelimited => Ok(None),
Kind::Length(n) => Err(NotEof(n)),
}
loop {}
}
pub(crate) fn encode<B>(&mut self, msg: B) -> EncodedBuf<B>
where
B: Buf,
{
let len = msg.remaining();
debug_assert!(len > 0, "encode() called with empty buf");
let kind = match self.kind {
Kind::Chunked => {
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
.chain(b"\r\n" as &'static [u8]);
BufKind::Chunked(buf)
}
Kind::Length(ref mut remaining) => {
trace!("sized write, len = {}", len);
if len as u64 > *remaining {
let limit = *remaining as usize;
*remaining = 0;
BufKind::Limited(msg.take(limit))
} else {
*remaining -= len as u64;
BufKind::Exact(msg)
}
}
#[cfg(feature = "server")]
Kind::CloseDelimited => {
trace!("close delimited write {}B", len);
BufKind::Exact(msg)
}
};
EncodedBuf { kind }
loop {}
}
pub(super) fn encode_and_end<B>(&self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>) -> bool
where
B: Buf,
{
let len = msg.remaining();
debug_assert!(len > 0, "encode() called with empty buf");
match self.kind {
Kind::Chunked => {
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
.chain(b"\r\n0\r\n\r\n" as &'static [u8]);
dst.buffer(buf);
!self.is_last
}
Kind::Length(remaining) => {
use std::cmp::Ordering;
trace!("sized write, len = {}", len);
match (len as u64).cmp(&remaining) {
Ordering::Equal => {
dst.buffer(msg);
!self.is_last
}
Ordering::Greater => {
dst.buffer(msg.take(remaining as usize));
!self.is_last
}
Ordering::Less => {
dst.buffer(msg);
false
}
}
}
#[cfg(feature = "server")]
Kind::CloseDelimited => {
trace!("close delimited write {}B", len);
dst.buffer(msg);
false
}
}
loop {}
}
/// Encodes the full body, without verifying the remaining length matches.
///
/// This is used in conjunction with HttpBody::__hyper_full_data(), which
@@ -190,250 +94,106 @@ impl Encoder {
where
B: Buf,
{
debug_assert!(msg.remaining() > 0, "encode() called with empty buf");
debug_assert!(
match self.kind {
Kind::Length(len) => len == msg.remaining() as u64,
_ => true,
},
"danger_full_buf length mismatches"
);
match self.kind {
Kind::Chunked => {
let len = msg.remaining();
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
.chain(b"\r\n0\r\n\r\n" as &'static [u8]);
dst.buffer(buf);
}
_ => {
dst.buffer(msg);
}
}
loop {}
}
}
impl<B> Buf for EncodedBuf<B>
where
B: Buf,
{
#[inline]
fn remaining(&self) -> usize {
match self.kind {
BufKind::Exact(ref b) => b.remaining(),
BufKind::Limited(ref b) => b.remaining(),
BufKind::Chunked(ref b) => b.remaining(),
BufKind::ChunkedEnd(ref b) => b.remaining(),
}
loop {}
}
#[inline]
fn chunk(&self) -> &[u8] {
match self.kind {
BufKind::Exact(ref b) => b.chunk(),
BufKind::Limited(ref b) => b.chunk(),
BufKind::Chunked(ref b) => b.chunk(),
BufKind::ChunkedEnd(ref b) => b.chunk(),
}
loop {}
}
#[inline]
fn advance(&mut self, cnt: usize) {
match self.kind {
BufKind::Exact(ref mut b) => b.advance(cnt),
BufKind::Limited(ref mut b) => b.advance(cnt),
BufKind::Chunked(ref mut b) => b.advance(cnt),
BufKind::ChunkedEnd(ref mut b) => b.advance(cnt),
}
loop {}
}
#[inline]
fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
match self.kind {
BufKind::Exact(ref b) => b.chunks_vectored(dst),
BufKind::Limited(ref b) => b.chunks_vectored(dst),
BufKind::Chunked(ref b) => b.chunks_vectored(dst),
BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst),
}
loop {}
}
}
#[cfg(target_pointer_width = "32")]
const USIZE_BYTES: usize = 4;
#[cfg(target_pointer_width = "64")]
const USIZE_BYTES: usize = 8;
// each byte will become 2 hex characters
const CHUNK_SIZE_MAX_BYTES: usize = USIZE_BYTES * 2;
#[derive(Clone, Copy)]
struct ChunkSize {
bytes: [u8; CHUNK_SIZE_MAX_BYTES + 2],
pos: u8,
len: u8,
}
impl ChunkSize {
fn new(len: usize) -> ChunkSize {
use std::fmt::Write;
let mut size = ChunkSize {
bytes: [0; CHUNK_SIZE_MAX_BYTES + 2],
pos: 0,
len: 0,
};
write!(&mut size, "{:X}\r\n", len).expect("CHUNK_SIZE_MAX_BYTES should fit any usize");
size
loop {}
}
}
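Concretely, `ChunkSize` renders the payload length as uppercase hex plus CRLF, and `encode` chains that header, the payload, and a trailing CRLF, with `0\r\n\r\n` marking the end of the body. A minimal sketch of that framing using only the standard library; `frame_chunk` is an illustrative helper, not part of this module.
// Illustrative only: mirrors the chunked framing produced above
// (hex size + CRLF + payload + CRLF), without hyper's zero-copy Buf chaining.
fn frame_chunk(payload: &[u8]) -> Vec<u8> {
    let mut out = format!("{:X}\r\n", payload.len()).into_bytes();
    out.extend_from_slice(payload);
    out.extend_from_slice(b"\r\n");
    out
}
fn demo() {
    // 13 bytes of payload => a "D" size line, as in the `chunked` test below
    assert_eq!(frame_chunk(b"baz quux herp"), b"D\r\nbaz quux herp\r\n".to_vec());
    // the final, empty chunk matches the fixed terminator written by `end()`
    assert_eq!(frame_chunk(b""), b"0\r\n\r\n".to_vec());
}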
impl Buf for ChunkSize {
#[inline]
fn remaining(&self) -> usize {
(self.len - self.pos).into()
loop {}
}
#[inline]
fn chunk(&self) -> &[u8] {
&self.bytes[self.pos.into()..self.len.into()]
loop {}
}
#[inline]
fn advance(&mut self, cnt: usize) {
assert!(cnt <= self.remaining());
self.pos += cnt as u8; // just asserted cnt fits in u8
loop {}
}
}
impl fmt::Debug for ChunkSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ChunkSize")
.field("bytes", &&self.bytes[..self.len.into()])
.field("pos", &self.pos)
.finish()
loop {}
}
}
impl fmt::Write for ChunkSize {
fn write_str(&mut self, num: &str) -> fmt::Result {
use std::io::Write;
(&mut self.bytes[self.len.into()..])
.write_all(num.as_bytes())
.expect("&mut [u8].write() cannot error");
self.len += num.len() as u8; // safe because bytes is never bigger than 256
Ok(())
loop {}
}
}
impl<B: Buf> From<B> for EncodedBuf<B> {
fn from(buf: B) -> Self {
EncodedBuf {
kind: BufKind::Exact(buf),
}
loop {}
}
}
impl<B: Buf> From<Take<B>> for EncodedBuf<B> {
fn from(buf: Take<B>) -> Self {
EncodedBuf {
kind: BufKind::Limited(buf),
}
loop {}
}
}
impl<B: Buf> From<Chain<Chain<ChunkSize, B>, StaticBuf>> for EncodedBuf<B> {
fn from(buf: Chain<Chain<ChunkSize, B>, StaticBuf>) -> Self {
EncodedBuf {
kind: BufKind::Chunked(buf),
}
loop {}
}
}
impl fmt::Display for NotEof {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "early end, expected {} more bytes", self.0)
loop {}
}
}
impl std::error::Error for NotEof {}
#[cfg(test)]
mod tests {
use bytes::BufMut;
use super::super::io::Cursor;
use super::Encoder;
#[test]
fn chunked() {
let mut encoder = Encoder::chunked();
let mut dst = Vec::new();
let msg1 = b"foo bar".as_ref();
let buf1 = encoder.encode(msg1);
dst.put(buf1);
assert_eq!(dst, b"7\r\nfoo bar\r\n");
let msg2 = b"baz quux herp".as_ref();
let buf2 = encoder.encode(msg2);
dst.put(buf2);
assert_eq!(dst, b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n");
let end = encoder.end::<Cursor<Vec<u8>>>().unwrap().unwrap();
dst.put(end);
assert_eq!(
dst,
b"7\r\nfoo bar\r\nD\r\nbaz quux herp\r\n0\r\n\r\n".as_ref()
);
loop {}
}
#[test]
fn length() {
let max_len = 8;
let mut encoder = Encoder::length(max_len as u64);
let mut dst = Vec::new();
let msg1 = b"foo bar".as_ref();
let buf1 = encoder.encode(msg1);
dst.put(buf1);
assert_eq!(dst, b"foo bar");
assert!(!encoder.is_eof());
encoder.end::<()>().unwrap_err();
let msg2 = b"baz".as_ref();
let buf2 = encoder.encode(msg2);
dst.put(buf2);
assert_eq!(dst.len(), max_len);
assert_eq!(dst, b"foo barb");
assert!(encoder.is_eof());
assert!(encoder.end::<()>().unwrap().is_none());
loop {}
}
#[test]
fn eof() {
let mut encoder = Encoder::close_delimited();
let mut dst = Vec::new();
let msg1 = b"foo bar".as_ref();
let buf1 = encoder.encode(msg1);
dst.put(buf1);
assert_eq!(dst, b"foo bar");
assert!(!encoder.is_eof());
encoder.end::<()>().unwrap();
let msg2 = b"baz".as_ref();
let buf2 = encoder.encode(msg2);
dst.put(buf2);
assert_eq!(dst, b"foo barbaz");
assert!(!encoder.is_eof());
encoder.end::<()>().unwrap();
loop {}
}
}

File diff suppressed because it is too large

View file

@@ -1,68 +1,55 @@
#[cfg(all(feature = "server", feature = "runtime"))]
use std::{pin::Pin, time::Duration};
use bytes::BytesMut;
use http::{HeaderMap, Method};
use httparse::ParserConfig;
#[cfg(all(feature = "server", feature = "runtime"))]
use tokio::time::Sleep;
use crate::body::DecodedLength;
use crate::proto::{BodyLength, MessageHead};
pub(crate) use self::conn::Conn;
pub(crate) use self::decode::Decoder;
pub(crate) use self::dispatch::Dispatcher;
pub(crate) use self::encode::{EncodedBuf, Encoder};
//TODO: move out of h1::io
pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE;
mod conn;
mod decode;
pub(crate) mod dispatch;
mod encode;
mod io;
mod role;
cfg_client! {
pub(crate) type ClientTransaction = role::Client;
}
cfg_server! {
pub(crate) type ServerTransaction = role::Server;
}
pub(crate) trait Http1Transaction {
type Incoming;
type Outgoing: Default;
const LOG: &'static str;
fn parse(bytes: &mut BytesMut, ctx: ParseContext<'_>) -> ParseResult<Self::Incoming>;
fn encode(enc: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder>;
fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>>;
fn is_client() -> bool {
!Self::is_server()
loop {}
}
fn is_server() -> bool {
!Self::is_client()
loop {}
}
fn should_error_on_parse_eof() -> bool {
Self::is_client()
loop {}
}
fn should_read_first() -> bool {
Self::is_server()
loop {}
}
fn update_date() {}
}
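The role defaults above are deliberately reciprocal: a transaction only has to say which side it is, and the reading and error behaviour falls out of that. A tiny illustrative sketch of how those defaults resolve for a server-side role; the `Role` and `ServerRole` names are stand-ins, not the real `Http1Transaction` implementations.
// Illustrative stand-in showing how the reciprocal defaults resolve.
trait Role {
    fn is_server() -> bool;
    fn is_client() -> bool {
        !Self::is_server()
    }
    // servers read the request before writing anything back
    fn should_read_first() -> bool {
        Self::is_server()
    }
    // a client treats EOF mid-parse as an error; a server just sees a closed peer
    fn should_error_on_parse_eof() -> bool {
        Self::is_client()
    }
}
struct ServerRole;
impl Role for ServerRole {
    fn is_server() -> bool {
        true
    }
}
fn demo() {
    assert!(ServerRole::should_read_first());
    assert!(!ServerRole::should_error_on_parse_eof());
}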
/// Result newtype for Http1Transaction::parse.
pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, crate::error::Parse>;
#[derive(Debug)]
pub(crate) struct ParsedMessage<T> {
head: MessageHead<T>,
@@ -71,7 +58,6 @@ pub(crate) struct ParsedMessage<T> {
keep_alive: bool,
wants_upgrade: bool,
}
pub(crate) struct ParseContext<'a> {
cached_headers: &'a mut Option<HeaderMap>,
req_method: &'a mut Option<Method>,
@@ -91,7 +77,6 @@ pub(crate) struct ParseContext<'a> {
#[cfg(feature = "ffi")]
raw_headers: bool,
}
/// Passed to Http1Transaction::encode
pub(crate) struct Encode<'a, T> {
head: &'a mut MessageHead<T>,
@@ -101,22 +86,18 @@ pub(crate) struct Encode<'a, T> {
req_method: &'a mut Option<Method>,
title_case_headers: bool,
}
/// Extra flags that a request "wants", like expect-continue or upgrades.
#[derive(Clone, Copy, Debug)]
struct Wants(u8);
impl Wants {
const EMPTY: Wants = Wants(0b00);
const EXPECT: Wants = Wants(0b01);
const UPGRADE: Wants = Wants(0b10);
#[must_use]
fn add(self, other: Wants) -> Wants {
Wants(self.0 | other.0)
loop {}
}
fn contains(&self, other: Wants) -> bool {
(self.0 & other.0) == other.0
loop {}
}
}
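`Wants` is a hand-rolled two-bit set: `add` ORs flags together and `contains` checks that every bit of the argument is present. A small self-contained sketch with the same bit layout; `Flags` is a stand-in because `Wants` itself is private to this module.
// Illustrative stand-in for the private `Wants` bitset above.
#[derive(Clone, Copy)]
struct Flags(u8);
impl Flags {
    const EXPECT: Flags = Flags(0b01);
    const UPGRADE: Flags = Flags(0b10);
    fn add(self, other: Flags) -> Flags {
        Flags(self.0 | other.0) // union of the two bit sets
    }
    fn contains(self, other: Flags) -> bool {
        (self.0 & other.0) == other.0 // every bit of `other` is set in `self`
    }
}
fn demo() {
    let wants = Flags::EXPECT.add(Flags::UPGRADE); // 0b11
    assert!(wants.contains(Flags::EXPECT));
    assert!(wants.contains(Flags::UPGRADE));
    assert!(!Flags::EXPECT.contains(Flags::UPGRADE));
}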

File diff suppressed because it is too large

View file

@@ -1,7 +1,6 @@
use std::error::Error as StdError;
#[cfg(feature = "runtime")]
use std::time::Duration;
use bytes::Bytes;
use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _};
@@ -11,7 +10,6 @@ use h2::SendStream;
use http::{Method, StatusCode};
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
use crate::body::HttpBody;
use crate::client::dispatch::Callback;
@@ -23,25 +21,13 @@ use crate::proto::Dispatched;
use crate::upgrade::Upgraded;
use crate::{Body, Request, Response};
use h2::client::ResponseFuture;
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
///// An mpsc channel is used to help notify the `Connection` task when *all*
///// other handles to it have been dropped, so that it can shutdown.
type ConnDropRef = mpsc::Sender<Never>;
///// A oneshot channel watches the `Connection` task, and when it completes,
///// the "dispatch" task will be notified and can shutdown sooner.
type ConnEof = oneshot::Receiver<Never>;
// Our defaults are chosen for the "majority" case, which usually are not
// resource constrained, and so the spec default of 64kb can be too limiting
// for performance.
const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024 * 5; // 5mb
const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb
const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 1024; // 1mb
#[derive(Clone, Debug)]
pub(crate) struct Config {
pub(crate) adaptive_window: bool,
@@ -57,56 +43,17 @@ pub(crate) struct Config {
pub(crate) max_concurrent_reset_streams: Option<usize>,
pub(crate) max_send_buffer_size: usize,
}
impl Default for Config {
fn default() -> Config {
Config {
adaptive_window: false,
initial_conn_window_size: DEFAULT_CONN_WINDOW,
initial_stream_window_size: DEFAULT_STREAM_WINDOW,
max_frame_size: DEFAULT_MAX_FRAME_SIZE,
#[cfg(feature = "runtime")]
keep_alive_interval: None,
#[cfg(feature = "runtime")]
keep_alive_timeout: Duration::from_secs(20),
#[cfg(feature = "runtime")]
keep_alive_while_idle: false,
max_concurrent_reset_streams: None,
max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,
}
loop {}
}
}
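These defaults feed the `h2` builder set up just below; applications normally tune them through hyper's public client builder rather than this struct. A hedged sketch of doing that, assuming the hyper 0.14 client API with the `client`, `http2` and `tcp` features enabled; the method names are written from memory, so verify them against the docs before relying on this.
// Hedged sketch (not part of this change): overriding the HTTP/2 window and
// keep-alive defaults from the public client builder (hyper 0.14, assumed).
use std::time::Duration;
fn build_client() -> hyper::Client<hyper::client::HttpConnector> {
    let conn_window: u32 = 5 * 1024 * 1024;   // matches DEFAULT_CONN_WINDOW above
    let stream_window: u32 = 2 * 1024 * 1024; // matches DEFAULT_STREAM_WINDOW above
    hyper::Client::builder()
        .http2_only(true)
        .http2_initial_connection_window_size(conn_window)
        .http2_initial_stream_window_size(stream_window)
        .http2_keep_alive_interval(Duration::from_secs(30))
        .http2_keep_alive_timeout(Duration::from_secs(20)) // default shown above
        .http2_keep_alive_while_idle(false)
        .build_http()
}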
fn new_builder(config: &Config) -> Builder {
let mut builder = Builder::default();
builder
.initial_window_size(config.initial_stream_window_size)
.initial_connection_window_size(config.initial_conn_window_size)
.max_frame_size(config.max_frame_size)
.max_send_buffer_size(config.max_send_buffer_size)
.enable_push(false);
if let Some(max) = config.max_concurrent_reset_streams {
builder.max_concurrent_reset_streams(max);
}
builder
loop {}
}
fn new_ping_config(config: &Config) -> ping::Config {
ping::Config {
bdp_initial_window: if config.adaptive_window {
Some(config.initial_stream_window_size)
} else {
None
},
#[cfg(feature = "runtime")]
keep_alive_interval: config.keep_alive_interval,
#[cfg(feature = "runtime")]
keep_alive_timeout: config.keep_alive_timeout,
#[cfg(feature = "runtime")]
keep_alive_while_idle: config.keep_alive_while_idle,
}
loop {}
}
pub(crate) async fn handshake<T, B>(
io: T,
req_rx: ClientRx<B>,
@@ -118,85 +65,15 @@ where
B: HttpBody,
B::Data: Send + 'static,
{
let (h2_tx, mut conn) = new_builder(config)
.handshake::<_, SendBuf<B::Data>>(io)
.await
.map_err(crate::Error::new_h2)?;
// An mpsc channel is used entirely to detect when the
// 'Client' has been dropped. This is to get around a bug
// in h2 where dropping all SendRequests won't notify a
// parked Connection.
let (conn_drop_ref, rx) = mpsc::channel(1);
let (cancel_tx, conn_eof) = oneshot::channel();
let conn_drop_rx = rx.into_future().map(|(item, _rx)| {
if let Some(never) = item {
match never {}
}
});
let ping_config = new_ping_config(&config);
let (conn, ping) = if ping_config.is_enabled() {
let pp = conn.ping_pong().expect("conn.ping_pong");
let (recorder, mut ponger) = ping::channel(pp, ping_config);
let conn = future::poll_fn(move |cx| {
match ponger.poll(cx) {
Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {
conn.set_target_window_size(wnd);
conn.set_initial_window_size(wnd)?;
}
#[cfg(feature = "runtime")]
Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
debug!("connection keep-alive timed out");
return Poll::Ready(Ok(()));
}
Poll::Pending => {}
}
Pin::new(&mut conn).poll(cx)
});
(Either::Left(conn), recorder)
} else {
(Either::Right(conn), ping::disabled())
};
let conn = conn.map_err(|e| debug!("connection error: {}", e));
exec.execute(conn_task(conn, conn_drop_rx, cancel_tx));
Ok(ClientTask {
ping,
conn_drop_ref,
conn_eof,
executor: exec,
h2_tx,
req_rx,
fut_ctx: None,
})
loop {}
}
async fn conn_task<C, D>(conn: C, drop_rx: D, cancel_tx: oneshot::Sender<Never>)
where
C: Future + Unpin,
D: Future<Output = ()> + Unpin,
{
match future::select(conn, drop_rx).await {
Either::Left(_) => {
// ok or err, the `conn` has finished
}
Either::Right(((), conn)) => {
// mpsc has been dropped, hopefully polling
// the connection some more should start shutdown
// and then close
trace!("send_request dropped, starting conn shutdown");
drop(cancel_tx);
let _ = conn.await;
}
}
loop {}
}
struct FutCtx<B>
where
B: HttpBody,
@@ -208,9 +85,7 @@ where
body: B,
cb: Callback<Request<B>, Response<Body>>,
}
impl<B: HttpBody> Unpin for FutCtx<B> {}
pub(crate) struct ClientTask<B>
where
B: HttpBody,
@@ -223,16 +98,14 @@ where
req_rx: ClientRx<B>,
fut_ctx: Option<FutCtx<B>>,
}
impl<B> ClientTask<B>
where
B: HttpBody + 'static,
{
pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
self.h2_tx.is_extended_connect_protocol_enabled()
loop {}
}
}
impl<B> ClientTask<B>
where
B: HttpBody + Send + 'static,
@@ -240,92 +113,9 @@ where
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
fn poll_pipe(&mut self, f: FutCtx<B>, cx: &mut task::Context<'_>) {
let ping = self.ping.clone();
let send_stream = if !f.is_connect {
if !f.eos {
let mut pipe = Box::pin(PipeToSendStream::new(f.body, f.body_tx)).map(|res| {
if let Err(e) = res {
debug!("client request body error: {}", e);
}
});
// eagerly see if the body pipe is ready and
// can thus skip allocating in the executor
match Pin::new(&mut pipe).poll(cx) {
Poll::Ready(_) => (),
Poll::Pending => {
let conn_drop_ref = self.conn_drop_ref.clone();
// keep the ping recorder's knowledge of an
// "open stream" alive while this body is
// still sending...
let ping = ping.clone();
let pipe = pipe.map(move |x| {
drop(conn_drop_ref);
drop(ping);
x
});
// Clear send task
self.executor.execute(pipe);
}
}
}
None
} else {
Some(f.body_tx)
};
let fut = f.fut.map(move |result| match result {
Ok(res) => {
// record that we got the response headers
ping.record_non_data();
let content_length = headers::content_length_parse_all(res.headers());
if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) {
if content_length.map_or(false, |len| len != 0) {
warn!("h2 connect response with non-zero body not supported");
send_stream.send_reset(h2::Reason::INTERNAL_ERROR);
return Err((
crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
None,
));
}
let (parts, recv_stream) = res.into_parts();
let mut res = Response::from_parts(parts, Body::empty());
let (pending, on_upgrade) = crate::upgrade::pending();
let io = H2Upgraded {
ping,
send_stream: unsafe { UpgradedSendStream::new(send_stream) },
recv_stream,
buf: Bytes::new(),
};
let upgraded = Upgraded::new(io, Bytes::new());
pending.fulfill(upgraded);
res.extensions_mut().insert(on_upgrade);
Ok(res)
} else {
let res = res.map(|stream| {
let ping = ping.for_stream(&stream);
crate::Body::h2(stream, content_length.into(), ping)
});
Ok(res)
}
}
Err(err) => {
ping.ensure_not_timed_out().map_err(|e| (e, None))?;
debug!("client response error: {}", err);
Err((crate::Error::new_h2(err), None))
}
});
self.executor.execute(f.cb.send_when(fut));
loop {}
}
}
impl<B> Future for ClientTask<B>
where
B: HttpBody + Send + 'static,
@@ -333,118 +123,7 @@ where
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<Dispatched>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
loop {
match ready!(self.h2_tx.poll_ready(cx)) {
Ok(()) => (),
Err(err) => {
self.ping.ensure_not_timed_out()?;
return if err.reason() == Some(::h2::Reason::NO_ERROR) {
trace!("connection gracefully shutdown");
Poll::Ready(Ok(Dispatched::Shutdown))
} else {
Poll::Ready(Err(crate::Error::new_h2(err)))
};
}
};
match self.fut_ctx.take() {
// If we were waiting on pending open
// continue where we left off.
Some(f) => {
self.poll_pipe(f, cx);
continue;
}
None => (),
}
match self.req_rx.poll_recv(cx) {
Poll::Ready(Some((req, cb))) => {
// check that future hasn't been canceled already
if cb.is_canceled() {
trace!("request callback is canceled");
continue;
}
let (head, body) = req.into_parts();
let mut req = ::http::Request::from_parts(head, ());
super::strip_connection_headers(req.headers_mut(), true);
if let Some(len) = body.size_hint().exact() {
if len != 0 || headers::method_has_defined_payload_semantics(req.method()) {
headers::set_content_length_if_missing(req.headers_mut(), len);
}
}
let is_connect = req.method() == Method::CONNECT;
let eos = body.is_end_stream();
if is_connect {
if headers::content_length_parse_all(req.headers())
.map_or(false, |len| len != 0)
{
warn!("h2 connect request with non-zero body not supported");
cb.send(Err((
crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
None,
)));
continue;
}
}
if let Some(protocol) = req.extensions_mut().remove::<Protocol>() {
req.extensions_mut().insert(protocol.into_inner());
}
let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) {
Ok(ok) => ok,
Err(err) => {
debug!("client send request error: {}", err);
cb.send(Err((crate::Error::new_h2(err), None)));
continue;
}
};
let f = FutCtx {
is_connect,
eos,
fut,
body_tx,
body,
cb,
};
// Check poll_ready() again.
// If the call to send_request() resulted in the new stream being pending open
// we have to wait for the open to complete before accepting new requests.
match self.h2_tx.poll_ready(cx) {
Poll::Pending => {
// Save Context
self.fut_ctx = Some(f);
return Poll::Pending;
}
Poll::Ready(Ok(())) => (),
Poll::Ready(Err(err)) => {
f.cb.send(Err((crate::Error::new_h2(err), None)));
continue;
}
}
self.poll_pipe(f, cx);
continue;
}
Poll::Ready(None) => {
trace!("client::dispatch::Sender dropped");
return Poll::Ready(Ok(Dispatched::Shutdown));
}
Poll::Pending => match ready!(Pin::new(&mut self.conn_eof).poll(cx)) {
Ok(never) => match never {},
Err(_conn_is_eof) => {
trace!("connection task is closed, closing dispatch task");
return Poll::Ready(Ok(Dispatched::Shutdown));
}
},
}
}
loop {}
}
}

View file

@@ -9,280 +9,83 @@ use std::mem;
use std::task::Context;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tracing::{debug, trace, warn};
use crate::body::HttpBody;
use crate::common::{task, Future, Pin, Poll};
use crate::proto::h2::ping::Recorder;
pub(crate) mod ping;
cfg_client! {
pub(crate) mod client;
pub(crate) use self::client::ClientTask;
}
cfg_server! {
pub(crate) mod server;
pub(crate) use self::server::Server;
}
/// Default initial stream window size defined in HTTP2 spec.
pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535;
fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
// List of connection headers from:
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection
//
// TE headers are allowed in HTTP/2 requests as long as the value is "trailers", so they're
// tested separately.
let connection_headers = [
HeaderName::from_lowercase(b"keep-alive").unwrap(),
HeaderName::from_lowercase(b"proxy-connection").unwrap(),
TRAILER,
TRANSFER_ENCODING,
UPGRADE,
];
for header in connection_headers.iter() {
if headers.remove(header).is_some() {
warn!("Connection header illegal in HTTP/2: {}", header.as_str());
}
}
if is_request {
if headers
.get(TE)
.map(|te_header| te_header != "trailers")
.unwrap_or(false)
{
warn!("TE headers not set to \"trailers\" are illegal in HTTP/2 requests");
headers.remove(TE);
}
} else if headers.remove(TE).is_some() {
warn!("TE headers illegal in HTTP/2 responses");
}
if let Some(header) = headers.remove(CONNECTION) {
warn!(
"Connection header illegal in HTTP/2: {}",
CONNECTION.as_str()
);
let header_contents = header.to_str().unwrap();
// A `Connection` header may have a comma-separated list of names of other headers that
// are meant for only this specific connection.
//
// Iterate these names and remove them as headers. Connection-specific headers are
// forbidden in HTTP2, as that information has been moved into frame types of the h2
// protocol.
for name in header_contents.split(',') {
let name = name.trim();
headers.remove(name);
}
}
loop {}
}
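Besides the fixed list of hop-by-hop headers, the function above also removes every header named inside the `Connection` value itself. A small self-contained sketch of that second step against the `http` crate; `scrub` is a simplified re-implementation for illustration, not the function above.
// Illustrative sketch of the Connection-header scrubbing done above.
use http::header::{HeaderMap, HeaderValue, CONNECTION};
fn scrub(headers: &mut HeaderMap) {
    if let Some(conn) = headers.remove(CONNECTION) {
        if let Ok(list) = conn.to_str() {
            for name in list.split(',') {
                // each listed name is connection-specific and must not be
                // forwarded on an HTTP/2 stream
                headers.remove(name.trim());
            }
        }
    }
}
fn demo() {
    let mut headers = HeaderMap::new();
    headers.insert(CONNECTION, HeaderValue::from_static("keep-alive, x-trace"));
    headers.insert("keep-alive", HeaderValue::from_static("timeout=5"));
    headers.insert("x-trace", HeaderValue::from_static("abc"));
    headers.insert("accept", HeaderValue::from_static("*/*"));
    scrub(&mut headers);
    assert!(headers.get(CONNECTION).is_none());
    assert!(headers.get("keep-alive").is_none());
    assert!(headers.get("x-trace").is_none());
    assert!(headers.get("accept").is_some());
}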
// body adapters used by both Client and Server
pin_project! {
struct PipeToSendStream<S>
where
S: HttpBody,
{
body_tx: SendStream<SendBuf<S::Data>>,
data_done: bool,
#[pin]
stream: S,
}
}
impl<S> PipeToSendStream<S>
where
S: HttpBody,
{
fn new(stream: S, tx: SendStream<SendBuf<S::Data>>) -> PipeToSendStream<S> {
PipeToSendStream {
body_tx: tx,
data_done: false,
stream,
}
loop {}
}
}
impl<S> Future for PipeToSendStream<S>
where
S: HttpBody,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
loop {
if !*me.data_done {
// we don't have the next chunk of data yet, so just reserve 1 byte to make
// sure there's some capacity available. h2 will handle the capacity management
// for the actual body chunk.
me.body_tx.reserve_capacity(1);
if me.body_tx.capacity() == 0 {
loop {
match ready!(me.body_tx.poll_capacity(cx)) {
Some(Ok(0)) => {}
Some(Ok(_)) => break,
Some(Err(e)) => {
return Poll::Ready(Err(crate::Error::new_body_write(e)))
}
None => {
// None means the stream is no longer in a
// streaming state, we either finished it
// somehow, or the remote reset us.
return Poll::Ready(Err(crate::Error::new_body_write(
"send stream capacity unexpectedly closed",
)));
}
}
}
} else if let Poll::Ready(reason) = me
.body_tx
.poll_reset(cx)
.map_err(crate::Error::new_body_write)?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(
reason,
))));
}
match ready!(me.stream.as_mut().poll_data(cx)) {
Some(Ok(chunk)) => {
let is_eos = me.stream.is_end_stream();
trace!(
"send body chunk: {} bytes, eos={}",
chunk.remaining(),
is_eos,
);
let buf = SendBuf::Buf(chunk);
me.body_tx
.send_data(buf, is_eos)
.map_err(crate::Error::new_body_write)?;
if is_eos {
return Poll::Ready(Ok(()));
}
}
Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),
None => {
me.body_tx.reserve_capacity(0);
let is_eos = me.stream.is_end_stream();
if is_eos {
return Poll::Ready(me.body_tx.send_eos_frame());
} else {
*me.data_done = true;
// loop again to poll_trailers
}
}
}
} else {
if let Poll::Ready(reason) = me
.body_tx
.poll_reset(cx)
.map_err(crate::Error::new_body_write)?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(
reason,
))));
}
match ready!(me.stream.poll_trailers(cx)) {
Ok(Some(trailers)) => {
me.body_tx
.send_trailers(trailers)
.map_err(crate::Error::new_body_write)?;
return Poll::Ready(Ok(()));
}
Ok(None) => {
// There were no trailers, so send an empty DATA frame...
return Poll::Ready(me.body_tx.send_eos_frame());
}
Err(e) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),
}
}
}
loop {}
}
}
trait SendStreamExt {
fn on_user_err<E>(&mut self, err: E) -> crate::Error
where
E: Into<Box<dyn std::error::Error + Send + Sync>>;
fn send_eos_frame(&mut self) -> crate::Result<()>;
}
impl<B: Buf> SendStreamExt for SendStream<SendBuf<B>> {
fn on_user_err<E>(&mut self, err: E) -> crate::Error
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
let err = crate::Error::new_user_body(err);
debug!("send body user stream error: {}", err);
self.send_reset(err.h2_reason());
err
loop {}
}
fn send_eos_frame(&mut self) -> crate::Result<()> {
trace!("send body eos");
self.send_data(SendBuf::None, true)
.map_err(crate::Error::new_body_write)
loop {}
}
}
#[repr(usize)]
enum SendBuf<B> {
Buf(B),
Cursor(Cursor<Box<[u8]>>),
None,
}
impl<B: Buf> Buf for SendBuf<B> {
#[inline]
fn remaining(&self) -> usize {
match *self {
Self::Buf(ref b) => b.remaining(),
Self::Cursor(ref c) => Buf::remaining(c),
Self::None => 0,
}
loop {}
}
#[inline]
fn chunk(&self) -> &[u8] {
match *self {
Self::Buf(ref b) => b.chunk(),
Self::Cursor(ref c) => c.chunk(),
Self::None => &[],
}
loop {}
}
#[inline]
fn advance(&mut self, cnt: usize) {
match *self {
Self::Buf(ref mut b) => b.advance(cnt),
Self::Cursor(ref mut c) => c.advance(cnt),
Self::None => {}
}
loop {}
}
fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
match *self {
Self::Buf(ref b) => b.chunks_vectored(dst),
Self::Cursor(ref c) => c.chunks_vectored(dst),
Self::None => 0,
}
loop {}
}
}
struct H2Upgraded<B>
where
B: Buf,
@@ -292,7 +95,6 @@ where
recv_stream: RecvStream,
buf: Bytes,
}
impl<B> AsyncRead for H2Upgraded<B>
where
B: Buf,
@@ -302,37 +104,9 @@ where
cx: &mut Context<'_>,
read_buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), io::Error>> {
if self.buf.is_empty() {
self.buf = loop {
match ready!(self.recv_stream.poll_data(cx)) {
None => return Poll::Ready(Ok(())),
Some(Ok(buf)) if buf.is_empty() && !self.recv_stream.is_end_stream() => {
continue
}
Some(Ok(buf)) => {
self.ping.record_data(buf.len());
break buf;
}
Some(Err(e)) => {
return Poll::Ready(match e.reason() {
Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()),
Some(Reason::STREAM_CLOSED) => {
Err(io::Error::new(io::ErrorKind::BrokenPipe, e))
}
_ => Err(h2_to_io_error(e)),
})
}
}
};
}
let cnt = std::cmp::min(self.buf.len(), read_buf.remaining());
read_buf.put_slice(&self.buf[..cnt]);
self.buf.advance(cnt);
let _ = self.recv_stream.flow_control().release_capacity(cnt);
Poll::Ready(Ok(()))
loop {}
}
}
impl<B> AsyncWrite for H2Upgraded<B>
where
B: Buf,
@@ -342,130 +116,69 @@ where
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
if buf.is_empty() {
return Poll::Ready(Ok(0));
}
self.send_stream.reserve_capacity(buf.len());
// We ignore all errors returned by `poll_capacity` and `write`, as we
// will get the correct error from `poll_reset` anyway.
let cnt = match ready!(self.send_stream.poll_capacity(cx)) {
None => Some(0),
Some(Ok(cnt)) => self
.send_stream
.write(&buf[..cnt], false)
.ok()
.map(|()| cnt),
Some(Err(_)) => None,
};
if let Some(cnt) = cnt {
return Poll::Ready(Ok(cnt));
}
Poll::Ready(Err(h2_to_io_error(
match ready!(self.send_stream.poll_reset(cx)) {
Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()))
}
Ok(reason) => reason.into(),
Err(e) => e,
},
)))
loop {}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
loop {}
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
if self.send_stream.write(&[], true).is_ok() {
return Poll::Ready(Ok(()))
}
Poll::Ready(Err(h2_to_io_error(
match ready!(self.send_stream.poll_reset(cx)) {
Ok(Reason::NO_ERROR) => {
return Poll::Ready(Ok(()))
}
Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()))
}
Ok(reason) => reason.into(),
Err(e) => e,
},
)))
loop {}
}
}
fn h2_to_io_error(e: h2::Error) -> io::Error {
if e.is_io() {
e.into_io().unwrap()
} else {
io::Error::new(io::ErrorKind::Other, e)
}
loop {}
}
struct UpgradedSendStream<B>(SendStream<SendBuf<Neutered<B>>>);
impl<B> UpgradedSendStream<B>
where
B: Buf,
{
unsafe fn new(inner: SendStream<SendBuf<B>>) -> Self {
assert_eq!(mem::size_of::<B>(), mem::size_of::<Neutered<B>>());
Self(mem::transmute(inner))
loop {}
}
fn reserve_capacity(&mut self, cnt: usize) {
unsafe { self.as_inner_unchecked().reserve_capacity(cnt) }
loop {}
}
fn poll_capacity(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<usize, h2::Error>>> {
unsafe { self.as_inner_unchecked().poll_capacity(cx) }
loop {}
}
fn poll_reset(&mut self, cx: &mut Context<'_>) -> Poll<Result<h2::Reason, h2::Error>> {
unsafe { self.as_inner_unchecked().poll_reset(cx) }
loop {}
}
fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> {
let send_buf = SendBuf::Cursor(Cursor::new(buf.into()));
unsafe {
self.as_inner_unchecked()
.send_data(send_buf, end_of_stream)
.map_err(h2_to_io_error)
}
loop {}
}
unsafe fn as_inner_unchecked(&mut self) -> &mut SendStream<SendBuf<B>> {
&mut *(&mut self.0 as *mut _ as *mut _)
loop {}
}
}
#[repr(transparent)]
struct Neutered<B> {
_inner: B,
impossible: Impossible,
}
enum Impossible {}
unsafe impl<B> Send for Neutered<B> {}
impl<B> Buf for Neutered<B> {
fn remaining(&self) -> usize {
match self.impossible {}
loop {}
}
fn chunk(&self) -> &[u8] {
match self.impossible {}
loop {}
}
fn advance(&mut self, _cnt: usize) {
match self.impossible {}
loop {}
}
}
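`Neutered<B>` pairs the buffer type with an uninhabited enum, so no value of it can ever exist, and the `match self.impossible {}` arms are how the compiler is convinced every method body is unreachable. A minimal sketch of the same uninhabited-type trick; the `Never` and `Unconstructible` names are illustrative.
// Illustrative sketch of the uninhabited-type trick used by `Neutered` above.
enum Never {}
struct Unconstructible<T> {
    _inner: T,
    impossible: Never, // no value of `Never` exists, so none of this struct does
}
impl<T> Unconstructible<T> {
    fn describe(&self) -> &'static str {
        // Statically unreachable: a match on an uninhabited type needs no arms,
        // yet it type-checks as producing any return type.
        match self.impossible {}
    }
}
fn demo(maybe: Option<Unconstructible<Vec<u8>>>) -> &'static str {
    // The only value we can ever be handed here is `None`.
    match maybe {
        Some(x) => x.describe(),
        None => "no such value can exist",
    }
}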

View file

@@ -18,7 +18,6 @@
/// 3b. Merge RTT with a running average.
/// 3c. Calculate bdp as bytes/rtt.
/// 3d. If bdp is over 2/3 max, set new max to bdp and update windows.
#[cfg(feature = "runtime")]
use std::fmt;
#[cfg(feature = "runtime")]
@@ -30,74 +29,17 @@ use std::task::{self, Poll};
use std::time::Duration;
#[cfg(not(feature = "runtime"))]
use std::time::Instant;
use h2::{Ping, PingPong};
#[cfg(feature = "runtime")]
use tokio::time::{Instant, Sleep};
use tracing::{debug, trace};
type WindowSize = u32;
pub(super) fn disabled() -> Recorder {
Recorder { shared: None }
loop {}
}
pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) {
debug_assert!(
config.is_enabled(),
"ping channel requires bdp or keep-alive config",
);
let bdp = config.bdp_initial_window.map(|wnd| Bdp {
bdp: wnd,
max_bandwidth: 0.0,
rtt: 0.0,
ping_delay: Duration::from_millis(100),
stable_count: 0,
});
let (bytes, next_bdp_at) = if bdp.is_some() {
(Some(0), Some(Instant::now()))
} else {
(None, None)
};
#[cfg(feature = "runtime")]
let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive {
interval,
timeout: config.keep_alive_timeout,
while_idle: config.keep_alive_while_idle,
timer: Box::pin(tokio::time::sleep(interval)),
state: KeepAliveState::Init,
});
#[cfg(feature = "runtime")]
let last_read_at = keep_alive.as_ref().map(|_| Instant::now());
let shared = Arc::new(Mutex::new(Shared {
bytes,
#[cfg(feature = "runtime")]
last_read_at,
#[cfg(feature = "runtime")]
is_keep_alive_timed_out: false,
ping_pong,
ping_sent_at: None,
next_bdp_at,
}));
(
Recorder {
shared: Some(shared.clone()),
},
Ponger {
bdp,
#[cfg(feature = "runtime")]
keep_alive,
shared,
},
)
loop {}
}
#[derive(Clone)]
pub(super) struct Config {
pub(super) bdp_initial_window: Option<WindowSize>,
@@ -112,41 +54,32 @@ pub(super) struct Config {
#[cfg(feature = "runtime")]
pub(super) keep_alive_while_idle: bool,
}
#[derive(Clone)]
pub(crate) struct Recorder {
shared: Option<Arc<Mutex<Shared>>>,
}
pub(super) struct Ponger {
bdp: Option<Bdp>,
#[cfg(feature = "runtime")]
keep_alive: Option<KeepAlive>,
shared: Arc<Mutex<Shared>>,
}
struct Shared {
ping_pong: PingPong,
ping_sent_at: Option<Instant>,
// bdp
/// If `Some`, bdp is enabled, and this tracks how many bytes have been
/// read during the current sample.
bytes: Option<usize>,
/// We delay a variable amount of time between BDP pings. This allows us
/// to send less pings as the bandwidth stabilizes.
next_bdp_at: Option<Instant>,
// keep-alive
/// If `Some`, keep-alive is enabled, and the Instant is how long ago
/// the connection read the last frame.
#[cfg(feature = "runtime")]
last_read_at: Option<Instant>,
#[cfg(feature = "runtime")]
is_keep_alive_timed_out: bool,
}
struct Bdp {
/// Current BDP in bytes
bdp: u32,
@@ -161,7 +94,6 @@ struct Bdp {
/// The count of ping round trips where BDP has stayed the same.
stable_count: u32,
}
#[cfg(feature = "runtime")]
struct KeepAlive {
/// If no frames are received in this amount of time, a PING frame is sent.
@@ -171,385 +103,113 @@ struct KeepAlive {
timeout: Duration,
/// If true, sends pings even when there are no active streams.
while_idle: bool,
state: KeepAliveState,
timer: Pin<Box<Sleep>>,
}
#[cfg(feature = "runtime")]
enum KeepAliveState {
Init,
Scheduled,
PingSent,
}
pub(super) enum Ponged {
SizeUpdate(WindowSize),
#[cfg(feature = "runtime")]
KeepAliveTimedOut,
}
#[cfg(feature = "runtime")]
#[derive(Debug)]
pub(super) struct KeepAliveTimedOut;
// ===== impl Config =====
impl Config {
pub(super) fn is_enabled(&self) -> bool {
#[cfg(feature = "runtime")]
{
self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some()
}
#[cfg(not(feature = "runtime"))]
{
self.bdp_initial_window.is_some()
}
loop {}
}
}
// ===== impl Recorder =====
impl Recorder {
pub(crate) fn record_data(&self, len: usize) {
let shared = if let Some(ref shared) = self.shared {
shared
} else {
return;
};
let mut locked = shared.lock().unwrap();
#[cfg(feature = "runtime")]
locked.update_last_read_at();
// are we ready to send another bdp ping?
// if not, we don't need to record bytes either
if let Some(ref next_bdp_at) = locked.next_bdp_at {
if Instant::now() < *next_bdp_at {
return;
} else {
locked.next_bdp_at = None;
}
}
if let Some(ref mut bytes) = locked.bytes {
*bytes += len;
} else {
// no need to send bdp ping if bdp is disabled
return;
}
if !locked.is_ping_sent() {
locked.send_ping();
}
loop {}
}
pub(crate) fn record_non_data(&self) {
#[cfg(feature = "runtime")]
{
let shared = if let Some(ref shared) = self.shared {
shared
} else {
return;
};
let mut locked = shared.lock().unwrap();
locked.update_last_read_at();
}
loop {}
}
/// If the incoming stream is already closed, convert self into
/// a disabled reporter.
#[cfg(feature = "client")]
pub(super) fn for_stream(self, stream: &h2::RecvStream) -> Self {
if stream.is_end_stream() {
disabled()
} else {
self
}
loop {}
}
pub(super) fn ensure_not_timed_out(&self) -> crate::Result<()> {
#[cfg(feature = "runtime")]
{
if let Some(ref shared) = self.shared {
let locked = shared.lock().unwrap();
if locked.is_keep_alive_timed_out {
return Err(KeepAliveTimedOut.crate_error());
}
}
}
// else
Ok(())
loop {}
}
}
// ===== impl Ponger =====
impl Ponger {
pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll<Ponged> {
let now = Instant::now();
let mut locked = self.shared.lock().unwrap();
#[cfg(feature = "runtime")]
let is_idle = self.is_idle();
#[cfg(feature = "runtime")]
{
if let Some(ref mut ka) = self.keep_alive {
ka.schedule(is_idle, &locked);
ka.maybe_ping(cx, &mut locked);
}
}
if !locked.is_ping_sent() {
// XXX: this doesn't register a waker...?
return Poll::Pending;
}
match locked.ping_pong.poll_pong(cx) {
Poll::Ready(Ok(_pong)) => {
let start = locked
.ping_sent_at
.expect("pong received implies ping_sent_at");
locked.ping_sent_at = None;
let rtt = now - start;
trace!("recv pong");
#[cfg(feature = "runtime")]
{
if let Some(ref mut ka) = self.keep_alive {
locked.update_last_read_at();
ka.schedule(is_idle, &locked);
}
}
if let Some(ref mut bdp) = self.bdp {
let bytes = locked.bytes.expect("bdp enabled implies bytes");
locked.bytes = Some(0); // reset
trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt);
let update = bdp.calculate(bytes, rtt);
locked.next_bdp_at = Some(now + bdp.ping_delay);
if let Some(update) = update {
return Poll::Ready(Ponged::SizeUpdate(update))
}
}
}
Poll::Ready(Err(e)) => {
debug!("pong error: {}", e);
}
Poll::Pending => {
#[cfg(feature = "runtime")]
{
if let Some(ref mut ka) = self.keep_alive {
if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) {
self.keep_alive = None;
locked.is_keep_alive_timed_out = true;
return Poll::Ready(Ponged::KeepAliveTimedOut);
}
}
}
}
}
// XXX: this doesn't register a waker...?
Poll::Pending
loop {}
}
#[cfg(feature = "runtime")]
fn is_idle(&self) -> bool {
Arc::strong_count(&self.shared) <= 2
loop {}
}
}
// ===== impl Shared =====
impl Shared {
fn send_ping(&mut self) {
match self.ping_pong.send_ping(Ping::opaque()) {
Ok(()) => {
self.ping_sent_at = Some(Instant::now());
trace!("sent ping");
}
Err(err) => {
debug!("error sending ping: {}", err);
}
}
loop {}
}
fn is_ping_sent(&self) -> bool {
self.ping_sent_at.is_some()
loop {}
}
#[cfg(feature = "runtime")]
fn update_last_read_at(&mut self) {
if self.last_read_at.is_some() {
self.last_read_at = Some(Instant::now());
}
loop {}
}
#[cfg(feature = "runtime")]
fn last_read_at(&self) -> Instant {
self.last_read_at.expect("keep_alive expects last_read_at")
loop {}
}
}
// ===== impl Bdp =====
/// Any higher than this likely will be hitting the TCP flow control.
const BDP_LIMIT: usize = 1024 * 1024 * 16;
impl Bdp {
fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option<WindowSize> {
// No need to do any math if we're at the limit.
if self.bdp as usize == BDP_LIMIT {
self.stabilize_delay();
return None;
}
// average the rtt
let rtt = seconds(rtt);
if self.rtt == 0.0 {
// First sample means rtt is first rtt.
self.rtt = rtt;
} else {
// Weigh this rtt as 1/8 for a moving average.
self.rtt += (rtt - self.rtt) * 0.125;
}
// calculate the current bandwidth
let bw = (bytes as f64) / (self.rtt * 1.5);
trace!("current bandwidth = {:.1}B/s", bw);
if bw < self.max_bandwidth {
// not a faster bandwidth, so don't update
self.stabilize_delay();
return None;
} else {
self.max_bandwidth = bw;
}
// if the current `bytes` sample is at least 2/3 the previous
// bdp, increase to double the current sample.
if bytes >= self.bdp as usize * 2 / 3 {
self.bdp = (bytes * 2).min(BDP_LIMIT) as WindowSize;
trace!("BDP increased to {}", self.bdp);
self.stable_count = 0;
self.ping_delay /= 2;
Some(self.bdp)
} else {
self.stabilize_delay();
None
}
loop {}
}
fn stabilize_delay(&mut self) {
if self.ping_delay < Duration::from_secs(10) {
self.stable_count += 1;
if self.stable_count >= 2 {
self.ping_delay *= 4;
self.stable_count = 0;
}
}
loop {}
}
}
fn seconds(dur: Duration) -> f64 {
const NANOS_PER_SEC: f64 = 1_000_000_000.0;
let secs = dur.as_secs() as f64;
secs + (dur.subsec_nanos() as f64) / NANOS_PER_SEC
loop {}
}
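
The removed `Bdp::calculate` body above is a small estimator: the RTT is smoothed with a 1/8 moving average, a bandwidth sample is derived from the acked bytes, and the window is doubled whenever a sample covers at least two thirds of the current estimate. The following is a minimal, self-contained sketch of that arithmetic only (the `next_window` name and signature are illustrative, not hyper API; the ping-delay stabilization is omitted), which makes the doubling rule easy to check by hand.

use std::time::Duration;

const BDP_LIMIT: usize = 1024 * 1024 * 16;

// Sketch of the estimator: smooth the RTT with a 1/8 moving average,
// derive a bandwidth sample, and double the window whenever a sample
// covers at least 2/3 of the current estimate (capped at BDP_LIMIT).
fn next_window(
    bdp: &mut u32,
    rtt_avg: &mut f64,
    max_bw: &mut f64,
    bytes: usize,
    rtt: Duration,
) -> Option<u32> {
    if *bdp as usize == BDP_LIMIT {
        return None;
    }
    let rtt = rtt.as_secs_f64();
    *rtt_avg = if *rtt_avg == 0.0 { rtt } else { *rtt_avg + (rtt - *rtt_avg) * 0.125 };
    let bw = bytes as f64 / (*rtt_avg * 1.5);
    if bw < *max_bw {
        return None; // not a faster bandwidth sample, keep the current window
    }
    *max_bw = bw;
    if bytes >= *bdp as usize * 2 / 3 {
        *bdp = (bytes * 2).min(BDP_LIMIT) as u32;
        Some(*bdp)
    } else {
        None
    }
}

fn main() {
    let (mut bdp, mut rtt_avg, mut max_bw) = (65_535u32, 0.0_f64, 0.0_f64);
    // A 60 kB sample covers more than 2/3 of the ~64 kB window, so the
    // estimate doubles to 120 kB.
    let update = next_window(&mut bdp, &mut rtt_avg, &mut max_bw, 60_000, Duration::from_millis(50));
    assert_eq!(update, Some(120_000));
}
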
// ===== impl KeepAlive =====
#[cfg(feature = "runtime")]
impl KeepAlive {
fn schedule(&mut self, is_idle: bool, shared: &Shared) {
match self.state {
KeepAliveState::Init => {
if !self.while_idle && is_idle {
return;
}
self.state = KeepAliveState::Scheduled;
let interval = shared.last_read_at() + self.interval;
self.timer.as_mut().reset(interval);
}
KeepAliveState::PingSent => {
if shared.is_ping_sent() {
return;
}
self.state = KeepAliveState::Scheduled;
let interval = shared.last_read_at() + self.interval;
self.timer.as_mut().reset(interval);
}
KeepAliveState::Scheduled => (),
}
loop {}
}
fn maybe_ping(&mut self, cx: &mut task::Context<'_>, shared: &mut Shared) {
match self.state {
KeepAliveState::Scheduled => {
if Pin::new(&mut self.timer).poll(cx).is_pending() {
return;
}
// check if we've received a frame while we were scheduled
if shared.last_read_at() + self.interval > self.timer.deadline() {
self.state = KeepAliveState::Init;
cx.waker().wake_by_ref(); // schedule us again
return;
}
trace!("keep-alive interval ({:?}) reached", self.interval);
shared.send_ping();
self.state = KeepAliveState::PingSent;
let timeout = Instant::now() + self.timeout;
self.timer.as_mut().reset(timeout);
}
KeepAliveState::Init | KeepAliveState::PingSent => (),
}
loop {}
}
fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> {
match self.state {
KeepAliveState::PingSent => {
if Pin::new(&mut self.timer).poll(cx).is_pending() {
return Ok(());
}
trace!("keep-alive timeout ({:?}) reached", self.timeout);
Err(KeepAliveTimedOut)
}
KeepAliveState::Init | KeepAliveState::Scheduled => Ok(()),
}
fn maybe_timeout(
&mut self,
cx: &mut task::Context<'_>,
) -> Result<(), KeepAliveTimedOut> {
loop {}
}
}
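
On the public side these keep-alive knobs surface as `http2_keep_alive_interval` and `http2_keep_alive_timeout` on the server builder. A hedged sketch against the upstream hyper 0.14 API (assuming the `http2`, `tcp` and `runtime` features and a tokio runtime; the address, durations and handler are placeholders):

use std::convert::Infallible;
use std::net::SocketAddr;
use std::time::Duration;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("pong")))
        }))
    });

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    // Send a keep-alive PING every 20s and drop the connection if no
    // acknowledgement arrives within 10s (the interval/timeout pair above).
    Server::bind(&addr)
        .http2_keep_alive_interval(Duration::from_secs(20))
        .http2_keep_alive_timeout(Duration::from_secs(10))
        .serve(make_svc)
        .await
}
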
// ===== impl KeepAliveTimedOut =====
#[cfg(feature = "runtime")]
impl KeepAliveTimedOut {
pub(super) fn crate_error(self) -> crate::Error {
crate::Error::new(crate::error::Kind::Http2).with(self)
loop {}
}
}
#[cfg(feature = "runtime")]
impl fmt::Display for KeepAliveTimedOut {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("keep-alive timed out")
loop {}
}
}
#[cfg(feature = "runtime")]
impl std::error::Error for KeepAliveTimedOut {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&crate::error::TimedOut)
loop {}
}
}

View file

@ -2,7 +2,6 @@ use std::error::Error as StdError;
use std::marker::Unpin;
#[cfg(feature = "runtime")]
use std::time::Duration;
use bytes::Bytes;
use h2::server::{Connection, Handshake, SendResponse};
use h2::{Reason, RecvStream};
@ -10,7 +9,6 @@ use http::{Method, Request};
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::{ping, PipeToSendStream, SendBuf};
use crate::body::HttpBody;
use crate::common::exec::ConnStreamExec;
@ -21,23 +19,13 @@ use crate::proto::h2::ping::Recorder;
use crate::proto::h2::{H2Upgraded, UpgradedSendStream};
use crate::proto::Dispatched;
use crate::service::HttpService;
use crate::upgrade::{OnUpgrade, Pending, Upgraded};
use crate::{Body, Response};
// Our defaults are chosen for the "majority" case, which usually is not
// resource constrained, and so the spec default of 64kb can be too limiting
// for performance.
//
// At the same time, a server more often has multiple clients connected, and
// so is more likely to use more resources than a client would.
const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb
const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb
const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024;
const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024;
const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16;
const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400;
// 16 MB "sane default" taken from golang http2
const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20;
#[derive(Clone, Debug)]
pub(crate) struct Config {
pub(crate) adaptive_window: bool,
@ -53,50 +41,23 @@ pub(crate) struct Config {
pub(crate) max_send_buffer_size: usize,
pub(crate) max_header_list_size: u32,
}
impl Default for Config {
fn default() -> Config {
Config {
adaptive_window: false,
initial_conn_window_size: DEFAULT_CONN_WINDOW,
initial_stream_window_size: DEFAULT_STREAM_WINDOW,
max_frame_size: DEFAULT_MAX_FRAME_SIZE,
enable_connect_protocol: false,
max_concurrent_streams: None,
#[cfg(feature = "runtime")]
keep_alive_interval: None,
#[cfg(feature = "runtime")]
keep_alive_timeout: Duration::from_secs(20),
max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,
max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE,
}
loop {}
}
}
pin_project! {
pub(crate) struct Server<T, S, B, E>
where
S: HttpService<Body>,
B: HttpBody,
{
exec: E,
service: S,
state: State<T, B>,
}
pub (crate) struct Server < T, S, B, E > where S : HttpService < Body >, B :
HttpBody, { exec : E, service : S, state : State < T, B >, }
}
enum State<T, B>
where
B: HttpBody,
{
Handshaking {
ping_config: ping::Config,
hs: Handshake<T, SendBuf<B::Data>>,
},
Handshaking { ping_config: ping::Config, hs: Handshake<T, SendBuf<B::Data>> },
Serving(Serving<T, B>),
Closed,
}
struct Serving<T, B>
where
B: HttpBody,
@ -105,7 +66,6 @@ where
conn: Connection<T, SendBuf<B::Data>>,
closing: Option<crate::Error>,
}
impl<T, S, B, E> Server<T, S, B, E>
where
T: AsyncRead + AsyncWrite + Unpin,
@ -114,70 +74,18 @@ where
B: HttpBody + 'static,
E: ConnStreamExec<S::Future, B>,
{
pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server<T, S, B, E> {
let mut builder = h2::server::Builder::default();
builder
.initial_window_size(config.initial_stream_window_size)
.initial_connection_window_size(config.initial_conn_window_size)
.max_frame_size(config.max_frame_size)
.max_header_list_size(config.max_header_list_size)
.max_send_buffer_size(config.max_send_buffer_size);
if let Some(max) = config.max_concurrent_streams {
builder.max_concurrent_streams(max);
}
if config.enable_connect_protocol {
builder.enable_connect_protocol();
}
let handshake = builder.handshake(io);
let bdp = if config.adaptive_window {
Some(config.initial_stream_window_size)
} else {
None
};
let ping_config = ping::Config {
bdp_initial_window: bdp,
#[cfg(feature = "runtime")]
keep_alive_interval: config.keep_alive_interval,
#[cfg(feature = "runtime")]
keep_alive_timeout: config.keep_alive_timeout,
// If keep-alive is enabled for servers, it is always enabled while
// idle as well, so dead connections can be closed more aggressively.
#[cfg(feature = "runtime")]
keep_alive_while_idle: true,
};
Server {
exec,
state: State::Handshaking {
ping_config,
hs: handshake,
},
service,
}
pub(crate) fn new(
io: T,
service: S,
config: &Config,
exec: E,
) -> Server<T, S, B, E> {
loop {}
}
pub(crate) fn graceful_shutdown(&mut self) {
trace!("graceful_shutdown");
match self.state {
State::Handshaking { .. } => {
// fall-through, to replace state with Closed
}
State::Serving(ref mut srv) => {
if srv.closing.is_none() {
srv.conn.graceful_shutdown();
}
return;
}
State::Closed => {
return;
}
}
self.state = State::Closed;
loop {}
}
}
impl<T, S, B, E> Future for Server<T, S, B, E>
where
T: AsyncRead + AsyncWrite + Unpin,
@ -187,43 +95,10 @@ where
E: ConnStreamExec<S::Future, B>,
{
type Output = crate::Result<Dispatched>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let me = &mut *self;
loop {
let next = match me.state {
State::Handshaking {
ref mut hs,
ref ping_config,
} => {
let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?;
let ping = if ping_config.is_enabled() {
let pp = conn.ping_pong().expect("conn.ping_pong");
Some(ping::channel(pp, ping_config.clone()))
} else {
None
};
State::Serving(Serving {
ping,
conn,
closing: None,
})
}
State::Serving(ref mut srv) => {
ready!(srv.poll_server(cx, &mut me.service, &mut me.exec))?;
return Poll::Ready(Ok(Dispatched::Shutdown));
}
State::Closed => {
// graceful_shutdown was called before handshaking finished,
// nothing to do here...
return Poll::Ready(Ok(Dispatched::Shutdown));
}
};
me.state = next;
}
loop {}
}
}
impl<T, B> Serving<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
@ -240,171 +115,27 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
if self.closing.is_none() {
loop {
self.poll_ping(cx);
// Check that the service is ready to accept a new request.
//
// - If not, just drive the connection some.
// - If ready, try to accept a new request from the connection.
match service.poll_ready(cx) {
Poll::Ready(Ok(())) => (),
Poll::Pending => {
// use `poll_closed` instead of `poll_accept`,
// in order to avoid accepting a request.
ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?;
trace!("incoming connection complete");
return Poll::Ready(Ok(()));
}
Poll::Ready(Err(err)) => {
let err = crate::Error::new_user_service(err);
debug!("service closed: {}", err);
let reason = err.h2_reason();
if reason == Reason::NO_ERROR {
// NO_ERROR is only used for graceful shutdowns...
trace!("interpreting NO_ERROR user error as graceful_shutdown");
self.conn.graceful_shutdown();
} else {
trace!("abruptly shutting down with {:?}", reason);
self.conn.abrupt_shutdown(reason);
}
self.closing = Some(err);
break;
}
}
// When the service is ready, accept an incoming request.
match ready!(self.conn.poll_accept(cx)) {
Some(Ok((req, mut respond))) => {
trace!("incoming request");
let content_length = headers::content_length_parse_all(req.headers());
let ping = self
.ping
.as_ref()
.map(|ping| ping.0.clone())
.unwrap_or_else(ping::disabled);
// Record the headers received
ping.record_non_data();
let is_connect = req.method() == Method::CONNECT;
let (mut parts, stream) = req.into_parts();
let (mut req, connect_parts) = if !is_connect {
(
Request::from_parts(
parts,
crate::Body::h2(stream, content_length.into(), ping),
),
None,
)
} else {
if content_length.map_or(false, |len| len != 0) {
warn!("h2 connect request with non-zero body not supported");
respond.send_reset(h2::Reason::INTERNAL_ERROR);
return Poll::Ready(Ok(()));
}
let (pending, upgrade) = crate::upgrade::pending();
debug_assert!(parts.extensions.get::<OnUpgrade>().is_none());
parts.extensions.insert(upgrade);
(
Request::from_parts(parts, crate::Body::empty()),
Some(ConnectParts {
pending,
ping,
recv_stream: stream,
}),
)
};
if let Some(protocol) = req.extensions_mut().remove::<h2::ext::Protocol>() {
req.extensions_mut().insert(Protocol::from_inner(protocol));
}
let fut = H2Stream::new(service.call(req), connect_parts, respond);
exec.execute_h2stream(fut);
}
Some(Err(e)) => {
return Poll::Ready(Err(crate::Error::new_h2(e)));
}
None => {
// no more incoming streams...
if let Some((ref ping, _)) = self.ping {
ping.ensure_not_timed_out()?;
}
trace!("incoming connection complete");
return Poll::Ready(Ok(()));
}
}
}
}
debug_assert!(
self.closing.is_some(),
"poll_server broke loop without closing"
);
ready!(self.conn.poll_closed(cx).map_err(crate::Error::new_h2))?;
Poll::Ready(Err(self.closing.take().expect("polled after error")))
loop {}
}
fn poll_ping(&mut self, cx: &mut task::Context<'_>) {
if let Some((_, ref mut estimator)) = self.ping {
match estimator.poll(cx) {
Poll::Ready(ping::Ponged::SizeUpdate(wnd)) => {
self.conn.set_target_window_size(wnd);
let _ = self.conn.set_initial_window_size(wnd);
}
#[cfg(feature = "runtime")]
Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
debug!("keep-alive timed out, closing connection");
self.conn.abrupt_shutdown(h2::Reason::NO_ERROR);
}
Poll::Pending => {}
}
}
loop {}
}
}
pin_project! {
#[allow(missing_debug_implementations)]
pub struct H2Stream<F, B>
where
B: HttpBody,
{
reply: SendResponse<SendBuf<B::Data>>,
#[pin]
state: H2StreamState<F, B>,
}
#[allow(missing_debug_implementations)] pub struct H2Stream < F, B > where B :
HttpBody, { reply : SendResponse < SendBuf < B::Data >>, #[pin] state : H2StreamState
< F, B >, }
}
pin_project! {
#[project = H2StreamStateProj]
enum H2StreamState<F, B>
where
B: HttpBody,
{
Service {
#[pin]
fut: F,
connect_parts: Option<ConnectParts>,
},
Body {
#[pin]
pipe: PipeToSendStream<B>,
},
}
#[project = H2StreamStateProj] enum H2StreamState < F, B > where B : HttpBody, {
Service { #[pin] fut : F, connect_parts : Option < ConnectParts >, }, Body { #[pin]
pipe : PipeToSendStream < B >, }, }
}
struct ConnectParts {
pending: Pending,
ping: Recorder,
recv_stream: RecvStream,
}
impl<F, B> H2Stream<F, B>
where
B: HttpBody,
@ -414,26 +145,17 @@ where
connect_parts: Option<ConnectParts>,
respond: SendResponse<SendBuf<B::Data>>,
) -> H2Stream<F, B> {
H2Stream {
reply: respond,
state: H2StreamState::Service { fut, connect_parts },
}
loop {}
}
}
macro_rules! reply {
($me:expr, $res:expr, $eos:expr) => {{
match $me.reply.send_response($res, $eos) {
Ok(tx) => tx,
Err(e) => {
debug!("send response error: {}", e);
$me.reply.send_reset(Reason::INTERNAL_ERROR);
return Poll::Ready(Err(crate::Error::new_h2(e)));
}
}
}};
($me:expr, $res:expr, $eos:expr) => {
{ match $me .reply.send_response($res, $eos) { Ok(tx) => tx, Err(e) => {
debug!("send response error: {}", e); $me .reply
.send_reset(Reason::INTERNAL_ERROR); return Poll::Ready(Err(crate
::Error::new_h2(e))); } } }
};
}
impl<F, B, E> H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
@ -442,92 +164,13 @@ where
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Into<Box<dyn StdError + Send + Sync>>,
{
fn poll2(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
let mut me = self.project();
loop {
let next = match me.state.as_mut().project() {
H2StreamStateProj::Service {
fut: h,
connect_parts,
} => {
let res = match h.poll(cx) {
Poll::Ready(Ok(r)) => r,
Poll::Pending => {
// Response is not yet ready, so we want to check if the client has sent a
// RST_STREAM frame which would cancel the current request.
if let Poll::Ready(reason) =
me.reply.poll_reset(cx).map_err(crate::Error::new_h2)?
{
debug!("stream received RST_STREAM: {:?}", reason);
return Poll::Ready(Err(crate::Error::new_h2(reason.into())));
}
return Poll::Pending;
}
Poll::Ready(Err(e)) => {
let err = crate::Error::new_user_service(e);
warn!("http2 service errored: {}", err);
me.reply.send_reset(err.h2_reason());
return Poll::Ready(Err(err));
}
};
let (head, body) = res.into_parts();
let mut res = ::http::Response::from_parts(head, ());
super::strip_connection_headers(res.headers_mut(), false);
// set Date header if it isn't already set...
res.headers_mut()
.entry(::http::header::DATE)
.or_insert_with(date::update_and_header_value);
if let Some(connect_parts) = connect_parts.take() {
if res.status().is_success() {
if headers::content_length_parse_all(res.headers())
.map_or(false, |len| len != 0)
{
warn!("h2 successful response to CONNECT request with body not supported");
me.reply.send_reset(h2::Reason::INTERNAL_ERROR);
return Poll::Ready(Err(crate::Error::new_user_header()));
}
let send_stream = reply!(me, res, false);
connect_parts.pending.fulfill(Upgraded::new(
H2Upgraded {
ping: connect_parts.ping,
recv_stream: connect_parts.recv_stream,
send_stream: unsafe { UpgradedSendStream::new(send_stream) },
buf: Bytes::new(),
},
Bytes::new(),
));
return Poll::Ready(Ok(()));
}
}
if !body.is_end_stream() {
// automatically set Content-Length from body...
if let Some(len) = body.size_hint().exact() {
headers::set_content_length_if_missing(res.headers_mut(), len);
}
let body_tx = reply!(me, res, false);
H2StreamState::Body {
pipe: PipeToSendStream::new(body, body_tx),
}
} else {
reply!(me, res, true);
return Poll::Ready(Ok(()));
}
}
H2StreamStateProj::Body { pipe } => {
return pipe.poll(cx);
}
};
me.state.set(next);
}
fn poll2(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<crate::Result<()>> {
loop {}
}
}
impl<F, B, E> Future for H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
@ -537,12 +180,7 @@ where
E: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
self.poll2(cx).map(|res| {
if let Err(e) = res {
debug!("stream error: {}", e);
}
})
loop {}
}
}
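
The `poll2` logic above only fills in Content-Length when the response body reports an exact size hint, and sends end-of-stream immediately when the body is already finished. A small sketch of the `HttpBody` calls involved, assuming hyper 0.14 (the literal bodies are arbitrary):

use hyper::body::{Body, HttpBody};

fn main() {
    // A fixed body knows its exact length, which is what lets the server
    // set Content-Length automatically before replying.
    let body = Body::from("hello world");
    assert!(!body.is_end_stream());
    assert_eq!(body.size_hint().exact(), Some(11));

    // An empty body is already at end-of-stream, so the response is sent
    // with EOS set instead of streaming any data frames.
    let empty = Body::empty();
    assert!(empty.is_end_stream());
}
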

View file

@ -1,21 +1,11 @@
//! Pieces pertaining to the HTTP message protocol.
cfg_feature! {
#![feature = "http1"]
pub(crate) mod h1;
pub(crate) use self::h1::Conn;
#[cfg(feature = "client")]
pub(crate) use self::h1::dispatch;
#[cfg(feature = "server")]
pub(crate) use self::h1::ServerTransaction;
#![feature = "http1"] pub (crate) mod h1; pub (crate) use self::h1::Conn;
#[cfg(feature = "client")] pub (crate) use self::h1::dispatch; #[cfg(feature =
"server")] pub (crate) use self::h1::ServerTransaction;
}
#[cfg(feature = "http2")]
pub(crate) mod h2;
/// An Incoming Message head. Includes request/status line, and headers.
#[derive(Debug, Default)]
pub(crate) struct MessageHead<S> {
@ -28,19 +18,15 @@ pub(crate) struct MessageHead<S> {
/// Extensions.
extensions: http::Extensions,
}
/// An incoming request message.
#[cfg(feature = "http1")]
pub(crate) type RequestHead = MessageHead<RequestLine>;
#[derive(Debug, Default, PartialEq)]
#[cfg(feature = "http1")]
pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri);
/// An incoming response message.
#[cfg(all(feature = "http1", feature = "client"))]
pub(crate) type ResponseHead = MessageHead<http::StatusCode>;
#[derive(Debug)]
#[cfg(feature = "http1")]
pub(crate) enum BodyLength {
@ -49,7 +35,6 @@ pub(crate) enum BodyLength {
/// Transfer-Encoding: chunked (if h1)
Unknown,
}
/// Status of when a Dispatcher future completes.
pub(crate) enum Dispatched {
/// Dispatcher completely shutdown connection.
@ -58,14 +43,8 @@ pub(crate) enum Dispatched {
#[cfg(feature = "http1")]
Upgrade(crate::upgrade::Pending),
}
impl MessageHead<http::StatusCode> {
fn into_response<B>(self, body: B) -> http::Response<B> {
let mut res = http::Response::new(body);
*res.status_mut() = self.subject;
*res.headers_mut() = self.headers;
*res.version_mut() = self.version;
*res.extensions_mut() = self.extensions;
res
loop {}
}
}
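
`into_response` is just moving the parsed head onto a typed `http::Response`. A standalone sketch of the same wiring using the `http` crate directly (the `head_into_response` helper is hypothetical, shown only to make the field-by-field copy explicit; extensions are elided):

use http::{HeaderMap, Response, StatusCode, Version};

// Move the parsed status, headers and version onto a typed `http::Response<B>`.
fn head_into_response<B>(
    status: StatusCode,
    headers: HeaderMap,
    version: Version,
    body: B,
) -> Response<B> {
    let mut res = Response::new(body);
    *res.status_mut() = status;
    *res.headers_mut() = headers;
    *res.version_mut() = version;
    res
}

fn main() {
    let res = head_into_response(StatusCode::OK, HeaderMap::new(), Version::HTTP_11, "done");
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.version(), Version::HTTP_11);
}
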

View file

@ -154,10 +154,7 @@ type Fallback<E> = PhantomData<E>;
#[cfg(all(feature = "http1", feature = "http2"))]
impl<E> Fallback<E> {
fn to_h2(&self) -> bool {
match *self {
Fallback::ToHttp2(..) => true,
Fallback::Http1Only => false,
}
loop {}
}
}
#[cfg(all(feature = "http1", feature = "http2"))]
@ -190,21 +187,7 @@ impl Http {
/// Creates a new instance of the HTTP protocol, ready to spawn a server or
/// start accepting connections.
pub(crate) fn new() -> Http {
Http {
exec: Exec::Default,
h1_half_close: false,
h1_keep_alive: true,
h1_title_case_headers: false,
h1_preserve_header_case: false,
#[cfg(all(feature = "http1", feature = "runtime"))]
h1_header_read_timeout: None,
h1_writev: None,
#[cfg(feature = "http2")]
h2_builder: Default::default(),
mode: ConnectionMode::default(),
max_buf_size: None,
pipeline_flush: false,
}
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
@ -215,15 +198,7 @@ impl<E> Http<E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_only(&mut self, val: bool) -> &mut Self {
if val {
self.mode = ConnectionMode::H1Only;
} else {
#[cfg(feature = "http2")]
{
self.mode = ConnectionMode::Fallback;
}
}
self
loop {}
}
/// Set whether HTTP/1 connections should support half-closures.
///
@ -236,8 +211,7 @@ impl<E> Http<E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_half_close(&mut self, val: bool) -> &mut Self {
self.h1_half_close = val;
self
loop {}
}
/// Enables or disables HTTP/1 keep-alive.
///
@ -245,8 +219,7 @@ impl<E> Http<E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_keep_alive(&mut self, val: bool) -> &mut Self {
self.h1_keep_alive = val;
self
loop {}
}
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
@ -257,8 +230,7 @@ impl<E> Http<E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self {
self.h1_title_case_headers = enabled;
self
loop {}
}
/// Set whether to support preserving original header cases.
///
@ -276,8 +248,7 @@ impl<E> Http<E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Self {
self.h1_preserve_header_case = enabled;
self
loop {}
}
/// Set a timeout for reading client request headers. If a client does not
/// transmit the entire header within this time, the connection is closed.
@ -289,8 +260,7 @@ impl<E> Http<E> {
&mut self,
read_timeout: Duration,
) -> &mut Self {
self.h1_header_read_timeout = Some(read_timeout);
self
loop {}
}
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
@ -308,8 +278,7 @@ impl<E> Http<E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_writev(&mut self, val: bool) -> &mut Self {
self.h1_writev = Some(val);
self
loop {}
}
/// Sets whether HTTP2 is required.
///
@ -317,15 +286,7 @@ impl<E> Http<E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_only(&mut self, val: bool) -> &mut Self {
if val {
self.mode = ConnectionMode::H2Only;
} else {
#[cfg(feature = "http1")]
{
self.mode = ConnectionMode::Fallback;
}
}
self
loop {}
}
/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
@ -341,11 +302,7 @@ impl<E> Http<E> {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_stream_window_size = sz;
}
self
loop {}
}
/// Sets the max connection-level flow control for HTTP2.
///
@ -358,11 +315,7 @@ impl<E> Http<E> {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_conn_window_size = sz;
}
self
loop {}
}
/// Sets whether to use an adaptive flow control.
///
@ -372,13 +325,7 @@ impl<E> Http<E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
use proto::h2::SPEC_WINDOW_SIZE;
self.h2_builder.adaptive_window = enabled;
if enabled {
self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
}
self
loop {}
}
/// Sets the maximum frame size to use for HTTP2.
///
@ -391,10 +338,7 @@ impl<E> Http<E> {
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.max_frame_size = sz;
}
self
loop {}
}
/// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
/// connections.
@ -408,8 +352,7 @@ impl<E> Http<E> {
&mut self,
max: impl Into<Option<u32>>,
) -> &mut Self {
self.h2_builder.max_concurrent_streams = max.into();
self
loop {}
}
/// Sets the interval at which HTTP2 Ping frames are sent to keep a
/// connection alive.
@ -428,8 +371,7 @@ impl<E> Http<E> {
&mut self,
interval: impl Into<Option<Duration>>,
) -> &mut Self {
self.h2_builder.keep_alive_interval = interval.into();
self
loop {}
}
/// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
///
@ -445,8 +387,7 @@ impl<E> Http<E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
self.h2_builder.keep_alive_timeout = timeout;
self
loop {}
}
/// Set the maximum write buffer size for each HTTP/2 stream.
///
@ -458,17 +399,14 @@ impl<E> Http<E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
assert!(max <= std::u32::MAX as usize);
self.h2_builder.max_send_buffer_size = max;
self
loop {}
}
/// Enables the [extended CONNECT protocol].
///
/// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
#[cfg(feature = "http2")]
pub(crate) fn http2_enable_connect_protocol(&mut self) -> &mut Self {
self.h2_builder.enable_connect_protocol = true;
self
loop {}
}
/// Sets the max size of received header frames.
///
@ -476,8 +414,7 @@ impl<E> Http<E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self {
self.h2_builder.max_header_list_size = max;
self
loop {}
}
/// Set the maximum buffer size for the connection.
///
@ -489,12 +426,7 @@ impl<E> Http<E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn max_buf_size(&mut self, max: usize) -> &mut Self {
assert!(
max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
"the max_buf_size cannot be smaller than the minimum that h1 specifies."
);
self.max_buf_size = Some(max);
self
loop {}
}
/// Aggregates flushes to better support pipelined responses.
///
@ -502,28 +434,13 @@ impl<E> Http<E> {
///
/// Default is false.
pub(crate) fn pipeline_flush(&mut self, enabled: bool) -> &mut Self {
self.pipeline_flush = enabled;
self
loop {}
}
/// Set the executor used to spawn background tasks.
///
/// Default uses implicit default (like `tokio::spawn`).
pub(crate) fn with_executor<E2>(self, exec: E2) -> Http<E2> {
Http {
exec,
h1_half_close: self.h1_half_close,
h1_keep_alive: self.h1_keep_alive,
h1_title_case_headers: self.h1_title_case_headers,
h1_preserve_header_case: self.h1_preserve_header_case,
#[cfg(all(feature = "http1", feature = "runtime"))]
h1_header_read_timeout: self.h1_header_read_timeout,
h1_writev: self.h1_writev,
#[cfg(feature = "http2")]
h2_builder: self.h2_builder,
mode: self.mode,
max_buf_size: self.max_buf_size,
pipeline_flush: self.pipeline_flush,
}
loop {}
}
/// Bind a connection together with a [`Service`](crate::service::Service).
///
@ -566,55 +483,7 @@ impl<E> Http<E> {
I: AsyncRead + AsyncWrite + Unpin,
E: ConnStreamExec<S::Future, Bd>,
{
#[cfg(feature = "http1")]
macro_rules! h1 {
() => {
{ let mut conn = proto::Conn::new(io); if ! self.h1_keep_alive { conn
.disable_keep_alive(); } if self.h1_half_close { conn
.set_allow_half_close(); } if self.h1_title_case_headers { conn
.set_title_case_headers(); } if self.h1_preserve_header_case { conn
.set_preserve_header_case(); } #[cfg(all(feature = "http1", feature =
"runtime"))] if let Some(header_read_timeout) = self
.h1_header_read_timeout { conn
.set_http1_header_read_timeout(header_read_timeout); } if let
Some(writev) = self.h1_writev { if writev { conn
.set_write_strategy_queue(); } else { conn.set_write_strategy_flatten();
} } conn.set_flush_pipeline(self.pipeline_flush); if let Some(max) = self
.max_buf_size { conn.set_max_buf_size(max); } let sd =
proto::h1::dispatch::Server::new(service); ProtoServer::H1 { h1 :
proto::h1::Dispatcher::new(sd, conn), } }
};
}
let proto = match self.mode {
#[cfg(feature = "http1")]
#[cfg(not(feature = "http2"))]
ConnectionMode::H1Only => h1!(),
#[cfg(feature = "http2")]
#[cfg(feature = "http1")]
ConnectionMode::H1Only | ConnectionMode::Fallback => h1!(),
#[cfg(feature = "http2")]
ConnectionMode::H2Only => {
let rewind_io = Rewind::new(io);
let h2 = proto::h2::Server::new(
rewind_io,
service,
&self.h2_builder,
self.exec.clone(),
);
ProtoServer::H2 { h2 }
}
};
Connection {
conn: Some(proto),
#[cfg(all(feature = "http1", feature = "http2"))]
fallback: if self.mode == ConnectionMode::Fallback {
Fallback::ToHttp2(self.h2_builder.clone(), self.exec.clone())
} else {
Fallback::Http1Only
},
#[cfg(not(all(feature = "http1", feature = "http2")))]
fallback: PhantomData,
}
loop {}
}
}
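
Taken together, `Http` is the per-connection half of the server: configure it once, then call `serve_connection` for each accepted socket. A sketch of how it is typically driven, against the upstream hyper 0.14 public API (`hyper::server::conn::Http`, `service_fn`), assuming a tokio runtime; the address and handler are placeholders:

use std::convert::Infallible;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("hello")))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let listener = TcpListener::bind("127.0.0.1:3000").await?;
    loop {
        let (stream, _peer) = listener.accept().await?;
        tokio::spawn(async move {
            // One `Http` per connection; the builder methods above
            // (keep-alive, header case, h2 windows, ...) are set before serving.
            let mut http = Http::new();
            http.http1_keep_alive(true);
            http.http1_half_close(false);
            if let Err(err) = http.serve_connection(stream, service_fn(hello)).await {
                eprintln!("connection error: {}", err);
            }
        });
    }
}
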
#[cfg(any(feature = "http1", feature = "http2"))]
@ -638,21 +507,7 @@ where
/// pending. If called after `Connection::poll` has resolved, this does
/// nothing.
pub(crate) fn graceful_shutdown(mut self: Pin<&mut Self>) {
match self.conn {
#[cfg(feature = "http1")]
Some(ProtoServer::H1 { ref mut h1, .. }) => {
h1.disable_keep_alive();
}
#[cfg(feature = "http2")]
Some(ProtoServer::H2 { ref mut h2 }) => {
h2.graceful_shutdown();
}
None => {}
#[cfg(not(feature = "http1"))]
Some(ProtoServer::H1 { ref mut h1, .. }) => match h1.0 {}
#[cfg(not(feature = "http2"))]
Some(ProtoServer::H2 { ref mut h2 }) => match h2.0 {}
}
loop {}
}
/// Return the inner IO object, and additional information.
///
@ -664,27 +519,13 @@ where
/// # Panics
/// This method will panic if this connection is using an h2 protocol.
pub(crate) fn into_parts(self) -> Parts<I, S> {
self.try_into_parts().unwrap_or_else(|| panic!("h2 cannot into_inner"))
loop {}
}
/// Return the inner IO object, and additional information, if available.
///
/// This method will return `None` if this connection is using an h2 protocol.
pub(crate) fn try_into_parts(self) -> Option<Parts<I, S>> {
match self.conn.unwrap() {
#[cfg(feature = "http1")]
ProtoServer::H1 { h1, .. } => {
let (io, read_buf, dispatch) = h1.into_inner();
Some(Parts {
io,
read_buf,
service: dispatch.into_service(),
_inner: (),
})
}
ProtoServer::H2 { .. } => None,
#[cfg(not(feature = "http1"))]
ProtoServer::H1 { h1, .. } => match h1.0 {}
}
loop {}
}
/// Poll the connection for completion, but without calling `shutdown`
/// on the underlying IO.
@ -702,35 +543,7 @@ where
S::Future: Unpin,
B: Unpin,
{
loop {
match *self.conn.as_mut().unwrap() {
#[cfg(feature = "http1")]
ProtoServer::H1 { ref mut h1, .. } => {
match ready!(h1.poll_without_shutdown(cx)) {
Ok(()) => return Poll::Ready(Ok(())),
Err(e) => {
#[cfg(feature = "http2")]
match *e.kind() {
Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => {
self.upgrade_h2();
continue;
}
_ => {}
}
return Poll::Ready(Err(e));
}
}
}
#[cfg(feature = "http2")]
ProtoServer::H2 { ref mut h2 } => {
return Pin::new(h2).poll(cx).map_ok(|_| ());
}
#[cfg(not(feature = "http1"))]
ProtoServer::H1 { ref mut h1, .. } => match h1.0 {}
#[cfg(not(feature = "http2"))]
ProtoServer::H2 { ref mut h2 } => match h2.0 {}
};
}
loop {}
}
/// Prevent shutdown of the underlying IO object at the end of service the request,
/// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
@ -760,28 +573,7 @@ where
}
#[cfg(all(feature = "http1", feature = "http2"))]
fn upgrade_h2(&mut self) {
trace!("Trying to upgrade connection to h2");
let conn = self.conn.take();
let (io, read_buf, dispatch) = match conn.unwrap() {
ProtoServer::H1 { h1, .. } => h1.into_inner(),
ProtoServer::H2 { .. } => {
panic!("h2 cannot into_inner");
}
};
let mut rewind_io = Rewind::new(io);
rewind_io.rewind(read_buf);
let (builder, exec) = match self.fallback {
Fallback::ToHttp2(ref builder, ref exec) => (builder, exec),
Fallback::Http1Only => unreachable!("upgrade_h2 with Fallback::Http1Only"),
};
let h2 = proto::h2::Server::new(
rewind_io,
dispatch.into_service(),
builder,
exec.clone(),
);
debug_assert!(self.conn.is_none());
self.conn = Some(ProtoServer::H2 { h2 });
loop {}
}
/// Enable this connection to support higher-level HTTP upgrades.
///
@ -790,9 +582,7 @@ where
where
I: Send,
{
UpgradeableConnection {
inner: self,
}
loop {}
}
}
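
`graceful_shutdown` only takes effect while the `Connection` keeps being polled, so the usual pattern is to race the connection against a shutdown signal and then continue driving it. A sketch under the same hyper 0.14 assumptions (the oneshot channel as the signal source is arbitrary):

use std::convert::Infallible;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpStream;
use tokio::sync::oneshot;

async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::empty()))
}

// Drive one connection, but stop taking new requests once `shutdown` fires.
async fn serve_until(stream: TcpStream, shutdown: oneshot::Receiver<()>) {
    let conn = Http::new().serve_connection(stream, service_fn(handle));
    tokio::pin!(conn);
    tokio::select! {
        res = conn.as_mut() => {
            if let Err(err) = res {
                eprintln!("connection error: {}", err);
            }
        }
        _ = shutdown => {
            // Disables keep-alive (h1) or sends GOAWAY (h2), then keeps
            // polling so in-flight requests can complete.
            conn.as_mut().graceful_shutdown();
            if let Err(err) = conn.await {
                eprintln!("error during shutdown: {}", err);
            }
        }
    }
}
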
#[cfg(any(feature = "http1", feature = "http2"))]
@ -807,31 +597,7 @@ where
{
type Output = crate::Result<()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
loop {
match ready!(Pin::new(self.conn.as_mut().unwrap()).poll(cx)) {
Ok(done) => {
match done {
proto::Dispatched::Shutdown => {}
#[cfg(feature = "http1")]
proto::Dispatched::Upgrade(pending) => {
pending.manual();
}
};
return Poll::Ready(Ok(()));
}
Err(e) => {
#[cfg(feature = "http1")] #[cfg(feature = "http2")]
match *e.kind() {
Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => {
self.upgrade_h2();
continue;
}
_ => {}
}
return Poll::Ready(Err(e));
}
}
}
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
@ -840,22 +606,22 @@ where
S: HttpService<Body>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
impl Default for ConnectionMode {
#[cfg(all(feature = "http1", feature = "http2"))]
fn default() -> ConnectionMode {
ConnectionMode::Fallback
loop {}
}
#[cfg(all(feature = "http1", not(feature = "http2")))]
fn default() -> ConnectionMode {
ConnectionMode::H1Only
loop {}
}
#[cfg(all(not(feature = "http1"), feature = "http2"))]
fn default() -> ConnectionMode {
ConnectionMode::H2Only
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
@ -870,16 +636,7 @@ where
{
type Output = crate::Result<proto::Dispatched>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.project() {
#[cfg(feature = "http1")]
ProtoServerProj::H1 { h1, .. } => h1.poll(cx),
#[cfg(feature = "http2")]
ProtoServerProj::H2 { h2 } => h2.poll(cx),
#[cfg(not(feature = "http1"))]
ProtoServerProj::H1 { h1, .. } => match h1.0 {}
#[cfg(not(feature = "http2"))]
ProtoServerProj::H2 { h2 } => match h2.0 {}
}
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
@ -907,7 +664,7 @@ mod upgrades {
/// This `Connection` should continue to be polled until shutdown
/// can finish.
pub(crate) fn graceful_shutdown(mut self: Pin<&mut Self>) {
Pin::new(&mut self.inner).graceful_shutdown()
loop {}
}
}
impl<I, B, S, E> Future for UpgradeableConnection<I, S, E>
@ -924,38 +681,7 @@ mod upgrades {
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Self::Output> {
loop {
match ready!(Pin::new(self.inner.conn.as_mut().unwrap()).poll(cx)) {
Ok(proto::Dispatched::Shutdown) => return Poll::Ready(Ok(())),
#[cfg(feature = "http1")]
Ok(proto::Dispatched::Upgrade(pending)) => {
match self.inner.conn.take() {
Some(ProtoServer::H1 { h1, .. }) => {
let (io, buf, _) = h1.into_inner();
pending.fulfill(Upgraded::new(io, buf));
return Poll::Ready(Ok(()));
}
_ => {
drop(pending);
unreachable!("Upgrade expects h1")
}
};
}
Err(e) => {
#[cfg(feature = "http1")] #[cfg(feature = "http2")]
match *e.kind() {
Kind::Parse(
Parse::VersionH2,
) if self.inner.fallback.to_h2() => {
self.inner.upgrade_h2();
continue;
}
_ => {}
}
return Poll::Ready(Err(e));
}
}
}
loop {}
}
}
}

View file

@ -39,10 +39,7 @@ pub struct Builder<I, E = Exec> {
impl<I> Server<I, ()> {
/// Starts a [`Builder`](Builder) with the provided incoming stream.
pub fn builder(incoming: I) -> Builder<I> {
Builder {
incoming,
protocol: Http_::new(),
}
loop {}
}
}
#[cfg(feature = "tcp")]
@ -62,13 +59,13 @@ impl Server<AddrIncoming, ()> {
}
/// Tries to bind to the provided address, and returns a [`Builder`](Builder).
pub(crate) fn try_bind(addr: &SocketAddr) -> crate::Result<Builder<AddrIncoming>> {
AddrIncoming::new(addr).map(Server::builder)
loop {}
}
/// Create a new instance from a `std::net::TcpListener` instance.
pub(crate) fn from_tcp(
listener: StdTcpListener,
) -> Result<Builder<AddrIncoming>, crate::Error> {
AddrIncoming::from_std(listener).map(Server::builder)
loop {}
}
}
#[cfg(feature = "tcp")]
@ -79,7 +76,7 @@ impl Server<AddrIncoming, ()> {
impl<S, E> Server<AddrIncoming, S, E> {
/// Returns the local address that this server is bound to.
pub(crate) fn local_addr(&self) -> SocketAddr {
self.incoming.local_addr()
loop {}
}
}
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
@ -135,35 +132,13 @@ where
F: Future<Output = ()>,
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
{
Graceful::new(self, signal)
loop {}
}
fn poll_next_(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<crate::Result<Connecting<IO, S::Future, E>>>> {
let me = self.project();
match ready!(me.make_service.poll_ready_ref(cx)) {
Ok(()) => {}
Err(e) => {
trace!("make_service closed");
return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e))));
}
}
if let Some(item) = ready!(me.incoming.poll_accept(cx)) {
let io = item.map_err(crate::Error::new_accept)?;
let new_fut = me.make_service.make_service_ref(&io);
Poll::Ready(
Some(
Ok(Connecting {
future: new_fut,
io: Some(io),
protocol: me.protocol.clone(),
}),
),
)
} else {
Poll::Ready(None)
}
loop {}
}
pub(super) fn poll_watch<W>(
mut self: Pin<&mut Self>,
@ -174,14 +149,7 @@ where
E: NewSvcExec<IO, S::Future, S::Service, E, W>,
W: Watcher<IO, S::Service, E>,
{
loop {
if let Some(connecting) = ready!(self.as_mut().poll_next_(cx) ?) {
let fut = NewSvcTask::new(connecting, watcher.clone());
self.as_mut().project().protocol.exec.execute_new_svc(fut);
} else {
return Poll::Ready(Ok(()));
}
}
loop {}
}
}
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
@ -211,9 +179,7 @@ where
}
impl<I: fmt::Debug, S: fmt::Debug> fmt::Debug for Server<I, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut st = f.debug_struct("Server");
st.field("listener", &self.incoming);
st.finish()
loop {}
}
}
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
@ -222,7 +188,7 @@ impl<I, E> Builder<I, E> {
///
/// For a more convenient constructor, see [`Server::bind`](Server::bind).
pub(crate) fn new(incoming: I, protocol: Http_<E>) -> Self {
Builder { incoming, protocol }
loop {}
}
/// Sets whether to use keep-alive for HTTP/1 connections.
///
@ -230,8 +196,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_keepalive(mut self, val: bool) -> Self {
self.protocol.http1_keep_alive(val);
self
loop {}
}
/// Set whether HTTP/1 connections should support half-closures.
///
@ -244,8 +209,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_half_close(mut self, val: bool) -> Self {
self.protocol.http1_half_close(val);
self
loop {}
}
/// Set the maximum buffer size.
///
@ -253,14 +217,12 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_max_buf_size(mut self, val: usize) -> Self {
self.protocol.max_buf_size(val);
self
loop {}
}
#[doc(hidden)]
#[cfg(feature = "http1")]
pub fn http1_pipeline_flush(mut self, val: bool) -> Self {
self.protocol.pipeline_flush(val);
self
loop {}
}
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
@ -276,8 +238,7 @@ impl<I, E> Builder<I, E> {
/// mode to use
#[cfg(feature = "http1")]
pub(crate) fn http1_writev(mut self, enabled: bool) -> Self {
self.protocol.http1_writev(enabled);
self
loop {}
}
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
@ -288,8 +249,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_title_case_headers(mut self, val: bool) -> Self {
self.protocol.http1_title_case_headers(val);
self
loop {}
}
/// Set whether to support preserving original header cases.
///
@ -307,8 +267,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_preserve_header_case(mut self, val: bool) -> Self {
self.protocol.http1_preserve_header_case(val);
self
loop {}
}
/// Set a timeout for reading client request headers. If a client does not
/// transmit the entire header within this time, the connection is closed.
@ -317,8 +276,7 @@ impl<I, E> Builder<I, E> {
#[cfg(all(feature = "http1", feature = "runtime"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))]
pub(crate) fn http1_header_read_timeout(mut self, read_timeout: Duration) -> Self {
self.protocol.http1_header_read_timeout(read_timeout);
self
loop {}
}
/// Sets whether HTTP/1 is required.
///
@ -326,8 +284,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_only(mut self, val: bool) -> Self {
self.protocol.http1_only(val);
self
loop {}
}
/// Sets whether HTTP/2 is required.
///
@ -335,8 +292,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_only(mut self, val: bool) -> Self {
self.protocol.http2_only(val);
self
loop {}
}
/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
@ -352,8 +308,7 @@ impl<I, E> Builder<I, E> {
mut self,
sz: impl Into<Option<u32>>,
) -> Self {
self.protocol.http2_initial_stream_window_size(sz.into());
self
loop {}
}
/// Sets the max connection-level flow control for HTTP2
///
@ -366,8 +321,7 @@ impl<I, E> Builder<I, E> {
mut self,
sz: impl Into<Option<u32>>,
) -> Self {
self.protocol.http2_initial_connection_window_size(sz.into());
self
loop {}
}
/// Sets whether to use an adaptive flow control.
///
@ -377,8 +331,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_adaptive_window(mut self, enabled: bool) -> Self {
self.protocol.http2_adaptive_window(enabled);
self
loop {}
}
/// Sets the maximum frame size to use for HTTP2.
///
@ -388,8 +341,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_frame_size(mut self, sz: impl Into<Option<u32>>) -> Self {
self.protocol.http2_max_frame_size(sz);
self
loop {}
}
/// Sets the max size of received header frames.
///
@ -397,8 +349,7 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_header_list_size(mut self, max: u32) -> Self {
self.protocol.http2_max_header_list_size(max);
self
loop {}
}
/// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
/// connections.
@ -412,8 +363,7 @@ impl<I, E> Builder<I, E> {
mut self,
max: impl Into<Option<u32>>,
) -> Self {
self.protocol.http2_max_concurrent_streams(max.into());
self
loop {}
}
/// Set the maximum write buffer size for each HTTP/2 stream.
///
@ -425,25 +375,20 @@ impl<I, E> Builder<I, E> {
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_send_buf_size(mut self, max: usize) -> Self {
self.protocol.http2_max_send_buf_size(max);
self
loop {}
}
/// Enables the [extended CONNECT protocol].
///
/// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
#[cfg(feature = "http2")]
pub(crate) fn http2_enable_connect_protocol(mut self) -> Self {
self.protocol.http2_enable_connect_protocol();
self
loop {}
}
/// Sets the `Executor` to deal with connection tasks.
///
/// Default is `tokio::spawn`.
pub(crate) fn executor<E2>(self, executor: E2) -> Builder<I, E2> {
Builder {
incoming: self.incoming,
protocol: self.protocol.with_executor(executor),
}
loop {}
}
///
pub fn serve<S, B>(self, _: S) -> Server<I, S>
@ -465,24 +410,20 @@ impl<E> Builder<AddrIncoming, E> {
///
/// If `None` is specified, keepalive is disabled.
pub(crate) fn tcp_keepalive(mut self, keepalive: Option<Duration>) -> Self {
self.incoming.set_keepalive(keepalive);
self
loop {}
}
/// Set the duration between two successive TCP keepalive retransmissions,
/// if acknowledgement to the previous keepalive transmission is not received.
pub(crate) fn tcp_keepalive_interval(mut self, interval: Option<Duration>) -> Self {
self.incoming.set_keepalive_interval(interval);
self
loop {}
}
/// Set the number of retransmissions to be carried out before declaring that the remote end is not available.
pub(crate) fn tcp_keepalive_retries(mut self, retries: Option<u32>) -> Self {
self.incoming.set_keepalive_retries(retries);
self
loop {}
}
/// Set the value of `TCP_NODELAY` option for accepted connections.
pub(crate) fn tcp_nodelay(mut self, enabled: bool) -> Self {
self.incoming.set_nodelay(enabled);
self
loop {}
}
/// Set whether to sleep on accept errors.
///
@ -500,8 +441,7 @@ impl<E> Builder<AddrIncoming, E> {
///
/// For more details see [`AddrIncoming::set_sleep_on_errors`]
pub(crate) fn tcp_sleep_on_accept_errors(mut self, val: bool) -> Self {
self.incoming.set_sleep_on_errors(val);
self
loop {}
}
}
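
Each of these `Builder` methods simply forwards to the inner protocol config or to the `AddrIncoming` and returns the builder, so they chain. A hedged example of the chained form against the public hyper 0.14 `Server::bind` API (address and values are arbitrary):

use std::convert::Infallible;
use std::net::SocketAddr;
use std::time::Duration;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("ok")))
        }))
    });

    let addr = SocketAddr::from(([127, 0, 0, 1], 8080));
    // Each call below forwards to the protocol config or the AddrIncoming
    // and hands the builder back, which is why they chain.
    Server::bind(&addr)
        .tcp_nodelay(true)
        .tcp_keepalive(Some(Duration::from_secs(60)))
        .http1_half_close(false)
        .serve(make_svc)
        .await
}
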
pub trait Watcher<I, S: HttpService<Body>, E>: Clone {
@ -521,7 +461,7 @@ where
{
type Future = UpgradeableConnection<I, S, E>;
fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future {
conn
loop {}
}
}
pub(crate) mod new_svc {
@ -546,12 +486,7 @@ pub(crate) mod new_svc {
}
impl<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> NewSvcTask<I, N, S, E, W> {
pub(super) fn new(connecting: Connecting<I, N, E>, watcher: W) -> Self {
NewSvcTask {
state: State::Connecting {
connecting,
watcher,
},
}
loop {}
}
}
impl<I, N, S, NE, B, E, W> Future for NewSvcTask<I, N, S, E, W>
@ -590,9 +525,6 @@ where
{
type Output = Result<Connection<I, S, E>, FE>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
let service = ready!(me.future.poll(cx))?;
let io = Option::take(&mut me.io).expect("polled after complete");
Poll::Ready(Ok(me.protocol.serve_connection(io, service)))
loop {}
}
}

View file

@ -8,6 +8,6 @@ pub(crate) struct Server<I, S, E = Exec> {
}
impl<I: fmt::Debug, S: fmt::Debug> fmt::Debug for Server<I, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Server").finish()
loop {}
}
}

View file

@ -1,9 +1,7 @@
use std::error::Error as StdError;
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::debug;
use super::accept::Accept;
use super::conn::UpgradeableConnection;
use super::server::{Server, Watcher};
@ -12,42 +10,20 @@ use crate::common::drain::{self, Draining, Signal, Watch, Watching};
use crate::common::exec::{ConnStreamExec, NewSvcExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::{HttpService, MakeServiceRef};
pin_project! {
#[allow(missing_debug_implementations)]
pub struct Graceful<I, S, F, E> {
#[pin]
state: State<I, S, F, E>,
}
#[allow(missing_debug_implementations)] pub struct Graceful < I, S, F, E > { #[pin]
state : State < I, S, F, E >, }
}
pin_project! {
#[project = StateProj]
pub(super) enum State<I, S, F, E> {
Running {
drain: Option<(Signal, Watch)>,
#[pin]
server: Server<I, S, E>,
#[pin]
signal: F,
},
Draining { draining: Draining },
}
#[project = StateProj] pub (super) enum State < I, S, F, E > { Running { drain :
Option < (Signal, Watch) >, #[pin] server : Server < I, S, E >, #[pin] signal : F, },
Draining { draining : Draining }, }
}
impl<I, S, F, E> Graceful<I, S, F, E> {
pub(super) fn new(server: Server<I, S, E>, signal: F) -> Self {
let drain = Some(drain::channel());
Graceful {
state: State::Running {
drain,
server,
signal,
},
}
loop {}
}
}
impl<I, IO, IE, S, B, F, E> Future for Graceful<I, S, F, E>
where
I: Accept<Conn = IO, Error = IE>,
@ -62,43 +38,13 @@ where
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
{
type Output = crate::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
loop {
let next = {
match me.state.as_mut().project() {
StateProj::Running {
drain,
server,
signal,
} => match signal.poll(cx) {
Poll::Ready(()) => {
debug!("signal received, starting graceful shutdown");
let sig = drain.take().expect("drain channel").0;
State::Draining {
draining: sig.drain(),
}
}
Poll::Pending => {
let watch = drain.as_ref().expect("drain channel").1.clone();
return server.poll_watch(cx, &GracefulWatcher(watch));
}
},
StateProj::Draining { ref mut draining } => {
return Pin::new(draining).poll(cx).map(Ok);
}
}
};
me.state.set(next);
}
loop {}
}
}
#[allow(missing_debug_implementations)]
#[derive(Clone)]
pub struct GracefulWatcher(Watch);
impl<I, S, E> Watcher<I, S, E> for GracefulWatcher
where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
@ -107,14 +53,14 @@ where
S::ResBody: 'static,
<S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Future =
Watching<UpgradeableConnection<I, S, E>, fn(Pin<&mut UpgradeableConnection<I, S, E>>)>;
type Future = Watching<
UpgradeableConnection<I, S, E>,
fn(Pin<&mut UpgradeableConnection<I, S, E>>),
>;
fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future {
self.0.clone().watch(conn, on_drain)
loop {}
}
}
fn on_drain<I, S, E>(conn: Pin<&mut UpgradeableConnection<I, S, E>>)
where
S: HttpService<Body>,
@ -124,5 +70,5 @@ where
<S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, S::ResBody>,
{
conn.graceful_shutdown()
loop {}
}
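
From the outside, this machinery is reached through `with_graceful_shutdown`: once the supplied signal future resolves, the server stops accepting and drains in-flight connections. A sketch assuming hyper 0.14 plus tokio's `signal` feature (Ctrl+C is only one possible trigger; any `Future<Output = ()>` works):

use std::convert::Infallible;
use std::net::SocketAddr;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

async fn shutdown_signal() {
    // Resolve on Ctrl+C; this is the `signal` argument to `with_graceful_shutdown`.
    tokio::signal::ctrl_c().await.expect("install Ctrl+C handler");
}

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("draining soon")))
        }))
    });

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    // Once the signal resolves, the server stops accepting and drains
    // in-flight connections before this future completes.
    Server::bind(&addr)
        .serve(make_svc)
        .with_graceful_shutdown(shutdown_signal())
        .await
}
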

View file

@ -3,107 +3,99 @@ use std::io;
use std::net::{SocketAddr, TcpListener as StdTcpListener};
use std::time::Duration;
use socket2::TcpKeepalive;
use tokio::net::TcpListener;
use tokio::time::Sleep;
use tracing::{debug, error, trace};
use crate::common::{task, Future, Pin, Poll};
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
#[allow(unreachable_pub)]
pub use self::addr_stream::AddrStream;
use super::accept::Accept;
#[derive(Default, Debug, Clone, Copy)]
struct TcpKeepaliveConfig {
time: Option<Duration>,
interval: Option<Duration>,
retries: Option<u32>,
}
impl TcpKeepaliveConfig {
/// Converts into a `socket2::TcpKeepalive` if there is any keep-alive configuration.
fn into_socket2(self) -> Option<TcpKeepalive> {
let mut dirty = false;
let mut ka = TcpKeepalive::new();
if let Some(time) = self.time {
ka = ka.with_time(time);
dirty = true
}
if let Some(interval) = self.interval {
ka = Self::ka_with_interval(ka, interval, &mut dirty)
};
if let Some(retries) = self.retries {
ka = Self::ka_with_retries(ka, retries, &mut dirty)
};
if dirty {
Some(ka)
} else {
None
}
loop {}
}
#[cfg(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
))]
fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive {
*dirty = true;
ka.with_interval(interval)
#[cfg(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
)
)]
fn ka_with_interval(
ka: TcpKeepalive,
interval: Duration,
dirty: &mut bool,
) -> TcpKeepalive {
loop {}
}
#[cfg(not(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
)))]
#[cfg(
not(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
)
)
)]
fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive {
ka // no-op as keepalive interval is not supported on this platform
loop {}
}
#[cfg(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive {
*dirty = true;
ka.with_retries(retries)
#[cfg(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
)
)]
fn ka_with_retries(
ka: TcpKeepalive,
retries: u32,
dirty: &mut bool,
) -> TcpKeepalive {
loop {}
}
#[cfg(not(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
)))]
#[cfg(
not(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
)
)
)]
fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive {
ka // no-op as keepalive retries is not supported on this platform
loop {}
}
}
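
The `into_socket2` conversion above ends in a `socket2::TcpKeepalive` being applied through a `SockRef`, the same way `poll_next_` does for accepted sockets. A minimal runnable sketch of that application (socket2 0.4; only the idle time is set because interval/retries are platform-gated, exactly as the cfg blocks above encode; the loopback connection exists only to have a socket to configure):

use std::net::{TcpListener, TcpStream};
use std::time::Duration;
use socket2::{SockRef, TcpKeepalive};

fn main() -> std::io::Result<()> {
    // Loopback connection just to obtain an accepted socket to configure.
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let _client = TcpStream::connect(listener.local_addr()?)?;
    let (accepted, _peer) = listener.accept()?;

    // Set only the idle time; interval and retries would need the
    // platform-specific `with_interval`/`with_retries` calls shown above.
    let ka = TcpKeepalive::new().with_time(Duration::from_secs(60));
    SockRef::from(&accepted).set_tcp_keepalive(&ka)
}
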
/// A stream of connections from binding to an address.
#[must_use = "streams do nothing unless polled"]
pub struct AddrIncoming {
@ -114,73 +106,44 @@ pub struct AddrIncoming {
tcp_nodelay: bool,
timeout: Option<Pin<Box<Sleep>>>,
}
impl AddrIncoming {
pub(super) fn new(addr: &SocketAddr) -> crate::Result<Self> {
let std_listener = StdTcpListener::bind(addr).map_err(crate::Error::new_listen)?;
AddrIncoming::from_std(std_listener)
loop {}
}
pub(super) fn from_std(std_listener: StdTcpListener) -> crate::Result<Self> {
// TcpListener::from_std doesn't set O_NONBLOCK
std_listener
.set_nonblocking(true)
.map_err(crate::Error::new_listen)?;
let listener = TcpListener::from_std(std_listener).map_err(crate::Error::new_listen)?;
AddrIncoming::from_listener(listener)
loop {}
}
/// Creates a new `AddrIncoming` binding to the provided socket address.
pub fn bind(addr: &SocketAddr) -> crate::Result<Self> {
AddrIncoming::new(addr)
loop {}
}
/// Creates a new `AddrIncoming` from an existing `tokio::net::TcpListener`.
pub fn from_listener(listener: TcpListener) -> crate::Result<Self> {
let addr = listener.local_addr().map_err(crate::Error::new_listen)?;
Ok(AddrIncoming {
listener,
addr,
sleep_on_errors: true,
tcp_keepalive_config: TcpKeepaliveConfig::default(),
tcp_nodelay: false,
timeout: None,
})
loop {}
}
/// Get the local address bound to this listener.
pub fn local_addr(&self) -> SocketAddr {
self.addr
loop {}
}
/// Set the duration to remain idle before sending TCP keepalive probes.
///
/// If `None` is specified, keepalive is disabled.
pub fn set_keepalive(&mut self, time: Option<Duration>) -> &mut Self {
self.tcp_keepalive_config.time = time;
self
loop {}
}
/// Set the duration between two successive TCP keepalive retransmissions,
/// if acknowledgement to the previous keepalive transmission is not received.
pub fn set_keepalive_interval(&mut self, interval: Option<Duration>) -> &mut Self {
self.tcp_keepalive_config.interval = interval;
self
loop {}
}
/// Set the number of retransmissions to be carried out before declaring that the remote end is not available.
pub fn set_keepalive_retries(&mut self, retries: Option<u32>) -> &mut Self {
self.tcp_keepalive_config.retries = retries;
self
loop {}
}
/// Set the value of `TCP_NODELAY` option for accepted connections.
pub fn set_nodelay(&mut self, enabled: bool) -> &mut Self {
self.tcp_nodelay = enabled;
self
loop {}
}
/// Set whether to sleep on accept errors.
///
/// A possible scenario is that the process has hit the max open files
@ -197,77 +160,25 @@ impl AddrIncoming {
///
/// Default is `true`.
pub fn set_sleep_on_errors(&mut self, val: bool) {
self.sleep_on_errors = val;
loop {}
}
fn poll_next_(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<AddrStream>> {
// Check if a previous timeout is active that was set by IO errors.
if let Some(ref mut to) = self.timeout {
ready!(Pin::new(to).poll(cx));
}
self.timeout = None;
loop {
match ready!(self.listener.poll_accept(cx)) {
Ok((socket, remote_addr)) => {
if let Some(tcp_keepalive) = &self.tcp_keepalive_config.into_socket2() {
let sock_ref = socket2::SockRef::from(&socket);
if let Err(e) = sock_ref.set_tcp_keepalive(tcp_keepalive) {
trace!("error trying to set TCP keepalive: {}", e);
}
}
if let Err(e) = socket.set_nodelay(self.tcp_nodelay) {
trace!("error trying to set TCP nodelay: {}", e);
}
let local_addr = socket.local_addr()?;
return Poll::Ready(Ok(AddrStream::new(socket, remote_addr, local_addr)));
}
Err(e) => {
// Connection errors can be ignored directly, continue by
// accepting the next request.
if is_connection_error(&e) {
debug!("accepted connection already errored: {}", e);
continue;
}
if self.sleep_on_errors {
error!("accept error: {}", e);
// Sleep 1s.
let mut timeout = Box::pin(tokio::time::sleep(Duration::from_secs(1)));
match timeout.as_mut().poll(cx) {
Poll::Ready(()) => {
// Wow, it's been a second already? Ok then...
continue;
}
Poll::Pending => {
self.timeout = Some(timeout);
return Poll::Pending;
}
}
} else {
return Poll::Ready(Err(e));
}
}
}
}
fn poll_next_(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<io::Result<AddrStream>> {
loop {}
}
}
impl Accept for AddrIncoming {
type Conn = AddrStream;
type Error = io::Error;
fn poll_accept(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
let result = ready!(self.poll_next_(cx));
Poll::Ready(Some(result))
loop {}
}
}
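
Because `AddrIncoming` implements `Accept`, it can be constructed explicitly and handed to `Server::builder`, which is useful when the listener needs to be prepared first. A sketch against the hyper 0.14 public API (port 0 and the empty handler are placeholders):

use std::convert::Infallible;
use hyper::server::conn::AddrIncoming;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Bind the listener yourself (e.g. to tweak it first), then hand it to
    // hyper as an `Accept` implementation via `AddrIncoming`.
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let mut incoming = AddrIncoming::from_listener(listener)?;
    incoming.set_nodelay(true);
    println!("listening on {}", incoming.local_addr());

    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::empty()))
        }))
    });

    Server::builder(incoming).serve(make_svc).await?;
    Ok(())
}
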
/// This function defines errors that are per-connection, which basically
/// means that if we get this error from the `accept()` system call, the
/// next connection might still be ready to be accepted.
@ -276,25 +187,13 @@ impl Accept for AddrIncoming {
/// The timeout is useful for handling resource exhaustion errors like ENFILE
/// and EMFILE. Otherwise, we could enter a tight accept loop.
fn is_connection_error(e: &io::Error) -> bool {
matches!(
e.kind(),
io::ErrorKind::ConnectionRefused
| io::ErrorKind::ConnectionAborted
| io::ErrorKind::ConnectionReset
)
loop {}
}
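A quick illustration of the classification above (editor's sketch, not part of the diff; the function name is illustrative): refused/aborted/reset errors are per-connection and safe to skip, while anything else falls through to the sleep-or-fail path.

use std::io;

fn classification_examples() {
    // Per-connection: log it and keep accepting.
    assert!(is_connection_error(&io::Error::from(io::ErrorKind::ConnectionReset)));
    // Anything else is handled by the sleep-on-errors branch or returned to the caller.
    assert!(!is_connection_error(&io::Error::from(io::ErrorKind::WouldBlock)));
}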
impl fmt::Debug for AddrIncoming {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("AddrIncoming")
.field("addr", &self.addr)
.field("sleep_on_errors", &self.sleep_on_errors)
.field("tcp_keepalive_config", &self.tcp_keepalive_config)
.field("tcp_nodelay", &self.tcp_nodelay)
.finish()
loop {}
}
}
mod addr_stream {
use std::io;
use std::net::SocketAddr;
@@ -302,51 +201,35 @@ mod addr_stream {
use std::os::unix::io::{AsRawFd, RawFd};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use crate::common::{task, Pin, Poll};
pin_project_lite::pin_project! {
/// A transport yielded by `AddrIncoming`.
#[derive(Debug)]
pub struct AddrStream {
#[pin]
inner: TcpStream,
pub(super) remote_addr: SocketAddr,
pub(super) local_addr: SocketAddr
}
#[doc = " A transport returned yieled by `AddrIncoming`."] #[derive(Debug)] pub
struct AddrStream { #[pin] inner : TcpStream, pub (super) remote_addr :
SocketAddr, pub (super) local_addr : SocketAddr }
}
impl AddrStream {
pub(super) fn new(
tcp: TcpStream,
remote_addr: SocketAddr,
local_addr: SocketAddr,
) -> AddrStream {
AddrStream {
inner: tcp,
remote_addr,
local_addr,
}
loop {}
}
/// Returns the remote (peer) address of this connection.
#[inline]
pub fn remote_addr(&self) -> SocketAddr {
self.remote_addr
loop {}
}
/// Returns the local address of this connection.
#[inline]
pub fn local_addr(&self) -> SocketAddr {
self.local_addr
loop {}
}
/// Consumes the AddrStream and returns the underlying IO object
#[inline]
pub fn into_inner(self) -> TcpStream {
self.inner
loop {}
}
/// Attempt to receive data on the socket, without removing that data
/// from the queue, registering the current task for wakeup if data is
/// not yet available.
@@ -355,10 +238,9 @@ mod addr_stream {
cx: &mut task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<io::Result<usize>> {
self.inner.poll_peek(cx, buf)
loop {}
}
}
impl AsyncRead for AddrStream {
#[inline]
fn poll_read(
@@ -366,10 +248,9 @@ mod addr_stream {
cx: &mut task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
self.project().inner.poll_read(cx, buf)
loop {}
}
}
impl AsyncWrite for AddrStream {
#[inline]
fn poll_write(
@@ -377,108 +258,85 @@ mod addr_stream {
cx: &mut task::Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
self.project().inner.poll_write(cx, buf)
loop {}
}
#[inline]
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
self.project().inner.poll_write_vectored(cx, bufs)
loop {}
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
// TCP flush is a noop
Poll::Ready(Ok(()))
fn poll_flush(
self: Pin<&mut Self>,
_cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
loop {}
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().inner.poll_shutdown(cx)
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
loop {}
}
#[inline]
fn is_write_vectored(&self) -> bool {
// Note that since `self.inner` is a `TcpStream`, this could
// *probably* be hard-coded to return `true`...but it seems more
// correct to ask it anyway (maybe we're on some platform without
// scatter-gather IO?)
self.inner.is_write_vectored()
loop {}
}
}
#[cfg(unix)]
impl AsRawFd for AddrStream {
fn as_raw_fd(&self) -> RawFd {
self.inner.as_raw_fd()
loop {}
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use crate::server::tcp::TcpKeepaliveConfig;
#[test]
fn no_tcp_keepalive_config() {
assert!(TcpKeepaliveConfig::default().into_socket2().is_none());
loop {}
}
#[test]
fn tcp_keepalive_time_config() {
let mut kac = TcpKeepaliveConfig::default();
kac.time = Some(Duration::from_secs(60));
if let Some(tcp_keepalive) = kac.into_socket2() {
assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)"));
} else {
panic!("test failed");
}
loop {}
}
#[cfg(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
))]
#[cfg(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
)
)]
#[test]
fn tcp_keepalive_interval_config() {
let mut kac = TcpKeepaliveConfig::default();
kac.interval = Some(Duration::from_secs(1));
if let Some(tcp_keepalive) = kac.into_socket2() {
assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)"));
} else {
panic!("test failed");
}
loop {}
}
#[cfg(any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
#[cfg(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
)
)]
#[test]
fn tcp_keepalive_retries_config() {
let mut kac = TcpKeepaliveConfig::default();
kac.retries = Some(3);
if let Some(tcp_keepalive) = kac.into_socket2() {
assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)"));
} else {
panic!("test failed");
}
loop {}
}
}
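For context on what these tests exercise (a hedged reconstruction by the editor, not code from this diff): `TcpKeepaliveConfig::into_socket2` presumably maps the three optional fields onto a `socket2::TcpKeepalive` and returns `None` when nothing is configured, roughly along these lines; the free-function form and its name are illustrative.

use std::time::Duration;
use socket2::TcpKeepalive;

fn into_socket2_sketch(
    time: Option<Duration>,
    interval: Option<Duration>,
    retries: Option<u32>,
) -> Option<TcpKeepalive> {
    let mut dirty = false;
    let mut ka = TcpKeepalive::new();
    if let Some(t) = time {
        ka = ka.with_time(t);
        dirty = true;
    }
    if let Some(i) = interval {
        ka = ka.with_interval(i); // platform-gated in socket2, hence the cfg lists above
        dirty = true;
    }
    if let Some(r) = retries {
        ka = ka.with_retries(r); // likewise platform-gated
        dirty = true;
    }
    dirty.then(|| ka)
}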


@@ -1,31 +1,27 @@
use std::error::Error as StdError;
use crate::body::HttpBody;
use crate::common::{task, Future, Poll};
use crate::{Request, Response};
/// An asynchronous function from `Request` to `Response`.
pub trait HttpService<ReqBody>: sealed::Sealed<ReqBody> {
/// The `HttpBody` body of the `http::Response`.
type ResBody: HttpBody;
/// The error type that can occur within this `Service`.
///
/// Note: Returning an `Error` to a hyper server will cause the connection
/// to be abruptly aborted. In most cases, it is better to return a `Response`
/// with a 4xx or 5xx status code.
type Error: Into<Box<dyn StdError + Send + Sync>>;
/// The `Future` returned by this `Service`.
type Future: Future<Output = Result<Response<Self::ResBody>, Self::Error>>;
#[doc(hidden)]
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
fn poll_ready(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>>;
#[doc(hidden)]
fn call(&mut self, req: Request<ReqBody>) -> Self::Future;
}
impl<T, B1, B2> HttpService<B1> for T
where
T: tower_service::Service<Request<B1>, Response = Response<B2>>,
@@ -33,26 +29,23 @@ where
T::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type ResBody = B2;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
tower_service::Service::poll_ready(self, cx)
fn poll_ready(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
loop {}
}
fn call(&mut self, req: Request<B1>) -> Self::Future {
tower_service::Service::call(self, req)
loop {}
}
}
impl<T, B1, B2> sealed::Sealed<B1> for T
where
T: tower_service::Service<Request<B1>, Response = Response<B2>>,
B2: HttpBody,
{
}
{}
mod sealed {
pub trait Sealed<T> {}
}
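To make the note about errors above concrete (editor's sketch, assuming hyper 0.14's public `service_fn`, `Body`, and `StatusCode` re-exports; the handler name is illustrative): mapping failures to a 5xx response keeps the connection alive, whereas returning `Err` aborts it.

use std::convert::Infallible;
use hyper::service::service_fn;
use hyper::{Body, Request, Response, StatusCode};

// Report failure as a 500 response instead of an `Err`, which would abort
// the connection as described in the `Error` docs above.
async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::builder()
        .status(StatusCode::INTERNAL_SERVER_ERROR)
        .body(Body::from("something went wrong"))
        .expect("static response parts are valid"))
}

fn build() {
    // `service_fn` wraps the async fn in a tower `Service`; the blanket impl
    // above then makes it usable wherever an `HttpService` is expected.
    let _svc = service_fn(handle);
}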


@@ -30,10 +30,10 @@ where
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
Service::poll_ready(self, cx)
loop {}
}
fn make_connection(&mut self, target: Target) -> Self::Future {
Service::call(self, target)
loop {}
}
}
pub trait MakeServiceRef<Target, ReqBody>: self::sealed::Sealed<(Target, ReqBody)> {
@@ -69,10 +69,10 @@ where
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::MakeError>> {
self.poll_ready(cx)
loop {}
}
fn make_service_ref(&mut self, target: &Target) -> Self::Future {
self.call(target)
loop {}
}
}
impl<T, Target, S, B1, B2> self::sealed::Sealed<(Target, B1)> for T
@@ -123,7 +123,7 @@ where
F: FnMut(&Target) -> Ret,
Ret: Future,
{
MakeServiceFn { f }
loop {}
}
/// `MakeService` returned from [`make_service_fn`]
#[derive(Clone, Copy)]
@@ -143,15 +143,15 @@ where
&mut self,
_cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
loop {}
}
fn call(&mut self, target: &'t Target) -> Self::Future {
(self.f)(target)
loop {}
}
}
impl<F> fmt::Debug for MakeServiceFn<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MakeServiceFn").finish()
loop {}
}
}
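A usage sketch for `make_service_fn` (editor's addition, mirroring the pattern hyper documents elsewhere; the function name, greeting body, and `Body`/`Response` re-exports are assumptions): the outer closure runs once per accepted connection and receives the `&AddrStream` target, the inner service once per request on that connection.

use std::convert::Infallible;
use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response};

fn build_make_service() {
    let _make_svc = make_service_fn(|conn: &AddrStream| {
        // Per-connection setup: capture anything connection-scoped here.
        let remote = conn.remote_addr();
        async move {
            // Per-request service for this connection.
            Ok::<_, Infallible>(service_fn(move |_req: Request<Body>| async move {
                Ok::<_, Infallible>(Response::new(Body::from(format!("hello, {}", remote))))
            }))
        }
    });
}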
mod sealed {


@@ -1,73 +1,27 @@
// TODO: Eventually to be replaced with tower_util::Oneshot.
use pin_project_lite::pin_project;
use tower_service::Service;
use crate::common::{task, Future, Pin, Poll};
pub(crate) fn oneshot<S, Req>(svc: S, req: Req) -> Oneshot<S, Req>
where
S: Service<Req>,
{
Oneshot {
state: State::NotReady { svc, req },
}
loop {}
}
pin_project! {
// A `Future` consuming a `Service` and request, waiting until the `Service`
// is ready, and then calling `Service::call` with the request, and
// waiting for that `Future`.
#[allow(missing_debug_implementations)]
pub struct Oneshot<S: Service<Req>, Req> {
#[pin]
state: State<S, Req>,
}
#[allow(missing_debug_implementations)]
pub struct Oneshot<S: Service<Req>, Req> {
    #[pin]
    state: State<S, Req>,
}
}
pin_project! {
#[project = StateProj]
#[project_replace = StateProjOwn]
enum State<S: Service<Req>, Req> {
NotReady {
svc: S,
req: Req,
},
Called {
#[pin]
fut: S::Future,
},
Tmp,
}
#[project = StateProj]
#[project_replace = StateProjOwn]
enum State<S: Service<Req>, Req> {
    NotReady { svc: S, req: Req },
    Called { #[pin] fut: S::Future },
    Tmp,
}
}
impl<S, Req> Future for Oneshot<S, Req>
where
S: Service<Req>,
{
type Output = Result<S::Response, S::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
loop {
match me.state.as_mut().project() {
StateProj::NotReady { ref mut svc, .. } => {
ready!(svc.poll_ready(cx))?;
// fallthrough out of the match's borrow
}
StateProj::Called { fut } => {
return fut.poll(cx);
}
StateProj::Tmp => unreachable!(),
}
match me.state.as_mut().project_replace(State::Tmp) {
StateProjOwn::NotReady { mut svc, req } => {
me.state.set(State::Called { fut: svc.call(req) });
}
_ => unreachable!(),
}
}
loop {}
}
}
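The TODO at the top of this file already hints at the replacement; for reference (editor's sketch, assuming the `tower` crate with its `util` feature enabled; the wrapper name is illustrative), the crate-private helper above corresponds to `ServiceExt::oneshot`: wait for readiness, call exactly once, await the response.

use tower::{Service, ServiceExt};

// Drive any `Service` through a single request, the same ready-then-call
// dance the `Oneshot` future above implements by hand.
async fn call_once<S, Req>(svc: S, req: Req) -> Result<S::Response, S::Error>
where
    S: Service<Req>,
{
    svc.oneshot(req).await
}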


@@ -1,11 +1,9 @@
use std::error::Error as StdError;
use std::fmt;
use std::marker::PhantomData;
use crate::body::HttpBody;
use crate::common::{task, Future, Poll};
use crate::{Request, Response};
/// Create a `Service` from a function.
///
/// # Example
@@ -29,20 +27,15 @@ where
F: FnMut(Request<R>) -> S,
S: Future,
{
ServiceFn {
f,
_req: PhantomData,
}
loop {}
}
/// Service returned by [`service_fn`]
pub struct ServiceFn<F, R> {
f: F,
_req: PhantomData<fn(R)>,
}
impl<F, ReqBody, Ret, ResBody, E> tower_service::Service<crate::Request<ReqBody>>
for ServiceFn<F, ReqBody>
for ServiceFn<F, ReqBody>
where
F: FnMut(Request<ReqBody>) -> Ret,
ReqBody: HttpBody,
@@ -53,32 +46,30 @@ where
type Response = crate::Response<ResBody>;
type Error = E;
type Future = Ret;
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
fn poll_ready(
&mut self,
_cx: &mut task::Context<'_>,
) -> Poll<Result<(), Self::Error>> {
loop {}
}
fn call(&mut self, req: Request<ReqBody>) -> Self::Future {
(self.f)(req)
loop {}
}
}
impl<F, R> fmt::Debug for ServiceFn<F, R> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("impl Service").finish()
loop {}
}
}
impl<F, R> Clone for ServiceFn<F, R>
where
F: Clone,
{
fn clone(&self) -> Self {
ServiceFn {
f: self.f.clone(),
_req: PhantomData,
}
loop {}
}
}
impl<F, R> Copy for ServiceFn<F, R> where F: Copy {}
impl<F, R> Copy for ServiceFn<F, R>
where
F: Copy,
{}
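A side note on the hand-written `Clone`/`Copy` impls above (editor's annotation): deriving them would add unnecessary `R: Clone`/`R: Copy` bounds, even though `PhantomData<fn(R)>` is always `Clone + Copy`. A reduced, hypothetical example of the same pattern:

use std::marker::PhantomData;

struct Wrapper<F, R> {
    f: F,
    _req: PhantomData<fn(R)>,
}

// Manual impl: only `F` needs to be `Clone`; `#[derive(Clone)]` would also
// (needlessly) require `R: Clone`.
impl<F: Clone, R> Clone for Wrapper<F, R> {
    fn clone(&self) -> Self {
        Wrapper {
            f: self.f.clone(),
            _req: PhantomData,
        }
    }
}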


@@ -95,7 +95,7 @@ pub(crate) struct Parts<T> {
/// - `&mut http::Request<B>`
/// - `&mut http::Response<B>`
pub(crate) fn on<T: sealed::CanUpgrade>(msg: T) -> OnUpgrade {
msg.on_upgrade()
loop {}
}
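A hedged usage sketch of `on` (editor's addition): in upstream hyper 0.14 this function is exported as `hyper::upgrade::on`, whereas here it is `pub(crate)`, so treat the snippet as illustrating the call shape only; the handler name and payload are made up. The returned `OnUpgrade` future resolves to an `Upgraded` IO object once the protocol switch completes.

use hyper::{Body, Request};
use tokio::io::AsyncWriteExt;

async fn after_upgrade(
    mut req: Request<Body>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // `on` accepts `&mut Request<B>` (among the message types listed above)
    // and yields the upgraded connection once the handshake is done.
    let mut upgraded = hyper::upgrade::on(&mut req).await?;
    upgraded.write_all(b"hello over the upgraded connection").await?;
    Ok(())
}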
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) struct Pending {
@@ -103,8 +103,7 @@ pub(super) struct Pending {
}
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn pending() -> (Pending, OnUpgrade) {
let (tx, rx) = oneshot::channel();
(Pending { tx }, OnUpgrade { rx: Some(rx) })
loop {}
}
impl Upgraded {
#[cfg(any(feature = "http1", feature = "http2", test))]
@@ -112,9 +111,7 @@ impl Upgraded {
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
Upgraded {
io: Rewind::new_buffered(Box::new(io), read_buf),
}
loop {}
}
/// Tries to downcast the internal trait object to the type passed.
///
@@ -123,21 +120,7 @@ impl Upgraded {
pub(crate) fn downcast<T: AsyncRead + AsyncWrite + Unpin + 'static>(
self,
) -> Result<Parts<T>, Self> {
let (io, buf) = self.io.into_inner();
match io.__hyper_downcast() {
Ok(t) => {
Ok(Parts {
io: *t,
read_buf: buf,
_inner: (),
})
}
Err(io) => {
Err(Upgraded {
io: Rewind::new_buffered(io, buf),
})
}
}
loop {}
}
}
impl AsyncRead for Upgraded {
@@ -146,7 +129,7 @@ impl AsyncRead for Upgraded {
cx: &mut task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_read(cx, buf)
loop {}
}
}
impl AsyncWrite for Upgraded {
@@ -155,81 +138,66 @@ impl AsyncWrite for Upgraded {
cx: &mut task::Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io).poll_write(cx, buf)
loop {}
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io).poll_write_vectored(cx, bufs)
loop {}
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_flush(cx)
loop {}
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_shutdown(cx)
loop {}
}
fn is_write_vectored(&self) -> bool {
self.io.is_write_vectored()
loop {}
}
}
impl fmt::Debug for Upgraded {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Upgraded").finish()
loop {}
}
}
impl OnUpgrade {
pub(super) fn none() -> Self {
OnUpgrade { rx: None }
loop {}
}
#[cfg(feature = "http1")]
pub(super) fn is_none(&self) -> bool {
self.rx.is_none()
loop {}
}
}
impl Future for OnUpgrade {
type Output = Result<Upgraded, crate::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.rx {
Some(ref mut rx) => {
Pin::new(rx)
.poll(cx)
.map(|res| match res {
Ok(Ok(upgraded)) => Ok(upgraded),
Ok(Err(err)) => Err(err),
Err(_oneshot_canceled) => {
Err(crate::Error::new_canceled().with(UpgradeExpected))
}
})
}
None => Poll::Ready(Err(crate::Error::new_user_no_upgrade())),
}
loop {}
}
}
impl fmt::Debug for OnUpgrade {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OnUpgrade").finish()
loop {}
}
}
#[cfg(any(feature = "http1", feature = "http2"))]
impl Pending {
pub(super) fn fulfill(self, upgraded: Upgraded) {
trace!("pending upgrade fulfill");
let _ = self.tx.send(Ok(upgraded));
loop {}
}
#[cfg(feature = "http1")]
/// Don't fulfill the pending Upgrade, but instead signal that
/// upgrades are handled manually.
pub(super) fn manual(self) {
trace!("pending upgrade handled manually");
let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade()));
loop {}
}
}
/// Error cause returned when an upgrade was expected but canceled
@@ -240,30 +208,22 @@ impl Pending {
struct UpgradeExpected;
impl fmt::Display for UpgradeExpected {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("upgrade expected but not completed")
loop {}
}
}
impl StdError for UpgradeExpected {}
pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static {
fn __hyper_type_id(&self) -> TypeId {
TypeId::of::<Self>()
loop {}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> Io for T {}
impl dyn Io + Send {
fn __hyper_is<T: Io>(&self) -> bool {
let t = TypeId::of::<T>();
self.__hyper_type_id() == t
loop {}
}
fn __hyper_downcast<T: Io>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
if self.__hyper_is::<T>() {
unsafe {
let raw: *mut dyn Io = Box::into_raw(self);
Ok(Box::from_raw(raw as *mut T))
}
} else {
Err(self)
}
loop {}
}
}
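The `__hyper_type_id`/`__hyper_is`/`__hyper_downcast` trio above is a hand-rolled version of `Any`-style downcasting for a custom IO trait object. Below is a self-contained sketch of the same pattern on a standalone trait (editor's addition; every name here is hypothetical): record the concrete `TypeId` through a default method, then reinterpret the box once the ids match.

use std::any::TypeId;

trait Obj: 'static {
    // Default method: monomorphized per concrete type, so through the vtable
    // it reports the erased type's TypeId.
    fn concrete_type_id(&self) -> TypeId {
        TypeId::of::<Self>()
    }
}

impl<T: 'static> Obj for T {}

impl dyn Obj {
    fn is<T: Obj>(&self) -> bool {
        self.concrete_type_id() == TypeId::of::<T>()
    }

    fn downcast<T: Obj>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
        if self.is::<T>() {
            // The TypeId check guarantees the erased type really is `T`.
            let raw: *mut dyn Obj = Box::into_raw(self);
            Ok(unsafe { Box::from_raw(raw as *mut T) })
        } else {
            Err(self)
        }
    }
}

fn demo() {
    let erased: Box<dyn Obj> = Box::new(42u32);
    // Wrong guess: the box is handed back unchanged in the `Err` variant.
    let erased = match erased.downcast::<String>() {
        Err(original) => original,
        Ok(_) => unreachable!("the erased value is a u32, not a String"),
    };
    // Right guess: we get the concrete `Box<u32>` back.
    match erased.downcast::<u32>() {
        Ok(n) => assert_eq!(*n, 42),
        Err(_) => unreachable!("the erased value is a u32"),
    }
}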
mod sealed {
@@ -273,22 +233,22 @@ mod sealed {
}
impl<B> CanUpgrade for http::Request<B> {
fn on_upgrade(mut self) -> OnUpgrade {
self.extensions_mut().remove::<OnUpgrade>().unwrap_or_else(OnUpgrade::none)
loop {}
}
}
impl<B> CanUpgrade for &'_ mut http::Request<B> {
fn on_upgrade(self) -> OnUpgrade {
self.extensions_mut().remove::<OnUpgrade>().unwrap_or_else(OnUpgrade::none)
loop {}
}
}
impl<B> CanUpgrade for http::Response<B> {
fn on_upgrade(mut self) -> OnUpgrade {
self.extensions_mut().remove::<OnUpgrade>().unwrap_or_else(OnUpgrade::none)
loop {}
}
}
impl<B> CanUpgrade for &'_ mut http::Response<B> {
fn on_upgrade(self) -> OnUpgrade {
self.extensions_mut().remove::<OnUpgrade>().unwrap_or_else(OnUpgrade::none)
loop {}
}
}
}
@@ -297,9 +257,7 @@ mod tests {
use super::*;
#[test]
fn upgraded_downcast() {
let upgraded = Upgraded::new(Mock, Bytes::new());
let upgraded = upgraded.downcast::<std::io::Cursor<Vec<u8>>>().unwrap_err();
upgraded.downcast::<Mock>().unwrap();
loop {}
}
struct Mock;
impl AsyncRead for Mock {
@@ -308,7 +266,7 @@ mod tests {
_cx: &mut task::Context<'_>,
_buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
unreachable!("Mock::poll_read")
loop {}
}
}
impl AsyncWrite for Mock {
@@ -317,19 +275,19 @@ mod tests {
_: &mut task::Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Poll::Ready(Ok(buf.len()))
loop {}
}
fn poll_flush(
self: Pin<&mut Self>,
_cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
unreachable!("Mock::poll_flush")
loop {}
}
fn poll_shutdown(
self: Pin<&mut Self>,
_cx: &mut task::Context<'_>,
) -> Poll<io::Result<()>> {
unreachable!("Mock::poll_shutdown")
loop {}
}
}
}