Mirror of https://github.com/Noratrieb/icefun.git

Commit e1ebd97c91 ("loop"), parent 0b89e245d9
73 changed files with 3822 additions and 3822 deletions
@@ -18,13 +18,13 @@ use crate::common::{task, Poll, Unpin};
use crate::proto::{BodyLength, MessageHead};

const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";

/// This handles a connection, which will have been established over an
/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple
/// `Transaction`s over HTTP.
///
/// The connection will determine when a message begins and ends as well as
/// determine if this connection can be kept alive after the message,
/// or if it is complete.
pub(crate) struct Conn<I, B, T> {
    io: Buffered<I, EncodedBuf<B>>,
    state: State,
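`H2_PREFACE` above is the fixed HTTP/2 client connection preface from RFC 7540 §3.5: a connection whose first bytes match it is speaking HTTP/2, not HTTP/1. A minimal sketch of sniffing it from a read buffer (the helper name and buffer handling are illustrative, not hyper's actual read path):

```rust
const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";

/// Returns true once enough bytes are buffered to know the peer began with
/// the HTTP/2 connection preface; a shorter matching prefix is inconclusive.
fn looks_like_h2_preface(read_buf: &[u8]) -> bool {
    read_buf.len() >= H2_PREFACE.len() && read_buf.starts_with(H2_PREFACE)
}
```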
@@ -217,7 +217,7 @@ where
    ) -> Poll<io::Result<()>> {
        loop {}
    }

    /// If the read side can be cheaply drained, do so. Otherwise, close.
    pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut task::Context<'_>) {
        loop {}
    }
@@ -246,17 +246,17 @@ impl<I, B: Buf, T> fmt::Debug for Conn<I, B, T> {
impl<I: Unpin, B, T> Unpin for Conn<I, B, T> {}

struct State {
    allow_half_close: bool,
    /// Re-usable HeaderMap to reduce allocating new ones.
    cached_headers: Option<HeaderMap>,
    /// If an error occurs when there wasn't a direct way to return it
    /// back to the user, this is set.
    error: Option<crate::Error>,
    /// Current keep-alive status.
    keep_alive: KA,
    /// If mid-message, the HTTP Method that started it.
    ///
    /// This is used to know things such as if the message can include
    /// a body or not.
    method: Option<Method>,
    h1_parser_config: ParserConfig,
    #[cfg(all(feature = "server", feature = "runtime"))]
@@ -270,23 +270,23 @@ struct State {
    preserve_header_order: bool,
    title_case_headers: bool,
    h09_responses: bool,
    /// If set, called with each 1xx informational response received for
    /// the current request. MUST be unset after a non-1xx response is
    /// received.
    #[cfg(feature = "ffi")]
    on_informational: Option<crate::ffi::OnInformational>,
    #[cfg(feature = "ffi")]
    raw_headers: bool,
    /// Set to true when the Dispatcher should poll read operations
    /// again. See the `maybe_notify` method for more.
    notify_read: bool,
    /// State of allowed reads
    reading: Reading,
    /// State of allowed writes
    writing: Writing,
    /// An expected pending HTTP upgrade.
    upgrade: Option<crate::upgrade::Pending>,
    /// Either HTTP/1.0 or 1.1 connection
    version: Version,
}

#[derive(Debug)]
@@ -13,10 +13,10 @@ use super::DecodedLength;
use self::Kind::{Chunked, Eof, Length};

/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Clone, PartialEq)]
pub(crate) struct Decoder {
    kind: Kind,
@@ -24,26 +24,26 @@ pub(crate) struct Decoder {
#[derive(Debug, Clone, Copy, PartialEq)]
enum Kind {
    /// A Reader used when a Content-Length header is passed with a positive integer.
    Length(u64),
    /// A Reader used when Transfer-Encoding is `chunked`.
    Chunked(ChunkedState, u64),
    /// A Reader used for responses that don't indicate a length or chunked.
    ///
    /// The bool tracks when EOF is seen on the transport.
    ///
    /// Note: This should only be used for `Response`s. It is illegal for a
    /// `Request` to be made with both `Content-Length` and
    /// `Transfer-Encoding: chunked` missing, as explained in the spec:
    ///
    /// > If a Transfer-Encoding header field is present in a response and
    /// > the chunked transfer coding is not the final encoding, the
    /// > message body length is determined by reading the connection until
    /// > it is closed by the server. If a Transfer-Encoding header field
    /// > is present in a request and the chunked transfer coding is not
    /// > the final encoding, the message body length cannot be determined
    /// > reliably; the server MUST respond with the 400 (Bad Request)
    /// > status code and then close the connection.
    Eof(bool),
}
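The three `Kind`s above map onto the RFC 7230 §3.3.3 body-length rules quoted in the doc comment. A standalone sketch of that selection logic (the enum and function here are hypothetical, not the constructors hyper exposes):

```rust
enum BodyKind {
    /// Fixed-size body from a Content-Length header.
    Length(u64),
    /// Transfer-Encoding: chunked.
    Chunked,
    /// Read until the peer closes the connection (responses only).
    Eof,
}

/// Pick a body kind from already-parsed framing headers. `is_response`
/// matters because only responses may fall back to close-delimited reads.
fn pick_body_kind(chunked: bool, content_length: Option<u64>, is_response: bool) -> BodyKind {
    if chunked {
        // Transfer-Encoding takes precedence when both headers are present.
        BodyKind::Chunked
    } else if let Some(len) = content_length {
        BodyKind::Length(len)
    } else if is_response {
        // Close-delimited: the body runs until the server closes the connection.
        BodyKind::Eof
    } else {
        // A request with neither header has no body.
        BodyKind::Length(0)
    }
}
```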
@@ -64,11 +64,11 @@ where
    pub(crate) fn into_inner(self) -> (I, Bytes, D) {
        loop {}
    }

    /// Run this dispatcher until HTTP says this connection is done,
    /// but don't call `AsyncWrite::shutdown` on the underlying IO.
    ///
    /// This is useful for old-style HTTP upgrades, but ignores
    /// the newer-style upgrade API.
    pub(crate) fn poll_without_shutdown(
        &mut self,
        cx: &mut task::Context<'_>,
@@ -133,8 +133,8 @@ where
        loop {}
    }
}

/// A drop guard to allow a mutable borrow of an Option while being able to
/// set whether the `Option` should be cleared on drop.
struct OptGuard<'a, T>(Pin<&'a mut Option<T>>, bool);

impl<'a, T> OptGuard<'a, T> {
    fn new(pin: Pin<&'a mut Option<T>>) -> Self {
@@ -5,7 +5,7 @@ use bytes::Buf;
use super::io::WriteBuf;

type StaticBuf = &'static [u8];

/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct Encoder {
    kind: Kind,
@@ -19,16 +19,16 @@ pub(crate) struct EncodedBuf<B> {
pub(crate) struct NotEof(u64);

#[derive(Debug, PartialEq, Clone)]
enum Kind {
    /// An Encoder for when Transfer-Encoding includes `chunked`.
    Chunked,
    /// An Encoder for when Content-Length is set.
    ///
    /// Enforces that the body is not longer than the Content-Length header.
    Length(u64),
    /// An Encoder for when neither Content-Length nor Chunked encoding is set.
    ///
    /// This is mostly only used with HTTP/1.0, without a length. This kind
    /// requires the connection to be closed when the body is finished.
    #[cfg(feature = "server")]
    CloseDelimited,
}
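With the `Chunked` kind, each write is framed as the chunk size in hexadecimal, CRLF, the payload, CRLF, and the body ends with a zero-length chunk. A small illustrative framing helper (a sketch only; the `Encoder` in this file works over `Buf`s rather than `Vec<u8>`):

```rust
/// Frame one chunk of a `Transfer-Encoding: chunked` body.
fn encode_chunk(data: &[u8], out: &mut Vec<u8>) {
    // Size line: chunk length in hexadecimal, then CRLF.
    out.extend_from_slice(format!("{:X}\r\n", data.len()).as_bytes());
    out.extend_from_slice(data);
    out.extend_from_slice(b"\r\n");
}

/// The last-chunk marker that terminates a chunked body (no trailers).
const LAST_CHUNK: &[u8] = b"0\r\n\r\n";
```

For example, `encode_chunk(b"hello", &mut buf)` appends `5\r\nhello\r\n` to the wire buffer.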
@@ -85,11 +85,11 @@ impl Encoder {
    {
        loop {}
    }

    /// Encodes the full body, without verifying the remaining length matches.
    ///
    /// This is used in conjunction with HttpBody::__hyper_full_data(), which
    /// means we can trust that the buf has the correct size (the buf itself
    /// was checked to make the headers).
    pub(super) fn danger_full_buf<B>(self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>)
    where
        B: Buf,
@@ -15,18 +15,18 @@ use tokio::time::Instant;
use super::{Http1Transaction, ParseContext, ParsedMessage};
use crate::common::buf::BufList;
use crate::common::{task, Poll};

/// The initial buffer size allocated before trying to read from IO.
pub(crate) const INIT_BUFFER_SIZE: usize = 8192;

/// The minimum value that can be set to max buffer size.
pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE;

/// The default maximum read buffer size. If the buffer gets this big and
/// a message is still not complete, a `TooLarge` error is triggered.
pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;

/// The maximum number of distinct `Buf`s to hold in a list before requiring
/// a flush. Only affects when the buffer strategy is to queue buffers.
///
/// Note that a flush can happen before reaching the maximum. This simply
/// forces a flush if the queue gets this big.
const MAX_BUF_LIST_BUFFERS: usize = 16;

pub(crate) struct Buffered<T, B> {
    flush_pipeline: bool,
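Worked out, `DEFAULT_MAX_BUFFER_SIZE` is 8192 + 4096 × 100 = 417,792 bytes (exactly 408 KiB); a compile-time check makes the arithmetic explicit:

```rust
// DEFAULT_MAX_BUFFER_SIZE = 8192 + 4096 * 100 = 417,792 bytes = 408 KiB.
const _: () = assert!(8192 + 4096 * 100 == 417_792);
const _: () = assert!(417_792 / 1024 == 408);
```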
@@ -77,16 +77,16 @@ where
    pub(super) fn read_buf_mut(&mut self) -> &mut BytesMut {
        loop {}
    }

    /// Return the "allocated" available space, not the potential space
    /// that could be allocated in the future.
    fn read_buf_remaining_mut(&self) -> usize {
        loop {}
    }

    /// Return whether we can append to the headers buffer.
    ///
    /// Reasons we can't:
    /// - The write buf is in queue mode, and some of the past body still
    ///   needs to be flushed.
    pub(crate) fn can_headers_buf(&self) -> bool {
        loop {}
    }
@@ -136,10 +136,10 @@ where
    ) -> Poll<io::Result<()>> {
        loop {}
    }

    /// Specialized version of `flush` when strategy is Flatten.
    ///
    /// Since all buffered bytes are flattened into the single headers buffer,
    /// that skips some bookkeeping around using multiple buffers.
    fn poll_flush_flattened(
        &mut self,
        cx: &mut task::Context<'_>,
@@ -217,9 +217,9 @@ impl<T: AsRef<[u8]>> Cursor<T> {
    }
}

impl Cursor<Vec<u8>> {
    /// If we've advanced the position a bit in this cursor, and wish to
    /// extend the underlying vector, we may wish to unshift the "read" bytes
    /// off, and move everything else over.
    fn maybe_unshift(&mut self, additional: usize) {
        loop {}
    }
@@ -247,10 +247,10 @@ impl<T: AsRef<[u8]>> Buf for Cursor<T> {
    }
}

pub(super) struct WriteBuf<B> {
    /// Re-usable buffer that holds message headers
    headers: Cursor<Vec<u8>>,
    max_buf_size: usize,
    /// Deque of user buffers if strategy is Queue
    queue: BufList<B>,
    strategy: WriteStrategy,
}
@@ -48,7 +48,7 @@ pub(crate) trait Http1Transaction {
    }
    fn update_date() {}
}

/// Result newtype for Http1Transaction::parse.
pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, crate::error::Parse>;

#[derive(Debug)]
pub(crate) struct ParsedMessage<T> {
@@ -77,7 +77,7 @@ pub(crate) struct ParseContext<'a> {
    #[cfg(feature = "ffi")]
    raw_headers: bool,
}

/// Passed to Http1Transaction::encode
pub(crate) struct Encode<'a, T> {
    head: &'a mut MessageHead<T>,
    body: Option<BodyLength>,
@@ -86,7 +86,7 @@ pub(crate) struct Encode<'a, T> {
    req_method: &'a mut Option<Method>,
    title_case_headers: bool,
}

/// Extra flags that a request "wants", like expect-continue or upgrades.
#[derive(Clone, Copy, Debug)]
struct Wants(u8);

impl Wants {
@@ -179,9 +179,9 @@ impl Http1Transaction for Client {
}

#[cfg(feature = "client")]
impl Client {
    /// Returns Some(length, wants_upgrade) if successful.
    ///
    /// Returns None if this message head should be skipped (like a 100 status).
    fn decoder(
        inc: &MessageHead<StatusCode>,
        method: &mut Option<Method>,
@@ -19,7 +19,7 @@ cfg_client! {
cfg_server! {
    pub(crate) mod server;
    pub(crate) use self::server::Server;
}

/// Default initial stream window size defined in HTTP2 spec.
pub(crate) const SPEC_WINDOW_SIZE: u32 = 65_535;

fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
    loop {}
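`strip_connection_headers` (stubbed to `loop {}` in this commit) corresponds to the RFC 7540 §8.1.2.2 rule that connection-specific HTTP/1 headers must not be carried on an HTTP/2 message. A simplified sketch of that rule using the `http` crate, ignoring the header names listed inside the `Connection` value and the `TE: trailers` exception that a real implementation must handle:

```rust
use http::header::{HeaderMap, CONNECTION, TRANSFER_ENCODING, UPGRADE};

/// Drop connection-specific headers before forwarding a message over HTTP/2.
fn strip_connection_headers_sketch(headers: &mut HeaderMap) {
    for name in [CONNECTION, TRANSFER_ENCODING, UPGRADE] {
        headers.remove(name);
    }
    // Non-standard hop-by-hop headers, named as lowercase strings.
    headers.remove("keep-alive");
    headers.remove("proxy-connection");
}
```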
@@ -1,23 +1,23 @@
/// HTTP2 Ping usage
///
/// hyper uses HTTP2 pings for two purposes:
///
/// 1. Adaptive flow control using BDP
/// 2. Connection keep-alive
///
/// Both cases are optional.
///
/// # BDP Algorithm
///
/// 1. When receiving a DATA frame, if a BDP ping isn't outstanding:
///    1a. Record current time.
///    1b. Send a BDP ping.
/// 2. Increment the number of received bytes.
/// 3. When the BDP ping ack is received:
///    3a. Record duration from sent time.
///    3b. Merge RTT with a running average.
///    3c. Calculate bdp as bytes/rtt.
///    3d. If bdp is over 2/3 max, set new max to bdp and update windows.
#[cfg(feature = "runtime")]
use std::fmt;
#[cfg(feature = "runtime")]
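A minimal standalone sketch of steps 3a–3d above (the 1/8 RTT smoothing factor and the names here are illustrative assumptions, not hyper's exact math):

```rust
use std::time::Duration;

struct BdpSketch {
    /// Largest bandwidth-delay estimate seen so far (the "max" in step 3d).
    max: f64,
    /// Exponentially smoothed round-trip time, in seconds.
    rtt: f64,
}

impl BdpSketch {
    /// Called when a BDP ping ack arrives.
    fn on_ping_ack(&mut self, bytes_since_ping: usize, sample_rtt: Duration) -> Option<u32> {
        // 3a/3b: merge the RTT sample into a running average.
        let sample = sample_rtt.as_secs_f64();
        self.rtt = if self.rtt == 0.0 {
            sample
        } else {
            self.rtt + (sample - self.rtt) * 0.125
        };

        // 3c: estimate bdp as bytes / rtt.
        let bdp = bytes_since_ping as f64 / self.rtt;

        // 3d: if the estimate exceeds 2/3 of the current max, adopt it and
        // report a window update; otherwise leave the windows alone.
        if bdp > self.max * 2.0 / 3.0 {
            self.max = bdp;
            Some(bdp as u32)
        } else {
            None
        }
    }
}
```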
@@ -43,14 +43,14 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger)
#[derive(Clone)]
pub(super) struct Config {
    pub(super) bdp_initial_window: Option<WindowSize>,
    /// If no frames are received in this amount of time, a PING frame is sent.
    #[cfg(feature = "runtime")]
    pub(super) keep_alive_interval: Option<Duration>,
    /// After sending a keepalive PING, the connection will be closed if
    /// a pong is not received in this amount of time.
    #[cfg(feature = "runtime")]
    pub(super) keep_alive_timeout: Duration,
    /// If true, sends pings even when there are no active streams.
    #[cfg(feature = "runtime")]
    pub(super) keep_alive_while_idle: bool,
}
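The two keep-alive durations interact as the field docs describe: `keep_alive_interval` is how long the connection may go without receiving frames before a PING is sent, and `keep_alive_timeout` bounds the wait for the pong. A rough sketch of that decision, with the timer wiring and the actual `h2` ping call left out (all names here are hypothetical):

```rust
use std::time::{Duration, Instant};

enum KeepAliveAction {
    /// Nothing to do yet; re-check at the given deadline.
    Wait(Instant),
    /// Idle longer than `interval`: send a PING now.
    SendPing,
    /// A PING was sent and no pong arrived within `timeout`: close the connection.
    Close,
}

fn keep_alive_step(
    now: Instant,
    last_read_at: Instant,
    ping_sent_at: Option<Instant>,
    interval: Duration,
    timeout: Duration,
) -> KeepAliveAction {
    match ping_sent_at {
        // Waiting on a pong: give up once `timeout` has elapsed.
        Some(sent) if now >= sent + timeout => KeepAliveAction::Close,
        Some(sent) => KeepAliveAction::Wait(sent + timeout),
        // No ping outstanding: send one after `interval` of read inactivity.
        None if now >= last_read_at + interval => KeepAliveAction::SendPing,
        None => KeepAliveAction::Wait(last_read_at + interval),
    }
}
```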
@@ -67,41 +67,41 @@ pub(super) struct Ponger {
struct Shared {
    ping_pong: PingPong,
    ping_sent_at: Option<Instant>,
    /// If `Some`, bdp is enabled, and this tracks how many bytes have been
    /// read during the current sample.
    bytes: Option<usize>,
    /// We delay a variable amount of time between BDP pings. This allows us
    /// to send fewer pings as the bandwidth stabilizes.
    next_bdp_at: Option<Instant>,
    /// If `Some`, keep-alive is enabled, and the Instant is how long ago
    /// the connection read the last frame.
    #[cfg(feature = "runtime")]
    last_read_at: Option<Instant>,
    #[cfg(feature = "runtime")]
    is_keep_alive_timed_out: bool,
}

struct Bdp {
    /// Current BDP in bytes
    bdp: u32,
    /// Largest bandwidth we've seen so far.
    max_bandwidth: f64,
    /// Round trip time in seconds
    rtt: f64,
    /// Delay the next ping by this amount.
    ///
    /// This will change depending on how stable the current bandwidth is.
    ping_delay: Duration,
    /// The count of ping round trips where BDP has stayed the same.
    stable_count: u32,
}

#[cfg(feature = "runtime")]
struct KeepAlive {
    /// If no frames are received in this amount of time, a PING frame is sent.
    interval: Duration,
    /// After sending a keepalive PING, the connection will be closed if
    /// a pong is not received in this amount of time.
    timeout: Duration,
    /// If true, sends pings even when there are no active streams.
    while_idle: bool,
    state: KeepAliveState,
    timer: Pin<Box<Sleep>>,
@@ -132,8 +132,8 @@ impl Recorder {
    pub(crate) fn record_non_data(&self) {
        loop {}
    }

    /// If the incoming stream is already closed, convert self into
    /// a disabled reporter.
    #[cfg(feature = "client")]
    pub(super) fn for_stream(self, stream: &h2::RecvStream) -> Self {
        loop {}
@@ -167,7 +167,7 @@ impl Shared {
        loop {}
    }
}

/// Any higher than this likely will be hitting the TCP flow control.
const BDP_LIMIT: usize = 1024 * 1024 * 16;

impl Bdp {
    fn calculate(&mut self, bytes: usize, rtt: Duration) -> Option<WindowSize> {
@@ -6,40 +6,40 @@ cfg_feature! {
}

#[cfg(feature = "http2")]
pub(crate) mod h2;

/// An Incoming Message head. Includes request/status line, and headers.
#[derive(Debug, Default)]
pub(crate) struct MessageHead<S> {
    /// HTTP version of the message.
    pub(crate) version: http::Version,
    /// Subject (request line or status line) of Incoming message.
    pub(crate) subject: S,
    /// Headers of the Incoming message.
    pub(crate) headers: http::HeaderMap,
    /// Extensions.
    extensions: http::Extensions,
}

/// An incoming request message.
#[cfg(feature = "http1")]
pub(crate) type RequestHead = MessageHead<RequestLine>;

#[derive(Debug, Default, PartialEq)]
#[cfg(feature = "http1")]
pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri);

/// An incoming response message.
#[cfg(all(feature = "http1", feature = "client"))]
pub(crate) type ResponseHead = MessageHead<http::StatusCode>;

#[derive(Debug)]
#[cfg(feature = "http1")]
pub(crate) enum BodyLength {
    /// Content-Length
    Known(u64),
    /// Transfer-Encoding: chunked (if h1)
    Unknown,
}

/// Status of when a Dispatcher future completes.
pub(crate) enum Dispatched {
    /// Dispatcher completely shut down the connection.
    Shutdown,
    /// Dispatcher has a pending upgrade, and so did not shut down.
    #[cfg(feature = "http1")]
    Upgrade(crate::upgrade::Pending),
}