nora 2023-03-07 14:22:20 +01:00
parent b3d4232a00
commit 43b96cd25f
5 changed files with 5 additions and 449 deletions


@@ -25,46 +25,6 @@ pub trait Accept {
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>>;
}
/// Create an `Accept` with a polling function.
///
/// # Example
///
/// ```
/// use std::task::Poll;
/// use hyper::server::{accept, Server};
///
/// # let mock_conn = ();
/// // If we created some mocked connection...
/// let mut conn = Some(mock_conn);
///
/// // And accept just the mocked conn once...
/// let once = accept::poll_fn(move |cx| {
/// Poll::Ready(conn.take().map(Ok::<_, ()>))
/// });
///
/// let builder = Server::builder(once);
/// ```
pub(crate) fn poll_fn<F, IO, E>(func: F) -> impl Accept<Conn = IO, Error = E>
where
F: FnMut(&mut task::Context<'_>) -> Poll<Option<Result<IO, E>>>,
{
struct PollFn<F>(F);
impl<F> Unpin for PollFn<F> {}
impl<F, IO, E> Accept for PollFn<F>
where
F: FnMut(&mut task::Context<'_>) -> Poll<Option<Result<IO, E>>>,
{
type Conn = IO;
type Error = E;
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
(self.get_mut().0)(cx)
}
}
PollFn(func)
}
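
The doc example removed above exercises `poll_fn` with a mocked connection; with a real listener the same shape applies. A minimal sketch, assuming the upstream hyper 0.14 public surface (`accept::poll_fn`, `Server`) and tokio's `TcpListener::poll_accept`:

use std::convert::Infallible;

use hyper::server::accept;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let listener = TcpListener::bind("127.0.0.1:3000").await?;

    // Each poll asks the listener for a connection: `Some(Ok(stream))` hands
    // it to the server, `Pending` waits for the next wakeup.
    let incoming = accept::poll_fn(move |cx| {
        listener
            .poll_accept(cx)
            .map(|res| Some(res.map(|(stream, _addr)| stream)))
    });

    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    Server::builder(incoming).serve(make_svc).await?;
    Ok(())
}
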
/// Adapt a `Stream` of incoming connections into an `Accept`.
///
/// # Optional


@@ -147,37 +147,8 @@ enum Fallback<E> {
)
)]
type Fallback<E> = PhantomData<E>;
/// Deconstructed parts of a `Connection`.
///
/// This allows taking apart a `Connection` at a later time, in order to
/// reclaim the IO object, and additional related pieces.
#[derive(Debug)]
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
pub(crate) struct Parts<T, S> {
/// The original IO object used in the handshake.
pub(crate) io: T,
/// A buffer of bytes that have been read but not processed as HTTP.
///
/// If the client sent additional bytes after its last request, and
/// this connection "ended" with an upgrade, the read buffer will contain
/// those bytes.
///
/// You will want to check for any existing bytes if you plan to continue
/// communicating on the IO object.
pub(crate) read_buf: Bytes,
/// The `Service` used to serve this connection.
pub(crate) service: S,
_inner: (),
}
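
In upstream hyper 0.14 the `Parts` fields are public and are obtained from `Connection::without_shutdown` (with the "http1" feature), which finishes serving without closing the transport. A sketch under that assumption, checking the leftover read buffer before reusing the IO object:

use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Response};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let listener = TcpListener::bind("127.0.0.1:3000").await?;
    let (stream, _) = listener.accept().await?;

    let svc = service_fn(|_req| async {
        Ok::<_, hyper::Error>(Response::new(Body::from("bye")))
    });

    // Serve the connection, then hand back the pieces instead of shutting
    // the transport down.
    let mut http = Http::new();
    http.http1_only(true);
    let parts = http.serve_connection(stream, svc).without_shutdown().await?;

    if !parts.read_buf.is_empty() {
        // Bytes the client sent past the last parsed request; consume these
        // before reusing `parts.io`.
        println!("{} unprocessed bytes buffered", parts.read_buf.len());
    }
    let _reclaimed: tokio::net::TcpStream = parts.io;
    Ok(())
}
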
#[cfg(any(feature = "http1", feature = "http2"))]
impl Http {
/// Creates a new instance of the HTTP protocol, ready to spawn a server or
/// start accepting connections.
pub(crate) fn new() -> Http {
loop {}
}
}
impl Http {}
#[cfg(any(feature = "http1", feature = "http2"))]
impl Default for ConnectionMode {
#[cfg(all(feature = "http1", feature = "http2"))]
@@ -212,15 +183,7 @@ mod upgrades {
B: HttpBody + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
/// Start a graceful shutdown process for this connection.
///
/// This `Connection` should continue to be polled until shutdown
/// can finish.
pub(crate) fn graceful_shutdown(mut self: Pin<&mut Self>) {
loop {}
}
}
{}
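
`graceful_shutdown` only requests the shutdown; the `Connection` future must still be polled until it resolves. A sketch of the usual pattern, assuming the public hyper 0.14 API:

use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Response};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let listener = TcpListener::bind("127.0.0.1:3000").await?;
    let (stream, _) = listener.accept().await?;
    let svc = service_fn(|_req| async {
        Ok::<_, hyper::Error>(Response::new(Body::from("hello")))
    });

    let conn = Http::new().serve_connection(stream, svc);
    tokio::pin!(conn);

    loop {
        tokio::select! {
            // Keep driving the connection; it resolves once shutdown finishes.
            res = conn.as_mut() => break res?,
            // On ctrl-c, request a graceful shutdown and keep looping so
            // in-flight requests can complete.
            _ = tokio::signal::ctrl_c() => conn.as_mut().graceful_shutdown(),
        }
    }
    Ok(())
}
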
impl<I, B, S, E> Future for UpgradeableConnection<I, S, E>
where
S: HttpService<Body, ResBody = B>,


@@ -1,9 +1,6 @@
use std::error::Error as StdError;
use std::fmt;
#[cfg(feature = "tcp")]
use std::net::{SocketAddr, TcpListener as StdTcpListener};
#[cfg(feature = "tcp")]
use std::time::Duration;
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::accept::Accept;
@@ -13,7 +10,7 @@ use crate::body::{Body, HttpBody};
use crate::common::exec::Exec;
use crate::common::exec::{ConnStreamExec, NewSvcExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
use super::conn::{Connection, Http as Http_, UpgradeableConnection};
use super::conn::{Http as Http_, UpgradeableConnection};
use super::shutdown::{Graceful, GracefulWatcher};
use crate::service::{HttpService, MakeServiceRef};
use self::new_svc::NewSvcTask;
@@ -56,16 +53,6 @@ impl Server<AddrIncoming, ()> {
pub fn bind(addr: &SocketAddr) -> Builder<AddrIncoming> {
loop {}
}
/// Tries to bind to the provided address, and returns a [`Builder`](Builder).
pub(crate) fn try_bind(addr: &SocketAddr) -> crate::Result<Builder<AddrIncoming>> {
loop {}
}
/// Create a new instance from a `std::net::TcpListener` instance.
pub(crate) fn from_tcp(
listener: StdTcpListener,
) -> Result<Builder<AddrIncoming>, crate::Error> {
loop {}
}
}
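
For reference, all three constructors removed here produce a `Builder<AddrIncoming>`; in upstream hyper 0.14 (with the "tcp" feature) they are public and differ only in how bind failures surface. A sketch:

use std::net::{SocketAddr, TcpListener as StdTcpListener};

use hyper::Server;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Panics if the address cannot be bound.
    let _builder = Server::bind(&SocketAddr::from(([127, 0, 0, 1], 0)));

    // Same, but surfaces the bind error instead of panicking.
    let _builder = Server::try_bind(&SocketAddr::from(([127, 0, 0, 1], 0)))?;

    // Adopt a listener created elsewhere (e.g. inherited from a supervisor);
    // tokio needs it in non-blocking mode.
    let std_listener = StdTcpListener::bind("127.0.0.1:0")?;
    std_listener.set_nonblocking(true)?;
    let _builder = Server::from_tcp(std_listener)?;

    Ok(())
}
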
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, IO, IE, S, E, B> Server<I, S, E>
@@ -128,17 +115,6 @@ where
) -> Poll<Option<crate::Result<Connecting<IO, S::Future, E>>>> {
loop {}
}
pub(super) fn poll_watch<W>(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
watcher: &W,
) -> Poll<crate::Result<()>>
where
E: NewSvcExec<IO, S::Future, S::Service, E, W>,
W: Watcher<IO, S::Service, E>,
{
loop {}
}
}
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, IO, IE, S, B, E> Future for Server<I, S, E>
@@ -167,91 +143,11 @@ where
}
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, E> Builder<I, E> {
/// Start a new builder, wrapping an incoming stream and low-level options.
///
/// For a more convenient constructor, see [`Server::bind`](Server::bind).
pub(crate) fn new(incoming: I, protocol: Http_<E>) -> Self {
loop {}
}
/// Sets whether to use keep-alive for HTTP/1 connections.
///
/// Default is `true`.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_keepalive(mut self, val: bool) -> Self {
loop {}
}
/// Set whether HTTP/1 connections should support half-closures.
///
/// Clients can choose to shut down their write-side while waiting
/// for the server to respond. Setting this to `true` will
/// prevent closing the connection immediately if `read`
/// detects an EOF in the middle of a request.
///
/// Default is `false`.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_half_close(mut self, val: bool) -> Self {
loop {}
}
/// Set the maximum buffer size.
///
/// Default is ~400kb.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_max_buf_size(mut self, val: usize) -> Self {
loop {}
}
#[doc(hidden)]
#[cfg(feature = "http1")]
pub fn http1_pipeline_flush(mut self, val: bool) -> Self {
loop {}
}
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
///
/// Note that setting this to false may mean more copies of body data,
/// but may also improve performance when an IO transport doesn't
/// support vectored writes well, such as most TLS implementations.
///
/// Setting this to true will force hyper to use the queued strategy,
/// which may eliminate unnecessary cloning on some TLS backends.
///
/// Default is `auto`. In this mode hyper will try to guess which
/// mode to use.
#[cfg(feature = "http1")]
pub(crate) fn http1_writev(mut self, enabled: bool) -> Self {
loop {}
}
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_title_case_headers(mut self, val: bool) -> Self {
loop {}
}
/// Set whether to support preserving original header cases.
///
/// Currently, this will record the original cases received, and store them
/// in a private extension on the `Request`. It will also look for and use
/// such an extension in any provided `Response`.
///
/// Since the relevant extension is still private, there is no way to
/// interact with the original cases. The only effect this can have now is
/// to forward the cases in a proxy-like fashion.
///
/// Note that this setting does not affect HTTP/2.
///
/// Default is false.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_preserve_header_case(mut self, val: bool) -> Self {
loop {}
}
/// Set a timeout for reading client request headers. If a client does not
/// transmit the entire header within this time, the connection is closed.
///
@@ -261,118 +157,6 @@ impl<I, E> Builder<I, E> {
pub(crate) fn http1_header_read_timeout(mut self, read_timeout: Duration) -> Self {
loop {}
}
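
These HTTP/1 options are plain builder setters that consume and return the `Builder`. A sketch of how they chain in upstream hyper 0.14 (method availability depends on the "http1" feature):

use std::convert::Infallible;
use std::net::SocketAddr;
use std::time::Duration;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    Server::bind(&addr)
        .http1_keepalive(true)                              // reuse connections between requests
        .http1_half_close(false)                            // don't linger after read EOF
        .http1_title_case_headers(false)
        .http1_header_read_timeout(Duration::from_secs(30)) // drop slow-header clients
        .serve(make_svc)
        .await
}
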
/// Sets whether HTTP/1 is required.
///
/// Default is `false`.
#[cfg(feature = "http1")]
#[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub(crate) fn http1_only(mut self, val: bool) -> Self {
loop {}
}
/// Sets whether HTTP/2 is required.
///
/// Default is `false`.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_only(mut self, val: bool) -> Self {
loop {}
}
/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
///
/// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_initial_stream_window_size(
mut self,
sz: impl Into<Option<u32>>,
) -> Self {
loop {}
}
/// Sets the max connection-level flow control for HTTP2
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_initial_connection_window_size(
mut self,
sz: impl Into<Option<u32>>,
) -> Self {
loop {}
}
/// Sets whether to use an adaptive flow control.
///
/// Enabling this will override the limits set in
/// `http2_initial_stream_window_size` and
/// `http2_initial_connection_window_size`.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_adaptive_window(mut self, enabled: bool) -> Self {
loop {}
}
/// Sets the maximum frame size to use for HTTP2.
///
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_frame_size(mut self, sz: impl Into<Option<u32>>) -> Self {
loop {}
}
/// Sets the max size of received header frames.
///
/// Default is currently ~16MB, but may change.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_header_list_size(mut self, max: u32) -> Self {
loop {}
}
/// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
/// connections.
///
/// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing.
///
/// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_concurrent_streams(
mut self,
max: impl Into<Option<u32>>,
) -> Self {
loop {}
}
/// Set the maximum write buffer size for each HTTP/2 stream.
///
/// Default is currently ~400KB, but may change.
///
/// # Panics
///
/// The value must be no larger than `u32::MAX`.
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub(crate) fn http2_max_send_buf_size(mut self, max: usize) -> Self {
loop {}
}
/// Enables the [extended CONNECT protocol].
///
/// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
#[cfg(feature = "http2")]
pub(crate) fn http2_enable_connect_protocol(mut self) -> Self {
loop {}
}
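
The HTTP/2 settings map onto h2 `SETTINGS` values and require the "http2" feature. A sketch under those assumptions:

use std::convert::Infallible;
use std::net::SocketAddr;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    Server::bind(&addr)
        .http2_only(true)
        // Explicit flow-control windows; `None` would keep hyper's defaults.
        .http2_initial_stream_window_size(1_048_576u32)
        .http2_initial_connection_window_size(4_194_304u32)
        // Setting this to true instead would size the windows from observed
        // bandwidth/latency, overriding the two values above.
        .http2_adaptive_window(false)
        .http2_max_concurrent_streams(256u32)
        .http2_max_frame_size(16_384u32)
        .serve(make_svc)
        .await
}
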
/// Sets the `Executor` to deal with connection tasks.
///
/// Default is `tokio::spawn`.
pub(crate) fn executor<E2>(self, executor: E2) -> Builder<I, E2> {
loop {}
}
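
`executor` replaces `tokio::spawn` for connection tasks; in upstream hyper 0.14 the handle only needs to implement `hyper::rt::Executor` for the futures the server spawns. A sketch with a thin wrapper around `tokio::spawn` (equivalent to the default):

use std::convert::Infallible;
use std::future::Future;
use std::net::SocketAddr;

use hyper::rt::Executor;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

// Spawns each connection task on the tokio runtime; a custom executor could
// instead route tasks to a LocalSet, a priority pool, etc.
#[derive(Clone)]
struct TokioExecutor;

impl<F> Executor<F> for TokioExecutor
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        tokio::spawn(fut);
    }
}

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    Server::bind(&addr)
        .executor(TokioExecutor)
        .serve(make_svc)
        .await
}
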
///
pub fn serve<S, B>(self, _: S) -> Server<I, S>
where


@@ -1,7 +1,6 @@
use std::error::Error as StdError;
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::accept::Accept;
use super::conn::UpgradeableConnection;
use super::server::{Server, Watcher};
@@ -19,11 +18,7 @@ pin_project! {
Option < (Signal, Watch) >, #[pin] server : Server < I, S, E >, #[pin] signal : F, },
Draining { draining : Draining }, }
}
impl<I, S, F, E> Graceful<I, S, F, E> {
pub(super) fn new(server: Server<I, S, E>, signal: F) -> Self {
loop {}
}
}
impl<I, S, F, E> Graceful<I, S, F, E> {}
impl<I, IO, IE, S, B, F, E> Future for Graceful<I, S, F, E>
where
I: Accept<Conn = IO, Error = IE>,
@@ -61,14 +56,3 @@ where
loop {}
}
}
fn on_drain<I, S, E>(conn: Pin<&mut UpgradeableConnection<I, S, E>>)
where
S: HttpService<Body>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
S::ResBody: HttpBody + 'static,
<S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, S::ResBody>,
{
loop {}
}
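
`Graceful` and `on_drain` back the public `Server::with_graceful_shutdown` in upstream hyper 0.14: once the user's signal future resolves, each tracked connection is drained via `graceful_shutdown`. A sketch of that public surface:

use std::convert::Infallible;
use std::net::SocketAddr;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    // Resolves only after the signal fires *and* in-flight connections drain.
    Server::bind(&addr)
        .serve(make_svc)
        .with_graceful_shutdown(async {
            tokio::signal::ctrl_c().await.expect("install ctrl-c handler");
        })
        .await
}
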


@@ -16,30 +16,6 @@ struct TcpKeepaliveConfig {
retries: Option<u32>,
}
impl TcpKeepaliveConfig {
/// Converts into a `socket2::TcpKeepalive` if there is any keep-alive configuration.
fn into_socket2(self) -> Option<TcpKeepalive> {
loop {}
}
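
The body above is stubbed to `loop {}` in this tree; upstream, `into_socket2` builds a `socket2::TcpKeepalive` only when at least one field is set, routing interval/retries through the cfg'd helpers below. A standalone sketch (the `time` and `interval` field names are assumed from the setters elsewhere in this file; socket2 only offers `with_interval`/`with_retries` on some targets):

use std::time::Duration;

use socket2::TcpKeepalive;

// Mirrors the deleted config struct; only `retries` is visible in the hunk above.
#[derive(Default, Clone, Copy)]
struct KeepaliveConfig {
    time: Option<Duration>,
    interval: Option<Duration>,
    retries: Option<u32>,
}

impl KeepaliveConfig {
    fn into_socket2(self) -> Option<TcpKeepalive> {
        let mut dirty = false;
        let mut ka = TcpKeepalive::new();
        if let Some(time) = self.time {
            ka = ka.with_time(time);
            dirty = true;
        }
        if let Some(interval) = self.interval {
            // hyper goes through a cfg'd helper here because `with_interval`
            // is not available on every platform.
            ka = ka.with_interval(interval);
            dirty = true;
        }
        if let Some(retries) = self.retries {
            // Same story for `with_retries` (not available on Windows).
            ka = ka.with_retries(retries);
            dirty = true;
        }
        if dirty { Some(ka) } else { None }
    }
}

fn main() {
    let cfg = KeepaliveConfig { time: Some(Duration::from_secs(60)), ..Default::default() };
    assert!(cfg.into_socket2().is_some());
}
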
#[cfg(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
)
)]
fn ka_with_interval(
ka: TcpKeepalive,
interval: Duration,
dirty: &mut bool,
) -> TcpKeepalive {
loop {}
}
#[cfg(
not(
any(
@@ -58,25 +34,6 @@ impl TcpKeepaliveConfig {
fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive {
loop {}
}
#[cfg(
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
)
)]
fn ka_with_retries(
ka: TcpKeepalive,
retries: u32,
dirty: &mut bool,
) -> TcpKeepalive {
loop {}
}
#[cfg(
not(
any(
@@ -106,67 +63,18 @@ pub struct AddrIncoming {
timeout: Option<Pin<Box<Sleep>>>,
}
impl AddrIncoming {
pub(super) fn new(addr: &SocketAddr) -> crate::Result<Self> {
loop {}
}
pub(super) fn from_std(std_listener: StdTcpListener) -> crate::Result<Self> {
loop {}
}
/// Creates a new `AddrIncoming` bound to the provided socket address.
pub fn bind(addr: &SocketAddr) -> crate::Result<Self> {
loop {}
}
/// Creates a new `AddrIncoming` from an existing `tokio::net::TcpListener`.
pub fn from_listener(listener: TcpListener) -> crate::Result<Self> {
loop {}
}
/// Get the local address bound to this listener.
pub fn local_addr(&self) -> SocketAddr {
loop {}
}
/// Set the duration to remain idle before sending TCP keepalive probes.
///
/// If `None` is specified, keepalive is disabled.
pub fn set_keepalive(&mut self, time: Option<Duration>) -> &mut Self {
loop {}
}
/// Set the duration between two successive TCP keepalive retransmissions,
/// if acknowledgement to the previous keepalive transmission is not received.
pub fn set_keepalive_interval(&mut self, interval: Option<Duration>) -> &mut Self {
loop {}
}
/// Set the number of retransmissions to be carried out before declaring that the remote end is not available.
pub fn set_keepalive_retries(&mut self, retries: Option<u32>) -> &mut Self {
loop {}
}
/// Set the value of `TCP_NODELAY` option for accepted connections.
pub fn set_nodelay(&mut self, enabled: bool) -> &mut Self {
loop {}
}
/// Set whether to sleep on accept errors.
///
/// A possible scenario is that the process has hit the max open files
/// allowed, and so trying to accept a new connection will fail with
/// `EMFILE`. In some cases, it's preferable to just wait for some time, if
/// the application will likely close some files (or connections), and try
/// to accept the connection again. If this option is `true`, the error
/// will be logged at the `error` level, since it is still a big deal,
/// and then the listener will sleep for 1 second.
///
/// In other cases, hitting the max open files should be treated similarly
/// to being out-of-memory, and simply error (and shutdown). Setting
/// this option to `false` will allow that.
///
/// Default is `true`.
pub fn set_sleep_on_errors(&mut self, val: bool) {
loop {}
}
fn poll_next_(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<io::Result<AddrStream>> {
loop {}
}
}
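
These setters shape the accepted sockets before hyper parses any HTTP; in upstream hyper 0.14 `AddrIncoming` is public and plugs directly into `Server::builder`. A sketch:

use std::convert::Infallible;
use std::net::SocketAddr;
use std::time::Duration;

use hyper::server::conn::AddrIncoming;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));

    let mut incoming = AddrIncoming::bind(&addr)?;
    incoming
        .set_keepalive(Some(Duration::from_secs(60)))        // idle time before probes
        .set_keepalive_interval(Some(Duration::from_secs(10)))
        .set_keepalive_retries(Some(5))
        .set_nodelay(true);                                   // disable Nagle
    incoming.set_sleep_on_errors(true);                       // back off on EMFILE/ENFILE

    println!("listening on {}", incoming.local_addr());

    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    Server::builder(incoming).serve(make_svc).await?;
    Ok(())
}
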
impl Accept for AddrIncoming {
type Conn = AddrStream;
@@ -178,16 +86,6 @@ impl Accept for AddrIncoming {
loop {}
}
}
/// This function defines errors that are per-connection: if we get this
/// error from the `accept()` system call, the next connection might still
/// be ready to be accepted.
///
/// All other errors will incur a timeout before the next `accept()` is
/// performed. The timeout is useful to handle resource exhaustion errors
/// like ENFILE and EMFILE; otherwise we could enter a tight accept loop.
fn is_connection_error(e: &io::Error) -> bool {
loop {}
}
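
The body is stubbed here; upstream the check is a small match on the error kind, roughly:

use std::io;

// Errors tied to one failed connection: accept() can be retried immediately.
// Anything else (e.g. EMFILE/ENFILE) goes through the sleep-on-errors backoff.
fn is_connection_error(e: &io::Error) -> bool {
    matches!(
        e.kind(),
        io::ErrorKind::ConnectionRefused
            | io::ErrorKind::ConnectionAborted
            | io::ErrorKind::ConnectionReset
    )
}
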
impl fmt::Debug for AddrIncoming {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
loop {}
@@ -206,40 +104,7 @@ mod addr_stream {
struct AddrStream { #[pin] inner : TcpStream, pub (super) remote_addr :
SocketAddr, pub (super) local_addr : SocketAddr }
}
impl AddrStream {
pub(super) fn new(
tcp: TcpStream,
remote_addr: SocketAddr,
local_addr: SocketAddr,
) -> AddrStream {
loop {}
}
/// Returns the remote (peer) address of this connection.
#[inline]
pub fn remote_addr(&self) -> SocketAddr {
loop {}
}
/// Returns the local address of this connection.
#[inline]
pub fn local_addr(&self) -> SocketAddr {
loop {}
}
/// Consumes the AddrStream and returns the underlying IO object
#[inline]
pub fn into_inner(self) -> TcpStream {
loop {}
}
/// Attempt to receive data on the socket, without removing that data
/// from the queue, registering the current task for wakeup if data is
/// not yet available.
pub fn poll_peek(
&mut self,
cx: &mut task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<io::Result<usize>> {
loop {}
}
}
impl AddrStream {}
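
The accessors removed above are usually reached through the `MakeService`, which in upstream hyper 0.14 is handed an `&AddrStream` per connection. A sketch capturing the peer address for each request:

use std::convert::Infallible;
use std::net::SocketAddr;

use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));

    // `make_service_fn` receives `&AddrStream`, so the peer address can be
    // captured once per connection and reused for every request on it.
    let make_svc = make_service_fn(|conn: &AddrStream| {
        let remote = conn.remote_addr();
        let service = service_fn(move |_req: Request<Body>| async move {
            Ok::<_, Infallible>(Response::new(Body::from(format!("hello {}", remote))))
        });
        async move { Ok::<_, Infallible>(service) }
    });

    Server::bind(&addr).serve(make_svc).await
}
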
impl AsyncRead for AddrStream {
#[inline]
fn poll_read(