This commit is contained in:
nora 2023-03-07 14:00:23 +01:00
parent 25adea4103
commit 7af1274587
160 changed files with 38999 additions and 4 deletions

88  hyper/benches/body.rs  Normal file

@@ -0,0 +1,88 @@
#![feature(test)]
#![deny(warnings)]
extern crate test;
use bytes::Buf;
use futures_util::stream;
use futures_util::StreamExt;
use hyper::body::Body;
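// Sets up a current-thread Tokio runtime and a static slice of `$count`
// chunks of `$bytes` bytes each; every iteration wraps the chunks in
// `Body::wrap_stream`, binds the body to `$body_pat`, and runs `$block`.
// `$bencher.bytes` is set to the total payload so results report throughput.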
macro_rules! bench_stream {
($bencher:ident, bytes: $bytes:expr, count: $count:expr, $total_ident:ident, $body_pat:pat, $block:expr) => {{
let rt = tokio::runtime::Builder::new_current_thread()
.build()
.expect("rt build");
let $total_ident: usize = $bytes * $count;
$bencher.bytes = $total_ident as u64;
let __s: &'static [&'static [u8]] = &[&[b'x'; $bytes] as &[u8]; $count] as _;
$bencher.iter(|| {
rt.block_on(async {
let $body_pat = Body::wrap_stream(
stream::iter(__s.iter()).map(|&s| Ok::<_, std::convert::Infallible>(s)),
);
$block;
});
});
}};
}
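// Expands each `name, bytes, count` row into the same benchmark in three
// modules, each consuming the streamed body a different way:
// `aggregate`, `manual_into_vec`, and `to_bytes`.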
macro_rules! benches {
($($name:ident, $bytes:expr, $count:expr;)+) => (
mod aggregate {
use super::*;
$(
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, body, {
let buf = hyper::body::aggregate(body).await.unwrap();
assert_eq!(buf.remaining(), total);
});
}
)+
}
mod manual_into_vec {
use super::*;
$(
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, mut body, {
let mut vec = Vec::new();
while let Some(chunk) = body.next().await {
vec.extend_from_slice(&chunk.unwrap());
}
assert_eq!(vec.len(), total);
});
}
)+
}
mod to_bytes {
use super::*;
$(
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, body, {
let bytes = hyper::body::to_bytes(body).await.unwrap();
assert_eq!(bytes.len(), total);
});
}
)+
}
)
}
// ===== Actual Benchmarks =====
benches! {
bytes_1_000_count_2, 1_000, 2;
bytes_1_000_count_10, 1_000, 10;
bytes_10_000_count_1, 10_000, 1;
bytes_10_000_count_10, 10_000, 10;
}

37  hyper/benches/connect.rs  Normal file

@@ -0,0 +1,37 @@
#![feature(test)]
#![deny(warnings)]
extern crate test;
use http::Uri;
use hyper::client::connect::HttpConnector;
use hyper::service::Service;
use std::net::SocketAddr;
use tokio::net::TcpListener;
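// Measures connection setup: a local listener accepts (and drops) connections
// in a background task while each iteration calls `HttpConnector::call` to
// establish a fresh TCP connection.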
#[bench]
fn http_connector(b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("rt build");
let listener = rt
.block_on(TcpListener::bind(&SocketAddr::from(([127, 0, 0, 1], 0))))
.expect("bind");
let addr = listener.local_addr().expect("local_addr");
let dst: Uri = format!("http://{}/", addr).parse().expect("uri parse");
let mut connector = HttpConnector::new();
rt.spawn(async move {
loop {
let _ = listener.accept().await;
}
});
b.iter(|| {
rt.block_on(async {
connector.call(dst.clone()).await.expect("connect");
});
});
}

382  hyper/benches/end_to_end.rs  Normal file

@@ -0,0 +1,382 @@
#![feature(test)]
#![deny(warnings)]
extern crate test;
use std::net::SocketAddr;
use futures_util::future::join_all;
use hyper::client::HttpConnector;
use hyper::{body::HttpBody as _, Body, Method, Request, Response, Server};
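// End-to-end client/server benchmarks. Each `#[bench]` configures an `Opts`
// builder (defined at the bottom of this file) and calls `bench`, which spawns
// a hyper server and times full request/response round trips.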
// HTTP1
#[bench]
fn http1_consecutive_x1_empty(b: &mut test::Bencher) {
opts().bench(b)
}
#[bench]
fn http1_consecutive_x1_req_10b(b: &mut test::Bencher) {
opts()
.method(Method::POST)
.request_body(&[b's'; 10])
.bench(b)
}
#[bench]
fn http1_consecutive_x1_both_100kb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 100];
opts()
.method(Method::POST)
.request_body(body)
.response_body(body)
.bench(b)
}
#[bench]
fn http1_consecutive_x1_both_10mb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 1024 * 10];
opts()
.method(Method::POST)
.request_body(body)
.response_body(body)
.bench(b)
}
#[bench]
fn http1_parallel_x10_empty(b: &mut test::Bencher) {
opts().parallel(10).bench(b)
}
#[bench]
fn http1_parallel_x10_req_10mb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 1024 * 10];
opts()
.parallel(10)
.method(Method::POST)
.request_body(body)
.bench(b)
}
#[bench]
fn http1_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 10];
opts()
.parallel(10)
.method(Method::POST)
.request_chunks(body, 100)
.bench(b)
}
#[bench]
fn http1_parallel_x10_res_1mb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 1024 * 1];
opts().parallel(10).response_body(body).bench(b)
}
#[bench]
fn http1_parallel_x10_res_10mb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 1024 * 10];
opts().parallel(10).response_body(body).bench(b)
}
// HTTP2
const HTTP2_MAX_WINDOW: u32 = std::u32::MAX >> 1;
#[bench]
fn http2_consecutive_x1_empty(b: &mut test::Bencher) {
opts().http2().bench(b)
}
#[bench]
fn http2_consecutive_x1_req_10b(b: &mut test::Bencher) {
opts()
.http2()
.method(Method::POST)
.request_body(&[b's'; 10])
.bench(b)
}
#[bench]
fn http2_consecutive_x1_req_100kb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 100];
opts()
.http2()
.method(Method::POST)
.request_body(body)
.bench(b)
}
#[bench]
fn http2_parallel_x10_empty(b: &mut test::Bencher) {
opts().http2().parallel(10).bench(b)
}
#[bench]
fn http2_parallel_x10_req_10mb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 1024 * 10];
opts()
.http2()
.parallel(10)
.method(Method::POST)
.request_body(body)
.http2_stream_window(HTTP2_MAX_WINDOW)
.http2_conn_window(HTTP2_MAX_WINDOW)
.bench(b)
}
#[bench]
fn http2_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 10];
opts()
.http2()
.parallel(10)
.method(Method::POST)
.request_chunks(body, 100)
.bench(b)
}
#[bench]
fn http2_parallel_x10_req_10kb_100_chunks_adaptive_window(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 10];
opts()
.http2()
.parallel(10)
.method(Method::POST)
.request_chunks(body, 100)
.http2_adaptive_window()
.bench(b)
}
#[bench]
fn http2_parallel_x10_req_10kb_100_chunks_max_window(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 10];
opts()
.http2()
.parallel(10)
.method(Method::POST)
.request_chunks(body, 100)
.http2_stream_window(HTTP2_MAX_WINDOW)
.http2_conn_window(HTTP2_MAX_WINDOW)
.bench(b)
}
#[bench]
fn http2_parallel_x10_res_1mb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 1024 * 1];
opts()
.http2()
.parallel(10)
.response_body(body)
.http2_stream_window(HTTP2_MAX_WINDOW)
.http2_conn_window(HTTP2_MAX_WINDOW)
.bench(b)
}
#[bench]
fn http2_parallel_x10_res_10mb(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 1024 * 10];
opts()
.http2()
.parallel(10)
.response_body(body)
.http2_stream_window(HTTP2_MAX_WINDOW)
.http2_conn_window(HTTP2_MAX_WINDOW)
.bench(b)
}
// ===== Benchmark Options =====
struct Opts {
http2: bool,
http2_stream_window: Option<u32>,
http2_conn_window: Option<u32>,
http2_adaptive_window: bool,
parallel_cnt: u32,
request_method: Method,
request_body: Option<&'static [u8]>,
request_chunks: usize,
response_body: &'static [u8],
}
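// Defaults: HTTP/1, one request at a time, a GET with no body, and an empty
// response body. The builder methods on `Opts` override these per benchmark.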
fn opts() -> Opts {
Opts {
http2: false,
http2_stream_window: None,
http2_conn_window: None,
http2_adaptive_window: false,
parallel_cnt: 1,
request_method: Method::GET,
request_body: None,
request_chunks: 0,
response_body: b"",
}
}
impl Opts {
fn http2(mut self) -> Self {
self.http2 = true;
self
}
fn http2_stream_window(mut self, sz: impl Into<Option<u32>>) -> Self {
assert!(!self.http2_adaptive_window);
self.http2_stream_window = sz.into();
self
}
fn http2_conn_window(mut self, sz: impl Into<Option<u32>>) -> Self {
assert!(!self.http2_adaptive_window);
self.http2_conn_window = sz.into();
self
}
fn http2_adaptive_window(mut self) -> Self {
assert!(self.http2_stream_window.is_none());
assert!(self.http2_conn_window.is_none());
self.http2_adaptive_window = true;
self
}
fn method(mut self, m: Method) -> Self {
self.request_method = m;
self
}
fn request_body(mut self, body: &'static [u8]) -> Self {
self.request_body = Some(body);
self
}
fn request_chunks(mut self, chunk: &'static [u8], cnt: usize) -> Self {
assert!(cnt > 0);
self.request_body = Some(chunk);
self.request_chunks = cnt;
self
}
fn response_body(mut self, body: &'static [u8]) -> Self {
self.response_body = body;
self
}
fn parallel(mut self, cnt: u32) -> Self {
assert!(cnt > 0, "parallel count must be larger than 0");
self.parallel_cnt = cnt;
self
}
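// Spawns the server, builds a client with the configured HTTP/2 options, and
// times either one request per iteration or `parallel_cnt` concurrent requests
// joined with `join_all`, draining every response body. `b.bytes` is set to
// the request plus response bytes transferred per iteration.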
fn bench(self, b: &mut test::Bencher) {
use std::sync::Arc;
let _ = pretty_env_logger::try_init();
// Create a current-thread runtime.
let rt = Arc::new(
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("rt build"),
);
let exec = rt.clone();
let req_len = self.request_body.map(|b| b.len()).unwrap_or(0) as u64;
let req_len = if self.request_chunks > 0 {
req_len * self.request_chunks as u64
} else {
req_len
};
let bytes_per_iter = (req_len + self.response_body.len() as u64) * self.parallel_cnt as u64;
b.bytes = bytes_per_iter;
let addr = spawn_server(&rt, &self);
let connector = HttpConnector::new();
let client = hyper::Client::builder()
.http2_only(self.http2)
.http2_initial_stream_window_size(self.http2_stream_window)
.http2_initial_connection_window_size(self.http2_conn_window)
.http2_adaptive_window(self.http2_adaptive_window)
.build::<_, Body>(connector);
let url: hyper::Uri = format!("http://{}/hello", addr).parse().unwrap();
let make_request = || {
let chunk_cnt = self.request_chunks;
let body = if chunk_cnt > 0 {
let (mut tx, body) = Body::channel();
let chunk = self
.request_body
.expect("request_chunks means request_body");
exec.spawn(async move {
for _ in 0..chunk_cnt {
tx.send_data(chunk.into()).await.expect("send_data");
}
});
body
} else {
self.request_body
.map(Body::from)
.unwrap_or_else(Body::empty)
};
let mut req = Request::new(body);
*req.method_mut() = self.request_method.clone();
*req.uri_mut() = url.clone();
req
};
let send_request = |req: Request<Body>| {
let fut = client.request(req);
async {
let res = fut.await.expect("client wait");
let mut body = res.into_body();
while let Some(_chunk) = body.data().await {}
}
};
if self.parallel_cnt == 1 {
b.iter(|| {
let req = make_request();
rt.block_on(send_request(req));
});
} else {
b.iter(|| {
let futs = (0..self.parallel_cnt).map(|_| {
let req = make_request();
send_request(req)
});
// Wait for all of the in-flight requests to complete.
rt.block_on(join_all(futs));
});
}
}
}
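// Binds a hyper server on 127.0.0.1:0 with the benchmark's HTTP/2 settings;
// the service drains each request body and replies with the configured
// response body. The server is spawned onto the runtime and its local
// address returned.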
fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
use hyper::service::{make_service_fn, service_fn};
let addr = "127.0.0.1:0".parse().unwrap();
let body = opts.response_body;
let srv = rt.block_on(async move {
Server::bind(&addr)
.http2_only(opts.http2)
.http2_initial_stream_window_size(opts.http2_stream_window)
.http2_initial_connection_window_size(opts.http2_conn_window)
.http2_adaptive_window(opts.http2_adaptive_window)
.serve(make_service_fn(move |_| async move {
Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| async move {
let mut req_body = req.into_body();
while let Some(_chunk) = req_body.data().await {}
Ok::<_, hyper::Error>(Response::new(Body::from(body)))
}))
}))
});
let addr = srv.local_addr();
rt.spawn(async {
if let Err(err) = srv.await {
panic!("server error: {}", err);
}
});
addr
}

86  hyper/benches/pipeline.rs  Normal file

@@ -0,0 +1,86 @@
#![feature(test)]
#![deny(warnings)]
extern crate test;
use std::io::{Read, Write};
use std::net::TcpStream;
use std::sync::mpsc;
use std::time::Duration;
use tokio::sync::oneshot;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};
const PIPELINED_REQUESTS: usize = 16;
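// Each iteration writes `PIPELINED_REQUESTS` back-to-back GET requests on one
// connection (the server has `http1_pipeline_flush` enabled) and reads until
// the expected number of response bytes, measured once up front with a
// `Connection: close` request, has been received.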
#[bench]
fn hello_world_16(b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
let (_until_tx, until_rx) = oneshot::channel::<()>();
let addr = {
let (addr_tx, addr_rx) = mpsc::channel();
std::thread::spawn(move || {
let addr = "127.0.0.1:0".parse().unwrap();
let make_svc = make_service_fn(|_| async {
Ok::<_, hyper::Error>(service_fn(|_| async {
Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
}))
});
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("rt build");
let srv = rt.block_on(async move {
Server::bind(&addr)
.http1_pipeline_flush(true)
.serve(make_svc)
});
addr_tx.send(srv.local_addr()).unwrap();
let graceful = srv.with_graceful_shutdown(async {
until_rx.await.ok();
});
rt.block_on(async {
if let Err(e) = graceful.await {
panic!("server error: {}", e);
}
});
});
addr_rx.recv().unwrap()
};
let mut pipelined_reqs = Vec::new();
for _ in 0..PIPELINED_REQUESTS {
pipelined_reqs.extend_from_slice(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n");
}
let total_bytes = {
let mut tcp = TcpStream::connect(addr).unwrap();
tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
.unwrap();
let mut buf = Vec::new();
tcp.read_to_end(&mut buf).unwrap()
} * PIPELINED_REQUESTS;
let mut tcp = TcpStream::connect(addr).unwrap();
tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
let mut buf = [0u8; 8192];
b.bytes = (pipelined_reqs.len() + total_bytes) as u64;
b.iter(|| {
tcp.write_all(&pipelined_reqs).unwrap();
let mut sum = 0;
while sum < total_bytes {
sum += tcp.read(&mut buf).unwrap();
}
assert_eq!(sum, total_bytes);
});
}

212  hyper/benches/server.rs  Normal file

@@ -0,0 +1,212 @@
#![feature(test)]
#![deny(warnings)]
extern crate test;
use std::io::{Read, Write};
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
use std::time::Duration;
use futures_util::{stream, StreamExt};
use tokio::sync::oneshot;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Response, Server};
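// Runs a hyper server with the given extra header and body on a background
// thread, measures the size of one full response via a `Connection: close`
// request, then times repeated 35-byte GETs over a single keep-alive
// connection, reading until the measured number of bytes has arrived.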
macro_rules! bench_server {
($b:ident, $header:expr, $body:expr) => {{
let _ = pretty_env_logger::try_init();
let (_until_tx, until_rx) = oneshot::channel::<()>();
let addr = {
let (addr_tx, addr_rx) = mpsc::channel();
std::thread::spawn(move || {
let addr = "127.0.0.1:0".parse().unwrap();
let make_svc = make_service_fn(|_| async {
Ok::<_, hyper::Error>(service_fn(|_| async {
Ok::<_, hyper::Error>(
Response::builder()
.header($header.0, $header.1)
.header("content-type", "text/plain")
.body($body())
.unwrap(),
)
}))
});
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.expect("rt build");
let srv = rt.block_on(async move { Server::bind(&addr).serve(make_svc) });
addr_tx.send(srv.local_addr()).unwrap();
let graceful = srv.with_graceful_shutdown(async {
until_rx.await.ok();
});
rt.block_on(async move {
if let Err(e) = graceful.await {
panic!("server error: {}", e);
}
});
});
addr_rx.recv().unwrap()
};
let total_bytes = {
let mut tcp = TcpStream::connect(addr).unwrap();
tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
.unwrap();
let mut buf = Vec::new();
tcp.read_to_end(&mut buf).unwrap()
};
let mut tcp = TcpStream::connect(addr).unwrap();
tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
let mut buf = [0u8; 8192];
$b.bytes = 35 + total_bytes as u64;
$b.iter(|| {
tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
.unwrap();
let mut sum = 0;
while sum < total_bytes {
sum += tcp.read(&mut buf).unwrap();
}
assert_eq!(sum, total_bytes);
});
}};
}
fn body(b: &'static [u8]) -> hyper::Body {
b.into()
}
#[bench]
fn throughput_fixedsize_small_payload(b: &mut test::Bencher) {
bench_server!(b, ("content-length", "13"), || body(b"Hello, World!"))
}
#[bench]
fn throughput_fixedsize_large_payload(b: &mut test::Bencher) {
bench_server!(b, ("content-length", "1000000"), || body(
&[b'x'; 1_000_000]
))
}
#[bench]
fn throughput_fixedsize_many_chunks(b: &mut test::Bencher) {
bench_server!(b, ("content-length", "1000000"), || {
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
Body::wrap_stream(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
})
}
#[bench]
fn throughput_chunked_small_payload(b: &mut test::Bencher) {
bench_server!(b, ("transfer-encoding", "chunked"), || body(
b"Hello, World!"
))
}
#[bench]
fn throughput_chunked_large_payload(b: &mut test::Bencher) {
bench_server!(b, ("transfer-encoding", "chunked"), || body(
&[b'x'; 1_000_000]
))
}
#[bench]
fn throughput_chunked_many_chunks(b: &mut test::Bencher) {
bench_server!(b, ("transfer-encoding", "chunked"), || {
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
Body::wrap_stream(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
})
}
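// Raw-TCP baselines: a plain `std::net::TcpListener` thread writes back a
// hard-coded HTTP/1.1 response, giving a socket-level ceiling to compare the
// hyper server benchmarks against.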
#[bench]
fn raw_tcp_throughput_small_payload(b: &mut test::Bencher) {
let (tx, rx) = mpsc::channel();
let listener = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = listener.local_addr().unwrap();
std::thread::spawn(move || {
let mut sock = listener.accept().unwrap().0;
let mut buf = [0u8; 8192];
while rx.try_recv().is_err() {
sock.read(&mut buf).unwrap();
sock.write_all(
b"\
HTTP/1.1 200 OK\r\n\
Content-Length: 13\r\n\
Content-Type: text/plain; charset=utf-8\r\n\
Date: Fri, 12 May 2017 18:21:45 GMT\r\n\
\r\n\
Hello, World!\
",
)
.unwrap();
}
});
let mut tcp = TcpStream::connect(addr).unwrap();
let mut buf = [0u8; 4096];
b.bytes = 130 + 35;
b.iter(|| {
tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
.unwrap();
let n = tcp.read(&mut buf).unwrap();
assert_eq!(n, 130);
});
tx.send(()).unwrap();
}
#[bench]
fn raw_tcp_throughput_large_payload(b: &mut test::Bencher) {
let (tx, rx) = mpsc::channel();
let listener = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = listener.local_addr().unwrap();
let srv_head = b"\
HTTP/1.1 200 OK\r\n\
Content-Length: 1000000\r\n\
Content-Type: text/plain; charset=utf-8\r\n\
Date: Fri, 12 May 2017 18:21:45 GMT\r\n\
\r\n\
";
std::thread::spawn(move || {
let mut sock = listener.accept().unwrap().0;
let mut buf = [0u8; 8192];
while rx.try_recv().is_err() {
let r = sock.read(&mut buf).unwrap();
if r == 0 {
break;
}
sock.write_all(srv_head).unwrap();
sock.write_all(&[b'x'; 1_000_000]).unwrap();
}
});
let mut tcp = TcpStream::connect(addr).unwrap();
let mut buf = [0u8; 8192];
let expect_read = srv_head.len() + 1_000_000;
b.bytes = expect_read as u64 + 35;
b.iter(|| {
tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
.unwrap();
let mut sum = 0;
while sum < expect_read {
sum += tcp.read(&mut buf).unwrap();
}
assert_eq!(sum, expect_read);
});
tx.send(()).unwrap();
}