Skip to content

Instantly share code, notes, and snippets.

@fundon
Last active January 23, 2024 08:34
Show Gist options
  • Star 2 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save fundon/b16c5d9271d7521b7c91f4dbe4a4d8b5 to your computer and use it in GitHub Desktop.
smol (16 threads) vs tokio (16 threads)
//! An HTTP server based on `hyper`.
//!
//! Run with:
//!
//! ```
//! cargo run --example hyper-server
//! ```
//!
//! Open in the browser any of these addresses:
//!
//! - http://localhost:8000/
use std::io;
use std::net::{Shutdown, TcpListener, TcpStream};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::thread;
use anyhow::{Error, Result};
use futures::prelude::*;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use smol::{Async, Task};
/// Handles one HTTP request by replying with a fixed greeting.
///
/// The request itself is ignored; every path and method gets the same body.
async fn serve(_req: Request<Body>) -> Result<Response<Body>> {
    let body = Body::from("Hello world!");
    Ok(Response::new(body))
}
/// Listens for incoming connections and serves them.
///
/// Glue between hyper and smol: connections are accepted through
/// `SmolListener` and hyper's internal tasks run on `SmolExecutor`.
/// Returns only if the server stops with an error.
async fn listen(listener: Async<TcpListener>) -> Result<()> {
    // Start a hyper server.
    Server::builder(SmolListener::new(listener))
        .executor(SmolExecutor)
        // Each new connection gets a fresh `service_fn(serve)` service.
        .serve(make_service_fn(move |_| async {
            Ok::<_, Error>(service_fn(serve))
        }))
        .await?;
    Ok(())
}
/// Entry point: spins up a pool of executor threads, then runs the HTTP
/// server (and the reactor) on the main thread as well.
fn main() -> Result<()> {
    // Size the executor pool to the machine's logical CPU count instead of a
    // hard-coded 8 — this matches the intent of the commented-out
    // `num_cpus::get().max(1)` without a third-party crate. Falls back to a
    // single extra thread if the count cannot be determined.
    let threads = thread::available_parallelism().map(usize::from).unwrap_or(1);
    for _ in 0..threads {
        // Each thread drives the smol executor/reactor forever.
        thread::spawn(|| smol::run(future::pending::<()>()));
    }
    // Start the HTTP server; `smol::run` blocks this thread until `listen`
    // completes (i.e. until the server errors out).
    smol::run(listen(Async::<TcpListener>::bind("127.0.0.1:8000")?))
}
/// Executor handle that lets hyper spawn its internal futures onto smol.
#[derive(Clone)]
struct SmolExecutor;

impl<F: Future + Send + 'static> hyper::rt::Executor<F> for SmolExecutor {
    fn execute(&self, fut: F) {
        // Run the future in the background and discard its output; `detach`
        // keeps the task alive without holding on to a handle.
        Task::spawn(async move {
            let _ = fut.await;
        })
        .detach();
    }
}
/// Wrapper around an async TCP listener that feeds connections to hyper.
struct SmolListener {
    listener: Async<TcpListener>,
}

impl SmolListener {
    /// Wraps an already-bound async TCP listener.
    fn new(listener: Async<TcpListener>) -> Self {
        SmolListener { listener }
    }
}
/// Adapts the smol listener to hyper's `Accept` trait.
impl hyper::server::accept::Accept for SmolListener {
    type Conn = SmolStream;
    type Error = Error;

    fn poll_accept(
        mut self: Pin<&mut Self>,
        cx: &mut Context,
    ) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
        // Poll the listener's incoming-connections stream once.
        let poll = Pin::new(&mut self.listener.incoming()).poll_next(cx);
        // The original used `.unwrap()` on the stream's Option, which would
        // panic if the stream ever terminated. Match explicitly instead:
        // end-of-stream cleanly signals end-of-accept to hyper.
        match futures::ready!(poll) {
            Some(Ok(stream)) => Poll::Ready(Some(Ok(SmolStream(stream)))),
            Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
            None => Poll::Ready(None),
        }
    }
}
/// A TCP connection.
struct SmolStream(Async<TcpStream>);

/// Marker impl so the stream could also be used on hyper's client side.
/// Reports default connection metadata (no extra negotiated properties).
impl hyper::client::connect::Connection for SmolStream {
    fn connected(&self) -> hyper::client::connect::Connected {
        hyper::client::connect::Connected::new()
    }
}
/// Bridges tokio's `AsyncRead` (which hyper requires) onto the inner smol
/// stream, which implements the `futures` crate's `AsyncRead`.
/// NOTE(review): the `&mut [u8]` buffer signature is the tokio 0.2-era API
/// (tokio 1.x uses `ReadBuf`) — confirm the tokio version in Cargo.toml.
impl tokio::io::AsyncRead for SmolStream {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        // Delegate directly to the inner `Async<TcpStream>`.
        Pin::new(&mut self.0).poll_read(cx, buf)
    }
}
/// Bridges tokio's `AsyncWrite` onto the inner smol stream's
/// `futures`-crate `AsyncWrite` implementation.
impl tokio::io::AsyncWrite for SmolStream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // Delegate directly to the inner `Async<TcpStream>`.
        Pin::new(&mut self.0).poll_write(cx, buf)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.0).poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Shut down only the write half, synchronously on the raw socket;
        // this never blocks, so the waker in `_cx` is not needed and the
        // call completes immediately.
        self.0.get_ref().shutdown(Shutdown::Write)?;
        Poll::Ready(Ok(()))
    }
}
17 threads observed on macOS
λ autocannon 127.0.0.1:8000
Running 10s test @ http://127.0.0.1:8000
10 connections
┌─────────┬──────┬──────┬───────┬──────┬─────────┬─────────┬──────────┐
│ Stat │ 2.5% │ 50% │ 97.5% │ 99% │ Avg │ Stdev │ Max │
├─────────┼──────┼──────┼───────┼──────┼─────────┼─────────┼──────────┤
│ Latency │ 0 ms │ 0 ms │ 1 ms │ 1 ms │ 0.25 ms │ 0.48 ms │ 10.43 ms │
└─────────┴──────┴──────┴───────┴──────┴─────────┴─────────┴──────────┘
┌───────────┬────────┬────────┬─────────┬─────────┬─────────┬─────────┬────────┐
│ Stat │ 1% │ 2.5% │ 50% │ 97.5% │ Avg │ Stdev │ Min │
├───────────┼────────┼────────┼─────────┼─────────┼─────────┼─────────┼────────┤
│ Req/Sec │ 10863 │ 10863 │ 11447 │ 11631 │ 11393.1 │ 206.72 │ 10857 │
├───────────┼────────┼────────┼─────────┼─────────┼─────────┼─────────┼────────┤
│ Bytes/Sec │ 956 kB │ 956 kB │ 1.01 MB │ 1.02 MB │ 1 MB │ 18.2 kB │ 955 kB │
└───────────┴────────┴────────┴─────────┴─────────┴─────────┴─────────┴────────┘
Req/Bytes counts sampled once per second.
125k requests in 11.03s, 11 MB read
#![deny(warnings)]
use anyhow::{Error, Result};
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
/// Handles one HTTP request by replying with a fixed greeting; the request
/// itself is ignored.
async fn serve(_req: Request<Body>) -> Result<Response<Body>> {
    let body = Body::from("Hello world!");
    Ok(Response::new(body))
}
// NOTE(review): `core_threads = 16` is tokio 0.2 macro syntax (tokio 1.x
// renamed it to `worker_threads`) — confirm the tokio version in Cargo.toml.
//#[tokio::main(threaded_scheduler)]
#[tokio::main(core_threads = 16)]
/// Runs a 16-worker-thread tokio runtime serving "Hello world!" on port 8080.
pub async fn main() -> Result<()> {
    // Initialize `log`-facade output so library logging is visible.
    pretty_env_logger::init();
    // For every connection, we must make a `Service` to handle all
    // incoming HTTP requests on said connection.
    let make_svc = make_service_fn(|_conn| {
        // This is the `Service` that will handle the connection.
        // `service_fn` is a helper to convert a function that
        // returns a Response into a `Service`.
        async { Ok::<_, Error>(service_fn(serve)) }
    });
    let addr = ([127, 0, 0, 1], 8080).into();
    let server = Server::bind(&addr).serve(make_svc);
    println!("Listening on http://{}", addr);
    // Drive the accept loop; returns only on a fatal server error.
    server.await?;
    Ok(())
}
17 threads observed on macOS
λ autocannon 127.0.0.1:8080
Running 10s test @ http://127.0.0.1:8080
10 connections
┌─────────┬──────┬──────┬───────┬──────┬─────────┬─────────┬─────────┐
│ Stat │ 2.5% │ 50% │ 97.5% │ 99% │ Avg │ Stdev │ Max │
├─────────┼──────┼──────┼───────┼──────┼─────────┼─────────┼─────────┤
│ Latency │ 0 ms │ 0 ms │ 0 ms │ 0 ms │ 0.01 ms │ 0.05 ms │ 6.56 ms │
└─────────┴──────┴──────┴───────┴──────┴─────────┴─────────┴─────────┘
┌───────────┬─────────┬─────────┬─────────┬─────────┬──────────┬────────┬─────────┐
│ Stat │ 1% │ 2.5% │ 50% │ 97.5% │ Avg │ Stdev │ Min │
├───────────┼─────────┼─────────┼─────────┼─────────┼──────────┼────────┼─────────┤
│ Req/Sec │ 40127 │ 40127 │ 42687 │ 44479 │ 42456.73 │ 1293.9 │ 40125 │
├───────────┼─────────┼─────────┼─────────┼─────────┼──────────┼────────┼─────────┤
│ Bytes/Sec │ 3.53 MB │ 3.53 MB │ 3.76 MB │ 3.91 MB │ 3.74 MB │ 114 kB │ 3.53 MB │
└───────────┴─────────┴─────────┴─────────┴─────────┴──────────┴────────┴─────────┘
Req/Bytes counts sampled once per second.
467k requests in 11.04s, 41.1 MB read
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment