//! HTTP/1 Server Connections

use std::error::Error as StdError;
use std::fmt;
use std::future::Future;
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;

use bytes::Bytes;
use tokio::io::{AsyncRead, AsyncWrite};

use crate::body::{Body as IncomingBody, HttpBody as Body};
use crate::proto;
use crate::service::HttpService;

type Http1Dispatcher<T, B, S> = proto::h1::Dispatcher<
    proto::h1::dispatch::Server<S, IncomingBody>,
    B,
    T,
    proto::ServerTransaction,
>;

pin_project_lite::pin_project! {
    /// A future binding an http1 connection with a Service.
    ///
    /// Polling this future will drive HTTP forward.
    #[must_use = "futures do nothing unless polled"]
    pub struct Connection<T, S>
    where
        S: HttpService<IncomingBody>,
    {
        conn: Http1Dispatcher<T, S::ResBody, S>,
    }
}

/// A configuration builder for HTTP/1 server connections.
#[derive(Clone, Debug)]
pub struct Builder {
    h1_half_close: bool,
    h1_keep_alive: bool,
    h1_title_case_headers: bool,
    h1_preserve_header_case: bool,
    h1_header_read_timeout: Option<Duration>,
    h1_writev: Option<bool>,
    max_buf_size: Option<usize>,
    pipeline_flush: bool,
}

/// Deconstructed parts of a `Connection`.
///
/// This allows taking apart a `Connection` at a later time, in order to
/// reclaim the IO object, and additional related pieces.
#[derive(Debug)]
pub struct Parts<T, S> {
    /// The original IO object used in the handshake.
    pub io: T,
    /// A buffer of bytes that have been read but not processed as HTTP.
    ///
    /// If the client sent additional bytes after its last request, and
    /// this connection "ended" with an upgrade, the read buffer will contain
    /// those bytes.
    ///
    /// You will want to check for any existing bytes if you plan to continue
    /// communicating on the IO object.
    pub read_buf: Bytes,
    /// The `Service` used to serve this connection.
    pub service: S,
    _inner: (),
}

// ===== impl Connection =====

impl<I, S> fmt::Debug for Connection<I, S>
where
    S: HttpService<IncomingBody>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Connection").finish()
    }
}

impl<I, B, S> Connection<I, S>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: AsyncRead + AsyncWrite + Unpin,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    /// Start a graceful shutdown process for this connection.
    ///
    /// This `Connection` should continue to be polled until shutdown
    /// can finish.
    ///
    /// # Note
    ///
    /// This should only be called while the `Connection` future is still
    /// pending. If called after `Connection::poll` has resolved, this does
    /// nothing.
    pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
        self.conn.disable_keep_alive();
    }

    /// Return the inner IO object, and additional information.
    ///
    /// If the IO object has been "rewound", the returned IO will not contain
    /// those rewound bytes.
    /// This should only be called after `poll_without_shutdown` signals
    /// that the connection is "done". Otherwise, it may not have finished
    /// flushing all necessary HTTP bytes.
    ///
    /// # Panics
    /// This method will panic if this connection is using an h2 protocol.
    pub fn into_parts(self) -> Parts<I, S> {
        let (io, read_buf, dispatch) = self.conn.into_inner();
        Parts {
            io,
            read_buf,
            service: dispatch.into_service(),
            _inner: (),
        }
    }

    /// Poll the connection for completion, but without calling `shutdown`
    /// on the underlying IO.
    ///
    /// This is useful to allow running a connection while doing an HTTP
    /// upgrade. Once the upgrade is completed, the connection would be "done",
    /// but it is not desired to actually shutdown the IO object. Instead you
    /// would take it back using `into_parts`.
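    ///
    /// # Example
    ///
    /// A rough sketch of driving a connection to completion without shutting
    /// down the IO, then reclaiming it with `into_parts`. The `io` and
    /// `service` values are placeholders for whatever transport and service
    /// you are using; the snippet is illustrative, not a complete program.
    ///
    /// ```ignore
    /// let mut conn = Builder::new().serve_connection(io, service);
    ///
    /// // Drive the HTTP exchange, but skip the final shutdown of the IO.
    /// futures_util::future::poll_fn(|cx| conn.poll_without_shutdown(cx)).await?;
    ///
    /// // The exchange is done; take the IO object (and any buffered bytes) back.
    /// let parts = conn.into_parts();
    /// let (io, read_buf) = (parts.io, parts.read_buf);
    /// ```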
    pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>>
    where
        S: Unpin,
        S::Future: Unpin,
        B: Unpin,
    {
        self.conn.poll_without_shutdown(cx)
    }

    /// Prevent shutdown of the underlying IO object at the end of servicing the
    /// request, instead run `into_parts`. This is a convenience wrapper over
    /// `poll_without_shutdown`.
    ///
    /// # Error
    ///
    /// This errors if the underlying connection protocol is not HTTP/1.
    pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<I, S>>>
    where
        S: Unpin,
        S::Future: Unpin,
        B: Unpin,
    {
        let mut zelf = Some(self);
        futures_util::future::poll_fn(move |cx| {
            ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?;
            Poll::Ready(Ok(zelf.take().unwrap().into_parts()))
        })
    }

    /// Enable this connection to support higher-level HTTP upgrades.
    ///
    /// See [the `upgrade` module](crate::upgrade) for more.
    pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<I, S>
    where
        I: Send,
    {
        upgrades::UpgradeableConnection { inner: Some(self) }
    }
}

impl<I, B, S> Future for Connection<I, S>
where
    S: HttpService<IncomingBody, ResBody = B>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    I: AsyncRead + AsyncWrite + Unpin + 'static,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Output = crate::Result<()>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match ready!(Pin::new(&mut self.conn).poll(cx)) {
            Ok(done) => {
                match done {
                    proto::Dispatched::Shutdown => {}
                    proto::Dispatched::Upgrade(pending) => {
                        // With no `Send` bound on `I`, we can't try to do
                        // upgrades here. In case a user was trying to use
                        // `Body::on_upgrade` with this API, send a special
                        // error letting them know about that.
                        pending.manual();
                    }
                };
                return Poll::Ready(Ok(()));
            }
            Err(e) => Poll::Ready(Err(e)),
        }
    }
}

// ===== impl Builder =====

impl Builder {
    /// Create a new connection builder.
    pub fn new() -> Self {
        Self {
            h1_half_close: false,
            h1_keep_alive: true,
            h1_title_case_headers: false,
            h1_preserve_header_case: false,
            h1_header_read_timeout: None,
            h1_writev: None,
            max_buf_size: None,
            pipeline_flush: false,
        }
    }

    /// Set whether HTTP/1 connections should support half-closures.
    ///
    /// Clients can choose to shut down their write-side while waiting
    /// for the server to respond. Setting this to `true` will
    /// prevent closing the connection immediately if `read`
    /// detects an EOF in the middle of a request.
    ///
    /// Default is `false`.
    pub fn half_close(&mut self, val: bool) -> &mut Self {
        self.h1_half_close = val;
        self
    }

    /// Enables or disables HTTP/1 keep-alive.
    ///
    /// Default is true.
    pub fn keep_alive(&mut self, val: bool) -> &mut Self {
        self.h1_keep_alive = val;
        self
    }

    /// Set whether HTTP/1 connections will write header names as title case at
    /// the socket level.
    ///
    /// Default is false.
    pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self {
        self.h1_title_case_headers = enabled;
        self
    }

    /// Set whether to support preserving original header cases.
    ///
    /// Currently, this will record the original cases received, and store them
    /// in a private extension on the `Request`. It will also look for and use
    /// such an extension in any provided `Response`.
    ///
    /// Since the relevant extension is still private, there is no way to
    /// interact with the original cases. The only effect this can have now is
    /// to forward the cases in a proxy-like fashion.
    ///
    /// Default is false.
    pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self {
        self.h1_preserve_header_case = enabled;
        self
    }

    /// Set a timeout for reading client request headers. If a client does not
    /// transmit the entire header within this time, the connection is closed.
    ///
    /// Default is None.
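    ///
    /// # Example
    ///
    /// A minimal sketch of setting the timeout. The 30-second value below is
    /// only an illustration, not a recommended default:
    ///
    /// ```ignore
    /// let mut http = Builder::new();
    /// // Close connections whose request headers take longer than 30 seconds
    /// // to arrive in full.
    /// http.header_read_timeout(Duration::from_secs(30));
    /// ```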
    pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
        self.h1_header_read_timeout = Some(read_timeout);
        self
    }

    /// Set whether HTTP/1 connections should try to use vectored writes,
    /// or always flatten into a single buffer.
    ///
    /// Note that setting this to false may mean more copies of body data,
    /// but may also improve performance when an IO transport doesn't
    /// support vectored writes well, such as most TLS implementations.
    ///
    /// Setting this to true will force hyper to use the queued strategy,
    /// which may eliminate unnecessary cloning on some TLS backends.
    ///
    /// Default is `auto`, in which mode hyper will try to guess which
    /// strategy to use.
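    ///
    /// # Example
    ///
    /// A minimal sketch; flattening writes here is only an illustration of
    /// the knob, not a blanket recommendation:
    ///
    /// ```ignore
    /// let mut http = Builder::new();
    /// // The transport is wrapped in TLS, which tends to handle one large
    /// // write better than many small vectored writes.
    /// http.writev(false);
    /// ```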
    pub fn writev(&mut self, val: bool) -> &mut Self {
        self.h1_writev = Some(val);
        self
    }

    /// Set the maximum buffer size for the connection.
    ///
    /// Default is ~400kb.
    ///
    /// # Panics
    ///
    /// The minimum value allowed is 8192. This method panics if the passed
    /// `max` is less than the minimum.
    pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
        assert!(
            max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
            "the max_buf_size cannot be smaller than the minimum that h1 specifies."
        );
        self.max_buf_size = Some(max);
        self
    }

    /// Aggregates flushes to better support pipelined responses.
    ///
    /// Experimental, may have bugs.
    ///
    /// Default is false.
    pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self {
        self.pipeline_flush = enabled;
        self
    }

    // /// Set the timer used in background tasks.
    // pub fn timer<M>(&mut self, timer: M) -> &mut Self
    // where
    //     M: Timer + Send + Sync + 'static,
    // {
    //     self.timer = Time::Timer(Arc::new(timer));
    //     self
    // }

    /// Bind a connection together with a [`Service`](crate::service::Service).
    ///
    /// This returns a Future that must be polled in order for HTTP to be
    /// driven on the connection.
    ///
    /// # Example
    ///
    /// ```
    /// # use hyper::{Body as Incoming, Request, Response};
    /// # use hyper::service::Service;
    /// # use hyper::server::conn::http1::Builder;
    /// # use tokio::io::{AsyncRead, AsyncWrite};
    /// # async fn run<I, S>(some_io: I, some_service: S)
    /// # where
    /// #     I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
    /// #     S: Service<Request<Incoming>, Response=hyper::Response<Incoming>> + Send + 'static,
    /// #     S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    /// #     S::Future: Send,
    /// # {
    /// let http = Builder::new();
    /// let conn = http.serve_connection(some_io, some_service);
    ///
    /// if let Err(e) = conn.await {
    ///     eprintln!("server connection error: {}", e);
    /// }
    /// # }
    /// # fn main() {}
    /// ```
    pub fn serve_connection<I, S>(&self, io: I, service: S) -> Connection<I, S>
    where
        S: HttpService<IncomingBody>,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        S::ResBody: 'static,
        <S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
        I: AsyncRead + AsyncWrite + Unpin,
    {
        let mut conn = proto::Conn::new(io);
        if !self.h1_keep_alive {
            conn.disable_keep_alive();
        }
        if self.h1_half_close {
            conn.set_allow_half_close();
        }
        if self.h1_title_case_headers {
            conn.set_title_case_headers();
        }
        if self.h1_preserve_header_case {
            conn.set_preserve_header_case();
        }
        if let Some(header_read_timeout) = self.h1_header_read_timeout {
            conn.set_http1_header_read_timeout(header_read_timeout);
        }
        if let Some(writev) = self.h1_writev {
            if writev {
                conn.set_write_strategy_queue();
            } else {
                conn.set_write_strategy_flatten();
            }
        }
        conn.set_flush_pipeline(self.pipeline_flush);
        if let Some(max) = self.max_buf_size {
            conn.set_max_buf_size(max);
        }
        let sd = proto::h1::dispatch::Server::new(service);
        let proto = proto::h1::Dispatcher::new(sd, conn);
        Connection { conn: proto }
    }
}

mod upgrades {
    use crate::upgrade::Upgraded;

    use super::*;

    // A future binding a connection with a Service with Upgrade support.
    //
    // This type is unnameable outside the crate.
    #[must_use = "futures do nothing unless polled"]
    #[allow(missing_debug_implementations)]
    pub struct UpgradeableConnection<T, S>
    where
        S: HttpService<IncomingBody>,
    {
        pub(super) inner: Option<Connection<T, S>>,
    }

    impl<I, B, S> UpgradeableConnection<I, S>
    where
        S: HttpService<IncomingBody, ResBody = B>,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        I: AsyncRead + AsyncWrite + Unpin,
        B: Body + 'static,
        B::Error: Into<Box<dyn StdError + Send + Sync>>,
    {
        /// Start a graceful shutdown process for this connection.
        ///
        /// This `Connection` should continue to be polled until shutdown
        /// can finish.
        pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
            Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown()
        }
    }

    impl<I, B, S> Future for UpgradeableConnection<I, S>
    where
        S: HttpService<IncomingBody, ResBody = B>,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
        B: Body + 'static,
        B::Error: Into<Box<dyn StdError + Send + Sync>>,
    {
        type Output = crate::Result<()>;

        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) {
                Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),
                Ok(proto::Dispatched::Upgrade(pending)) => {
                    let (io, buf, _) = self.inner.take().unwrap().conn.into_inner();
                    pending.fulfill(Upgraded::new(io, buf));
                    Poll::Ready(Ok(()))
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        }
    }
}
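// NOTE: A rough sketch of how the pieces in this module fit together for
// upgrades, kept as a comment for orientation. The `io` and `handler` names
// are placeholders and the snippet is illustrative rather than exact:
//
//     // Serving side: opt the connection into upgrade support.
//     let conn = Builder::new()
//         .serve_connection(io, service_fn(handler))
//         .with_upgrades();
//     conn.await?;
//
//     // Handler side: after replying `101 Switching Protocols`, the upgraded
//     // IO can be awaited via `hyper::upgrade::on(req)` and then driven with
//     // whatever protocol was negotiated.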