1 //! The Tokio runtime.
2 //!
3 //! Unlike other Rust programs, asynchronous applications require runtime
4 //! support. In particular, the following runtime services are necessary:
5 //!
6 //! * An **I/O event loop**, called the driver, which drives I/O resources and
7 //!   dispatches I/O events to tasks that depend on them.
8 //! * A **scheduler** to execute [tasks] that use these I/O resources.
9 //! * A **timer** for scheduling work to run after a set period of time.
10 //!
11 //! Tokio's [`Runtime`] bundles all of these services as a single type, allowing
12 //! them to be started, shut down, and configured together. However, often it is
13 //! not required to configure a [`Runtime`] manually, and a user may just use the
14 //! [`tokio::main`] attribute macro, which creates a [`Runtime`] under the hood.
15 //!
16 //! # Usage
17 //!
18 //! When no fine tuning is required, the [`tokio::main`] attribute macro can be
19 //! used.
20 //!
21 //! ```no_run
22 //! use tokio::net::TcpListener;
23 //! use tokio::io::{AsyncReadExt, AsyncWriteExt};
24 //!
25 //! #[tokio::main]
26 //! async fn main() -> Result<(), Box<dyn std::error::Error>> {
27 //!     let listener = TcpListener::bind("127.0.0.1:8080").await?;
28 //!
29 //!     loop {
30 //!         let (mut socket, _) = listener.accept().await?;
31 //!
32 //!         tokio::spawn(async move {
33 //!             let mut buf = [0; 1024];
34 //!
35 //!             // In a loop, read data from the socket and write the data back.
36 //!             loop {
37 //!                 let n = match socket.read(&mut buf).await {
38 //!                     // socket closed
39 //!                     Ok(n) if n == 0 => return,
40 //!                     Ok(n) => n,
41 //!                     Err(e) => {
42 //!                         println!("failed to read from socket; err = {:?}", e);
43 //!                         return;
44 //!                     }
45 //!                 };
46 //!
47 //!                 // Write the data back
48 //!                 if let Err(e) = socket.write_all(&buf[0..n]).await {
49 //!                     println!("failed to write to socket; err = {:?}", e);
50 //!                     return;
51 //!                 }
52 //!             }
53 //!         });
54 //!     }
55 //! }
56 //! ```
57 //!
58 //! From within the context of the runtime, additional tasks are spawned using
59 //! the [`tokio::spawn`] function. Futures spawned using this function will be
60 //! executed on the same thread pool used by the [`Runtime`].
61 //!
62 //! A [`Runtime`] instance can also be used directly.
63 //!
64 //! ```no_run
65 //! use tokio::net::TcpListener;
66 //! use tokio::io::{AsyncReadExt, AsyncWriteExt};
67 //! use tokio::runtime::Runtime;
68 //!
69 //! fn main() -> Result<(), Box<dyn std::error::Error>> {
70 //!     // Create the runtime
//!     let rt = Runtime::new()?;
72 //!
73 //!     // Spawn the root task
74 //!     rt.block_on(async {
75 //!         let listener = TcpListener::bind("127.0.0.1:8080").await?;
76 //!
77 //!         loop {
78 //!             let (mut socket, _) = listener.accept().await?;
79 //!
80 //!             tokio::spawn(async move {
81 //!                 let mut buf = [0; 1024];
82 //!
83 //!                 // In a loop, read data from the socket and write the data back.
84 //!                 loop {
85 //!                     let n = match socket.read(&mut buf).await {
86 //!                         // socket closed
87 //!                         Ok(n) if n == 0 => return,
88 //!                         Ok(n) => n,
89 //!                         Err(e) => {
90 //!                             println!("failed to read from socket; err = {:?}", e);
91 //!                             return;
92 //!                         }
93 //!                     };
94 //!
95 //!                     // Write the data back
96 //!                     if let Err(e) = socket.write_all(&buf[0..n]).await {
97 //!                         println!("failed to write to socket; err = {:?}", e);
98 //!                         return;
99 //!                     }
100 //!                 }
101 //!             });
102 //!         }
103 //!     })
104 //! }
105 //! ```
106 //!
107 //! ## Runtime Configurations
108 //!
109 //! Tokio provides multiple task scheduling strategies, suitable for different
110 //! applications. The [runtime builder] or `#[tokio::main]` attribute may be
111 //! used to select which scheduler to use.
112 //!
113 //! #### Multi-Thread Scheduler
114 //!
115 //! The multi-thread scheduler executes futures on a _thread pool_, using a
116 //! work-stealing strategy. By default, it will start a worker thread for each
117 //! CPU core available on the system. This tends to be the ideal configuration
118 //! for most applications. The multi-thread scheduler requires the `rt-multi-thread`
119 //! feature flag, and is selected by default:
120 //! ```
121 //! use tokio::runtime;
122 //!
123 //! # fn main() -> Result<(), Box<dyn std::error::Error>> {
124 //! let threaded_rt = runtime::Runtime::new()?;
125 //! # Ok(()) }
126 //! ```
127 //!
128 //! Most applications should use the multi-thread scheduler, except in some
129 //! niche use-cases, such as when running only a single thread is required.
130 //!
131 //! #### Current-Thread Scheduler
132 //!
133 //! The current-thread scheduler provides a _single-threaded_ future executor.
134 //! All tasks will be created and executed on the current thread. This requires
135 //! the `rt` feature flag.
136 //! ```
137 //! use tokio::runtime;
138 //!
139 //! # fn main() -> Result<(), Box<dyn std::error::Error>> {
140 //! let rt = runtime::Builder::new_current_thread()
141 //!     .build()?;
142 //! # Ok(()) }
143 //! ```
144 //!
145 //! #### Resource drivers
146 //!
147 //! When configuring a runtime by hand, no resource drivers are enabled by
148 //! default. In this case, attempting to use networking types or time types will
149 //! fail. In order to enable these types, the resource drivers must be enabled.
150 //! This is done with [`Builder::enable_io`] and [`Builder::enable_time`]. As a
151 //! shorthand, [`Builder::enable_all`] enables both resource drivers.
152 //!
153 //! ## Lifetime of spawned threads
154 //!
155 //! The runtime may spawn threads depending on its configuration and usage. The
156 //! multi-thread scheduler spawns threads to schedule tasks and for `spawn_blocking`
157 //! calls.
158 //!
159 //! While the `Runtime` is active, threads may shut down after periods of being
160 //! idle. Once `Runtime` is dropped, all runtime threads have usually been
161 //! terminated, but in the presence of unstoppable spawned work are not
162 //! guaranteed to have been terminated. See the
163 //! [struct level documentation](Runtime#shutdown) for more details.
164 //!
165 //! [tasks]: crate::task
166 //! [`Runtime`]: Runtime
167 //! [`tokio::spawn`]: crate::spawn
168 //! [`tokio::main`]: ../attr.main.html
169 //! [runtime builder]: crate::runtime::Builder
170 //! [`Runtime::new`]: crate::runtime::Runtime::new
171 //! [`Builder::threaded_scheduler`]: crate::runtime::Builder::threaded_scheduler
172 //! [`Builder::enable_io`]: crate::runtime::Builder::enable_io
173 //! [`Builder::enable_time`]: crate::runtime::Builder::enable_time
174 //! [`Builder::enable_all`]: crate::runtime::Builder::enable_all
175 //!
176 //! # Detailed runtime behavior
177 //!
178 //! This section gives more details into how the Tokio runtime will schedule
179 //! tasks for execution.
180 //!
181 //! At its most basic level, a runtime has a collection of tasks that need to be
182 //! scheduled. It will repeatedly remove a task from that collection and
183 //! schedule it (by calling [`poll`]). When the collection is empty, the thread
184 //! will go to sleep until a task is added to the collection.
185 //!
186 //! However, the above is not sufficient to guarantee a well-behaved runtime.
187 //! For example, the runtime might have a single task that is always ready to be
188 //! scheduled, and schedule that task every time. This is a problem because it
189 //! starves other tasks by not scheduling them. To solve this, Tokio provides
190 //! the following fairness guarantee:
191 //!
192 //! > If the total number of tasks does not grow without bound, and no task is
193 //! > [blocking the thread], then it is guaranteed that tasks are scheduled
194 //! > fairly.
195 //!
196 //! Or, more formally:
197 //!
198 //! > Under the following two assumptions:
199 //! >
200 //! > * There is some number `MAX_TASKS` such that the total number of tasks on
201 //! >   the runtime at any specific point in time never exceeds `MAX_TASKS`.
202 //! > * There is some number `MAX_SCHEDULE` such that calling [`poll`] on any
203 //! >   task spawned on the runtime returns within `MAX_SCHEDULE` time units.
204 //! >
205 //! > Then, there is some number `MAX_DELAY` such that when a task is woken, it
206 //! > will be scheduled by the runtime within `MAX_DELAY` time units.
207 //!
208 //! (Here, `MAX_TASKS` and `MAX_SCHEDULE` can be any number and the user of
209 //! the runtime may choose them. The `MAX_DELAY` number is controlled by the
210 //! runtime, and depends on the value of `MAX_TASKS` and `MAX_SCHEDULE`.)
211 //!
212 //! Other than the above fairness guarantee, there is no guarantee about the
213 //! order in which tasks are scheduled. There is also no guarantee that the
214 //! runtime is equally fair to all tasks. For example, if the runtime has two
215 //! tasks A and B that are both ready, then the runtime may schedule A five
216 //! times before it schedules B. This is the case even if A yields using
217 //! [`yield_now`]. All that is guaranteed is that it will schedule B eventually.
218 //!
219 //! Normally, tasks are scheduled only if they have been woken by calling
220 //! [`wake`] on their waker. However, this is not guaranteed, and Tokio may
221 //! schedule tasks that have not been woken under some circumstances. This is
222 //! called a spurious wakeup.
223 //!
224 //! ## IO and timers
225 //!
226 //! Beyond just scheduling tasks, the runtime must also manage IO resources and
227 //! timers. It does this by periodically checking whether there are any IO
228 //! resources or timers that are ready, and waking the relevant task so that
229 //! it will be scheduled.
230 //!
231 //! These checks are performed periodically between scheduling tasks. Under the
232 //! same assumptions as the previous fairness guarantee, Tokio guarantees that
233 //! it will wake tasks with an IO or timer event within some maximum number of
234 //! time units.
235 //!
236 //! ## Current thread runtime (behavior at the time of writing)
237 //!
238 //! This section describes how the [current thread runtime] behaves today. This
239 //! behavior may change in future versions of Tokio.
240 //!
241 //! The current thread runtime maintains two FIFO queues of tasks that are ready
242 //! to be scheduled: the global queue and the local queue. The runtime will prefer
243 //! to choose the next task to schedule from the local queue, and will only pick a
244 //! task from the global queue if the local queue is empty, or if it has picked
245 //! a task from the local queue 31 times in a row. The number 31 can be
246 //! changed using the [`global_queue_interval`] setting.
247 //!
248 //! The runtime will check for new IO or timer events whenever there are no
249 //! tasks ready to be scheduled, or when it has scheduled 61 tasks in a row. The
250 //! number 61 may be changed using the [`event_interval`] setting.
251 //!
252 //! When a task is woken from within a task running on the runtime, then the
253 //! woken task is added directly to the local queue. Otherwise, the task is
254 //! added to the global queue. The current thread runtime does not use [the lifo
255 //! slot optimization].
256 //!
257 //! ## Multi threaded runtime (behavior at the time of writing)
258 //!
259 //! This section describes how the [multi thread runtime] behaves today. This
260 //! behavior may change in future versions of Tokio.
261 //!
262 //! A multi thread runtime has a fixed number of worker threads, which are all
263 //! created on startup. The multi thread runtime maintains one global queue, and
264 //! a local queue for each worker thread. The local queue of a worker thread can
265 //! fit at most 256 tasks. If more than 256 tasks are added to the local queue,
266 //! then half of them are moved to the global queue to make space.
267 //!
268 //! The runtime will prefer to choose the next task to schedule from the local
269 //! queue, and will only pick a task from the global queue if the local queue is
270 //! empty, or if it has picked a task from the local queue
271 //! [`global_queue_interval`] times in a row. If the value of
272 //! [`global_queue_interval`] is not explicitly set using the runtime builder,
273 //! then the runtime will dynamically compute it using a heuristic that targets
274 //! 10ms intervals between each check of the global queue (based on the
275 //! [`worker_mean_poll_time`] metric).
276 //!
//! If both the local queue and the global queue are empty, then the worker thread
278 //! will attempt to steal tasks from the local queue of another worker thread.
279 //! Stealing is done by moving half of the tasks in one local queue to another
280 //! local queue.
281 //!
282 //! The runtime will check for new IO or timer events whenever there are no
283 //! tasks ready to be scheduled, or when it has scheduled 61 tasks in a row. The
284 //! number 61 may be changed using the [`event_interval`] setting.
285 //!
286 //! The multi thread runtime uses [the lifo slot optimization]: Whenever a task
287 //! wakes up another task, the other task is added to the worker thread's lifo
288 //! slot instead of being added to a queue. If there was already a task in the
289 //! lifo slot when this happened, then the lifo slot is replaced, and the task
290 //! that used to be in the lifo slot is placed in the thread's local queue.
291 //! When the runtime finishes scheduling a task, it will schedule the task in
292 //! the lifo slot immediately, if any. When the lifo slot is used, the [coop
293 //! budget] is not reset. Furthermore, if a worker thread uses the lifo slot
294 //! three times in a row, it is temporarily disabled until the worker thread has
295 //! scheduled a task that didn't come from the lifo slot. The lifo slot can be
296 //! disabled using the [`disable_lifo_slot`] setting. The lifo slot is separate
297 //! from the local queue, so other worker threads cannot steal the task in the
298 //! lifo slot.
299 //!
300 //! When a task is woken from a thread that is not a worker thread, then the
301 //! task is placed in the global queue.
302 //!
303 //! [`poll`]: std::future::Future::poll
304 //! [`wake`]: std::task::Waker::wake
305 //! [`yield_now`]: crate::task::yield_now
306 //! [blocking the thread]: https://ryhl.io/blog/async-what-is-blocking/
307 //! [current thread runtime]: crate::runtime::Builder::new_current_thread
308 //! [multi thread runtime]: crate::runtime::Builder::new_multi_thread
309 //! [`global_queue_interval`]: crate::runtime::Builder::global_queue_interval
310 //! [`event_interval`]: crate::runtime::Builder::event_interval
311 //! [`disable_lifo_slot`]: crate::runtime::Builder::disable_lifo_slot
312 //! [the lifo slot optimization]: crate::runtime::Builder::disable_lifo_slot
313 //! [coop budget]: crate::task#cooperative-scheduling
314 //! [`worker_mean_poll_time`]: crate::runtime::RuntimeMetrics::worker_mean_poll_time
315 
316 // At the top due to macros
317 #[cfg(test)]
318 #[cfg(not(target_family = "wasm"))]
319 #[macro_use]
320 mod tests;
321 
322 pub(crate) mod context;
323 
324 pub(crate) mod coop;
325 
326 pub(crate) mod park;
327 
328 mod driver;
329 
330 pub(crate) mod scheduler;
331 
332 cfg_io_driver_impl! {
333     pub(crate) mod io;
334 }
335 
336 cfg_process_driver! {
337     mod process;
338 }
339 
340 cfg_time! {
341     pub(crate) mod time;
342 }
343 
344 cfg_signal_internal_and_unix! {
345     pub(crate) mod signal;
346 }
347 
348 cfg_rt! {
349     pub(crate) mod task;
350 
351     mod config;
352     use config::Config;
353 
354     mod blocking;
355     #[cfg_attr(target_os = "wasi", allow(unused_imports))]
356     pub(crate) use blocking::spawn_blocking;
357 
358     cfg_trace! {
359         pub(crate) use blocking::Mandatory;
360     }
361 
362     cfg_fs! {
363         pub(crate) use blocking::spawn_mandatory_blocking;
364     }
365 
366     mod builder;
367     pub use self::builder::Builder;
368     cfg_unstable! {
369         mod id;
370         #[cfg_attr(not(tokio_unstable), allow(unreachable_pub))]
371         pub use id::Id;
372 
373         pub use self::builder::UnhandledPanic;
374         pub use crate::util::rand::RngSeed;
375 
376         mod local_runtime;
377         pub use local_runtime::{LocalRuntime, LocalOptions};
378     }
379 
380     cfg_taskdump! {
381         pub mod dump;
382         pub use dump::Dump;
383     }
384 
385     mod task_hooks;
386     pub(crate) use task_hooks::{TaskHooks, TaskCallback};
387     cfg_unstable! {
388         pub use task_hooks::TaskMeta;
389     }
390     #[cfg(not(tokio_unstable))]
391     pub(crate) use task_hooks::TaskMeta;
392 
393     mod handle;
394     pub use handle::{EnterGuard, Handle, TryCurrentError};
395 
396     mod runtime;
397     pub use runtime::{Runtime, RuntimeFlavor};
398 
399     /// Boundary value to prevent stack overflow caused by a large-sized
400     /// Future being placed in the stack.
401     pub(crate) const BOX_FUTURE_THRESHOLD: usize = if cfg!(debug_assertions)  {
402         2048
403     } else {
404         16384
405     };
406 
407     mod thread_id;
408     pub(crate) use thread_id::ThreadId;
409 
410     pub(crate) mod metrics;
411     pub use metrics::RuntimeMetrics;
412 
413     cfg_unstable_metrics! {
414         pub use metrics::{HistogramScale, HistogramConfiguration, LogHistogram, LogHistogramBuilder, InvalidHistogramConfiguration} ;
415 
416         cfg_net! {
417             pub(crate) use metrics::IoDriverMetrics;
418         }
419     }
420 
421     pub(crate) use metrics::{MetricsBatch, SchedulerMetrics, WorkerMetrics, HistogramBuilder};
422 
423     /// After thread starts / before thread stops
424     type Callback = std::sync::Arc<dyn Fn() + Send + Sync>;
425 }
426