//! Benchmarks to track basic performance across changes.
//!
//! Loosely based on the `background.rs` benchmarks, but simplified and stripped down to run
//! reasonably fast.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use arc_swap::access::{Access, Map};
use arc_swap::cache::Cache;
use arc_swap::ArcSwap;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use crossbeam_utils::thread;

/// Execute a group of measurements.
///
/// It expects that whatever „environment“ it needs is already in place.
fn batch(c: &mut Criterion, name: &str, shared_number: &ArcSwap<usize>) {
    let mut g = c.benchmark_group(name);

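    // `load` is the hot path: it hands out a temporary guard and, in the common case, avoids
    // touching the reference count at all.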
21 g.bench_function("load", |b| {
22 b.iter(|| {
23 black_box(shared_number.load());
24 })
25 });
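    // `load_full` returns an owned `Arc`, so every call pays for a reference count increment.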
26 g.bench_function("load_full", |b| {
27 b.iter(|| {
28 black_box(shared_number.load_full());
29 })
30 });
31 g.bench_function("load_many", |b| {
32 // Here we simulate running out of the debt slots scenario
33 const MANY: usize = 32;
34 let mut guards = Vec::with_capacity(MANY);
35 b.iter(|| {
36 guards.push(black_box(shared_number.load()));
37 if guards.len() == MANY {
38 guards.clear();
39 }
40 })
41 });
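    // Each iteration stores a freshly allocated `Arc`, so the measurement also includes the
    // allocation and, typically, the drop of the previously stored value.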
42 g.bench_function("store", |b| {
43 b.iter(|| {
44 black_box(shared_number.store(Arc::new(42)));
45 })
46 });
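    // `Cache` keeps its own copy of the `Arc` and refreshes it only when the shared value
    // changes, making repeated loads cheaper than going through `ArcSwap::load`.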
47 g.bench_function("cache", |b| {
48 let mut cache = Cache::new(shared_number);
49 b.iter(|| {
50 black_box(cache.load());
51 })
52 });
53
54 g.finish();
55 }

fn with_background<F: Fn(&ArcSwap<usize>) + Sync>(
    c: &mut Criterion,
    name: &str,
    cnt: usize,
    noise: F,
) {
    let stop = AtomicBool::new(false);
    let shared_number = ArcSwap::from_pointee(42);
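    // Scoped threads (from crossbeam-utils) may borrow `stop` and `shared_number` from this
    // stack frame; the scope joins them before returning.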
    thread::scope(|s| {
        // Start some background noise threads to create contention on the ArcSwap.
        for _ in 0..cnt {
            s.spawn(|_| {
                while !stop.load(Ordering::Relaxed) {
                    noise(&shared_number);
                }
            });
        }

        // Perform the benchmarks.
        batch(c, name, &shared_number);

        // Ask the threads to terminate, so they don't disturb any other benchmarks.
        stop.store(true, Ordering::Relaxed);
    })
    .unwrap();
}

fn utilities(c: &mut Criterion) {
    let mut g = c.benchmark_group("utilities");

    struct Composed {
        val: i32,
    }

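    // `Map` projects the `val` field out of the shared `Composed` through the `Access` trait,
    // so callers can load just the part they care about.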
91 g.bench_function("access-map", |b| {
92 let a = Arc::new(ArcSwap::from_pointee(Composed { val: 42 }));
93 let m = Map::new(Arc::clone(&a), |c: &Composed| &c.val);
94 b.iter(|| {
95 let g = black_box(m.load());
96 assert_eq!(42, *g);
97 });
98 });
99 }

fn benchmark(c: &mut Criterion) {
    batch(c, "uncontended", &ArcSwap::from_pointee(42));
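    // Two background threads keep loading, contending the read path.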
    with_background(c, "concurrent_loads", 2, |s| {
        black_box(s.load());
    });
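    // One background thread keeps storing, so the measured operations constantly see fresh values.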
    with_background(c, "concurrent_store", 1, |s| {
        black_box(s.store(Arc::new(42)));
    });
    utilities(c);
}

criterion_group!(benches, benchmark);
criterion_main!(benches);