use crate::stats::univariate::Sample;
use crate::stats::univariate::{self, mixed};
use crate::stats::Distribution;

use crate::benchmark::BenchmarkConfig;
use crate::error::Result;
use crate::estimate::{
    build_change_estimates, ChangeDistributions, ChangeEstimates, ChangePointEstimates, Estimates,
};
use crate::measurement::Measurement;
use crate::report::BenchmarkId;
use crate::{fs, Criterion, SavedSample};

// Common comparison procedure: load the baseline measurements for `id` and
// compare them against the current `avg_times`.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))]
pub(crate) fn common<M: Measurement>(
    id: &BenchmarkId,
    avg_times: &Sample<f64>,
    config: &BenchmarkConfig,
    criterion: &Criterion<M>,
) -> Result<(
    f64,
    Distribution<f64>,
    ChangeEstimates,
    ChangeDistributions,
    Vec<f64>,
    Vec<f64>,
    Vec<f64>,
    Estimates,
)> {
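    // Load the raw measurements saved by the baseline run: `iters` holds the
    // iteration count and `times` the total elapsed time of each sample.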
    let mut sample_file = criterion.output_directory.clone();
    sample_file.push(id.as_directory_name());
    sample_file.push(&criterion.baseline_directory);
    sample_file.push("sample.json");
    let sample: SavedSample = fs::load(&sample_file)?;
    let SavedSample { iters, times, .. } = sample;

    let mut estimates_file = criterion.output_directory.clone();
    estimates_file.push(id.as_directory_name());
    estimates_file.push(&criterion.baseline_directory);
    estimates_file.push("estimates.json");
    let base_estimates: Estimates = fs::load(&estimates_file)?;

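    // Convert each (iteration count, total time) pair into an average time per
    // iteration so the baseline sample is directly comparable with `avg_times`.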
    let base_avg_times: Vec<f64> = iters
        .iter()
        .zip(times.iter())
        .map(|(iters, elapsed)| elapsed / iters)
        .collect();
    let base_avg_time_sample = Sample::new(&base_avg_times);

    let mut change_dir = criterion.output_directory.clone();
    change_dir.push(id.as_directory_name());
    change_dir.push("change");
    fs::mkdirp(&change_dir)?;
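    // Compare the new and baseline average times with a two-sample t-test; the
    // observed statistic and its bootstrap distribution are returned to the
    // caller for significance reporting.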
    let (t_statistic, t_distribution) = t_test(avg_times, base_avg_time_sample, config);

    let (estimates, relative_distributions) =
        estimates(id, avg_times, base_avg_time_sample, config, criterion);
    Ok((
        t_statistic,
        t_distribution,
        estimates,
        relative_distributions,
        iters,
        times,
        base_avg_times.clone(),
        base_estimates,
    ))
}

// Performs a two-sample t-test
fn t_test(
    avg_times: &Sample<f64>,
    base_avg_times: &Sample<f64>,
    config: &BenchmarkConfig,
) -> (f64, Distribution<f64>) {
    let nresamples = config.nresamples;

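    // Observed t statistic for the two samples; the mixed two-sample bootstrap
    // below builds the distribution of that statistic, which downstream code
    // uses to judge whether the difference is significant.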
    let t_statistic = avg_times.t(base_avg_times);
    let t_distribution = elapsed!(
        "Bootstrapping the T distribution",
        mixed::bootstrap(avg_times, base_avg_times, nresamples, |a, b| (a.t(b),))
    )
    .0;

    // HACK: Filter out non-finite numbers, which can happen sometimes when sample size is very small.
    // Downstream code doesn't like non-finite values here.
    let t_distribution = Distribution::from(
        t_distribution
            .iter()
            .filter(|a| a.is_finite())
            .cloned()
            .collect::<Vec<_>>()
            .into_boxed_slice(),
    );

    (t_statistic, t_distribution)
}


// Estimates the relative change in the statistics of the population
fn estimates<M: Measurement>(
    id: &BenchmarkId,
    avg_times: &Sample<f64>,
    base_avg_times: &Sample<f64>,
    config: &BenchmarkConfig,
    criterion: &Criterion<M>,
) -> (ChangeEstimates, ChangeDistributions) {
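    // Relative change of a statistic: new / base - 1, so a value of 0.05 means
    // the new measurement is 5% larger (slower) than the baseline.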
    fn stats(a: &Sample<f64>, b: &Sample<f64>) -> (f64, f64) {
        (
            a.mean() / b.mean() - 1.,
            a.percentiles().median() / b.percentiles().median() - 1.,
        )
    }

    let cl = config.confidence_level;
    let nresamples = config.nresamples;

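    // Bootstrap the relative mean and median changes to obtain their sampling
    // distributions.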
    let (dist_mean, dist_median) = elapsed!(
        "Bootstrapping the relative statistics",
        univariate::bootstrap(avg_times, base_avg_times, nresamples, stats)
    );

    let distributions = ChangeDistributions {
        mean: dist_mean,
        median: dist_median,
    };

    let (mean, median) = stats(avg_times, base_avg_times);
    let points = ChangePointEstimates { mean, median };

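    // Combine the point estimates with confidence intervals (at the configured
    // confidence level) taken from the bootstrap distributions, then persist
    // them to `<id>/change/estimates.json` below.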
    let estimates = build_change_estimates(&distributions, &points, cl);

    {
        log_if_err!({
            let mut estimates_path = criterion.output_directory.clone();
            estimates_path.push(id.as_directory_name());
            estimates_path.push("change");
            estimates_path.push("estimates.json");
            fs::save(&estimates, &estimates_path)
        });
    }
    (estimates, distributions)
}