xref: /aosp_15_r20/external/pigweed/pw_allocator/py/pw_allocator/benchmarks.py (revision 61c4878ac05f98d0ceed94b57d316916de578985)
1# Copyright 2024 The Pigweed Authors
2#
3# Licensed under the Apache License, Version 2.0 (the "License"); you may not
4# use this file except in compliance with the License. You may obtain a copy of
5# the License at
6#
7#     https://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12# License for the specific language governing permissions and limitations under
13# the License.
14"""Collects benchmarks for supported allocators."""
15
16import argparse
17import json
18import os
19import subprocess
20import sys
21
22from pathlib import Path
23from typing import Any, IO
24
25
def _parse_args() -> argparse.Namespace:
    """Builds the command-line parser and parses ``sys.argv``."""
    parser = argparse.ArgumentParser(description=__doc__)
    flags: list[tuple[tuple[str, ...], dict]] = [
        (('-o', '--output'), dict(type=str, help='Path to write a CSV file to')),
        (
            ('-v', '--verbose'),
            dict(action='store_true', help='Echo benchmarks to stdout'),
        ),
    ]
    for names, kwargs in flags:
        parser.add_argument(*names, **kwargs)
    return parser.parse_args()
36
37
# Bazel wrapper binary used to build and run the benchmark targets.
BAZEL = 'bazelisk'

# Allocators to benchmark; each name corresponds to a
# `//pw_allocator/benchmarks:<name>_benchmark` Bazel target.
ALLOCATORS = [
    'best_fit',
    'dual_first_fit',
    'first_fit',
    'last_fit',
    'worst_fit',
]

# Category and bucket names below must match the keys of the JSON data
# emitted by the benchmark binaries (see `Benchmark.collect_category`).
BY_ALLOC_COUNT = 'by allocation count'
# Bucket singled out for the summary printed by `BenchmarkSuite.print_summary`.
BY_ALLOC_COUNT_1K = 'allocation count in [1,000, 10,000)'
BY_ALLOC_COUNT_BUCKETS = [
    'allocation count in [0, 10)',
    'allocation count in [10, 100)',
    'allocation count in [100, 1,000)',
    BY_ALLOC_COUNT_1K,
    'allocation count in [10,000, inf)',
]

BY_FRAGMENTATION = 'by fragmentation'
BY_FRAGMENTATION_BUCKETS = [
    'fragmentation in [0.0, 0.2)',
    'fragmentation in [0.2, 0.4)',
    'fragmentation in [0.4, 0.6)',
    'fragmentation in [0.6, 0.8)',
    'fragmentation in [0.8, 1.0]',
]

BY_ALLOC_SIZE = 'by allocation size'
BY_ALLOC_SIZE_BUCKETS = [
    'usable size in [0, 16)',
    'usable size in [16, 64)',
    'usable size in [64, 256)',
    'usable size in [256, 1024)',
    'usable size in [1024, 4096)',
    'usable size in [4096, inf)',
]

# Maps each category name to its ordered list of bucket names.
BUCKETS = {
    BY_ALLOC_COUNT: BY_ALLOC_COUNT_BUCKETS,
    BY_FRAGMENTATION: BY_FRAGMENTATION_BUCKETS,
    BY_ALLOC_SIZE: BY_ALLOC_SIZE_BUCKETS,
}

# Per-bucket metric names reported by the benchmark binaries; used to index
# each bucket's data when writing tables and the summary.
METRICS = [
    'mean response time (ns)',
    'mean fragmentation metric',
    'mean max available (bytes)',
    'number of calls that failed',
]
89
90
def find_pw_root() -> Path:
    """Returns the path to the Pigweed repository.

    Honors an explicit ``PW_ROOT`` environment variable; otherwise walks up
    from the current directory looking for the ``PIGWEED_MODULES`` marker
    file.
    """
    env_root = os.getenv('PW_ROOT')
    if env_root:
        return Path(env_root)
    start = Path(os.getcwd())
    for candidate in (start, *start.parents):
        if (candidate / 'PIGWEED_MODULES').is_file():
            return candidate
    raise RuntimeError('Unable to find Pigweed root')
105
106
class Benchmark:
    """Collection of benchmarks for a single allocator.

    Builds and runs one allocator's benchmark binary, detokenizes its
    output, and records the reported metrics bucketed by category.
    """

    def __init__(self, allocator: str):
        # Allocator name; expected to match an entry in ALLOCATORS.
        self.allocator = allocator
        # Maps category name -> bucket name -> per-bucket metric data.
        self.metrics: dict[str, dict] = {
            BY_ALLOC_COUNT: {},
            BY_FRAGMENTATION: {},
            BY_ALLOC_SIZE: {},
        }
        # Raw JSON data for the most recently parsed benchmark run.
        self.data: dict[str, Any] = {}

    def collect(self):
        """Builds and runs all categories of an allocator benchmark.

        Raises:
            RuntimeError: If the Pigweed root cannot be located.
            subprocess.CalledProcessError: If the benchmark fails to build.
        """
        # Fall back to searching for the repository root instead of failing
        # outright when PW_ROOT is not set.
        pw_root = find_pw_root()
        benchmark = f'{self.allocator}_benchmark'
        label = f'//pw_allocator/benchmarks:{benchmark}'
        # `check=True` so a build failure raises rather than silently running
        # a stale (or missing) binary below.
        subprocess.run([BAZEL, 'build', label], cwd=pw_root, check=True)
        run_proc = subprocess.Popen(
            [BAZEL, 'run', label], cwd=pw_root, stdout=subprocess.PIPE
        )
        # Pipe the tokenized benchmark output through the detokenizer.
        detok_proc = subprocess.Popen(
            [
                'python',
                'pw_tokenizer/py/pw_tokenizer/detokenize.py',
                'base64',
                f'bazel-bin/pw_allocator/benchmarks/{benchmark}#.*',
            ],
            cwd=pw_root,
            stdin=run_proc.stdout,
            stdout=subprocess.PIPE,
        )
        # Close our copy of the pipe so `run_proc` receives SIGPIPE if the
        # detokenizer exits early. stdout is a PIPE, so it cannot be None.
        assert run_proc.stdout is not None
        run_proc.stdout.close()
        output = detok_proc.communicate()[0]
        # Drop the first 18 characters of every line — presumably a
        # fixed-width log prefix added by the device logger; TODO: confirm.
        lines = [
            line[18:] for line in output.decode('utf-8').strip().split('\n')
        ]
        for _, data in json.loads('\n'.join(lines)).items():
            self.data = data
            self.collect_category(BY_ALLOC_COUNT)
            self.collect_category(BY_FRAGMENTATION)
            self.collect_category(BY_ALLOC_SIZE)

    def collect_category(self, category: str):
        """Records one category of metrics from the current benchmark data.

        Args:
            category: One of the keys of BUCKETS.
        """
        category_data = self.data[category]
        for name in BUCKETS[category]:
            self.metrics[category][name] = category_data[name]
156
157
class BenchmarkSuite:
    """Collection of benchmarks for all supported allocators."""

    def __init__(self):
        self.benchmarks = [Benchmark(name) for name in ALLOCATORS]

    def collect(self):
        """Builds and runs all allocator benchmarks."""
        for bench in self.benchmarks:
            bench.collect()

    def write_benchmarks(self, output: IO):
        """Reorganizes benchmark metrics and writes them to the given output."""
        for metric in METRICS:
            for category in (BY_ALLOC_COUNT, BY_FRAGMENTATION, BY_ALLOC_SIZE):
                self.write_category(output, metric, category)

    def write_category(self, output: IO, metric: str, category: str):
        """Writes a single category of benchmarks as tab-separated rows."""
        # Header row: "<metric> <category>" followed by one allocator name
        # per column. Every cell, including the last, is tab-terminated.
        header = [f'{metric} {category}']
        header.extend(bench.allocator for bench in self.benchmarks)
        output.write('\t'.join(header) + '\t\n')

        # One row per bucket with that metric's value for each allocator.
        for bucket in BUCKETS[category]:
            row = [bucket]
            row.extend(
                str(bench.metrics[category][bucket][metric])
                for bench in self.benchmarks
            )
            output.write('\t'.join(row) + '\t\n')
        output.write('\n')

    def print_summary(self):
        """Writes selected metrics to stdout."""
        out = sys.stdout
        print('\n' + '#' * 80)
        print(f'Results for {BY_ALLOC_COUNT_1K}:')
        # Header of allocator names in fixed-width columns.
        names = ''.join(
            bench.allocator.ljust(16) for bench in self.benchmarks
        )
        out.write(' ' * 32 + names + '\n')

        # One row per metric, taken from the highlighted allocation-count
        # bucket of each allocator.
        for metric in METRICS:
            cells = [metric.ljust(32)]
            for bench in self.benchmarks:
                bucket = bench.metrics[BY_ALLOC_COUNT][BY_ALLOC_COUNT_1K]
                cells.append(str(bucket[metric]).ljust(16))
            out.write(''.join(cells) + '\n')
        print('#' * 80)
206
207
def main() -> int:
    """Builds and runs allocator benchmarks.

    Returns:
        0 on success (build/run failures raise instead).
    """
    args = _parse_args()
    suite = BenchmarkSuite()
    suite.collect()
    if args.output:
        out_path = Path(args.output)
        # 'w' (not 'w+': nothing is read back) with an explicit encoding so
        # the output does not depend on the platform's locale.
        with out_path.open('w', encoding='utf-8') as output:
            suite.write_benchmarks(output)
        print(f'\nWrote to {out_path.resolve()}')

    if args.verbose:
        suite.write_benchmarks(sys.stdout)

    suite.print_summary()

    return 0
224
225
if __name__ == '__main__':
    # Propagate main()'s return value to the shell as the exit code
    # (it was previously discarded).
    sys.exit(main())
228