// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Every benchmark should have the same performance as the corresponding
// headroom benchmark.

#include <cstddef>
#include <cstdint>

#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/layout.h"
#include "benchmark/benchmark.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {

using ::benchmark::DoNotOptimize;

using Int128 = int64_t[2];

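// Rounds n up to the nearest multiple of m. Assumes m is a power of two,
// e.g. MyAlign(14, 4) == 16 and MyAlign(16, 4) == 16.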
constexpr size_t MyAlign(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }

// This benchmark provides the upper bound on performance for BM_OffsetConstant.
template <size_t Offset, class... Ts>
void BM_OffsetConstantHeadroom(benchmark::State& state) {
  for (auto _ : state) {
    DoNotOptimize(Offset);
  }
}

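// Measures Offset<3>() when all array sizes are compile-time constants
// supplied via WithStaticSizes; the result should fold to a constant.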
template <size_t Offset, class... Ts>
void BM_OffsetConstantStatic(benchmark::State& state) {
  using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
  ABSL_RAW_CHECK(L::Partial().template Offset<3>() == Offset, "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(L::Partial().template Offset<3>());
  }
}

template <size_t Offset, class... Ts>
void BM_OffsetConstant(benchmark::State& state) {
  using L = Layout<Ts...>;
  ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset,
                 "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(L::Partial(3, 5, 7).template Offset<3>());
  }
}

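// Same computation, but the Partial object is constructed once outside the
// loop and run through DoNotOptimize, so the offset must be obtained from an
// opaque object on every iteration.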
template <size_t Offset, class... Ts>
void BM_OffsetConstantIndirect(benchmark::State& state) {
  using L = Layout<Ts...>;
  auto p = L::Partial(3, 5, 7);
  ABSL_RAW_CHECK(p.template Offset<3>() == Offset, "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(p);
    DoNotOptimize(p.template Offset<3>());
  }
}

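// Hand-written equivalents of Layout<Ts...>::Partial(3, 5, k).Offset<3>():
// the offset of the fourth array when the first two sizes are fixed at 3 and
// 5 and the third size is k. With k == 7 this yields 48 for the padded
// ordering and 82 for the unpadded one.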
template <class... Ts>
size_t PartialOffset(size_t k);

template <>
size_t PartialOffset<int8_t, int16_t, int32_t, Int128>(size_t k) {
  constexpr size_t o = MyAlign(MyAlign(3 * 1, 2) + 5 * 2, 4);
  return MyAlign(o + k * 4, 8);
}

template <>
size_t PartialOffset<Int128, int32_t, int16_t, int8_t>(size_t k) {
  // No alignment is necessary.
  return 3 * 16 + 5 * 4 + k * 2;
}

// This benchmark provides the upper bound on performance for BM_OffsetPartial.
template <size_t Offset, class... Ts>
void BM_OffsetPartialHeadroom(benchmark::State& state) {
  size_t k = 7;
  ABSL_RAW_CHECK(PartialOffset<Ts...>(k) == Offset, "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(k);
    DoNotOptimize(PartialOffset<Ts...>(k));
  }
}

template <size_t Offset, class... Ts>
void BM_OffsetPartialStatic(benchmark::State& state) {
  using L = typename Layout<Ts...>::template WithStaticSizes<3, 5>;
  size_t k = 7;
  ABSL_RAW_CHECK(L::Partial(k).template Offset<3>() == Offset,
                 "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(k);
    DoNotOptimize(L::Partial(k).template Offset<3>());
  }
}

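// Same computation as BM_OffsetPartialStatic, but the first two sizes are
// passed to Partial() as ordinary arguments rather than as static sizes.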
template <size_t Offset, class... Ts>
void BM_OffsetPartial(benchmark::State& state) {
  using L = Layout<Ts...>;
  size_t k = 7;
  ABSL_RAW_CHECK(L::Partial(3, 5, k).template Offset<3>() == Offset,
                 "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(k);
    DoNotOptimize(L::Partial(3, 5, k).template Offset<3>());
  }
}

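// Hand-written equivalents of Layout<Ts...>::Partial(n, m, k).Offset<3>()
// with all three sizes known only at run time.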
template <class... Ts>
size_t VariableOffset(size_t n, size_t m, size_t k);

template <>
size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
                                                        size_t k) {
  return MyAlign(MyAlign(MyAlign(n * 1, 2) + m * 2, 4) + k * 4, 8);
}

template <>
size_t VariableOffset<Int128, int32_t, int16_t, int8_t>(size_t n, size_t m,
                                                        size_t k) {
  // No alignment is necessary.
  return n * 16 + m * 4 + k * 2;
}

// This benchmark provides the upper bound on performance for BM_OffsetVariable.
template <size_t Offset, class... Ts>
void BM_OffsetVariableHeadroom(benchmark::State& state) {
  size_t n = 3;
  size_t m = 5;
  size_t k = 7;
  ABSL_RAW_CHECK(VariableOffset<Ts...>(n, m, k) == Offset, "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(n);
    DoNotOptimize(m);
    DoNotOptimize(k);
    DoNotOptimize(VariableOffset<Ts...>(n, m, k));
  }
}

template <size_t Offset, class... Ts>
void BM_OffsetVariable(benchmark::State& state) {
  using L = Layout<Ts...>;
  size_t n = 3;
  size_t m = 5;
  size_t k = 7;
  ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset,
                 "Invalid offset");
  for (auto _ : state) {
    DoNotOptimize(n);
    DoNotOptimize(m);
    DoNotOptimize(k);
    DoNotOptimize(L::Partial(n, m, k).template Offset<3>());
  }
}

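// Hand-written equivalents of Layout<Ts...>(3, 5, 7, x).AllocSize(): the
// constant offset of the last array plus x times its element size.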
template <class... Ts>
size_t AllocSize(size_t x);

template <>
size_t AllocSize<int8_t, int16_t, int32_t, Int128>(size_t x) {
  constexpr size_t o =
      Layout<int8_t, int16_t, int32_t, Int128>::Partial(3, 5, 7)
          .template Offset<Int128>();
  return o + sizeof(Int128) * x;
}

template <>
size_t AllocSize<Int128, int32_t, int16_t, int8_t>(size_t x) {
  constexpr size_t o =
      Layout<Int128, int32_t, int16_t, int8_t>::Partial(3, 5, 7)
          .template Offset<int8_t>();
  return o + sizeof(int8_t) * x;
}

// This benchmark provides the upper bound on performance for BM_AllocSize.
template <size_t Size, class... Ts>
void BM_AllocSizeHeadroom(benchmark::State& state) {
  size_t x = 9;
  ABSL_RAW_CHECK(AllocSize<Ts...>(x) == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(x);
    DoNotOptimize(AllocSize<Ts...>(x));
  }
}

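// With the first three sizes static, AllocSize() should reduce to a constant
// plus x * sizeof(last element type).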
template <size_t Size, class... Ts>
void BM_AllocSizeStatic(benchmark::State& state) {
  using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
  size_t x = 9;
  ABSL_RAW_CHECK(L(x).AllocSize() == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(x);
    DoNotOptimize(L(x).AllocSize());
  }
}

template <size_t Size, class... Ts>
void BM_AllocSize(benchmark::State& state) {
  using L = Layout<Ts...>;
  size_t n = 3;
  size_t m = 5;
  size_t k = 7;
  size_t x = 9;
  ABSL_RAW_CHECK(L(n, m, k, x).AllocSize() == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(n);
    DoNotOptimize(m);
    DoNotOptimize(k);
    DoNotOptimize(x);
    DoNotOptimize(L(n, m, k, x).AllocSize());
  }
}

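// Same computation, but the fully constructed Layout object is made opaque
// with DoNotOptimize before AllocSize() is queried on each iteration.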
template <size_t Size, class... Ts>
void BM_AllocSizeIndirect(benchmark::State& state) {
  using L = Layout<Ts...>;
  auto l = L(3, 5, 7, 9);
  ABSL_RAW_CHECK(l.AllocSize() == Size, "Invalid size");
  for (auto _ : state) {
    DoNotOptimize(l);
    DoNotOptimize(l.AllocSize());
  }
}

// Run all benchmarks in two modes:
//
//   Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
//   Layout without padding: Int128[3], int32_t[5], int16_t[7], int8_t[?].
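//
// For reference, the expected constants used below are:
//   with padding:    Offset<3>() == 48,  AllocSize() == 48 + 9 * 16 == 192
//   without padding: Offset<3>() == 82,  AllocSize() == 82 + 9 * 1  == 91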

#define OFFSET_BENCHMARK(NAME, OFFSET, T1, T2, T3, T4) \
  auto& NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4 =  \
      NAME<OFFSET, T1, T2, T3, T4>;                    \
  BENCHMARK(NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4)

OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
                 Int128);
OFFSET_BENCHMARK(BM_OffsetConstantStatic, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 48, int8_t, int16_t, int32_t,
                 Int128);

OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
                 int8_t);
OFFSET_BENCHMARK(BM_OffsetConstantStatic, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 82, Int128, int32_t, int16_t,
                 int8_t);

OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 48, int8_t, int16_t, int32_t,
                 Int128);
OFFSET_BENCHMARK(BM_OffsetPartialStatic, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetPartial, 48, int8_t, int16_t, int32_t, Int128);

OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 82, Int128, int32_t, int16_t,
                 int8_t);
OFFSET_BENCHMARK(BM_OffsetPartialStatic, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetPartial, 82, Int128, int32_t, int16_t, int8_t);

OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
                 Int128);
OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);

OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
                 int8_t);
OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);

OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeStatic, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSize, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeIndirect, 192, int8_t, int16_t, int32_t, Int128);

OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeStatic, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSize, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeIndirect, 91, Int128, int32_t, int16_t, int8_t);

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl