xref: /aosp_15_r20/external/abseil-cpp/absl/container/inlined_vector_benchmark.cc (revision 9356374a3709195abf420251b3e825997ff56c0f)
1 // Copyright 2019 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include <array>
16 #include <string>
17 #include <vector>
18 
19 #include "absl/base/internal/raw_logging.h"
20 #include "absl/base/macros.h"
21 #include "absl/container/inlined_vector.h"
22 #include "absl/strings/str_cat.h"
23 #include "benchmark/benchmark.h"
24 
25 namespace {
26 
// Benchmarks filling an InlinedVector<int, 8> one push_back at a time, with
// capacity pre-reserved so only the append path is measured.
void BM_InlinedVectorFill(benchmark::State& state) {
  const int len = state.range(0);
  absl::InlinedVector<int, 8> v;
  v.reserve(len);
  for (auto _ : state) {
    v.resize(0);  // Use resize(0) as InlinedVector releases storage on clear().
    for (int i = 0; i < len; ++i) {
      v.push_back(i);
    }
    benchmark::DoNotOptimize(v);  // Keep the fill from being optimized away.
  }
}
BENCHMARK(BM_InlinedVectorFill)->Range(1, 256);
40 
// Benchmarks bulk range-assign into an InlinedVector from a std::vector.
void BM_InlinedVectorFillRange(benchmark::State& state) {
  const int len = state.range(0);
  const std::vector<int> src(len, len);
  absl::InlinedVector<int, 8> v;
  v.reserve(len);
  for (auto _ : state) {
    benchmark::DoNotOptimize(src);  // Prevent hoisting src out of the loop.
    v.assign(src.begin(), src.end());
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256);
53 
// std::vector baseline for BM_InlinedVectorFill.
void BM_StdVectorFill(benchmark::State& state) {
  const int len = state.range(0);
  std::vector<int> v;
  v.reserve(len);
  for (auto _ : state) {
    v.clear();  // std::vector::clear() keeps capacity, unlike InlinedVector.
    for (int i = 0; i < len; ++i) {
      v.push_back(i);
    }
    benchmark::DoNotOptimize(v);
  }
}
BENCHMARK(BM_StdVectorFill)->Range(1, 256);
67 
68 // The purpose of the next two benchmarks is to verify that
69 // absl::InlinedVector is efficient when moving is more efficient than
70 // copying. To do so, we use strings that are larger than the short
71 // string optimization.
// Returns true if `s` is stored in the short-string-optimization buffer, i.e.
// moving the string relocates its data pointer instead of transferring it.
bool StringRepresentedInline(std::string s) {
  const char* const original_data = s.data();
  const std::string moved = std::move(s);
  return moved.data() != original_data;
}
77 
// Returns the smallest of {24, 48, 96, 192} characters that defeats the short
// string optimization; aborts the process if every probe is still inline.
int GetNonShortStringOptimizationSize() {
  for (int i = 24; i <= 192; i *= 2) {
    if (!StringRepresentedInline(std::string(i, 'A'))) {
      return i;
    }
  }
  ABSL_RAW_LOG(
      FATAL,
      "Failed to find a string larger than the short string optimization");
  return -1;  // Unreachable: ABSL_RAW_LOG(FATAL) terminates the program.
}
89 
// Benchmarks push_back of heap-allocated (non-SSO) strings into an
// InlinedVector, including the per-iteration construction/destruction cost.
void BM_InlinedVectorFillString(benchmark::State& state) {
  const int len = state.range(0);
  const int no_sso = GetNonShortStringOptimizationSize();
  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};

  for (auto _ : state) {
    absl::InlinedVector<std::string, 8> v;  // Fresh vector each iteration.
    for (int i = 0; i < len; i++) {
      v.push_back(strings[i & 3]);  // Cycle through the four source strings.
    }
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024);
105 
// std::vector baseline for BM_InlinedVectorFillString.
void BM_StdVectorFillString(benchmark::State& state) {
  const int len = state.range(0);
  const int no_sso = GetNonShortStringOptimizationSize();
  std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};

  for (auto _ : state) {
    std::vector<std::string> v;  // Fresh vector each iteration.
    for (int i = 0; i < len; i++) {
      v.push_back(strings[i & 3]);
    }
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
}
BENCHMARK(BM_StdVectorFillString)->Range(0, 1024);
121 
// Some arbitrary trivially-copyable structure for benchmarking assignments.
struct Buffer {
  char* base;       // Non-owning; never dereferenced by the benchmarks.
  int length;
  int capacity;
  void* user_data;
};
128 
// Benchmarks copy-assignment of InlinedVector<Buffer, 2> at sizes straddling
// the inline capacity of 2.
void BM_InlinedVectorAssignments(benchmark::State& state) {
  const int len = state.range(0);
  using BufferVec = absl::InlinedVector<Buffer, 2>;

  BufferVec src;
  src.resize(len);

  BufferVec dst;
  for (auto _ : state) {
    benchmark::DoNotOptimize(dst);
    benchmark::DoNotOptimize(src);
    dst = src;  // The measured operation.
  }
}
BENCHMARK(BM_InlinedVectorAssignments)
    ->Arg(0)
    ->Arg(1)
    ->Arg(2)
    ->Arg(3)
    ->Arg(4)
    ->Arg(20);
150 
// Benchmarks initializer-list construction followed by move-construction of a
// small (inline-stored) InlinedVector.
void BM_CreateFromContainer(benchmark::State& state) {
  for (auto _ : state) {
    absl::InlinedVector<int, 4> src{1, 2, 3};
    benchmark::DoNotOptimize(src);
    absl::InlinedVector<int, 4> dst(std::move(src));
    benchmark::DoNotOptimize(dst);
  }
}
BENCHMARK(BM_CreateFromContainer);
160 
// Element type that is expensive to copy and has no move operations (declaring
// the copy operations suppresses the implicit moves).
struct LargeCopyableOnly {
  LargeCopyableOnly() : d(1024, 17) {}
  LargeCopyableOnly(const LargeCopyableOnly& o) = default;
  LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default;

  std::vector<int> d;
};
168 
// Element type that is expensive to copy but provides a cheap ADL swap; copy
// ctor suppresses implicit moves, so swap is the only fast path.
struct LargeCopyableSwappable {
  LargeCopyableSwappable() : d(1024, 17) {}

  LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;

  // Copy-and-swap assignment: parameter taken by value, then swapped in.
  LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
    using std::swap;
    swap(*this, o);
    return *this;
  }

  friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
    using std::swap;
    swap(a.d, b.d);  // O(1): swaps the vector's internal pointers.
  }

  std::vector<int> d;
};
187 
// Element type that is expensive to copy but cheap to move (implicitly
// generated copy and move operations).
struct LargeCopyableMovable {
  LargeCopyableMovable() : d(1024, 17) {}
  // Use implicitly defined copy and move.

  std::vector<int> d;
};
194 
// Element type that is expensive to copy and offers both cheap move and cheap
// ADL swap.
struct LargeCopyableMovableSwappable {
  LargeCopyableMovableSwappable() : d(1024, 17) {}
  LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) =
      default;
  LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default;

  // Copy-and-swap assignment for lvalue arguments.
  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) {
    using std::swap;
    swap(*this, o);
    return *this;
  }
  LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) =
      default;

  friend void swap(LargeCopyableMovableSwappable& a,
                   LargeCopyableMovableSwappable& b) {
    using std::swap;
    swap(a.d, b.d);  // O(1): swaps the vector's internal pointers.
  }

  std::vector<int> d;
};
217 
// Benchmarks swapping an InlinedVector of `len` elements with an empty one,
// for element types with different copy/move/swap capabilities. With inline
// capacity 32, all sizes up to 32 exercise the element-wise (non-pointer) path.
template <typename ElementType>
void BM_SwapElements(benchmark::State& state) {
  const int len = state.range(0);
  using Vec = absl::InlinedVector<ElementType, 32>;
  Vec a(len);
  Vec b;
  for (auto _ : state) {
    using std::swap;  // Enable ADL swap for the element types above.
    benchmark::DoNotOptimize(a);
    benchmark::DoNotOptimize(b);
    swap(a, b);
  }
}
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024);
BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable)
    ->Range(0, 1024);
236 
237 // The following benchmark is meant to track the efficiency of the vector size
238 // as a function of stored type via the benchmark label. It is not meant to
239 // output useful sizeof operator performance. The loop is a dummy operation
240 // to fulfill the requirement of running the benchmark.
// Reports sizeof(VecType) via the benchmark label; the loop body exists only
// because the framework requires the benchmark to consume `state`.
template <typename VecType>
void BM_Sizeof(benchmark::State& state) {
  int size = 0;
  for (auto _ : state) {
    VecType vec;
    size = sizeof(vec);
  }
  state.SetLabel(absl::StrCat("sz=", size));
}
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 8>);

BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 8>);

BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 8>);

BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 1>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 4>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 7>);
BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
269 
// operator[] on a 7-element vector (fits in inline storage of 8).
void BM_InlinedVectorIndexInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_InlinedVectorIndexInlined);
278 
// operator[] on a 10-element vector (exceeds inline capacity 8, heap-backed).
void BM_InlinedVectorIndexExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_InlinedVectorIndexExternal);
287 
// std::vector baseline for the operator[] benchmarks.
void BM_StdVectorIndex(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v[4]);
  }
}
BENCHMARK(BM_StdVectorIndex);
296 
// data() on an inline-stored vector (size 7 <= inline capacity 8).
void BM_InlinedVectorDataInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
}
BENCHMARK(BM_InlinedVectorDataInlined);
305 
// data() on a heap-backed vector (size 10 > inline capacity 8).
void BM_InlinedVectorDataExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
  // NOTE(review): factor 16 doesn't obviously match the 10-element vector, and
  // the Inlined variant sets no items-processed at all — confirm intent.
  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_InlinedVectorDataExternal);
315 
// std::vector baseline for the data() benchmarks.
void BM_StdVectorData(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.data());
  }
  // NOTE(review): factor 16 mirrors BM_InlinedVectorDataExternal — see note
  // there about it not matching the element count.
  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StdVectorData);
325 
// size() on an inline-stored vector.
void BM_InlinedVectorSizeInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_InlinedVectorSizeInlined);
334 
// size() on a heap-backed vector.
void BM_InlinedVectorSizeExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_InlinedVectorSizeExternal);
343 
// std::vector baseline for the size() benchmarks.
void BM_StdVectorSize(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.size());
  }
}
BENCHMARK(BM_StdVectorSize);
352 
// empty() on an inline-stored vector.
void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_InlinedVectorEmptyInlined);
361 
// empty() on a heap-backed vector.
void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_InlinedVectorEmptyExternal);
370 
// std::vector baseline for the empty() benchmarks.
void BM_StdVectorEmpty(benchmark::State& state) {
  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
  for (auto _ : state) {
    benchmark::DoNotOptimize(v);
    benchmark::DoNotOptimize(v.empty());
  }
}
BENCHMARK(BM_StdVectorEmpty);
379 
// Size parameters shared by the batched benchmarks below: kLargeSize (8)
// forces heap ("external") storage, kSmallSize (2) stays in inline storage.
constexpr size_t kInlinedCapacity = 4;
constexpr size_t kLargeSize = kInlinedCapacity * 2;
constexpr size_t kSmallSize = kInlinedCapacity / 2;
constexpr size_t kBatchSize = 100;

// Registers a one-size benchmark template at both the large and small size.
#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)

// Registers a two-size benchmark template for all four size combinations.
#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T)      \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)
394 
// Shorthand for the vector type used by all batched benchmarks.
template <typename T>
using InlVec = absl::InlinedVector<T, kInlinedCapacity>;

// Trivially constructible/copyable/destructible element type.
struct TrivialType {
  size_t val;
};
401 
// Element type whose special members are non-trivial and opaque to the
// optimizer (NOINLINE + DoNotOptimize), so per-element costs are measured.
class NontrivialType {
 public:
  ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
    benchmark::DoNotOptimize(*this);
  }

  ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
      : val_(other.val_) {
    benchmark::DoNotOptimize(*this);
  }

  ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
      const NontrivialType& other) {
    val_ = other.val_;
    benchmark::DoNotOptimize(*this);
    return *this;
  }

  ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
    benchmark::DoNotOptimize(*this);
  }

 private:
  size_t val_;
};
427 
// Driver for the benchmarks below: maintains a batch of kBatchSize vectors,
// resets each with `prepare_vec` while timing is paused, then runs `test_vec`
// on each while timing is live. Both callbacks get (vector pointer, index).
template <typename T, typename PrepareVecFn, typename TestVecFn>
void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
                      TestVecFn test_vec) {
  std::array<InlVec<T>, kBatchSize> vector_batch{};

  while (state.KeepRunningBatch(kBatchSize)) {
    // Prepare batch
    state.PauseTiming();  // Setup cost must not count toward the measurement.
    for (size_t i = 0; i < kBatchSize; ++i) {
      prepare_vec(vector_batch.data() + i, i);
    }
    benchmark::DoNotOptimize(vector_batch);
    state.ResumeTiming();

    // Test batch
    for (size_t i = 0; i < kBatchSize; ++i) {
      test_vec(vector_batch.data() + i, i);
    }
  }
}
448 
// Benchmarks VecT(size). Prepare destroys the slot in place; test re-creates
// it with placement new, so only construction is timed.
template <typename T, size_t ToSize>
void BM_ConstructFromSize(benchmark::State& state) {
  using VecT = InlVec<T>;
  auto size = ToSize;  // Runtime variable so the size isn't constant-folded.
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(size);
        ::new (ptr) VecT(size);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
464 
// Benchmarks VecT(size, ref): size copies of a reference element, constructed
// in place after an explicit destructor call.
template <typename T, size_t ToSize>
void BM_ConstructFromSizeRef(benchmark::State& state) {
  using VecT = InlVec<T>;
  auto size = ToSize;  // Runtime variables so the args aren't constant-folded.
  auto ref = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(size);
        benchmark::DoNotOptimize(ref);
        ::new (ptr) VecT(size, ref);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
482 
// Benchmarks iterator-range construction from a fixed-size std::array.
template <typename T, size_t ToSize>
void BM_ConstructFromRange(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<T, ToSize> arr{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(arr);
        ::new (ptr) VecT(arr.begin(), arr.end());
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
498 
// Benchmarks the copy constructor from a shared source vector.
template <typename T, size_t ToSize>
void BM_ConstructFromCopy(benchmark::State& state) {
  using VecT = InlVec<T>;
  VecT other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) { vec->~VecT(); },
      /* test_vec = */
      [&](void* ptr, size_t) {
        benchmark::DoNotOptimize(other_vec);
        ::new (ptr) VecT(other_vec);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
515 
// Benchmarks the move constructor: each slot gets its own freshly-sized
// source vector (a moved-from source can't be reused across iterations).
template <typename T, size_t ToSize>
void BM_ConstructFromMove(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->~VecT();  // Slot is re-created via placement new in test_vec.
      },
      /* test_vec = */
      [&](void* ptr, size_t i) {
        benchmark::DoNotOptimize(vector_batch[i]);
        ::new (ptr) VecT(std::move(vector_batch[i]));
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
536 
537 // Measure cost of copy-constructor+destructor.
// Copy-constructs (and destroys) a vector of trivially-copyable elements.
void BM_CopyTrivial(benchmark::State& state) {
  const int n = state.range(0);
  InlVec<int64_t> src(n);
  for (auto s : state) {
    InlVec<int64_t> copy(src);
    benchmark::DoNotOptimize(copy);
  }  // `copy` is destroyed here, so its destructor is part of the measurement.
}
BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
547 
548 // Measure cost of copy-constructor+destructor.
// Copy-constructs (and destroys) a vector of non-trivial (nested vector)
// elements.
void BM_CopyNonTrivial(benchmark::State& state) {
  const int n = state.range(0);
  InlVec<InlVec<int64_t>> src(n);
  for (auto s : state) {
    InlVec<InlVec<int64_t>> copy(src);
    benchmark::DoNotOptimize(copy);
  }  // `copy` is destroyed here, so its destructor is part of the measurement.
}
BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
558 
// Benchmarks assign(size, ref) from a FromSize-element vector to ToSize
// copies of a reference element.
template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignSizeRef(benchmark::State& state) {
  auto size = ToSize;  // Runtime variables so the args aren't constant-folded.
  auto ref = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(size);
        benchmark::DoNotOptimize(ref);
        vec->assign(size, ref);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);
575 
// Benchmarks iterator-range assign from a FromSize-element vector.
template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignRange(benchmark::State& state) {
  std::array<T, ToSize> arr{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(arr);
        vec->assign(arr.begin(), arr.end());
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);
590 
// Benchmarks copy-assignment from a shared ToSize-element source vector.
template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignFromCopy(benchmark::State& state) {
  InlVec<T> other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(other_vec);
        *vec = other_vec;
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);
605 
// Benchmarks move-assignment: each slot gets its own per-iteration source
// vector, since a moved-from source can't be reused.
template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignFromMove(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t i) {
        benchmark::DoNotOptimize(vector_batch[i]);
        *vec = std::move(vector_batch[i]);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);
626 
// Benchmarks resize(ToSize) starting from a FromSize-element vector.
template <typename T, size_t FromSize, size_t ToSize>
void BM_ResizeSize(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();  // clear() first so every slot starts identically.
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);
641 
// Benchmarks resize(ToSize, t), filling any growth with copies of `t`.
template <typename T, size_t FromSize, size_t ToSize>
void BM_ResizeSizeRef(benchmark::State& state) {
  auto t = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(t);
        vec->resize(ToSize, t);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);
660 
// Benchmarks single-element insert at the middle of the vector.
template <typename T, size_t FromSize, size_t ToSize>
void BM_InsertSizeRef(benchmark::State& state) {
  auto t = T();
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(t);
        auto* pos = vec->data() + (vec->size() / 2);  // Midpoint insertion.
        vec->insert(pos, t);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);
680 
// Benchmarks range insert of ToSize elements at the middle of the vector.
template <typename T, size_t FromSize, size_t ToSize>
void BM_InsertRange(benchmark::State& state) {
  InlVec<T> other_vec(ToSize);
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t) {
        benchmark::DoNotOptimize(other_vec);
        auto* pos = vec->data() + (vec->size() / 2);  // Midpoint insertion.
        vec->insert(pos, other_vec.begin(), other_vec.end());
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);
700 
// Benchmarks emplace_back() of a default-constructed element.
template <typename T, size_t FromSize>
void BM_EmplaceBack(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);
715 
// Benchmarks pop_back(). FromSize >= 1 via the registration macros, so the
// vector is never empty when popped.
template <typename T, size_t FromSize>
void BM_PopBack(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->pop_back(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);
730 
// Benchmarks erasing one element at the middle of the vector.
template <typename T, size_t FromSize>
void BM_EraseOne(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) {
        auto* pos = vec->data() + (vec->size() / 2);  // Midpoint erase.
        vec->erase(pos);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);
748 
// Benchmarks the iterator-pair erase overload on a one-element range at the
// midpoint (same logical removal as BM_EraseOne, different API path).
template <typename T, size_t FromSize>
void BM_EraseRange(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) {
        auto* pos = vec->data() + (vec->size() / 2);
        vec->erase(pos, pos + 1);
      });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);
766 
// Benchmarks clear() of a FromSize-element vector (also releases heap storage
// for InlinedVector).
template <typename T, size_t FromSize>
void BM_Clear(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
}
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
776 
// Benchmarks reserve(ToCapacity) on a FromSize-element vector.
template <typename T, size_t FromSize, size_t ToCapacity>
void BM_Reserve(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(FromSize);
      },
      /* test_vec = */
      [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);
791 
// Benchmarks shrink_to_fit() from capacity FromCapacity down to size
// ToCapacity.
template <typename T, size_t FromCapacity, size_t ToCapacity>
void BM_ShrinkToFit(benchmark::State& state) {
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [](InlVec<T>* vec, size_t) {
        vec->clear();
        vec->resize(ToCapacity);     // Size after shrink.
        vec->reserve(FromCapacity);  // Excess capacity to be released.
      },
      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);
806 
// Benchmarks swapping a FromSize-element vector with a ToSize-element one,
// covering inline<->inline, inline<->heap and heap<->heap combinations.
template <typename T, size_t FromSize, size_t ToSize>
void BM_Swap(benchmark::State& state) {
  using VecT = InlVec<T>;
  std::array<VecT, kBatchSize> vector_batch{};
  BatchedBenchmark<T>(
      state,
      /* prepare_vec = */
      [&](InlVec<T>* vec, size_t i) {
        vector_batch[i].clear();
        vector_batch[i].resize(ToSize);
        vec->resize(FromSize);
      },
      /* test_vec = */
      [&](InlVec<T>* vec, size_t i) {
        using std::swap;  // ADL swap, as user code would call it.
        benchmark::DoNotOptimize(vector_batch[i]);
        swap(*vec, vector_batch[i]);
      });
}
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);
828 
829 }  // namespace
830