1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/container/internal/raw_hash_set.h"
16
17 #include <algorithm>
18 #include <array>
19 #include <atomic>
20 #include <cmath>
21 #include <cstddef>
22 #include <cstdint>
23 #include <deque>
24 #include <functional>
25 #include <iostream>
26 #include <iterator>
27 #include <list>
28 #include <map>
29 #include <memory>
30 #include <numeric>
31 #include <ostream>
32 #include <random>
33 #include <string>
34 #include <tuple>
35 #include <type_traits>
36 #include <unordered_map>
37 #include <unordered_set>
38 #include <utility>
39 #include <vector>
40
41 #include "gmock/gmock.h"
42 #include "gtest/gtest.h"
43 #include "absl/base/attributes.h"
44 #include "absl/base/config.h"
45 #include "absl/base/internal/cycleclock.h"
46 #include "absl/base/prefetch.h"
47 #include "absl/container/flat_hash_map.h"
48 #include "absl/container/flat_hash_set.h"
49 #include "absl/container/internal/container_memory.h"
50 #include "absl/container/internal/hash_function_defaults.h"
51 #include "absl/container/internal/hash_policy_testing.h"
52 #include "absl/container/internal/hashtable_debug.h"
53 #include "absl/container/internal/hashtablez_sampler.h"
54 #include "absl/container/internal/test_allocator.h"
55 #include "absl/container/internal/test_instance_tracker.h"
56 #include "absl/container/node_hash_set.h"
57 #include "absl/functional/function_ref.h"
58 #include "absl/hash/hash.h"
59 #include "absl/log/check.h"
60 #include "absl/log/log.h"
61 #include "absl/memory/memory.h"
62 #include "absl/meta/type_traits.h"
63 #include "absl/strings/str_cat.h"
64 #include "absl/strings/string_view.h"
65
66 namespace absl {
67 ABSL_NAMESPACE_BEGIN
68 namespace container_internal {
69
70 struct RawHashSetTestOnlyAccess {
71 template <typename C>
72 static auto GetCommon(const C& c) -> decltype(c.common()) {
73 return c.common();
74 }
75 template <typename C>
76 static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
77 return c.slot_array();
78 }
79 template <typename C>
80 static size_t CountTombstones(const C& c) {
81 return c.common().TombstonesCount();
82 }
83 };
84
85 namespace {
86
87 using ::testing::ElementsAre;
88 using ::testing::ElementsAreArray;
89 using ::testing::Eq;
90 using ::testing::Ge;
91 using ::testing::Lt;
92 using ::testing::Pair;
93 using ::testing::UnorderedElementsAre;
94
95 // Convenience function to static cast to ctrl_t.
96 ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
97
98 TEST(GrowthInfoTest, GetGrowthLeft) {
99 GrowthInfo gi;
100 gi.InitGrowthLeftNoDeleted(5);
101 EXPECT_EQ(gi.GetGrowthLeft(), 5);
102 gi.OverwriteFullAsDeleted();
103 EXPECT_EQ(gi.GetGrowthLeft(), 5);
104 }
105
106 TEST(GrowthInfoTest, HasNoDeleted) {
107 GrowthInfo gi;
108 gi.InitGrowthLeftNoDeleted(5);
109 EXPECT_TRUE(gi.HasNoDeleted());
110 gi.OverwriteFullAsDeleted();
111 EXPECT_FALSE(gi.HasNoDeleted());
112 // After reinitialization we have no deleted slots.
113 gi.InitGrowthLeftNoDeleted(5);
114 EXPECT_TRUE(gi.HasNoDeleted());
115 }
116
117 TEST(GrowthInfoTest, HasNoDeletedAndGrowthLeft) {
118 GrowthInfo gi;
119 gi.InitGrowthLeftNoDeleted(5);
120 EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
121 gi.OverwriteFullAsDeleted();
122 EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
123 gi.InitGrowthLeftNoDeleted(0);
124 EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
125 gi.OverwriteFullAsDeleted();
126 EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
127 // After reinitialization we have no deleted slots.
128 gi.InitGrowthLeftNoDeleted(5);
129 EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
130 }
131
132 TEST(GrowthInfoTest, HasNoGrowthLeftAndNoDeleted) {
133 GrowthInfo gi;
134 gi.InitGrowthLeftNoDeleted(1);
135 EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
136 gi.OverwriteEmptyAsFull();
137 EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
138 gi.OverwriteFullAsDeleted();
139 EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
140 gi.OverwriteFullAsEmpty();
141 EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
142 gi.InitGrowthLeftNoDeleted(0);
143 EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
144 gi.OverwriteFullAsEmpty();
145 EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
146 }
147
148 TEST(GrowthInfoTest, OverwriteFullAsEmpty) {
149 GrowthInfo gi;
150 gi.InitGrowthLeftNoDeleted(5);
151 gi.OverwriteFullAsEmpty();
152 EXPECT_EQ(gi.GetGrowthLeft(), 6);
153 gi.OverwriteFullAsDeleted();
154 EXPECT_EQ(gi.GetGrowthLeft(), 6);
155 gi.OverwriteFullAsEmpty();
156 EXPECT_EQ(gi.GetGrowthLeft(), 7);
157 EXPECT_FALSE(gi.HasNoDeleted());
158 }
159
160 TEST(GrowthInfoTest, OverwriteEmptyAsFull) {
161 GrowthInfo gi;
162 gi.InitGrowthLeftNoDeleted(5);
163 gi.OverwriteEmptyAsFull();
164 EXPECT_EQ(gi.GetGrowthLeft(), 4);
165 gi.OverwriteFullAsDeleted();
166 EXPECT_EQ(gi.GetGrowthLeft(), 4);
167 gi.OverwriteEmptyAsFull();
168 EXPECT_EQ(gi.GetGrowthLeft(), 3);
169 EXPECT_FALSE(gi.HasNoDeleted());
170 }
171
172 TEST(GrowthInfoTest, OverwriteControlAsFull) {
173 GrowthInfo gi;
174 gi.InitGrowthLeftNoDeleted(5);
175 gi.OverwriteControlAsFull(ctrl_t::kEmpty);
176 EXPECT_EQ(gi.GetGrowthLeft(), 4);
177 gi.OverwriteControlAsFull(ctrl_t::kDeleted);
178 EXPECT_EQ(gi.GetGrowthLeft(), 4);
179 gi.OverwriteFullAsDeleted();
180 gi.OverwriteControlAsFull(ctrl_t::kDeleted);
181 // We do not count the number of deleted slots, so the bit sticks until the next rehash.
182 EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
183 EXPECT_FALSE(gi.HasNoDeleted());
184 }
185
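// NormalizeCapacity rounds a requested capacity up to the next value of the
// form 2^k - 1, which (as the expectations below illustrate) appears to be the
// shape every non-empty table capacity takes.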
186 TEST(Util, NormalizeCapacity) {
187 EXPECT_EQ(1, NormalizeCapacity(0));
188 EXPECT_EQ(1, NormalizeCapacity(1));
189 EXPECT_EQ(3, NormalizeCapacity(2));
190 EXPECT_EQ(3, NormalizeCapacity(3));
191 EXPECT_EQ(7, NormalizeCapacity(4));
192 EXPECT_EQ(7, NormalizeCapacity(7));
193 EXPECT_EQ(15, NormalizeCapacity(8));
194 EXPECT_EQ(15, NormalizeCapacity(15));
195 EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
196 EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
197 }
198
199 TEST(Util, GrowthAndCapacity) {
200 // Verify that NormalizeCapacity(GrowthToLowerboundCapacity(growth)) gives the
201 // minimum capacity that has enough growth.
202 for (size_t growth = 0; growth < 10000; ++growth) {
203 SCOPED_TRACE(growth);
204 size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
205 // The capacity is large enough for `growth`.
206 EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
207 // For (capacity+1) < kWidth, growth should equal capacity.
208 if (capacity + 1 < Group::kWidth) {
209 EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
210 } else {
211 EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
212 }
213 if (growth != 0 && capacity > 1) {
214 // There is no smaller capacity that works.
215 EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
216 }
217 }
218
219 for (size_t capacity = Group::kWidth - 1; capacity < 10000;
220 capacity = 2 * capacity + 1) {
221 SCOPED_TRACE(capacity);
222 size_t growth = CapacityToGrowth(capacity);
223 EXPECT_THAT(growth, Lt(capacity));
224 EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
225 EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
226 }
227 }
228
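// probe_seq appears to implement quadratic probing in units of the group
// width: each step advances by one more multiple of kWidth than the last,
// modulo capacity + 1. With width 16 and mask 127 that yields the sequence
// 0, 16, 48, 96, 32, 112, 80, 64 checked below; a starting hash of 128 wraps
// to the same sequence because offsets are taken modulo 128.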
229 TEST(Util, probe_seq) {
230 probe_seq<16> seq(0, 127);
231 auto gen = [&]() {
232 size_t res = seq.offset();
233 seq.next();
234 return res;
235 };
236 std::vector<size_t> offsets(8);
237 std::generate_n(offsets.begin(), 8, gen);
238 EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
239 seq = probe_seq<16>(128, 127);
240 std::generate_n(offsets.begin(), 8, gen);
241 EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
242 }
243
244 TEST(BitMask, Smoke) {
245 EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
246 EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
247
248 EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
249 EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
250 EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
251 EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
252 EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
253 EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
254 EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
255 EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
256 }
257
258 TEST(BitMask, WithShift_MatchPortable) {
259 // See the non-SSE version of Group for details on what this math is for.
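// In brief (a sketch, assuming this mirrors the portable Group::Match): each
// control byte XORed with the broadcast hash becomes zero exactly where it
// matches, and (x - lsbs) & ~x & kMsbs8Bytes sets the high bit of every zero
// byte. Here ctrl holds bytes 0x10..0x17 and the hash is 0x12, so byte 2 is
// the true match; the extra high bit in byte 3 of the expected mask is a
// benign false positive caused by the borrow out of the zero byte, which is
// why only the lowest set bit (*b == 2) is asserted on.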
260 uint64_t ctrl = 0x1716151413121110;
261 uint64_t hash = 0x12;
262 constexpr uint64_t lsbs = 0x0101010101010101ULL;
263 auto x = ctrl ^ (lsbs * hash);
264 uint64_t mask = (x - lsbs) & ~x & kMsbs8Bytes;
265 EXPECT_EQ(0x0000000080800000, mask);
266
267 BitMask<uint64_t, 8, 3> b(mask);
268 EXPECT_EQ(*b, 2);
269 }
270
271 constexpr uint64_t kSome8BytesMask = /* */ 0x8000808080008000ULL;
272 constexpr uint64_t kSome8BytesMaskAllOnes = 0xff00ffffff00ff00ULL;
273 constexpr auto kSome8BytesMaskBits = std::array<int, 5>{1, 3, 4, 5, 7};
274
275
276 TEST(BitMask, WithShift_FullMask) {
277 EXPECT_THAT((BitMask<uint64_t, 8, 3>(kMsbs8Bytes)),
278 ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
279 EXPECT_THAT(
280 (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(kMsbs8Bytes)),
281 ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
282 EXPECT_THAT(
283 (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(~uint64_t{0})),
284 ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
285 }
286
287 TEST(BitMask, WithShift_EmptyMask) {
288 EXPECT_THAT((BitMask<uint64_t, 8, 3>(0)), ElementsAre());
289 EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(0)),
290 ElementsAre());
291 }
292
293 TEST(BitMask, WithShift_SomeMask) {
294 EXPECT_THAT((BitMask<uint64_t, 8, 3>(kSome8BytesMask)),
295 ElementsAreArray(kSome8BytesMaskBits));
296 EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
297 kSome8BytesMask)),
298 ElementsAreArray(kSome8BytesMaskBits));
299 EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
300 kSome8BytesMaskAllOnes)),
301 ElementsAreArray(kSome8BytesMaskBits));
302 }
303
304 TEST(BitMask, WithShift_SomeMaskExtraBitsForNullify) {
305 // Verify that adding extra bits into non-zero bytes is fine.
306 uint64_t extra_bits = 77;
307 for (int i = 0; i < 100; ++i) {
308 // Add extra bits, but keep zero bytes untouched.
309 uint64_t extra_mask = extra_bits & kSome8BytesMaskAllOnes;
310 EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
311 kSome8BytesMask | extra_mask)),
312 ElementsAreArray(kSome8BytesMaskBits))
313 << i << " " << extra_mask;
314 extra_bits = (extra_bits + 1) * 3;
315 }
316 }
317
318 TEST(BitMask, LeadingTrailing) {
319 EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
320 EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
321
322 EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
323 EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
324
325 EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
326 EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
327
328 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
329 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
330
331 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
332 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
333
334 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
335 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
336 }
337
338 TEST(Group, EmptyGroup) {
339 for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
340 }
341
342 TEST(Group, Match) {
343 if (Group::kWidth == 16) {
344 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
345 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
346 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
347 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
348 EXPECT_THAT(Group{group}.Match(0), ElementsAre());
349 EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
350 EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
351 EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
352 EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
353 } else if (Group::kWidth == 8) {
354 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
355 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
356 ctrl_t::kSentinel, CtrlT(1)};
357 EXPECT_THAT(Group{group}.Match(0), ElementsAre());
358 EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
359 EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
360 } else {
361 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
362 }
363 }
364
365 TEST(Group, MaskEmpty) {
366 if (Group::kWidth == 16) {
367 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
368 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
369 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
370 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
371 EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
372 EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
373 } else if (Group::kWidth == 8) {
374 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
375 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
376 ctrl_t::kSentinel, CtrlT(1)};
377 EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
378 EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
379 } else {
380 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
381 }
382 }
383
384 TEST(Group, MaskFull) {
385 if (Group::kWidth == 16) {
386 ctrl_t group[] = {
387 ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
388 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
389 CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1),
390 CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)};
391 EXPECT_THAT(Group{group}.MaskFull(),
392 ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15));
393 } else if (Group::kWidth == 8) {
394 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty,
395 ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel,
396 ctrl_t::kSentinel, CtrlT(1)};
397 EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7));
398 } else {
399 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
400 }
401 }
402
403 TEST(Group, MaskNonFull) {
404 if (Group::kWidth == 16) {
405 ctrl_t group[] = {
406 ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
407 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
408 CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1),
409 CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)};
410 EXPECT_THAT(Group{group}.MaskNonFull(),
411 ElementsAre(0, 2, 4, 6, 10, 13, 14));
412 } else if (Group::kWidth == 8) {
413 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty,
414 ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel,
415 ctrl_t::kSentinel, CtrlT(1)};
416 EXPECT_THAT(Group{group}.MaskNonFull(), ElementsAre(0, 2, 3, 5, 6));
417 } else {
418 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
419 }
420 }
421
422 TEST(Group, MaskEmptyOrDeleted) {
423 if (Group::kWidth == 16) {
424 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
425 ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
426 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
427 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
428 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
429 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
430 } else if (Group::kWidth == 8) {
431 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
432 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
433 ctrl_t::kSentinel, CtrlT(1)};
434 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
435 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
436 } else {
437 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
438 }
439 }
440
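// A sketch of what this exercises: ConvertDeletedToEmptyAndFullToDeleted is
// (as the checks below suggest) the preparation step for an in-place rehash.
// It turns kDeleted slots back into kEmpty and marks full slots as kDeleted so
// their elements can be re-placed, keeping the mirrored control bytes past the
// sentinel consistent.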
441 TEST(Batch, DropDeletes) {
442 constexpr size_t kCapacity = 63;
443 constexpr size_t kGroupWidth = container_internal::Group::kWidth;
444 std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
445 ctrl[kCapacity] = ctrl_t::kSentinel;
446 std::vector<ctrl_t> pattern = {
447 ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
448 ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
449 for (size_t i = 0; i != kCapacity; ++i) {
450 ctrl[i] = pattern[i % pattern.size()];
451 if (i < kGroupWidth - 1)
452 ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
453 }
454 ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
455 ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
456 for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
457 ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
458 if (i == kCapacity) expected = ctrl_t::kSentinel;
459 if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
460 if (IsFull(expected)) expected = ctrl_t::kDeleted;
461 EXPECT_EQ(ctrl[i], expected)
462 << i << " " << static_cast<int>(pattern[i % pattern.size()]);
463 }
464 }
465
466 TEST(Group, CountLeadingEmptyOrDeleted) {
467 const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
468 const std::vector<ctrl_t> full_examples = {
469 CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3),
470 CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
471
472 for (ctrl_t empty : empty_examples) {
473 std::vector<ctrl_t> e(Group::kWidth, empty);
474 EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
475 for (ctrl_t full : full_examples) {
476 for (size_t i = 0; i != Group::kWidth; ++i) {
477 std::vector<ctrl_t> f(Group::kWidth, empty);
478 f[i] = full;
479 EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
480 }
481 std::vector<ctrl_t> f(Group::kWidth, empty);
482 f[Group::kWidth * 2 / 3] = full;
483 f[Group::kWidth / 2] = full;
484 EXPECT_EQ(Group::kWidth / 2,
485 Group{f.data()}.CountLeadingEmptyOrDeleted());
486 }
487 }
488 }
489
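// A minimal slot policy for instantiating raw_hash_set directly in tests: the
// slot, key, and init types are all T. kTransferable controls the value of the
// integral_constant returned from transfer() (presumably letting the table
// treat transfers as trivial), and kSoo opts the policy into small-object
// (inline) storage.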
490 template <class T, bool kTransferable = false, bool kSoo = false>
491 struct ValuePolicy {
492 using slot_type = T;
493 using key_type = T;
494 using init_type = T;
495
496 template <class Allocator, class... Args>
497 static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
498 absl::allocator_traits<Allocator>::construct(*alloc, slot,
499 std::forward<Args>(args)...);
500 }
501
502 template <class Allocator>
503 static void destroy(Allocator* alloc, slot_type* slot) {
504 absl::allocator_traits<Allocator>::destroy(*alloc, slot);
505 }
506
507 template <class Allocator>
508 static std::integral_constant<bool, kTransferable> transfer(
509 Allocator* alloc, slot_type* new_slot, slot_type* old_slot) {
510 construct(alloc, new_slot, std::move(*old_slot));
511 destroy(alloc, old_slot);
512 return {};
513 }
514
515 static T& element(slot_type* slot) { return *slot; }
516
517 template <class F, class... Args>
518 static decltype(absl::container_internal::DecomposeValue(
519 std::declval<F>(), std::declval<Args>()...))
520 apply(F&& f, Args&&... args) {
521 return absl::container_internal::DecomposeValue(
522 std::forward<F>(f), std::forward<Args>(args)...);
523 }
524
525 template <class Hash>
526 static constexpr HashSlotFn get_hash_slot_fn() {
527 return nullptr;
528 }
529
530 static constexpr bool soo_enabled() { return kSoo; }
531 };
532
533 using IntPolicy = ValuePolicy<int64_t>;
534 using Uint8Policy = ValuePolicy<uint8_t>;
535
536 using TranferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
537
538 // For testing SOO (small object optimization, i.e. storing a single element inline in the table).
539 template <int N>
540 class SizedValue {
541 public:
542 SizedValue(int64_t v) { // NOLINT
543 vals_[0] = v;
544 }
545 SizedValue() : SizedValue(0) {}
546 SizedValue(const SizedValue&) = default;
547 SizedValue& operator=(const SizedValue&) = default;
548
549 int64_t operator*() const {
550 // Suppress erroneous uninitialized memory errors on GCC.
551 #if !defined(__clang__) && defined(__GNUC__)
552 #pragma GCC diagnostic push
553 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
554 #endif
555 return vals_[0];
556 #if !defined(__clang__) && defined(__GNUC__)
557 #pragma GCC diagnostic pop
558 #endif
559 }
560 explicit operator int() const { return **this; }
561 explicit operator int64_t() const { return **this; }
562
563 template <typename H>
564 friend H AbslHashValue(H h, SizedValue sv) {
565 return H::combine(std::move(h), *sv);
566 }
567 bool operator==(const SizedValue& rhs) const { return **this == *rhs; }
568
569 private:
570 int64_t vals_[N / sizeof(int64_t)];
571 };
572 template <int N, bool kSoo>
573 using SizedValuePolicy =
574 ValuePolicy<SizedValue<N>, /*kTransferable=*/true, kSoo>;
575
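// A policy whose slot holds a std::pair<std::string, std::string>. Combined
// with the transparent StringHash/StringEq below, it backs StringTable and is
// used to exercise heterogeneous string_view lookup and piecewise construction
// through apply()/PairArgs.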
576 class StringPolicy {
577 template <class F, class K, class V,
578 class = typename std::enable_if<
579 std::is_convertible<const K&, absl::string_view>::value>::type>
580 decltype(std::declval<F>()(
581 std::declval<const absl::string_view&>(), std::piecewise_construct,
582 std::declval<std::tuple<K>>(),
583 std::declval<V>())) static apply_impl(F&& f,
584 std::pair<std::tuple<K>, V> p) {
585 const absl::string_view& key = std::get<0>(p.first);
586 return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
587 std::move(p.second));
588 }
589
590 public:
591 struct slot_type {
592 struct ctor {};
593
594 template <class... Ts>
595 explicit slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
596
597 std::pair<std::string, std::string> pair;
598 };
599
600 using key_type = std::string;
601 using init_type = std::pair<std::string, std::string>;
602
603 template <class allocator_type, class... Args>
604 static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
605 std::allocator_traits<allocator_type>::construct(
606 *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
607 }
608
609 template <class allocator_type>
610 static void destroy(allocator_type* alloc, slot_type* slot) {
611 std::allocator_traits<allocator_type>::destroy(*alloc, slot);
612 }
613
614 template <class allocator_type>
615 static void transfer(allocator_type* alloc, slot_type* new_slot,
616 slot_type* old_slot) {
617 construct(alloc, new_slot, std::move(old_slot->pair));
618 destroy(alloc, old_slot);
619 }
620
621 static std::pair<std::string, std::string>& element(slot_type* slot) {
622 return slot->pair;
623 }
624
625 template <class F, class... Args>
626 static auto apply(F&& f, Args&&... args)
627 -> decltype(apply_impl(std::forward<F>(f),
628 PairArgs(std::forward<Args>(args)...))) {
629 return apply_impl(std::forward<F>(f),
630 PairArgs(std::forward<Args>(args)...));
631 }
632
633 template <class Hash>
634 static constexpr HashSlotFn get_hash_slot_fn() {
635 return nullptr;
636 }
637 };
638
639 struct StringHash : absl::Hash<absl::string_view> {
640 using is_transparent = void;
641 };
642 struct StringEq : std::equal_to<absl::string_view> {
643 using is_transparent = void;
644 };
645
646 struct StringTable
647 : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
648 using Base = typename StringTable::raw_hash_set;
649 StringTable() = default;
650 using Base::Base;
651 };
652
653 template <typename T, bool kTransferable = false, bool kSoo = false>
654 struct ValueTable
655 : raw_hash_set<ValuePolicy<T, kTransferable, kSoo>, hash_default_hash<T>,
656 std::equal_to<T>, std::allocator<T>> {
657 using Base = typename ValueTable::raw_hash_set;
658 using Base::Base;
659 };
660
661 using IntTable = ValueTable<int64_t>;
662 using Uint8Table = ValueTable<uint8_t>;
663
664 using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
665
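// kNonSooSize is chosen to exceed the inline (SOO) buffer, so NonSooIntTable
// below should always keep its elements on the heap.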
666 constexpr size_t kNonSooSize = sizeof(HeapOrSoo) + 8;
667 static_assert(sizeof(SizedValue<kNonSooSize>) >= kNonSooSize, "too small");
668 using NonSooIntTable = ValueTable<SizedValue<kNonSooSize>>;
669 using SooIntTable = ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true>;
670
671 template <typename T>
672 struct CustomAlloc : std::allocator<T> {
673 CustomAlloc() = default;
674
675 template <typename U>
676 explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
677
678 template <class U>
679 struct rebind {
680 using other = CustomAlloc<U>;
681 };
682 };
683
684 struct CustomAllocIntTable
685 : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
686 std::equal_to<int64_t>, CustomAlloc<int64_t>> {
687 using Base = typename CustomAllocIntTable::raw_hash_set;
688 using Base::Base;
689 };
690
691 struct MinimumAlignmentUint8Table
692 : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
693 std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
694 using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
695 using Base::Base;
696 };
697
698 // Allows for freezing the allocator to expect no further allocations.
699 template <typename T>
700 struct FreezableAlloc : std::allocator<T> {
701 explicit FreezableAlloc(bool* f) : frozen(f) {}
702
703 template <typename U>
704 explicit FreezableAlloc(const FreezableAlloc<U>& other)
705 : frozen(other.frozen) {}
706
707 template <class U>
708 struct rebind {
709 using other = FreezableAlloc<U>;
710 };
711
712 T* allocate(size_t n) {
713 EXPECT_FALSE(*frozen);
714 return std::allocator<T>::allocate(n);
715 }
716
717 bool* frozen;
718 };
719
720 template <int N>
721 struct FreezableSizedValueSooTable
722 : raw_hash_set<SizedValuePolicy<N, /*kSoo=*/true>,
723 container_internal::hash_default_hash<SizedValue<N>>,
724 std::equal_to<SizedValue<N>>,
725 FreezableAlloc<SizedValue<N>>> {
726 using Base = typename FreezableSizedValueSooTable::raw_hash_set;
727 using Base::Base;
728 };
729
730 struct BadFastHash {
731 template <class T>
732 size_t operator()(const T&) const {
733 return 0;
734 }
735 };
736
737 struct BadHashFreezableIntTable
738 : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
739 FreezableAlloc<int64_t>> {
740 using Base = typename BadHashFreezableIntTable::raw_hash_set;
741 using Base::Base;
742 };
743
744 struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
745 std::allocator<int>> {
746 using Base = typename BadTable::raw_hash_set;
747 BadTable() = default;
748 using Base::Base;
749 };
750
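// The hasher, equality functor, and allocator used below are all empty types,
// so via the empty base optimization they should contribute nothing to
// sizeof(raw_hash_set); MockTable and GenerationData model the expected
// layout.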
751 TEST(Table, EmptyFunctorOptimization) {
752 static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
753 static_assert(std::is_empty<std::allocator<int>>::value, "");
754
755 struct MockTable {
756 void* ctrl;
757 void* slots;
758 size_t size;
759 size_t capacity;
760 };
761 struct StatelessHash {
762 size_t operator()(absl::string_view) const { return 0; }
763 };
764 struct StatefulHash : StatelessHash {
765 size_t dummy;
766 };
767
768 struct GenerationData {
769 size_t reserved_growth;
770 size_t reservation_size;
771 GenerationType* generation;
772 };
773
774 // Ignore unreachable-code warning. Compiler thinks one branch of each ternary
775 // conditional is unreachable.
776 #if defined(__clang__)
777 #pragma clang diagnostic push
778 #pragma clang diagnostic ignored "-Wunreachable-code"
779 #endif
780 constexpr size_t mock_size = sizeof(MockTable);
781 constexpr size_t generation_size =
782 SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
783 #if defined(__clang__)
784 #pragma clang diagnostic pop
785 #endif
786
787 EXPECT_EQ(
788 mock_size + generation_size,
789 sizeof(
790 raw_hash_set<StringPolicy, StatelessHash,
791 std::equal_to<absl::string_view>, std::allocator<int>>));
792
793 EXPECT_EQ(
794 mock_size + sizeof(StatefulHash) + generation_size,
795 sizeof(
796 raw_hash_set<StringPolicy, StatefulHash,
797 std::equal_to<absl::string_view>, std::allocator<int>>));
798 }
799
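// SooTest runs each typed test below against both a table that can store a
// single element inline (SooIntTable) and one whose elements are too large for
// inline storage (NonSooIntTable).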
800 template <class TableType>
801 class SooTest : public testing::Test {};
802
803 using SooTableTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
804 TYPED_TEST_SUITE(SooTest, SooTableTypes);
805
806 TYPED_TEST(SooTest, Empty) {
807 TypeParam t;
808 EXPECT_EQ(0, t.size());
809 EXPECT_TRUE(t.empty());
810 }
811
812 TYPED_TEST(SooTest, LookupEmpty) {
813 TypeParam t;
814 auto it = t.find(0);
815 EXPECT_TRUE(it == t.end());
816 }
817
818 TYPED_TEST(SooTest, Insert1) {
819 TypeParam t;
820 EXPECT_TRUE(t.find(0) == t.end());
821 auto res = t.emplace(0);
822 EXPECT_TRUE(res.second);
823 EXPECT_THAT(*res.first, 0);
824 EXPECT_EQ(1, t.size());
825 EXPECT_THAT(*t.find(0), 0);
826 }
827
828 TYPED_TEST(SooTest, Insert2) {
829 TypeParam t;
830 EXPECT_TRUE(t.find(0) == t.end());
831 auto res = t.emplace(0);
832 EXPECT_TRUE(res.second);
833 EXPECT_THAT(*res.first, 0);
834 EXPECT_EQ(1, t.size());
835 EXPECT_TRUE(t.find(1) == t.end());
836 res = t.emplace(1);
837 EXPECT_TRUE(res.second);
838 EXPECT_THAT(*res.first, 1);
839 EXPECT_EQ(2, t.size());
840 EXPECT_THAT(*t.find(0), 0);
841 EXPECT_THAT(*t.find(1), 1);
842 }
843
844 TEST(Table, InsertCollision) {
845 BadTable t;
846 EXPECT_TRUE(t.find(1) == t.end());
847 auto res = t.emplace(1);
848 EXPECT_TRUE(res.second);
849 EXPECT_THAT(*res.first, 1);
850 EXPECT_EQ(1, t.size());
851
852 EXPECT_TRUE(t.find(2) == t.end());
853 res = t.emplace(2);
854 EXPECT_THAT(*res.first, 2);
855 EXPECT_TRUE(res.second);
856 EXPECT_EQ(2, t.size());
857
858 EXPECT_THAT(*t.find(1), 1);
859 EXPECT_THAT(*t.find(2), 2);
860 }
861
862 // Test that we do not add an existing element when we need to search through
863 // many groups with deleted elements.
864 TEST(Table, InsertCollisionAndFindAfterDelete) {
865 BadTable t; // all elements go to the same group.
866 // Have at least 2 groups with Group::kWidth collisions
867 // plus some extra collisions in the last group.
868 constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
869 for (size_t i = 0; i < kNumInserts; ++i) {
870 auto res = t.emplace(i);
871 EXPECT_TRUE(res.second);
872 EXPECT_THAT(*res.first, i);
873 EXPECT_EQ(i + 1, t.size());
874 }
875
876 // Remove elements one by one and check
877 // that we still can find all other elements.
878 for (size_t i = 0; i < kNumInserts; ++i) {
879 EXPECT_EQ(1, t.erase(i)) << i;
880 for (size_t j = i + 1; j < kNumInserts; ++j) {
881 EXPECT_THAT(*t.find(j), j);
882 auto res = t.emplace(j);
883 EXPECT_FALSE(res.second) << i << " " << j;
884 EXPECT_THAT(*res.first, j);
885 EXPECT_EQ(kNumInserts - i - 1, t.size());
886 }
887 }
888 EXPECT_TRUE(t.empty());
889 }
890
891 TYPED_TEST(SooTest, EraseInSmallTables) {
892 for (int64_t size = 0; size < 64; ++size) {
893 TypeParam t;
894 for (int64_t i = 0; i < size; ++i) {
895 t.insert(i);
896 }
897 for (int64_t i = 0; i < size; ++i) {
898 t.erase(i);
899 EXPECT_EQ(t.size(), size - i - 1);
900 for (int64_t j = i + 1; j < size; ++j) {
901 EXPECT_THAT(*t.find(j), j);
902 }
903 }
904 EXPECT_TRUE(t.empty());
905 }
906 }
907
908 TYPED_TEST(SooTest, InsertWithinCapacity) {
909 TypeParam t;
910 t.reserve(10);
911 const size_t original_capacity = t.capacity();
912 const auto addr = [&](int i) {
913 return reinterpret_cast<uintptr_t>(&*t.find(i));
914 };
915 // Inserting an element does not change capacity.
916 t.insert(0);
917 EXPECT_THAT(t.capacity(), original_capacity);
918 const uintptr_t original_addr_0 = addr(0);
919 // Inserting another element does not rehash.
920 t.insert(1);
921 EXPECT_THAT(t.capacity(), original_capacity);
922 EXPECT_THAT(addr(0), original_addr_0);
923 // Inserting lots of duplicate elements does not rehash.
924 for (int i = 0; i < 100; ++i) {
925 t.insert(i % 10);
926 }
927 EXPECT_THAT(t.capacity(), original_capacity);
928 EXPECT_THAT(addr(0), original_addr_0);
929 // Inserting a range of duplicate elements does not rehash.
930 std::vector<int> dup_range;
931 for (int i = 0; i < 100; ++i) {
932 dup_range.push_back(i % 10);
933 }
934 t.insert(dup_range.begin(), dup_range.end());
935 EXPECT_THAT(t.capacity(), original_capacity);
936 EXPECT_THAT(addr(0), original_addr_0);
937 }
938
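// SmallTableResizeTest exercises growth and shrink paths at small capacities,
// including (for SooIntTable) what is presumably the transition out of inline
// storage.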
939 template <class TableType>
940 class SmallTableResizeTest : public testing::Test {};
941
942 using SmallTableTypes =
943 ::testing::Types<IntTable, TransferableIntTable, SooIntTable>;
944 TYPED_TEST_SUITE(SmallTableResizeTest, SmallTableTypes);
945
946 TYPED_TEST(SmallTableResizeTest, InsertIntoSmallTable) {
947 TypeParam t;
948 for (int i = 0; i < 32; ++i) {
949 t.insert(i);
950 ASSERT_EQ(t.size(), i + 1);
951 for (int j = 0; j < i + 1; ++j) {
952 EXPECT_TRUE(t.find(j) != t.end());
953 EXPECT_EQ(*t.find(j), j);
954 }
955 }
956 }
957
958 TYPED_TEST(SmallTableResizeTest, ResizeGrowSmallTables) {
959 for (size_t source_size = 0; source_size < 32; ++source_size) {
960 for (size_t target_size = source_size; target_size < 32; ++target_size) {
961 for (bool rehash : {false, true}) {
962 TypeParam t;
963 for (size_t i = 0; i < source_size; ++i) {
964 t.insert(static_cast<int>(i));
965 }
966 if (rehash) {
967 t.rehash(target_size);
968 } else {
969 t.reserve(target_size);
970 }
971 for (size_t i = 0; i < source_size; ++i) {
972 EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
973 EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
974 }
975 }
976 }
977 }
978 }
979
980 TYPED_TEST(SmallTableResizeTest, ResizeReduceSmallTables) {
981 for (size_t source_size = 0; source_size < 32; ++source_size) {
982 for (size_t target_size = 0; target_size <= source_size; ++target_size) {
983 TypeParam t;
984 size_t inserted_count = std::min<size_t>(source_size, 5);
985 for (size_t i = 0; i < inserted_count; ++i) {
986 t.insert(static_cast<int>(i));
987 }
988 const size_t minimum_capacity = t.capacity();
989 t.reserve(source_size);
990 t.rehash(target_size);
991 if (target_size == 0) {
992 EXPECT_EQ(t.capacity(), minimum_capacity)
993 << "rehash(0) must resize to the minimum capacity";
994 }
995 for (size_t i = 0; i < inserted_count; ++i) {
996 EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
997 EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
998 }
999 }
1000 }
1001 }
1002
1003 TEST(Table, LazyEmplace) {
1004 StringTable t;
1005 bool called = false;
1006 auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
1007 called = true;
1008 f("abc", "ABC");
1009 });
1010 EXPECT_TRUE(called);
1011 EXPECT_THAT(*it, Pair("abc", "ABC"));
1012 called = false;
1013 it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
1014 called = true;
1015 f("abc", "DEF");
1016 });
1017 EXPECT_FALSE(called);
1018 EXPECT_THAT(*it, Pair("abc", "ABC"));
1019 }
1020
1021 TYPED_TEST(SooTest, ContainsEmpty) {
1022 TypeParam t;
1023
1024 EXPECT_FALSE(t.contains(0));
1025 }
1026
1027 TYPED_TEST(SooTest, Contains1) {
1028 TypeParam t;
1029
1030 EXPECT_TRUE(t.insert(0).second);
1031 EXPECT_TRUE(t.contains(0));
1032 EXPECT_FALSE(t.contains(1));
1033
1034 EXPECT_EQ(1, t.erase(0));
1035 EXPECT_FALSE(t.contains(0));
1036 }
1037
1038 TYPED_TEST(SooTest, Contains2) {
1039 TypeParam t;
1040
1041 EXPECT_TRUE(t.insert(0).second);
1042 EXPECT_TRUE(t.contains(0));
1043 EXPECT_FALSE(t.contains(1));
1044
1045 t.clear();
1046 EXPECT_FALSE(t.contains(0));
1047 }
1048
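// DecomposeType records constructor and assignment calls in the counters below
// so that TestDecompose can verify how many temporary keys the various
// insert/emplace overloads construct when the hash and equality functors are
// transparent.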
1049 int decompose_constructed;
1050 int decompose_copy_constructed;
1051 int decompose_copy_assigned;
1052 int decompose_move_constructed;
1053 int decompose_move_assigned;
1054 struct DecomposeType {
1055 DecomposeType(int i = 0) : i(i) { // NOLINT
1056 ++decompose_constructed;
1057 }
1058
1059 explicit DecomposeType(const char* d) : DecomposeType(*d) {}
1060
1061 DecomposeType(const DecomposeType& other) : i(other.i) {
1062 ++decompose_copy_constructed;
1063 }
1064 DecomposeType& operator=(const DecomposeType& other) {
1065 ++decompose_copy_assigned;
1066 i = other.i;
1067 return *this;
1068 }
1069 DecomposeType(DecomposeType&& other) : i(other.i) {
1070 ++decompose_move_constructed;
1071 }
1072 DecomposeType& operator=(DecomposeType&& other) {
1073 ++decompose_move_assigned;
1074 i = other.i;
1075 return *this;
1076 }
1077
1078 int i;
1079 };
1080
1081 struct DecomposeHash {
1082 using is_transparent = void;
1083 size_t operator()(const DecomposeType& a) const { return a.i; }
1084 size_t operator()(int a) const { return a; }
1085 size_t operator()(const char* a) const { return *a; }
1086 };
1087
1088 struct DecomposeEq {
1089 using is_transparent = void;
1090 bool operator()(const DecomposeType& a, const DecomposeType& b) const {
1091 return a.i == b.i;
1092 }
1093 bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
1094 bool operator()(const DecomposeType& a, const char* b) const {
1095 return a.i == *b;
1096 }
1097 };
1098
1099 struct DecomposePolicy {
1100 using slot_type = DecomposeType;
1101 using key_type = DecomposeType;
1102 using init_type = DecomposeType;
1103
1104 template <typename T>
1105 static void construct(void*, DecomposeType* slot, T&& v) {
1106 ::new (slot) DecomposeType(std::forward<T>(v));
1107 }
1108 static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
1109 static DecomposeType& element(slot_type* slot) { return *slot; }
1110
1111 template <class F, class T>
1112 static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
1113 return std::forward<F>(f)(x, x);
1114 }
1115
1116 template <class Hash>
1117 static constexpr HashSlotFn get_hash_slot_fn() {
1118 return nullptr;
1119 }
1120 };
1121
1122 template <typename Hash, typename Eq>
1123 void TestDecompose(bool construct_three) {
1124 DecomposeType elem{0};
1125 const int one = 1;
1126 const char* three_p = "3";
1127 const auto& three = three_p;
1128 const int elem_vector_count = 256;
1129 std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
1130 std::iota(elem_vector.begin(), elem_vector.end(), 0);
1131
1132 using DecomposeSet =
1133 raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
1134 DecomposeSet set1;
1135
1136 decompose_constructed = 0;
1137 int expected_constructed = 0;
1138 EXPECT_EQ(expected_constructed, decompose_constructed);
1139 set1.insert(elem);
1140 EXPECT_EQ(expected_constructed, decompose_constructed);
1141 set1.insert(1);
1142 EXPECT_EQ(++expected_constructed, decompose_constructed);
1143 set1.emplace("3");
1144 EXPECT_EQ(++expected_constructed, decompose_constructed);
1145 EXPECT_EQ(expected_constructed, decompose_constructed);
1146
1147 { // insert(T&&)
1148 set1.insert(1);
1149 EXPECT_EQ(expected_constructed, decompose_constructed);
1150 }
1151
1152 { // insert(const T&)
1153 set1.insert(one);
1154 EXPECT_EQ(expected_constructed, decompose_constructed);
1155 }
1156
1157 { // insert(hint, T&&)
1158 set1.insert(set1.begin(), 1);
1159 EXPECT_EQ(expected_constructed, decompose_constructed);
1160 }
1161
1162 { // insert(hint, const T&)
1163 set1.insert(set1.begin(), one);
1164 EXPECT_EQ(expected_constructed, decompose_constructed);
1165 }
1166
1167 { // emplace(...)
1168 set1.emplace(1);
1169 EXPECT_EQ(expected_constructed, decompose_constructed);
1170 set1.emplace("3");
1171 expected_constructed += construct_three;
1172 EXPECT_EQ(expected_constructed, decompose_constructed);
1173 set1.emplace(one);
1174 EXPECT_EQ(expected_constructed, decompose_constructed);
1175 set1.emplace(three);
1176 expected_constructed += construct_three;
1177 EXPECT_EQ(expected_constructed, decompose_constructed);
1178 }
1179
1180 { // emplace_hint(...)
1181 set1.emplace_hint(set1.begin(), 1);
1182 EXPECT_EQ(expected_constructed, decompose_constructed);
1183 set1.emplace_hint(set1.begin(), "3");
1184 expected_constructed += construct_three;
1185 EXPECT_EQ(expected_constructed, decompose_constructed);
1186 set1.emplace_hint(set1.begin(), one);
1187 EXPECT_EQ(expected_constructed, decompose_constructed);
1188 set1.emplace_hint(set1.begin(), three);
1189 expected_constructed += construct_three;
1190 EXPECT_EQ(expected_constructed, decompose_constructed);
1191 }
1192
1193 decompose_copy_constructed = 0;
1194 decompose_copy_assigned = 0;
1195 decompose_move_constructed = 0;
1196 decompose_move_assigned = 0;
1197 int expected_copy_constructed = 0;
1198 int expected_move_constructed = 0;
1199 { // raw_hash_set(first, last) with random-access iterators
1200 DecomposeSet set2(elem_vector.begin(), elem_vector.end());
1201 // Expect exactly one copy-constructor call for each element if no
1202 // rehashing is done.
1203 expected_copy_constructed += elem_vector_count;
1204 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
1205 EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
1206 EXPECT_EQ(0, decompose_move_assigned);
1207 EXPECT_EQ(0, decompose_copy_assigned);
1208 }
1209
1210 { // raw_hash_set(first, last) with forward iterators
1211 std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
1212 expected_copy_constructed = decompose_copy_constructed;
1213 DecomposeSet set2(elem_list.begin(), elem_list.end());
1214 // Expect exactly N elements copied into set, expect at most 2*N elements
1215 // moving internally for all resizing needed (for a growth factor of 2).
1216 expected_copy_constructed += elem_vector_count;
1217 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
1218 expected_move_constructed += elem_vector_count;
1219 EXPECT_LT(expected_move_constructed, decompose_move_constructed);
1220 expected_move_constructed += elem_vector_count;
1221 EXPECT_GE(expected_move_constructed, decompose_move_constructed);
1222 EXPECT_EQ(0, decompose_move_assigned);
1223 EXPECT_EQ(0, decompose_copy_assigned);
1224 expected_copy_constructed = decompose_copy_constructed;
1225 expected_move_constructed = decompose_move_constructed;
1226 }
1227
1228 { // insert(first, last)
1229 DecomposeSet set2;
1230 set2.insert(elem_vector.begin(), elem_vector.end());
1231 // Expect exactly N elements copied into set, expect at most 2*N elements
1232 // moving internally for all resizing needed (for a growth factor of 2).
1233 const int expected_new_elements = elem_vector_count;
1234 const int expected_max_element_moves = 2 * elem_vector_count;
1235 expected_copy_constructed += expected_new_elements;
1236 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
1237 expected_move_constructed += expected_max_element_moves;
1238 EXPECT_GE(expected_move_constructed, decompose_move_constructed);
1239 EXPECT_EQ(0, decompose_move_assigned);
1240 EXPECT_EQ(0, decompose_copy_assigned);
1241 expected_copy_constructed = decompose_copy_constructed;
1242 expected_move_constructed = decompose_move_constructed;
1243 }
1244 }
1245
1246 TEST(Table, Decompose) {
1247 if (SwisstableGenerationsEnabled()) {
1248 GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1249 }
1250
1251 TestDecompose<DecomposeHash, DecomposeEq>(false);
1252
1253 struct TransparentHashIntOverload {
1254 size_t operator()(const DecomposeType& a) const { return a.i; }
1255 size_t operator()(int a) const { return a; }
1256 };
1257 struct TransparentEqIntOverload {
1258 bool operator()(const DecomposeType& a, const DecomposeType& b) const {
1259 return a.i == b.i;
1260 }
1261 bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
1262 };
1263 TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
1264 TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
1265 TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
1266 }
1267
1268 // Returns the largest m such that a table with m elements has the same number
1269 // of buckets as a table with n elements.
1270 size_t MaxDensitySize(size_t n) {
1271 IntTable t;
1272 t.reserve(n);
1273 for (size_t i = 0; i != n; ++i) t.emplace(i);
1274 const size_t c = t.bucket_count();
1275 while (c == t.bucket_count()) t.emplace(n++);
1276 return t.size() - 1;
1277 }
1278
1279 struct Modulo1000Hash {
1280 size_t operator()(int64_t x) const { return static_cast<size_t>(x) % 1000; }
1281 };
1282
1283 struct Modulo1000HashTable
1284 : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
1285 std::allocator<int>> {};
1286
1287 // Test that a rehash without resizing happens when there are many deleted slots.
1288 TEST(Table, RehashWithNoResize) {
1289 if (SwisstableGenerationsEnabled()) {
1290 GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1291 }
1292
1293 Modulo1000HashTable t;
1294 // Add keys that all share the same hash (multiples of 1000 under
1295 // Modulo1000Hash) so that we get at least kMinFullGroups groups with
1296 // Group::kWidth collisions. Then fill up to MaxDensitySize.
1297 const size_t kMinFullGroups = 7;
1298 std::vector<int> keys;
1299 for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
1300 int k = i * 1000;
1301 t.emplace(k);
1302 keys.push_back(k);
1303 }
1304 const size_t capacity = t.capacity();
1305
1306 // Remove elements from all groups except the first and the last one.
1307 // All elements removed from full groups will be marked as ctrl_t::kDeleted.
1308 const size_t erase_begin = Group::kWidth / 2;
1309 const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
1310 for (size_t i = erase_begin; i < erase_end; ++i) {
1311 EXPECT_EQ(1, t.erase(keys[i])) << i;
1312 }
1313 keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
1314
1315 auto last_key = keys.back();
1316 size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
1317
1318 // Make sure that we have to make a lot of probes for the last key.
1319 ASSERT_GT(last_key_num_probes, kMinFullGroups);
1320
1321 int x = 1;
1322 // Insert and erase one element at a time until an in-place rehash happens.
1323 while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
1324 t.emplace(x);
1325 ASSERT_EQ(capacity, t.capacity());
1326 // All elements should be there.
1327 ASSERT_TRUE(t.find(x) != t.end()) << x;
1328 for (const auto& k : keys) {
1329 ASSERT_TRUE(t.find(k) != t.end()) << k;
1330 }
1331 t.erase(x);
1332 ++x;
1333 }
1334 }
1335
1336 TYPED_TEST(SooTest, InsertEraseStressTest) {
1337 TypeParam t;
1338 const size_t kMinElementCount = 250;
1339 std::deque<int> keys;
1340 size_t i = 0;
1341 for (; i < MaxDensitySize(kMinElementCount); ++i) {
1342 t.emplace(i);
1343 keys.push_back(i);
1344 }
1345 const size_t kNumIterations = 1000000;
1346 for (; i < kNumIterations; ++i) {
1347 ASSERT_EQ(1, t.erase(keys.front()));
1348 keys.pop_front();
1349 t.emplace(i);
1350 keys.push_back(i);
1351 }
1352 }
1353
1354 TEST(Table, InsertOverloads) {
1355 StringTable t;
1356 // These should all trigger the insert(init_type) overload.
1357 t.insert({{}, {}});
1358 t.insert({"ABC", {}});
1359 t.insert({"DEF", "!!!"});
1360
1361 EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
1362 Pair("DEF", "!!!")));
1363 }
1364
1365 TYPED_TEST(SooTest, LargeTable) {
1366 TypeParam t;
1367 for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
1368 for (int64_t i = 0; i != 100000; ++i)
1369 ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40)));
1370 }
1371
1372 // Times out if copying is quadratic, as it once was in Rust.
1373 TYPED_TEST(SooTest, EnsureNonQuadraticAsInRust) {
1374 static const size_t kLargeSize = 1 << 15;
1375
1376 TypeParam t;
1377 for (size_t i = 0; i != kLargeSize; ++i) {
1378 t.insert(i);
1379 }
1380
1381 // If this is quadratic, the test will time out.
1382 TypeParam t2;
1383 for (const auto& entry : t) t2.insert(entry);
1384 }
1385
1386 TYPED_TEST(SooTest, ClearBug) {
1387 if (SwisstableGenerationsEnabled()) {
1388 GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
1389 }
1390
1391 TypeParam t;
1392 constexpr size_t capacity = container_internal::Group::kWidth - 1;
1393 constexpr size_t max_size = capacity / 2 + 1;
1394 for (size_t i = 0; i < max_size; ++i) {
1395 t.insert(i);
1396 }
1397 ASSERT_EQ(capacity, t.capacity());
1398 intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
1399 t.clear();
1400 ASSERT_EQ(capacity, t.capacity());
1401 for (size_t i = 0; i < max_size; ++i) {
1402 t.insert(i);
1403 }
1404 ASSERT_EQ(capacity, t.capacity());
1405 intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
1406 // We are checking that original and second are close enough to each other
1407 // that they are probably still in the same group. This is not strictly
1408 // guaranteed.
1409 EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
1410 capacity * sizeof(typename TypeParam::value_type));
1411 }
1412
1413 TYPED_TEST(SooTest, Erase) {
1414 TypeParam t;
1415 EXPECT_TRUE(t.find(0) == t.end());
1416 auto res = t.emplace(0);
1417 EXPECT_TRUE(res.second);
1418 EXPECT_EQ(1, t.size());
1419 t.erase(res.first);
1420 EXPECT_EQ(0, t.size());
1421 EXPECT_TRUE(t.find(0) == t.end());
1422 }
1423
1424 TYPED_TEST(SooTest, EraseMaintainsValidIterator) {
1425 TypeParam t;
1426 const int kNumElements = 100;
1427 for (int i = 0; i < kNumElements; i++) {
1428 EXPECT_TRUE(t.emplace(i).second);
1429 }
1430 EXPECT_EQ(t.size(), kNumElements);
1431
1432 int num_erase_calls = 0;
1433 auto it = t.begin();
1434 while (it != t.end()) {
1435 t.erase(it++);
1436 num_erase_calls++;
1437 }
1438
1439 EXPECT_TRUE(t.empty());
1440 EXPECT_EQ(num_erase_calls, kNumElements);
1441 }
1442
1443 TYPED_TEST(SooTest, EraseBeginEnd) {
1444 TypeParam t;
1445 for (int i = 0; i < 10; ++i) t.insert(i);
1446 EXPECT_EQ(t.size(), 10);
1447 t.erase(t.begin(), t.end());
1448 EXPECT_EQ(t.size(), 0);
1449 }
1450
1451 // Collect N bad keys using the following algorithm:
1452 // 1. Create an empty table and reserve it to 2 * N.
1453 // 2. Insert N elements.
1454 // 3. Take the first Group::kWidth - 1 of them into the bad_keys array.
1455 // 4. Clear the table without resizing.
1456 // 5. Repeat from step 2 until N keys have been collected.
1457 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
1458 static constexpr int kGroupSize = Group::kWidth - 1;
1459
1460 auto topk_range = [](size_t b, size_t e,
1461 IntTable* t) -> std::vector<int64_t> {
1462 for (size_t i = b; i != e; ++i) {
1463 t->emplace(i);
1464 }
1465 std::vector<int64_t> res;
1466 res.reserve(kGroupSize);
1467 auto it = t->begin();
1468 for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
1469 res.push_back(*it);
1470 }
1471 return res;
1472 };
1473
1474 std::vector<int64_t> bad_keys;
1475 bad_keys.reserve(N);
1476 IntTable t;
1477 t.reserve(N * 2);
1478
1479 for (size_t b = 0; bad_keys.size() < N; b += N) {
1480 auto keys = topk_range(b, b + N, &t);
1481 bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
1482 t.erase(t.begin(), t.end());
1483 EXPECT_TRUE(t.empty());
1484 }
1485 return bad_keys;
1486 }
1487
1488 struct ProbeStats {
1489 // Number of elements with specific probe length over all tested tables.
1490 std::vector<size_t> all_probes_histogram;
1491 // Ratios total_probe_length/size for every tested table.
1492 std::vector<double> single_table_ratios;
1493
1494 // Average ratio total_probe_length/size over tables.
1495 double AvgRatio() const {
1496 return std::accumulate(single_table_ratios.begin(),
1497 single_table_ratios.end(), 0.0) /
1498 single_table_ratios.size();
1499 }
1500
1501 // Maximum ratio total_probe_length/size over tables.
1502 double MaxRatio() const {
1503 return *std::max_element(single_table_ratios.begin(),
1504 single_table_ratios.end());
1505 }
1506
1507 // Percentile ratio total_probe_length/size over tables.
1508 double PercentileRatio(double Percentile = 0.95) const {
1509 auto r = single_table_ratios;
1510 auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
1511 if (mid != r.end()) {
1512 std::nth_element(r.begin(), mid, r.end());
1513 return *mid;
1514 } else {
1515 return MaxRatio();
1516 }
1517 }
1518
1519 // Maximum probe length over all elements and all tables.
1520 size_t MaxProbe() const { return all_probes_histogram.size(); }
1521
1522 // Fraction of elements with specified probe length.
1523 std::vector<double> ProbeNormalizedHistogram() const {
1524 double total_elements = std::accumulate(all_probes_histogram.begin(),
1525 all_probes_histogram.end(), 0ull);
1526 std::vector<double> res;
1527 for (size_t p : all_probes_histogram) {
1528 res.push_back(p / total_elements);
1529 }
1530 return res;
1531 }
1532
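  // Smallest probe length p such that roughly `Percentile` of all elements
  // (across all tables) have probe length <= p, computed by walking the
  // normalized histogram.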
1533 size_t PercentileProbe(double Percentile = 0.99) const {
1534 size_t idx = 0;
1535 for (double p : ProbeNormalizedHistogram()) {
1536 if (Percentile > p) {
1537 Percentile -= p;
1538 ++idx;
1539 } else {
1540 return idx;
1541 }
1542 }
1543 return idx;
1544 }
1545
1546 friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
1547 out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
1548 << ", PercentileRatio:" << s.PercentileRatio()
1549 << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
1550 for (double p : s.ProbeNormalizedHistogram()) {
1551 out << p << ",";
1552 }
1553 out << "]}";
1554
1555 return out;
1556 }
1557 };
1558
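// Upper bounds that the collected ProbeStats are checked against in
// VerifyStats below.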
1559 struct ExpectedStats {
1560 double avg_ratio;
1561 double max_ratio;
1562 std::vector<std::pair<double, double>> percentile_ratios;
1563 std::vector<std::pair<double, double>> percentile_probes;
1564
1565 friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
1566 out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
1567 << ", PercentileRatios: [";
1568 for (auto el : s.percentile_ratios) {
1569 out << el.first << ":" << el.second << ", ";
1570 }
1571 out << "], PercentileProbes: [";
1572 for (auto el : s.percentile_probes) {
1573 out << el.first << ":" << el.second << ", ";
1574 }
1575 out << "]}";
1576
1577 return out;
1578 }
1579 };
1580
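// Checks the collected ProbeStats against the expected bounds; `size` is only
// used to annotate failure messages.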
1581 void VerifyStats(size_t size, const ExpectedStats& exp,
1582 const ProbeStats& stats) {
1583 EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
1584 EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
1585 for (auto pr : exp.percentile_ratios) {
1586 EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
1587 << size << " " << pr.first << " " << stats;
1588 }
1589
1590 for (auto pr : exp.percentile_probes) {
1591 EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
1592 << size << " " << pr.first << " " << stats;
1593 }
1594 }
1595
1596 using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
1597
1598 // Collect aggregate ProbeStats over num_iters iterations of the following algorithm:
1599 // 1. Create a new table and reserve it for keys.size() * 2 elements.
1600 // 2. Insert all keys XORed with a per-iteration seed.
1601 // 3. Collect ProbeStats from the final table.
1602 ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
1603 const std::vector<int64_t>& keys, size_t num_iters) {
1604 const size_t reserve_size = keys.size() * 2;
1605
1606 ProbeStats stats;
1607
1608 int64_t seed = 0x71b1a19b907d6e33;
1609 while (num_iters--) {
1610 seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
1611 IntTable t1;
1612 t1.reserve(reserve_size);
1613 for (const auto& key : keys) {
1614 t1.emplace(key ^ seed);
1615 }
1616
1617 auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1618 stats.all_probes_histogram.resize(
1619 std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1620 std::transform(probe_histogram.begin(), probe_histogram.end(),
1621 stats.all_probes_histogram.begin(),
1622 stats.all_probes_histogram.begin(), std::plus<size_t>());
1623
1624 size_t total_probe_seq_length = 0;
1625 for (size_t i = 0; i < probe_histogram.size(); ++i) {
1626 total_probe_seq_length += i * probe_histogram[i];
1627 }
1628 stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1629 keys.size());
1630 t1.erase(t1.begin(), t1.end());
1631 }
1632 return stats;
1633 }
1634
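// Expected probe-statistics bounds for the XOR-seed workload above. The
// bounds depend on the SIMD group width and on whether insertion order is
// randomized (which happens in non-opt builds).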
1635 ExpectedStats XorSeedExpectedStats() {
1636 constexpr bool kRandomizesInserts =
1637 #ifdef NDEBUG
1638 false;
1639 #else // NDEBUG
1640 true;
1641 #endif // NDEBUG
1642
1643 // The effective load factor is larger in non-opt mode because we insert
1644 // elements out of order.
1645 switch (container_internal::Group::kWidth) {
1646 case 8:
1647 if (kRandomizesInserts) {
1648 return {0.05,
1649 1.0,
1650 {{0.95, 0.5}},
1651 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1652 } else {
1653 return {0.05,
1654 2.0,
1655 {{0.95, 0.1}},
1656 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1657 }
1658 case 16:
1659 if (kRandomizesInserts) {
1660 return {0.1,
1661 2.0,
1662 {{0.95, 0.1}},
1663 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1664 } else {
1665 return {0.05,
1666 1.0,
1667 {{0.95, 0.05}},
1668 {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
1669 }
1670 }
1671 LOG(FATAL) << "Unknown Group width";
1672 return {};
1673 }
1674
1675 // TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
1676 TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
1677 ProbeStatsPerSize stats;
1678 std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1679 for (size_t size : sizes) {
1680 stats[size] =
1681 CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
1682 }
1683 auto expected = XorSeedExpectedStats();
1684 for (size_t size : sizes) {
1685 auto& stat = stats[size];
1686 VerifyStats(size, expected, stat);
1687 LOG(INFO) << size << " " << stat;
1688 }
1689 }
1690
1691 // Collect aggregate ProbeStats over num_iters iterations of the following algorithm:
1692 // 1. Create a new table.
1693 // 2. Pick a random contiguous 10% window of the keys and, for each key k in
1694 //    it, insert the 10 elements k * 17 + j * 13 for j in [0, 10).
1695 // 3. Collect ProbeStats from the final table.
1695 ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
1696 const std::vector<int64_t>& keys, size_t num_iters) {
1697 ProbeStats stats;
1698
1699 std::random_device rd;
1700 std::mt19937 rng(rd());
1701 auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
1702 std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
1703 while (num_iters--) {
1704 IntTable t1;
1705 size_t num_keys = keys.size() / 10;
1706 size_t start = dist(rng);
1707 for (size_t i = 0; i != num_keys; ++i) {
1708 for (size_t j = 0; j != 10; ++j) {
1709 t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
1710 }
1711 }
1712
1713 auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1714 stats.all_probes_histogram.resize(
1715 std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1716 std::transform(probe_histogram.begin(), probe_histogram.end(),
1717 stats.all_probes_histogram.begin(),
1718 stats.all_probes_histogram.begin(), std::plus<size_t>());
1719
1720 size_t total_probe_seq_length = 0;
1721 for (size_t i = 0; i < probe_histogram.size(); ++i) {
1722 total_probe_seq_length += i * probe_histogram[i];
1723 }
1724 stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1725 t1.size());
1726 t1.erase(t1.begin(), t1.end());
1727 }
1728 return stats;
1729 }
1730
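// Expected probe-statistics bounds for the linearly-transformed-keys workload
// above, again split by group width and insertion-order randomization.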
1731 ExpectedStats LinearTransformExpectedStats() {
1732 constexpr bool kRandomizesInserts =
1733 #ifdef NDEBUG
1734 false;
1735 #else // NDEBUG
1736 true;
1737 #endif // NDEBUG
1738
1739 // The effective load factor is larger in non-opt mode because we insert
1740 // elements out of order.
1741 switch (container_internal::Group::kWidth) {
1742 case 8:
1743 if (kRandomizesInserts) {
1744 return {0.1,
1745 0.5,
1746 {{0.95, 0.3}},
1747 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1748 } else {
1749 return {0.4,
1750 0.6,
1751 {{0.95, 0.5}},
1752 {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
1753 }
1754 case 16:
1755 if (kRandomizesInserts) {
1756 return {0.1,
1757 0.4,
1758 {{0.95, 0.3}},
1759 {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
1760 } else {
1761 return {0.05,
1762 0.2,
1763 {{0.95, 0.1}},
1764 {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
1765 }
1766 }
1767 LOG(FATAL) << "Unknown Group width";
1768 return {};
1769 }
1770
1771 // TODO(b/80415403): Figure out why this test is so flaky.
1772 TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
1773 ProbeStatsPerSize stats;
1774 std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1775 for (size_t size : sizes) {
1776 stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
1777 CollectBadMergeKeys(size), 300);
1778 }
1779 auto expected = LinearTransformExpectedStats();
1780 for (size_t size : sizes) {
1781 auto& stat = stats[size];
1782 VerifyStats(size, expected, stat);
1783 LOG(INFO) << size << " " << stat;
1784 }
1785 }
1786
1787 TEST(Table, EraseCollision) {
1788 BadTable t;
1789
1790 // 1 2 3
1791 t.emplace(1);
1792 t.emplace(2);
1793 t.emplace(3);
1794 EXPECT_THAT(*t.find(1), 1);
1795 EXPECT_THAT(*t.find(2), 2);
1796 EXPECT_THAT(*t.find(3), 3);
1797 EXPECT_EQ(3, t.size());
1798
1799 // 1 DELETED 3
1800 t.erase(t.find(2));
1801 EXPECT_THAT(*t.find(1), 1);
1802 EXPECT_TRUE(t.find(2) == t.end());
1803 EXPECT_THAT(*t.find(3), 3);
1804 EXPECT_EQ(2, t.size());
1805
1806 // DELETED DELETED 3
1807 t.erase(t.find(1));
1808 EXPECT_TRUE(t.find(1) == t.end());
1809 EXPECT_TRUE(t.find(2) == t.end());
1810 EXPECT_THAT(*t.find(3), 3);
1811 EXPECT_EQ(1, t.size());
1812
1813 // DELETED DELETED DELETED
1814 t.erase(t.find(3));
1815 EXPECT_TRUE(t.find(1) == t.end());
1816 EXPECT_TRUE(t.find(2) == t.end());
1817 EXPECT_TRUE(t.find(3) == t.end());
1818 EXPECT_EQ(0, t.size());
1819 }
1820
1821 TEST(Table, EraseInsertProbing) {
1822 BadTable t(100);
1823
1824 // 1 2 3 4
1825 t.emplace(1);
1826 t.emplace(2);
1827 t.emplace(3);
1828 t.emplace(4);
1829
1830 // 1 DELETED 3 DELETED
1831 t.erase(t.find(2));
1832 t.erase(t.find(4));
1833
1834 // 1 10 3 11 12
1835 t.emplace(10);
1836 t.emplace(11);
1837 t.emplace(12);
1838
1839 EXPECT_EQ(5, t.size());
1840 EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
1841 }
1842
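// Verifies that erasing an element records a tombstone and clears the
// "has no deleted" growth-info bit, and that rehash(0) removes the tombstones
// and restores it.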
1843 TEST(Table, GrowthInfoDeletedBit) {
1844 BadTable t;
1845 EXPECT_TRUE(
1846 RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1847 int64_t init_count = static_cast<int64_t>(
1848 CapacityToGrowth(NormalizeCapacity(Group::kWidth + 1)));
1849 for (int64_t i = 0; i < init_count; ++i) {
1850 t.insert(i);
1851 }
1852 EXPECT_TRUE(
1853 RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1854 t.erase(0);
1855 EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 1);
1856 EXPECT_FALSE(
1857 RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1858 t.rehash(0);
1859 EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
1860 EXPECT_TRUE(
1861 RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
1862 }
1863
1864 TYPED_TEST(SooTest, Clear) {
1865 TypeParam t;
1866 EXPECT_TRUE(t.find(0) == t.end());
1867 t.clear();
1868 EXPECT_TRUE(t.find(0) == t.end());
1869 auto res = t.emplace(0);
1870 EXPECT_TRUE(res.second);
1871 EXPECT_EQ(1, t.size());
1872 t.clear();
1873 EXPECT_EQ(0, t.size());
1874 EXPECT_TRUE(t.find(0) == t.end());
1875 }
1876
1877 TYPED_TEST(SooTest, Swap) {
1878 TypeParam t;
1879 EXPECT_TRUE(t.find(0) == t.end());
1880 auto res = t.emplace(0);
1881 EXPECT_TRUE(res.second);
1882 EXPECT_EQ(1, t.size());
1883 TypeParam u;
1884 t.swap(u);
1885 EXPECT_EQ(0, t.size());
1886 EXPECT_EQ(1, u.size());
1887 EXPECT_TRUE(t.find(0) == t.end());
1888 EXPECT_THAT(*u.find(0), 0);
1889 }
1890
1891 TYPED_TEST(SooTest, Rehash) {
1892 TypeParam t;
1893 EXPECT_TRUE(t.find(0) == t.end());
1894 t.emplace(0);
1895 t.emplace(1);
1896 EXPECT_EQ(2, t.size());
1897 t.rehash(128);
1898 EXPECT_EQ(2, t.size());
1899 EXPECT_THAT(*t.find(0), 0);
1900 EXPECT_THAT(*t.find(1), 1);
1901 }
1902
1903 TYPED_TEST(SooTest, RehashDoesNotRehashWhenNotNecessary) {
1904 TypeParam t;
1905 t.emplace(0);
1906 t.emplace(1);
1907 auto* p = &*t.find(0);
1908 t.rehash(1);
1909 EXPECT_EQ(p, &*t.find(0));
1910 }
1911
1912 // The following two tests use a non-SOO table because they test for zero capacity.
1913 TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
1914 NonSooIntTable t;
1915 t.rehash(0);
1916 EXPECT_EQ(0, t.bucket_count());
1917 }
1918
1919 TEST(Table, RehashZeroDeallocatesEmptyTable) {
1920 NonSooIntTable t;
1921 t.emplace(0);
1922 t.clear();
1923 EXPECT_NE(0, t.bucket_count());
1924 t.rehash(0);
1925 EXPECT_EQ(0, t.bucket_count());
1926 }
1927
1928 TYPED_TEST(SooTest, RehashZeroForcesRehash) {
1929 TypeParam t;
1930 t.emplace(0);
1931 t.emplace(1);
1932 auto* p = &*t.find(0);
1933 t.rehash(0);
1934 EXPECT_NE(p, &*t.find(0));
1935 }
1936
1937 TEST(Table, ConstructFromInitList) {
1938 using P = std::pair<std::string, std::string>;
1939 struct Q {
1940 operator P() const { return {}; } // NOLINT
1941 };
1942 StringTable t = {P(), Q(), {}, {{}, {}}};
1943 }
1944
1945 TYPED_TEST(SooTest, CopyConstruct) {
1946 TypeParam t;
1947 t.emplace(0);
1948 EXPECT_EQ(1, t.size());
1949 {
1950 TypeParam u(t);
1951 EXPECT_EQ(1, u.size());
1952 EXPECT_THAT(*u.find(0), 0);
1953 }
1954 {
1955 TypeParam u{t};
1956 EXPECT_EQ(1, u.size());
1957 EXPECT_THAT(*u.find(0), 0);
1958 }
1959 {
1960 TypeParam u = t;
1961 EXPECT_EQ(1, u.size());
1962 EXPECT_THAT(*u.find(0), 0);
1963 }
1964 }
1965
1966 TYPED_TEST(SooTest, CopyDifferentSizes) {
1967 TypeParam t;
1968
1969 for (int i = 0; i < 100; ++i) {
1970 t.emplace(i);
1971 TypeParam c = t;
1972 for (int j = 0; j <= i; ++j) {
1973 ASSERT_TRUE(c.find(j) != c.end()) << "i=" << i << " j=" << j;
1974 }
1975 // Testing find miss to verify that table is not full.
1976 ASSERT_TRUE(c.find(-1) == c.end());
1977 }
1978 }
1979
1980 TYPED_TEST(SooTest, CopyDifferentCapacities) {
1981 for (int cap = 1; cap < 100; cap = cap * 2 + 1) {
1982 TypeParam t;
1983 t.reserve(static_cast<size_t>(cap));
1984 for (int i = 0; i <= cap; ++i) {
1985 t.emplace(i);
1986 if (i != cap && i % 5 != 0) {
1987 continue;
1988 }
1989 TypeParam c = t;
1990 for (int j = 0; j <= i; ++j) {
1991 ASSERT_TRUE(c.find(j) != c.end())
1992 << "cap=" << cap << " i=" << i << " j=" << j;
1993 }
1994 // Testing find miss to verify that table is not full.
1995 ASSERT_TRUE(c.find(-1) == c.end());
1996 }
1997 }
1998 }
1999
2000 TEST(Table, CopyConstructWithAlloc) {
2001 StringTable t;
2002 t.emplace("a", "b");
2003 EXPECT_EQ(1, t.size());
2004 StringTable u(t, Alloc<std::pair<std::string, std::string>>());
2005 EXPECT_EQ(1, u.size());
2006 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2007 }
2008
2009 struct ExplicitAllocIntTable
2010 : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
2011 std::equal_to<int64_t>, Alloc<int64_t>> {
2012 ExplicitAllocIntTable() = default;
2013 };
2014
2015 TEST(Table, AllocWithExplicitCtor) {
2016 ExplicitAllocIntTable t;
2017 EXPECT_EQ(0, t.size());
2018 }
2019
2020 TEST(Table, MoveConstruct) {
2021 {
2022 StringTable t;
2023 t.emplace("a", "b");
2024 EXPECT_EQ(1, t.size());
2025
2026 StringTable u(std::move(t));
2027 EXPECT_EQ(1, u.size());
2028 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2029 }
2030 {
2031 StringTable t;
2032 t.emplace("a", "b");
2033 EXPECT_EQ(1, t.size());
2034
2035 StringTable u{std::move(t)};
2036 EXPECT_EQ(1, u.size());
2037 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2038 }
2039 {
2040 StringTable t;
2041 t.emplace("a", "b");
2042 EXPECT_EQ(1, t.size());
2043
2044 StringTable u = std::move(t);
2045 EXPECT_EQ(1, u.size());
2046 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2047 }
2048 }
2049
2050 TEST(Table, MoveConstructWithAlloc) {
2051 StringTable t;
2052 t.emplace("a", "b");
2053 EXPECT_EQ(1, t.size());
2054 StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
2055 EXPECT_EQ(1, u.size());
2056 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2057 }
2058
2059 TEST(Table, CopyAssign) {
2060 StringTable t;
2061 t.emplace("a", "b");
2062 EXPECT_EQ(1, t.size());
2063 StringTable u;
2064 u = t;
2065 EXPECT_EQ(1, u.size());
2066 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2067 }
2068
2069 TEST(Table, CopySelfAssign) {
2070 StringTable t;
2071 t.emplace("a", "b");
2072 EXPECT_EQ(1, t.size());
2073 t = *&t;
2074 EXPECT_EQ(1, t.size());
2075 EXPECT_THAT(*t.find("a"), Pair("a", "b"));
2076 }
2077
2078 TEST(Table, MoveAssign) {
2079 StringTable t;
2080 t.emplace("a", "b");
2081 EXPECT_EQ(1, t.size());
2082 StringTable u;
2083 u = std::move(t);
2084 EXPECT_EQ(1, u.size());
2085 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
2086 }
2087
2088 TEST(Table, MoveSelfAssign) {
2089 StringTable t;
2090 t.emplace("a", "b");
2091 EXPECT_EQ(1, t.size());
2092 t = std::move(*&t);
2093 // As long as we don't crash, it's fine.
2094 }
2095
2096 TEST(Table, Equality) {
2097 StringTable t;
2098 std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
2099 {"aa", "bb"}};
2100 t.insert(std::begin(v), std::end(v));
2101 StringTable u = t;
2102 EXPECT_EQ(u, t);
2103 }
2104
2105 TEST(Table, Equality2) {
2106 StringTable t;
2107 std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
2108 {"aa", "bb"}};
2109 t.insert(std::begin(v1), std::end(v1));
2110 StringTable u;
2111 std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
2112 {"aa", "aa"}};
2113 u.insert(std::begin(v2), std::end(v2));
2114 EXPECT_NE(u, t);
2115 }
2116
2117 TEST(Table, Equality3) {
2118 StringTable t;
2119 std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
2120 {"bb", "bb"}};
2121 t.insert(std::begin(v1), std::end(v1));
2122 StringTable u;
2123 std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
2124 {"aa", "aa"}};
2125 u.insert(std::begin(v2), std::end(v2));
2126 EXPECT_NE(u, t);
2127 }
2128
2129 TYPED_TEST(SooTest, NumDeletedRegression) {
2130 TypeParam t;
2131 t.emplace(0);
2132 t.erase(t.find(0));
2133 // construct over a deleted slot.
2134 t.emplace(0);
2135 t.clear();
2136 }
2137
2138 TYPED_TEST(SooTest, FindFullDeletedRegression) {
2139 TypeParam t;
2140 for (int i = 0; i < 1000; ++i) {
2141 t.emplace(i);
2142 t.erase(t.find(i));
2143 }
2144 EXPECT_EQ(0, t.size());
2145 }
2146
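// Inserting into a slot that was just erased should reuse the deleted slot
// rather than trigger growth, so bucket_count() stays unchanged.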
2147 TYPED_TEST(SooTest, ReplacingDeletedSlotDoesNotRehash) {
2148 // We need to disable hashtablez to avoid issues related to SOO and sampling.
2149 SetHashtablezEnabled(false);
2150
2151 size_t n;
2152 {
2153 // Compute n such that n is the maximum number of elements before rehash.
2154 TypeParam t;
2155 t.emplace(0);
2156 size_t c = t.bucket_count();
2157 for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
2158 --n;
2159 }
2160 TypeParam t;
2161 t.rehash(n);
2162 const size_t c = t.bucket_count();
2163 for (size_t i = 0; i != n; ++i) t.emplace(i);
2164 EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
2165 t.erase(0);
2166 t.emplace(0);
2167 EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
2168 }
2169
2170 TEST(Table, NoThrowMoveConstruct) {
2171 ASSERT_TRUE(
2172 std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
2173 ASSERT_TRUE(std::is_nothrow_copy_constructible<
2174 std::equal_to<absl::string_view>>::value);
2175 ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
2176 EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
2177 }
2178
2179 TEST(Table, NoThrowMoveAssign) {
2180 ASSERT_TRUE(
2181 std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
2182 ASSERT_TRUE(
2183 std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
2184 ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
2185 ASSERT_TRUE(
2186 absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
2187 EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
2188 }
2189
2190 TEST(Table, NoThrowSwappable) {
2191 ASSERT_TRUE(
2192 container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
2193 ASSERT_TRUE(container_internal::IsNoThrowSwappable<
2194 std::equal_to<absl::string_view>>());
2195 ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
2196 EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
2197 }
2198
2199 TEST(Table, HeterogeneousLookup) {
2200 struct Hash {
2201 size_t operator()(int64_t i) const { return i; }
2202 size_t operator()(double i) const {
2203 ADD_FAILURE();
2204 return i;
2205 }
2206 };
2207 struct Eq {
2208 bool operator()(int64_t a, int64_t b) const { return a == b; }
2209 bool operator()(double a, int64_t b) const {
2210 ADD_FAILURE();
2211 return a == b;
2212 }
2213 bool operator()(int64_t a, double b) const {
2214 ADD_FAILURE();
2215 return a == b;
2216 }
2217 bool operator()(double a, double b) const {
2218 ADD_FAILURE();
2219 return a == b;
2220 }
2221 };
2222
2223 struct THash {
2224 using is_transparent = void;
2225 size_t operator()(int64_t i) const { return i; }
2226 size_t operator()(double i) const { return i; }
2227 };
2228 struct TEq {
2229 using is_transparent = void;
2230 bool operator()(int64_t a, int64_t b) const { return a == b; }
2231 bool operator()(double a, int64_t b) const { return a == b; }
2232 bool operator()(int64_t a, double b) const { return a == b; }
2233 bool operator()(double a, double b) const { return a == b; }
2234 };
2235
2236 raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
2237 // It will convert to int64_t before the query.
2238 EXPECT_EQ(1, *s.find(double{1.1}));
2239
2240 raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
2241 // It will try to use the double, and fail to find the object.
2242 EXPECT_TRUE(ts.find(1.1) == ts.end());
2243 }
2244
2245 template <class Table>
2246 using CallFind = decltype(std::declval<Table&>().find(17));
2247
2248 template <class Table>
2249 using CallErase = decltype(std::declval<Table&>().erase(17));
2250
2251 template <class Table>
2252 using CallExtract = decltype(std::declval<Table&>().extract(17));
2253
2254 template <class Table>
2255 using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
2256
2257 template <class Table>
2258 using CallCount = decltype(std::declval<Table&>().count(17));
2259
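// Detection idiom: VerifyResultOf<C, Table> is true iff the expression alias
// C<Table> is well-formed, i.e. the corresponding member function accepts an
// int key. This only holds when the hash and equality functors are
// transparent.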
2260 template <template <typename> class C, class Table, class = void>
2261 struct VerifyResultOf : std::false_type {};
2262
2263 template <template <typename> class C, class Table>
2264 struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
2265
2266 TEST(Table, HeterogeneousLookupOverloads) {
2267 using NonTransparentTable =
2268 raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
2269 std::equal_to<absl::string_view>, std::allocator<int>>;
2270
2271 EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
2272 EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
2273 EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
2274 EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
2275 EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
2276
2277 using TransparentTable =
2278 raw_hash_set<StringPolicy, hash_default_hash<absl::string_view>,
2279 hash_default_eq<absl::string_view>, std::allocator<int>>;
2280
2281 EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
2282 EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
2283 EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
2284 EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
2285 EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
2286 }
2287
2288 TEST(Iterator, IsDefaultConstructible) {
2289 StringTable::iterator i;
2290 EXPECT_TRUE(i == StringTable::iterator());
2291 }
2292
2293 TEST(ConstIterator, IsDefaultConstructible) {
2294 StringTable::const_iterator i;
2295 EXPECT_TRUE(i == StringTable::const_iterator());
2296 }
2297
2298 TEST(Iterator, ConvertsToConstIterator) {
2299 StringTable::iterator i;
2300 EXPECT_TRUE(i == StringTable::const_iterator());
2301 }
2302
2303 TEST(Iterator, Iterates) {
2304 IntTable t;
2305 for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
2306 EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
2307 }
2308
2309 TEST(Table, Merge) {
2310 StringTable t1, t2;
2311 t1.emplace("0", "-0");
2312 t1.emplace("1", "-1");
2313 t2.emplace("0", "~0");
2314 t2.emplace("2", "~2");
2315
2316 EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
2317 EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
2318
2319 t1.merge(t2);
2320 EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
2321 Pair("2", "~2")));
2322 EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
2323 }
2324
2325 TEST(Table, IteratorEmplaceConstructibleRequirement) {
2326 struct Value {
2327 explicit Value(absl::string_view view) : value(view) {}
2328 std::string value;
2329
2330 bool operator==(const Value& other) const { return value == other.value; }
2331 };
2332 struct H {
2333 size_t operator()(const Value& v) const {
2334 return absl::Hash<std::string>{}(v.value);
2335 }
2336 };
2337
2338 struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
2339 std::allocator<Value>> {
2340 using Base = typename Table::raw_hash_set;
2341 using Base::Base;
2342 };
2343
2344 std::string input[3]{"A", "B", "C"};
2345
2346 Table t(std::begin(input), std::end(input));
2347 EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
2348
2349 input[0] = "D";
2350 input[1] = "E";
2351 input[2] = "F";
2352 t.insert(std::begin(input), std::end(input));
2353 EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
2354 Value{"D"}, Value{"E"}, Value{"F"}));
2355 }
2356
2357 TEST(Nodes, EmptyNodeType) {
2358 using node_type = StringTable::node_type;
2359 node_type n;
2360 EXPECT_FALSE(n);
2361 EXPECT_TRUE(n.empty());
2362
2363 EXPECT_TRUE((std::is_same<node_type::allocator_type,
2364 StringTable::allocator_type>::value));
2365 }
2366
2367 TEST(Nodes, ExtractInsert) {
2368 constexpr char k0[] = "Very long string zero.";
2369 constexpr char k1[] = "Very long string one.";
2370 constexpr char k2[] = "Very long string two.";
2371 StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
2372 EXPECT_THAT(t,
2373 UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
2374
2375 auto node = t.extract(k0);
2376 EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2377 EXPECT_TRUE(node);
2378 EXPECT_FALSE(node.empty());
2379
2380 StringTable t2;
2381 StringTable::insert_return_type res = t2.insert(std::move(node));
2382 EXPECT_TRUE(res.inserted);
2383 EXPECT_THAT(*res.position, Pair(k0, ""));
2384 EXPECT_FALSE(res.node);
2385 EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
2386
2387 // Not there.
2388 EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2389 node = t.extract("Not there!");
2390 EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
2391 EXPECT_FALSE(node);
2392
2393 // Inserting nothing.
2394 res = t2.insert(std::move(node));
2395 EXPECT_FALSE(res.inserted);
2396 EXPECT_EQ(res.position, t2.end());
2397 EXPECT_FALSE(res.node);
2398 EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
2399
2400 t.emplace(k0, "1");
2401 node = t.extract(k0);
2402
2403 // Insert duplicate.
2404 res = t2.insert(std::move(node));
2405 EXPECT_FALSE(res.inserted);
2406 EXPECT_THAT(*res.position, Pair(k0, ""));
2407 EXPECT_TRUE(res.node);
2408 EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
2409 }
2410
2411 TYPED_TEST(SooTest, HintInsert) {
2412 TypeParam t = {1, 2, 3};
2413 auto node = t.extract(1);
2414 EXPECT_THAT(t, UnorderedElementsAre(2, 3));
2415 auto it = t.insert(t.begin(), std::move(node));
2416 EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
2417 EXPECT_EQ(*it, 1);
2418 EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
2419
2420 node = t.extract(2);
2421 EXPECT_THAT(t, UnorderedElementsAre(1, 3));
2422 // reinsert 2 to make the next insert fail.
2423 t.insert(2);
2424 EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
2425 it = t.insert(t.begin(), std::move(node));
2426 EXPECT_EQ(*it, 2);
2427 // The node was not emptied by the insert call.
2428 EXPECT_TRUE(node); // NOLINT(bugprone-use-after-move)
2429 }
2430
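// Builds a table containing {0, 1, ..., size - 1} by repeated insertion.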
2431 template <typename T>
2432 T MakeSimpleTable(size_t size) {
2433 T t;
2434 while (t.size() < size) t.insert(t.size());
2435 return t;
2436 }
2437
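// Returns the elements of `t` in iteration order.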
2438 template <typename T>
2439 std::vector<int> OrderOfIteration(const T& t) {
2440 std::vector<int> res;
2441 for (auto i : t) res.push_back(static_cast<int>(i));
2442 return res;
2443 }
2444
2445 // These IterationOrderChanges tests depend on non-deterministic behavior.
2446 // We are injecting non-determinism from the pointer of the table, but do so in
2447 // a way that only the page matters. We have to retry enough times to make sure
2448 // we are touching different memory pages to cause the ordering to change.
2449 // We also need to keep the old tables around to avoid getting the same memory
2450 // blocks over and over.
2451 TYPED_TEST(SooTest, IterationOrderChangesByInstance) {
2452 for (size_t size : {2, 6, 12, 20}) {
2453 const auto reference_table = MakeSimpleTable<TypeParam>(size);
2454 const auto reference = OrderOfIteration(reference_table);
2455
2456 std::vector<TypeParam> tables;
2457 bool found_difference = false;
2458 for (int i = 0; !found_difference && i < 5000; ++i) {
2459 tables.push_back(MakeSimpleTable<TypeParam>(size));
2460 found_difference = OrderOfIteration(tables.back()) != reference;
2461 }
2462 if (!found_difference) {
2463 FAIL()
2464 << "Iteration order remained the same across many attempts with size "
2465 << size;
2466 }
2467 }
2468 }
2469
2470 TYPED_TEST(SooTest, IterationOrderChangesOnRehash) {
2471 // We test different sizes, with an emphasis on small ones, because
2472 // small-table resize has a different codepath.
2473 // Note: iteration order for size() <= 1 is always the same.
2474 for (size_t size : std::vector<size_t>{2, 3, 6, 7, 12, 15, 20, 50}) {
2475 for (size_t rehash_size : {
2476 size_t{0}, // A forced rehash is guaranteed.
2477 size * 10 // A rehash to a larger capacity is guaranteed.
2478 }) {
2479 std::vector<TypeParam> garbage;
2480 bool ok = false;
2481 for (int i = 0; i < 5000; ++i) {
2482 auto t = MakeSimpleTable<TypeParam>(size);
2483 const auto reference = OrderOfIteration(t);
2484 // Force rehash.
2485 t.rehash(rehash_size);
2486 auto trial = OrderOfIteration(t);
2487 if (trial != reference) {
2488 // We are done.
2489 ok = true;
2490 break;
2491 }
2492 garbage.push_back(std::move(t));
2493 }
2494 EXPECT_TRUE(ok)
2495 << "Iteration order remained the same across many attempts " << size
2496 << "->" << rehash_size << ".";
2497 }
2498 }
2499 }
2500
2501 // Verify that pointers are invalidated as soon as a second element is inserted.
2502 // This prevents dependency on pointer stability on small tables.
2503 TYPED_TEST(SooTest, UnstablePointers) {
2504 // We need to disable hashtablez to avoid issues related to SOO and sampling.
2505 SetHashtablezEnabled(false);
2506
2507 TypeParam table;
2508
2509 const auto addr = [&](int i) {
2510 return reinterpret_cast<uintptr_t>(&*table.find(i));
2511 };
2512
2513 table.insert(0);
2514 const uintptr_t old_ptr = addr(0);
2515
2516 // This causes a rehash.
2517 table.insert(1);
2518
2519 EXPECT_NE(old_ptr, addr(0));
2520 }
2521
2522 TEST(TableDeathTest, InvalidIteratorAsserts) {
2523 if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2524 GTEST_SKIP() << "Assertions not enabled.";
2525
2526 NonSooIntTable t;
2527 // Extra simple "regexp" as regexp support is highly varied across platforms.
2528 EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
2529 "erase.* called on end.. iterator.");
2530 typename NonSooIntTable::iterator iter;
2531 EXPECT_DEATH_IF_SUPPORTED(
2532 ++iter, "operator.* called on default-constructed iterator.");
2533 t.insert(0);
2534 iter = t.begin();
2535 t.erase(iter);
2536 const char* const kErasedDeathMessage =
2537 SwisstableGenerationsEnabled()
2538 ? "operator.* called on invalid iterator.*was likely erased"
2539 : "operator.* called on invalid iterator.*might have been "
2540 "erased.*config=asan";
2541 EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
2542 }
2543
2544 TEST(TableDeathTest, InvalidIteratorAssertsSoo) {
2545 if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2546 GTEST_SKIP() << "Assertions not enabled.";
2547
2548 SooIntTable t;
2549 // Extra simple "regexp" as regexp support is highly varied across platforms.
2550 EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
2551 "erase.* called on end.. iterator.");
2552 typename SooIntTable::iterator iter;
2553 EXPECT_DEATH_IF_SUPPORTED(
2554 ++iter, "operator.* called on default-constructed iterator.");
2555
2556 // We can't detect the erased-iterator case as invalid in SOO mode because
2557 // the control bytes are a static constant.
2558 }
2559
2560 // Invalid iterator use can trigger use-after-free in asan/hwasan,
2561 // use-of-uninitialized-value in msan, or invalidated iterator assertions.
2562 constexpr const char* kInvalidIteratorDeathMessage =
2563 "use-after-free|use-of-uninitialized-value|invalidated "
2564 "iterator|Invalid iterator|invalid iterator";
2565
2566 // MSVC doesn't support | in regex.
2567 #if defined(_MSC_VER)
2568 constexpr bool kMsvc = true;
2569 #else
2570 constexpr bool kMsvc = false;
2571 #endif
2572
2573 TYPED_TEST(SooTest, IteratorInvalidAssertsEqualityOperator) {
2574 if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2575 GTEST_SKIP() << "Assertions not enabled.";
2576
2577 TypeParam t;
2578 t.insert(1);
2579 t.insert(2);
2580 t.insert(3);
2581 auto iter1 = t.begin();
2582 auto iter2 = std::next(iter1);
2583 ASSERT_NE(iter1, t.end());
2584 ASSERT_NE(iter2, t.end());
2585 t.erase(iter1);
2586 // Extra simple "regexp" as regexp support is highly varied across platforms.
2587 const char* const kErasedDeathMessage =
2588 SwisstableGenerationsEnabled()
2589 ? "Invalid iterator comparison.*was likely erased"
2590 : "Invalid iterator comparison.*might have been erased.*config=asan";
2591 EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
2592 EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
2593 t.erase(iter2);
2594 EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
2595
2596 TypeParam t1, t2;
2597 t1.insert(0);
2598 t2.insert(0);
2599 iter1 = t1.begin();
2600 iter2 = t2.begin();
2601 const char* const kContainerDiffDeathMessage =
2602 SwisstableGenerationsEnabled()
2603 ? "Invalid iterator comparison.*iterators from different.* hashtables"
2604 : "Invalid iterator comparison.*may be from different "
2605 ".*containers.*config=asan";
2606 EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
2607 EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
2608 }
2609
2610 TYPED_TEST(SooTest, IteratorInvalidAssertsEqualityOperatorRehash) {
2611 if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
2612 GTEST_SKIP() << "Assertions not enabled.";
2613 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regex.";
2614 #ifdef ABSL_HAVE_THREAD_SANITIZER
2615 GTEST_SKIP() << "ThreadSanitizer test runs fail on use-after-free even in "
2616 "EXPECT_DEATH.";
2617 #endif
2618
2619 TypeParam t;
2620 t.insert(0);
2621 auto iter = t.begin();
2622
2623 // Trigger a rehash in t.
2624 for (int i = 0; i < 10; ++i) t.insert(i);
2625
2626 const char* const kRehashedDeathMessage =
2627 SwisstableGenerationsEnabled()
2628 ? kInvalidIteratorDeathMessage
2629 : "Invalid iterator comparison.*might have rehashed.*config=asan";
2630 EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), kRehashedDeathMessage);
2631 }
2632
2633 #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
2634 template <typename T>
2635 class RawHashSamplerTest : public testing::Test {};
2636
2637 using RawHashSamplerTestTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
2638 TYPED_TEST_SUITE(RawHashSamplerTest, RawHashSamplerTestTypes);
2639
2640 TYPED_TEST(RawHashSamplerTest, Sample) {
2641 constexpr bool soo_enabled = std::is_same<SooIntTable, TypeParam>::value;
2642 // Enable the feature even if the prod default is off.
2643 SetHashtablezEnabled(true);
2644 SetHashtablezSampleParameter(100); // Sample ~1% of tables.
2645
2646 auto& sampler = GlobalHashtablezSampler();
2647 size_t start_size = 0;
2648 absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
2649 start_size += sampler.Iterate([&](const HashtablezInfo& info) {
2650 preexisting_info.insert(&info);
2651 ++start_size;
2652 });
2653
2654 std::vector<TypeParam> tables;
2655 for (int i = 0; i < 1000000; ++i) {
2656 tables.emplace_back();
2657
2658 const bool do_reserve = (i % 10 > 5);
2659 const bool do_rehash = !do_reserve && (i % 10 > 0);
2660
2661 if (do_reserve) {
2662 // Don't reserve on all tables.
2663 tables.back().reserve(10 * (i % 10));
2664 }
2665
2666 tables.back().insert(1);
2667 tables.back().insert(i % 5);
2668
2669 if (do_rehash) {
2670 // Rehash some other tables.
2671 tables.back().rehash(10 * (i % 10));
2672 }
2673 }
2674 size_t end_size = 0;
2675 absl::flat_hash_map<size_t, int> observed_checksums;
2676 absl::flat_hash_map<ssize_t, int> reservations;
2677 end_size += sampler.Iterate([&](const HashtablezInfo& info) {
2678 ++end_size;
2679 if (preexisting_info.contains(&info)) return;
2680 observed_checksums[info.hashes_bitwise_xor.load(
2681 std::memory_order_relaxed)]++;
2682 reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
2683 EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type));
2684 EXPECT_EQ(info.key_size, sizeof(typename TypeParam::key_type));
2685 EXPECT_EQ(info.value_size, sizeof(typename TypeParam::value_type));
2686
2687 if (soo_enabled) {
2688 EXPECT_EQ(info.soo_capacity, SooCapacity());
2689 } else {
2690 EXPECT_EQ(info.soo_capacity, 0);
2691 }
2692 });
2693
2694 // Expect that we sampled at the requested sampling rate of ~1%.
2695 EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2696 0.01, 0.005);
2697 EXPECT_EQ(observed_checksums.size(), 5);
2698 for (const auto& [_, count] : observed_checksums) {
2699 EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
2700 }
2701
2702 EXPECT_EQ(reservations.size(), 10);
2703 for (const auto& [reservation, count] : reservations) {
2704 EXPECT_GE(reservation, 0);
2705 EXPECT_LT(reservation, 100);
2706
2707 EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
2708 << reservation;
2709 }
2710 }
2711
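// Creates a large number of SooIntTables, applies `mutate_table` to each, and
// returns the HashtablezInfo entries for the tables newly sampled during this
// call (tables that were already being sampled beforehand are excluded).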
2712 std::vector<const HashtablezInfo*> SampleSooMutation(
2713 absl::FunctionRef<void(SooIntTable&)> mutate_table) {
2714 // Enable the feature even if the prod default is off.
2715 SetHashtablezEnabled(true);
2716 SetHashtablezSampleParameter(100); // Sample ~1% of tables.
2717
2718 auto& sampler = GlobalHashtablezSampler();
2719 size_t start_size = 0;
2720 absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
2721 start_size += sampler.Iterate([&](const HashtablezInfo& info) {
2722 preexisting_info.insert(&info);
2723 ++start_size;
2724 });
2725
2726 std::vector<SooIntTable> tables;
2727 for (int i = 0; i < 1000000; ++i) {
2728 tables.emplace_back();
2729 mutate_table(tables.back());
2730 }
2731 size_t end_size = 0;
2732 std::vector<const HashtablezInfo*> infos;
2733 end_size += sampler.Iterate([&](const HashtablezInfo& info) {
2734 ++end_size;
2735 if (preexisting_info.contains(&info)) return;
2736 infos.push_back(&info);
2737 });
2738
2739 // Expect that we sampled at the requested sampling rate of ~1%.
2740 EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2741 0.01, 0.005);
2742 return infos;
2743 }
2744
2745 TEST(RawHashSamplerTest, SooTableInsertToEmpty) {
2746 if (SooIntTable().capacity() != SooCapacity()) {
2747 CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2748 GTEST_SKIP() << "not SOO on this platform";
2749 }
2750 std::vector<const HashtablezInfo*> infos =
2751 SampleSooMutation([](SooIntTable& t) { t.insert(1); });
2752
2753 for (const HashtablezInfo* info : infos) {
2754 ASSERT_EQ(info->inline_element_size,
2755 sizeof(typename SooIntTable::value_type));
2756 ASSERT_EQ(info->soo_capacity, SooCapacity());
2757 ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
2758 ASSERT_EQ(info->size, 1);
2759 ASSERT_EQ(info->max_reserve, 0);
2760 ASSERT_EQ(info->num_erases, 0);
2761 ASSERT_EQ(info->max_probe_length, 0);
2762 ASSERT_EQ(info->total_probe_length, 0);
2763 }
2764 }
2765
2766 TEST(RawHashSamplerTest, SooTableReserveToEmpty) {
2767 if (SooIntTable().capacity() != SooCapacity()) {
2768 CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2769 GTEST_SKIP() << "not SOO on this platform";
2770 }
2771 std::vector<const HashtablezInfo*> infos =
2772 SampleSooMutation([](SooIntTable& t) { t.reserve(100); });
2773
2774 for (const HashtablezInfo* info : infos) {
2775 ASSERT_EQ(info->inline_element_size,
2776 sizeof(typename SooIntTable::value_type));
2777 ASSERT_EQ(info->soo_capacity, SooCapacity());
2778 ASSERT_GE(info->capacity, 100);
2779 ASSERT_EQ(info->size, 0);
2780 ASSERT_EQ(info->max_reserve, 100);
2781 ASSERT_EQ(info->num_erases, 0);
2782 ASSERT_EQ(info->max_probe_length, 0);
2783 ASSERT_EQ(info->total_probe_length, 0);
2784 }
2785 }
2786
2787 // This tests that reserve on a full SOO table doesn't incorrectly result in new
2788 // (over-)sampling.
2789 TEST(RawHashSamplerTest, SooTableReserveToFullSoo) {
2790 if (SooIntTable().capacity() != SooCapacity()) {
2791 CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2792 GTEST_SKIP() << "not SOO on this platform";
2793 }
2794 std::vector<const HashtablezInfo*> infos =
2795 SampleSooMutation([](SooIntTable& t) {
2796 t.insert(1);
2797 t.reserve(100);
2798 });
2799
2800 for (const HashtablezInfo* info : infos) {
2801 ASSERT_EQ(info->inline_element_size,
2802 sizeof(typename SooIntTable::value_type));
2803 ASSERT_EQ(info->soo_capacity, SooCapacity());
2804 ASSERT_GE(info->capacity, 100);
2805 ASSERT_EQ(info->size, 1);
2806 ASSERT_EQ(info->max_reserve, 100);
2807 ASSERT_EQ(info->num_erases, 0);
2808 ASSERT_EQ(info->max_probe_length, 0);
2809 ASSERT_EQ(info->total_probe_length, 0);
2810 }
2811 }
2812
2813 // This tests that rehash(0) on a sampled table with size that fits in SOO
2814 // doesn't incorrectly result in losing sampling.
2815 TEST(RawHashSamplerTest, SooTableRehashShrinkWhenSizeFitsInSoo) {
2816 if (SooIntTable().capacity() != SooCapacity()) {
2817 CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
2818 GTEST_SKIP() << "not SOO on this platform";
2819 }
2820 std::vector<const HashtablezInfo*> infos =
2821 SampleSooMutation([](SooIntTable& t) {
2822 t.reserve(100);
2823 t.insert(1);
2824 EXPECT_GE(t.capacity(), 100);
2825 t.rehash(0);
2826 });
2827
2828 for (const HashtablezInfo* info : infos) {
2829 ASSERT_EQ(info->inline_element_size,
2830 sizeof(typename SooIntTable::value_type));
2831 ASSERT_EQ(info->soo_capacity, SooCapacity());
2832 ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
2833 ASSERT_EQ(info->size, 1);
2834 ASSERT_EQ(info->max_reserve, 100);
2835 ASSERT_EQ(info->num_erases, 0);
2836 ASSERT_EQ(info->max_probe_length, 0);
2837 ASSERT_EQ(info->total_probe_length, 0);
2838 }
2839 }
2840 #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
2841
2842 TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
2843 // Enable the feature even if the prod default is off.
2844 SetHashtablezEnabled(true);
2845 SetHashtablezSampleParameter(100); // Sample ~1% of tables.
2846
2847 auto& sampler = GlobalHashtablezSampler();
2848 size_t start_size = 0;
2849 start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
2850
2851 std::vector<CustomAllocIntTable> tables;
2852 for (int i = 0; i < 1000000; ++i) {
2853 tables.emplace_back();
2854 tables.back().insert(1);
2855 }
2856 size_t end_size = 0;
2857 end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
2858
2859 EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
2860 0.00, 0.001);
2861 }
2862
2863 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2864 template <class TableType>
2865 class SanitizerTest : public testing::Test {};
2866
2867 using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
2868 TYPED_TEST_SUITE(SanitizerTest, SanitizerTableTypes);
2869
2870 TYPED_TEST(SanitizerTest, PoisoningUnused) {
2871 TypeParam t;
2872 for (size_t reserve_size = 2; reserve_size < 1024;
2873 reserve_size = reserve_size * 3 / 2) {
2874 t.reserve(reserve_size);
2875 // Insert something to force an allocation.
2876 int64_t& v = *t.insert(0).first;
2877
2878 // Make sure there is something to test.
2879 ASSERT_GT(t.capacity(), 1);
2880
2881 int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
2882 for (size_t i = 0; i < t.capacity(); ++i) {
2883 EXPECT_EQ(slots + i != &v, __asan_address_is_poisoned(slots + i)) << i;
2884 }
2885 }
2886 }
2887
2888 // TODO(b/289225379): poison inline space when empty SOO.
2889 TEST(Sanitizer, PoisoningOnErase) {
2890 NonSooIntTable t;
2891 auto& v = *t.insert(0).first;
2892
2893 EXPECT_FALSE(__asan_address_is_poisoned(&v));
2894 t.erase(0);
2895 EXPECT_TRUE(__asan_address_is_poisoned(&v));
2896 }
2897 #endif // ABSL_HAVE_ADDRESS_SANITIZER
2898
2899 template <typename T>
2900 class AlignOneTest : public ::testing::Test {};
2901 using AlignOneTestTypes =
2902 ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
2903 TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
2904
2905 TYPED_TEST(AlignOneTest, AlignOne) {
2906 // We previously had a bug in which we were copying a control byte over the
2907 // first slot when alignof(value_type) is 1. We test repeated
2908 // insertions/erases and verify that the behavior is correct.
2909 TypeParam t;
2910 std::unordered_set<uint8_t> verifier; // NOLINT
2911
2912 // Do repeated insertions/erases from the table.
2913 for (int64_t i = 0; i < 100000; ++i) {
2914 SCOPED_TRACE(i);
2915 const uint8_t u = (i * -i) & 0xFF;
2916 auto it = t.find(u);
2917 auto verifier_it = verifier.find(u);
2918 if (it == t.end()) {
2919 ASSERT_EQ(verifier_it, verifier.end());
2920 t.insert(u);
2921 verifier.insert(u);
2922 } else {
2923 ASSERT_NE(verifier_it, verifier.end());
2924 t.erase(it);
2925 verifier.erase(verifier_it);
2926 }
2927 }
2928
2929 EXPECT_EQ(t.size(), verifier.size());
2930 for (uint8_t u : t) {
2931 EXPECT_EQ(verifier.count(u), 1);
2932 }
2933 }
2934
2935 TEST(Iterator, InvalidUseCrashesWithSanitizers) {
2936 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2937 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2938
2939 NonSooIntTable t;
2940 // Start with 1 element so that `it` is never an end iterator.
2941 t.insert(-1);
2942 for (int i = 0; i < 10; ++i) {
2943 auto it = t.begin();
2944 t.insert(i);
2945 EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2946 EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
2947 kInvalidIteratorDeathMessage);
2948 }
2949 }
2950
2951 TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
2952 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2953 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2954
2955 IntTable t;
2956 t.reserve(10);
2957 t.insert(0);
2958 auto it = t.begin();
2959 // Inserting within the reserved growth cannot trigger a rehash.
2960 for (int i = 1; i < 10; ++i) {
2961 t.insert(i);
2962 EXPECT_EQ(*it, 0);
2963 }
2964 // ptr will become invalidated on rehash.
2965 const int64_t* ptr = &*it;
2966 (void)ptr;
2967
2968 // erase decreases size but does not decrease reserved growth so the next
2969 // insertion still invalidates iterators.
2970 t.erase(0);
2971 // The first insert after the reserved growth reaches 0 is guaranteed to
2972 // rehash when generations are enabled.
2973 t.insert(10);
2974 EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2975 EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
2976 kInvalidIteratorDeathMessage);
2977 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2978 EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
2979 #endif
2980 }
2981
2982 TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
2983 if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
2984 if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
2985
2986 NonSooIntTable t1, t2;
2987 t1.insert(1);
2988 auto it = t1.begin();
2989 // ptr will become invalidated on rehash.
2990 const auto* ptr = &*it;
2991 (void)ptr;
2992
2993 t2 = std::move(t1);
2994 EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
2995 EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
2996 kInvalidIteratorDeathMessage);
2997 #ifdef ABSL_HAVE_ADDRESS_SANITIZER
2998 EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "heap-use-after-free");
2999 #endif
3000 }
3001
3002 TYPED_TEST(SooTest, ReservedGrowthUpdatesWhenTableDoesntGrow) {
3003 TypeParam t;
3004 for (int i = 0; i < 8; ++i) t.insert(i);
3005 // We want to insert twice without invalidating iterators, so reserve first.
3006 const size_t cap = t.capacity();
3007 t.reserve(t.size() + 2);
3008 // We want to be testing the case in which the reserve doesn't grow the table.
3009 ASSERT_EQ(cap, t.capacity());
3010 auto it = t.find(0);
3011 t.insert(100);
3012 t.insert(200);
3013 // `it` shouldn't have been invalidated.
3014 EXPECT_EQ(*it, 0);
3015 }
3016
3017 template <class TableType>
3018 class InstanceTrackerTest : public testing::Test {};
3019
3020 using ::absl::test_internal::CopyableMovableInstance;
3021 using ::absl::test_internal::InstanceTracker;
3022
3023 struct InstanceTrackerHash {
3024 size_t operator()(const CopyableMovableInstance& t) const {
3025 return absl::HashOf(t.value());
3026 }
3027 };
3028
3029 using InstanceTrackerTableTypes = ::testing::Types<
3030 absl::node_hash_set<CopyableMovableInstance, InstanceTrackerHash>,
3031 absl::flat_hash_set<CopyableMovableInstance, InstanceTrackerHash>>;
3032 TYPED_TEST_SUITE(InstanceTrackerTest, InstanceTrackerTableTypes);
3033
3034 TYPED_TEST(InstanceTrackerTest, EraseIfAll) {
3035 using Table = TypeParam;
3036 InstanceTracker tracker;
3037 for (int size = 0; size < 100; ++size) {
3038 Table t;
3039 for (int i = 0; i < size; ++i) {
3040 t.emplace(i);
3041 }
3042 absl::erase_if(t, [](const auto&) { return true; });
3043 ASSERT_EQ(t.size(), 0);
3044 }
3045 EXPECT_EQ(tracker.live_instances(), 0);
3046 }
3047
3048 TYPED_TEST(InstanceTrackerTest, EraseIfNone) {
3049 using Table = TypeParam;
3050 InstanceTracker tracker;
3051 {
3052 Table t;
3053 for (size_t size = 0; size < 100; ++size) {
3054 absl::erase_if(t, [](const auto&) { return false; });
3055 ASSERT_EQ(t.size(), size);
3056 t.emplace(size);
3057 }
3058 }
3059 EXPECT_EQ(tracker.live_instances(), 0);
3060 }
3061
3062 TYPED_TEST(InstanceTrackerTest, EraseIfPartial) {
3063 using Table = TypeParam;
3064 InstanceTracker tracker;
3065 for (int mod : {0, 1}) {
3066 for (int size = 0; size < 100; ++size) {
3067 SCOPED_TRACE(absl::StrCat(mod, " ", size));
3068 Table t;
3069 std::vector<CopyableMovableInstance> expected;
3070 for (int i = 0; i < size; ++i) {
3071 t.emplace(i);
3072 if (i % 2 != mod) {
3073 expected.emplace_back(i);
3074 }
3075 }
3076 absl::erase_if(t, [mod](const auto& x) { return x.value() % 2 == mod; });
3077 ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
3078 }
3079 }
3080 EXPECT_EQ(tracker.live_instances(), 0);
3081 }
3082
3083 TYPED_TEST(SooTest, EraseIfAll) {
3084 auto pred = [](const auto&) { return true; };
3085 for (int size = 0; size < 100; ++size) {
3086 TypeParam t;
3087 for (int i = 0; i < size; ++i) t.insert(i);
3088 absl::container_internal::EraseIf(pred, &t);
3089 ASSERT_EQ(t.size(), 0);
3090 }
3091 }
3092
3093 TYPED_TEST(SooTest, EraseIfNone) {
3094 auto pred = [](const auto&) { return false; };
3095 TypeParam t;
3096 for (size_t size = 0; size < 100; ++size) {
3097 absl::container_internal::EraseIf(pred, &t);
3098 ASSERT_EQ(t.size(), size);
3099 t.insert(size);
3100 }
3101 }
3102
3103 TYPED_TEST(SooTest, EraseIfPartial) {
3104 for (int mod : {0, 1}) {
3105 auto pred = [&](const auto& x) {
3106 return static_cast<int64_t>(x) % 2 == mod;
3107 };
3108 for (int size = 0; size < 100; ++size) {
3109 SCOPED_TRACE(absl::StrCat(mod, " ", size));
3110 TypeParam t;
3111 std::vector<int64_t> expected;
3112 for (int i = 0; i < size; ++i) {
3113 t.insert(i);
3114 if (i % 2 != mod) {
3115 expected.push_back(i);
3116 }
3117 }
3118 absl::container_internal::EraseIf(pred, &t);
3119 ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
3120 }
3121 }
3122 }
3123
3124 TYPED_TEST(SooTest, ForEach) {
3125 TypeParam t;
3126 std::vector<int64_t> expected;
3127 for (int size = 0; size < 100; ++size) {
3128 SCOPED_TRACE(size);
3129 {
3130 SCOPED_TRACE("mutable iteration");
3131 std::vector<int64_t> actual;
3132 auto f = [&](auto& x) { actual.push_back(static_cast<int64_t>(x)); };
3133 absl::container_internal::ForEach(f, &t);
3134 ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
3135 }
3136 {
3137 SCOPED_TRACE("const iteration");
3138 std::vector<int64_t> actual;
3139 auto f = [&](auto& x) {
3140 static_assert(
3141 std::is_const<std::remove_reference_t<decltype(x)>>::value,
3142 "no mutable values should be passed to const ForEach");
3143 actual.push_back(static_cast<int64_t>(x));
3144 };
3145 const auto& ct = t;
3146 absl::container_internal::ForEach(f, &ct);
3147 ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
3148 }
3149 t.insert(size);
3150 expected.push_back(size);
3151 }
3152 }
3153
3154 TEST(Table, ForEachMutate) {
3155 StringTable t;
3156 using ValueType = StringTable::value_type;
3157 std::vector<ValueType> expected;
3158 for (int size = 0; size < 100; ++size) {
3159 SCOPED_TRACE(size);
3160 std::vector<ValueType> actual;
3161 auto f = [&](ValueType& x) {
3162 actual.push_back(x);
3163 x.second += "a";
3164 };
3165 absl::container_internal::ForEach(f, &t);
3166 ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
3167 for (ValueType& v : expected) {
3168 v.second += "a";
3169 }
3170 ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
3171 t.emplace(std::to_string(size), std::to_string(size));
3172 expected.emplace_back(std::to_string(size), std::to_string(size));
3173 }
3174 }
3175
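// EraseIf should detect erasure performed by the predicate itself and abort
// with an assertion.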
TYPED_TEST(SooTest, EraseIfReentryDeath) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto erase_if_with_removal_reentrance = [](size_t reserve_size) {
    TypeParam t;
    t.reserve(reserve_size);
    int64_t first_value = -1;
    t.insert(1024);
    t.insert(5078);
    auto pred = [&](const auto& x) {
      if (first_value == -1) {
        first_value = static_cast<int64_t>(x);
        return false;
      }
      // We erase on the second call to `pred` to reduce the chance that the
      // assertion will happen in IterateOverFullSlots.
      t.erase(first_value);
      return true;
    };
    absl::container_internal::EraseIf(pred, &t);
  };
  // Removal will likely happen in a different group.
  EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(1024 * 16),
                            "hash table was modified unexpectedly");
  // Removal will happen in the same group.
  EXPECT_DEATH_IF_SUPPORTED(
      erase_if_with_removal_reentrance(CapacityToGrowth(Group::kWidth - 1)),
      "hash table was modified unexpectedly");
}

// This test is useful for exercising the SOO branch.
TYPED_TEST(SooTest, EraseIfReentrySingleElementDeath) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto erase_if_with_removal_reentrance = []() {
    TypeParam t;
    t.insert(1024);
    auto pred = [&](const auto& x) {
      // We erase the element being visited in order to confuse EraseIf.
      t.erase(static_cast<int64_t>(x));
      return false;
    };
    absl::container_internal::EraseIf(pred, &t);
  };
  EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(),
                            "hash table was modified unexpectedly");
}

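// Erasing the full range should drop all tombstones and reset reserved
// growth, so the fill/erase cycles below never need to allocate (the
// allocator is frozen after the initial reserve).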
TEST(Table, EraseBeginEndResetsReservedGrowth) {
  bool frozen = false;
  BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
  t.reserve(100);
  const size_t cap = t.capacity();
  frozen = true;  // no further allocs allowed

  for (int i = 0; i < 10; ++i) {
    // Create a long run (hash function returns constant).
    for (int j = 0; j < 100; ++j) t.insert(j);
    // Erase elements from the middle of the long run, which creates
    // tombstones.
    for (int j = 30; j < 60; ++j) t.erase(j);
    EXPECT_EQ(t.size(), 70);
    EXPECT_EQ(t.capacity(), cap);
    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);

    t.erase(t.begin(), t.end());

    EXPECT_EQ(t.size(), 0);
    EXPECT_EQ(t.capacity(), cap);
    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
  }
}

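// clear() should reset reserved growth/generation state, so the insertion
// after clear() below is allowed to invalidate outstanding iterators and the
// stale dereference is caught.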
TEST(Table, GenerationInfoResetsOnClear) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";

  NonSooIntTable t;
  for (int i = 0; i < 1000; ++i) t.insert(i);
  t.reserve(t.size() + 100);

  t.clear();

  t.insert(0);
  auto it = t.begin();
  t.insert(1);
  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
}

TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
#ifdef ABSL_HAVE_MEMORY_SANITIZER
  GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
#endif

  NonSooIntTable t;
  t.insert(0);
  // Rehashing is guaranteed on every insertion while capacity is less than
  // RehashProbabilityConstant().
  int i = 0;
  while (t.capacity() <= RehashProbabilityConstant()) {
    // ptr will become invalidated on rehash.
    const auto* ptr = &*t.begin();
    t.insert(++i);
    EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "use-after-free") << i;
  }
}

TEST(Iterator, InvalidComparisonDifferentTables) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";

  NonSooIntTable t1, t2;
  NonSooIntTable::iterator default_constructed_iter;
  // We randomly use one of N empty generations for generations from empty
  // hashtables. In general, we won't always detect when iterators from
  // different empty hashtables are compared, but in this test case, we
  // should deterministically detect the error due to our randomness yielding
  // consecutive random generations.
  EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
                            "Invalid iterator comparison.*empty hashtables");
  EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
                            "Invalid iterator comparison.*default-constructed");
  t1.insert(0);
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
                            "Invalid iterator comparison.*empty hashtable");
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
                            "Invalid iterator comparison.*default-constructed");
  t2.insert(0);
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
                            "Invalid iterator comparison.*end.. iterator");
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
                            "Invalid iterator comparison.*non-end");
}

template <typename Alloc>
using RawHashSetAlloc = raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
                                     std::equal_to<int64_t>, Alloc>;

TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }

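// Hash functor that counts its invocations so tests can assert on the exact
// number of hash computations performed.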
struct CountedHash {
  size_t operator()(int64_t value) const {
    ++count;
    return static_cast<size_t>(value);
  }
  mutable int count = 0;
};

struct CountedHashIntTable
    : raw_hash_set<IntPolicy, CountedHash, std::equal_to<int>,
                   std::allocator<int>> {
  using Base = typename CountedHashIntTable::raw_hash_set;
  using Base::Base;
};

TEST(Table, CountedHash) {
  // Verify that raw_hash_set does not compute redundant hashes.
#ifdef NDEBUG
  constexpr bool kExpectMinimumHashes = true;
#else
  constexpr bool kExpectMinimumHashes = false;
#endif
  if (!kExpectMinimumHashes) {
    GTEST_SKIP() << "Only run under NDEBUG: `assert` statements may cause "
                    "redundant hashing.";
  }

  using Table = CountedHashIntTable;
  auto HashCount = [](const Table& t) { return t.hash_function().count; };
  {
    Table t;
    EXPECT_EQ(HashCount(t), 0);
  }
  {
    Table t;
    t.insert(1);
    EXPECT_EQ(HashCount(t), 1);
    t.erase(1);
    EXPECT_EQ(HashCount(t), 2);
  }
  {
    Table t;
    t.insert(3);
    EXPECT_EQ(HashCount(t), 1);
    auto node = t.extract(3);
    EXPECT_EQ(HashCount(t), 2);
    t.insert(std::move(node));
    EXPECT_EQ(HashCount(t), 3);
  }
  {
    Table t;
    t.emplace(5);
    EXPECT_EQ(HashCount(t), 1);
  }
  {
    Table src;
    src.insert(7);
    Table dst;
    dst.merge(src);
    EXPECT_EQ(HashCount(dst), 1);
  }
}

// IterateOverFullSlots doesn't support SOO.
TEST(Table, IterateOverFullSlotsEmpty) {
  NonSooIntTable t;
  auto fail_if_any = [](const ctrl_t*, auto* i) {
    FAIL() << "expected no slots " << **i;
  };
  container_internal::IterateOverFullSlots(
      RawHashSetTestOnlyAccess::GetCommon(t),
      RawHashSetTestOnlyAccess::GetSlots(t), fail_if_any);
  for (size_t i = 0; i < 256; ++i) {
    t.reserve(i);
    container_internal::IterateOverFullSlots(
        RawHashSetTestOnlyAccess::GetCommon(t),
        RawHashSetTestOnlyAccess::GetSlots(t), fail_if_any);
  }
}

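// IterateOverFullSlots should visit every full slot exactly once, with
// matching control and slot offsets.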
TEST(Table, IterateOverFullSlotsFull) {
  NonSooIntTable t;

  std::vector<int64_t> expected_slots;
  for (int64_t idx = 0; idx < 128; ++idx) {
    t.insert(idx);
    expected_slots.push_back(idx);

    std::vector<int64_t> slots;
    container_internal::IterateOverFullSlots(
        RawHashSetTestOnlyAccess::GetCommon(t),
        RawHashSetTestOnlyAccess::GetSlots(t),
        [&t, &slots](const ctrl_t* ctrl, auto* i) {
          ptrdiff_t ctrl_offset =
              ctrl - RawHashSetTestOnlyAccess::GetCommon(t).control();
          ptrdiff_t slot_offset = i - RawHashSetTestOnlyAccess::GetSlots(t);
          ASSERT_EQ(ctrl_offset, slot_offset);
          slots.push_back(**i);
        });
    EXPECT_THAT(slots, testing::UnorderedElementsAreArray(expected_slots));
  }
}

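// The callback erases another element during iteration; the reentrant
// modification should be detected and abort the process.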
TEST(Table, IterateOverFullSlotsDeathOnRemoval) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto iterate_with_reentrant_removal = [](int64_t size,
                                           int64_t reserve_size = -1) {
    if (reserve_size == -1) reserve_size = size;
    for (int64_t idx = 0; idx < size; ++idx) {
      NonSooIntTable t;
      t.reserve(static_cast<size_t>(reserve_size));
      for (int val = 0; val <= idx; ++val) {
        t.insert(val);
      }

      container_internal::IterateOverFullSlots(
          RawHashSetTestOnlyAccess::GetCommon(t),
          RawHashSetTestOnlyAccess::GetSlots(t),
          [&t](const ctrl_t*, auto* i) {
            int64_t value = **i;
            // Erase the other element of the {2*k, 2*k + 1} pair.
            t.erase(value ^ 1);
          });
    }
  };

  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(128),
                            "hash table was modified unexpectedly");
  // Removal will likely happen in a different group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(14, 1024 * 16),
                            "hash table was modified unexpectedly");
  // Removal will happen in the same group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(static_cast<int64_t>(
                                CapacityToGrowth(Group::kWidth - 1))),
                            "hash table was modified unexpectedly");
}

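// The callback inserts a new element during iteration; the reentrant
// modification should be detected and abort the process.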
TEST(Table, IterateOverFullSlotsDeathOnInsert) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto iterate_with_reentrant_insert = [](int64_t reserve_size,
                                          int64_t size_divisor = 2) {
    int64_t size = reserve_size / size_divisor;
    for (int64_t idx = 1; idx <= size; ++idx) {
      NonSooIntTable t;
      t.reserve(static_cast<size_t>(reserve_size));
      for (int val = 1; val <= idx; ++val) {
        t.insert(val);
      }

      container_internal::IterateOverFullSlots(
          RawHashSetTestOnlyAccess::GetCommon(t),
          RawHashSetTestOnlyAccess::GetSlots(t),
          [&t](const ctrl_t*, auto* i) {
            int64_t value = **i;
            t.insert(-value);
          });
    }
  };

  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(128),
                            "hash table was modified unexpectedly");
  // Insert will likely happen in a different group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(1024 * 16, 1024 * 2),
                            "hash table was modified unexpectedly");
  // Insert will happen in the same group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(static_cast<int64_t>(
                                CapacityToGrowth(Group::kWidth - 1))),
                            "hash table was modified unexpectedly");
}

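// Tables whose allocator can be frozen to forbid allocation, used to verify
// that small tables stay in inline (SOO) storage.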
template <typename T>
class SooTable : public testing::Test {};
using FreezableSooTableTypes =
    ::testing::Types<FreezableSizedValueSooTable<8>,
                     FreezableSizedValueSooTable<16>>;
TYPED_TEST_SUITE(SooTable, FreezableSooTableTypes);

TYPED_TEST(SooTable, Basic) {
  bool frozen = true;
  TypeParam t{FreezableAlloc<typename TypeParam::value_type>(&frozen)};
  if (t.capacity() != SooCapacity()) {
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }

  t.insert(0);
  EXPECT_EQ(t.capacity(), 1);
  auto it = t.find(0);
  EXPECT_EQ(it, t.begin());
  ASSERT_NE(it, t.end());
  EXPECT_EQ(*it, 0);
  EXPECT_EQ(++it, t.end());
  EXPECT_EQ(t.find(1), t.end());
  EXPECT_EQ(t.size(), 1);

  t.erase(0);
  EXPECT_EQ(t.size(), 0);
  t.insert(1);
  it = t.find(1);
  EXPECT_EQ(it, t.begin());
  ASSERT_NE(it, t.end());
  EXPECT_EQ(*it, 1);

  t.clear();
  EXPECT_EQ(t.size(), 0);
}

TEST(Table, RehashToSooUnsampled) {
  SooIntTable t;
  if (t.capacity() != SooCapacity()) {
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }

  // We disable hashtablez sampling for this test to ensure that the table
  // isn't sampled. When the table is sampled, it won't rehash down to SOO.
  SetHashtablezEnabled(false);

  t.reserve(100);
  t.insert(0);
  EXPECT_EQ(*t.begin(), 0);

  t.rehash(0);  // Rehash back down to SOO table.

  EXPECT_EQ(t.capacity(), SooCapacity());
  EXPECT_EQ(t.size(), 1);
  EXPECT_EQ(*t.begin(), 0);
  EXPECT_EQ(t.find(0), t.begin());
  EXPECT_EQ(t.find(1), t.end());
}

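// reserve() past the SOO capacity should move the inline element to the
// backing array without losing it.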
TEST(Table, ReserveToNonSoo) {
  for (int reserve_capacity : {8, 100000}) {
    SooIntTable t;
    t.insert(0);

    t.reserve(reserve_capacity);

    EXPECT_EQ(t.find(0), t.begin());
    EXPECT_EQ(t.size(), 1);
    EXPECT_EQ(*t.begin(), 0);
    EXPECT_EQ(t.find(1), t.end());
  }
}

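// Hashes on v1 but compares equality on v2, deliberately violating the
// requirement that equal values hash equally.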
struct InconsistentHashEqType {
  InconsistentHashEqType(int v1, int v2) : v1(v1), v2(v2) {}
  template <typename H>
  friend H AbslHashValue(H h, InconsistentHashEqType t) {
    return H::combine(std::move(h), t.v1);
  }
  bool operator==(InconsistentHashEqType t) const { return v2 == t.v2; }
  int v1, v2;
};

TEST(Iterator, InconsistentHashEqFunctorsValidation) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  ValueTable<InconsistentHashEqType> t;
  for (int i = 0; i < 10; ++i) t.insert({i, i});
  // We need to find/insert multiple times to guarantee that we get the
  // assertion because it's possible for the hash to collide with the inserted
  // element that has v2==0. In those cases, the new element won't be inserted.
  auto find_conflicting_elems = [&] {
    for (int i = 100; i < 20000; ++i) {
      EXPECT_EQ(t.find({i, 0}), t.end());
    }
  };
  EXPECT_DEATH_IF_SUPPORTED(find_conflicting_elems(),
                            "hash/eq functors are inconsistent.");
  auto insert_conflicting_elems = [&] {
    for (int i = 100; i < 20000; ++i) {
      EXPECT_EQ(t.insert({i, 0}).second, false);
    }
  };
  EXPECT_DEATH_IF_SUPPORTED(insert_conflicting_elems(),
                            "hash/eq functors are inconsistent.");
}

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl