1 // Copyright 2017 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/synchronization/mutex.h"
16
17 #ifdef _WIN32
18 #include <windows.h>
19 #endif
20
21 #include <algorithm>
22 #include <atomic>
23 #include <cstdlib>
24 #include <functional>
25 #include <memory>
26 #include <random>
27 #include <string>
28 #include <thread> // NOLINT(build/c++11)
29 #include <type_traits>
30 #include <vector>
31
32 #include "gtest/gtest.h"
33 #include "absl/base/attributes.h"
34 #include "absl/base/config.h"
35 #include "absl/base/internal/sysinfo.h"
36 #include "absl/log/check.h"
37 #include "absl/log/log.h"
38 #include "absl/memory/memory.h"
39 #include "absl/synchronization/internal/create_thread_identity.h"
40 #include "absl/synchronization/internal/thread_pool.h"
41 #include "absl/time/clock.h"
42 #include "absl/time/time.h"
43
44 #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
45 #include <pthread.h>
46 #include <string.h>
47 #endif
48
49 namespace {
50
// When true, the stress tests below run with larger thread pools.
// TODO(dmauro): Replace with a commandline flag.
static constexpr bool kExtendedTest = false;
53
CreatePool(int threads)54 std::unique_ptr<absl::synchronization_internal::ThreadPool> CreatePool(
55 int threads) {
56 return absl::make_unique<absl::synchronization_internal::ThreadPool>(threads);
57 }
58
59 std::unique_ptr<absl::synchronization_internal::ThreadPool>
CreateDefaultPool()60 CreateDefaultPool() {
61 return CreatePool(kExtendedTest ? 32 : 10);
62 }
63
64 // Hack to schedule a function to run on a thread pool thread after a
65 // duration has elapsed.
ScheduleAfter(absl::synchronization_internal::ThreadPool * tp,absl::Duration after,const std::function<void ()> & func)66 static void ScheduleAfter(absl::synchronization_internal::ThreadPool *tp,
67 absl::Duration after,
68 const std::function<void()> &func) {
69 tp->Schedule([func, after] {
70 absl::SleepFor(after);
71 func();
72 });
73 }
74
// RAII guard: enables Mutex invariant debugging for its lifetime and
// disables it again on destruction.
struct ScopedInvariantDebugging {
  ScopedInvariantDebugging() { absl::EnableMutexInvariantDebugging(true); }
  ~ScopedInvariantDebugging() { absl::EnableMutexInvariantDebugging(false); }
};
79
// Shared state for the multi-threaded test bodies (TestMu, TestTry, ...).
struct TestContext {
  int iterations;  // number of iterations each test thread performs
  int threads;     // number of participating threads
  int g0;          // global 0; incremented under mu by test threads
  int g1;          // global 1; decremented in step with g0 (so g0 == -g1)
  absl::Mutex mu;
  absl::CondVar cv;
};
88
// To test whether the invariant check call occurs
static std::atomic<bool> invariant_checked;

// Returns whether the invariant function has recorded a check (relaxed load;
// no ordering is needed, only the flag value).
static bool GetInvariantChecked() {
  return invariant_checked.load(std::memory_order_relaxed);
}

// Records whether the mutex invariant function has run (relaxed store).
static void SetInvariantChecked(bool new_value) {
  invariant_checked.store(new_value, std::memory_order_relaxed);
}
99
CheckSumG0G1(void * v)100 static void CheckSumG0G1(void *v) {
101 TestContext *cxt = static_cast<TestContext *>(v);
102 CHECK_EQ(cxt->g0, -cxt->g1) << "Error in CheckSumG0G1";
103 SetInvariantChecked(true);
104 }
105
TestMu(TestContext * cxt,int c)106 static void TestMu(TestContext *cxt, int c) {
107 for (int i = 0; i != cxt->iterations; i++) {
108 absl::MutexLock l(&cxt->mu);
109 int a = cxt->g0 + 1;
110 cxt->g0 = a;
111 cxt->g1--;
112 }
113 }
114
TestTry(TestContext * cxt,int c)115 static void TestTry(TestContext *cxt, int c) {
116 for (int i = 0; i != cxt->iterations; i++) {
117 do {
118 std::this_thread::yield();
119 } while (!cxt->mu.TryLock());
120 int a = cxt->g0 + 1;
121 cxt->g0 = a;
122 cxt->g1--;
123 cxt->mu.Unlock();
124 }
125 }
126
TestR20ms(TestContext * cxt,int c)127 static void TestR20ms(TestContext *cxt, int c) {
128 for (int i = 0; i != cxt->iterations; i++) {
129 absl::ReaderMutexLock l(&cxt->mu);
130 absl::SleepFor(absl::Milliseconds(20));
131 cxt->mu.AssertReaderHeld();
132 }
133 }
134
TestRW(TestContext * cxt,int c)135 static void TestRW(TestContext *cxt, int c) {
136 if ((c & 1) == 0) {
137 for (int i = 0; i != cxt->iterations; i++) {
138 absl::WriterMutexLock l(&cxt->mu);
139 cxt->g0++;
140 cxt->g1--;
141 cxt->mu.AssertHeld();
142 cxt->mu.AssertReaderHeld();
143 }
144 } else {
145 for (int i = 0; i != cxt->iterations; i++) {
146 absl::ReaderMutexLock l(&cxt->mu);
147 CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW";
148 cxt->mu.AssertReaderHeld();
149 }
150 }
151 }
152
// Per-thread state for TestAwait: the g0 value at which this thread may run,
// plus the shared test context.
struct MyContext {
  int target;        // this thread's next turn is when g0 == target
  TestContext *cxt;  // shared test state
  bool MyTurn();
};

// True when it is this thread's turn to increment g0, or when the test has
// reached its final count.  Used as a Mutex::Await predicate in TestAwait,
// so it is evaluated with cxt->mu held.
bool MyContext::MyTurn() {
  TestContext *cxt = this->cxt;
  return cxt->g0 == this->target || cxt->g0 == cxt->iterations;
}
163
TestAwait(TestContext * cxt,int c)164 static void TestAwait(TestContext *cxt, int c) {
165 MyContext mc;
166 mc.target = c;
167 mc.cxt = cxt;
168 absl::MutexLock l(&cxt->mu);
169 cxt->mu.AssertHeld();
170 while (cxt->g0 < cxt->iterations) {
171 cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
172 CHECK(mc.MyTurn()) << "Error in TestAwait";
173 cxt->mu.AssertHeld();
174 if (cxt->g0 < cxt->iterations) {
175 int a = cxt->g0 + 1;
176 cxt->g0 = a;
177 mc.target += cxt->threads;
178 }
179 }
180 }
181
TestSignalAll(TestContext * cxt,int c)182 static void TestSignalAll(TestContext *cxt, int c) {
183 int target = c;
184 absl::MutexLock l(&cxt->mu);
185 cxt->mu.AssertHeld();
186 while (cxt->g0 < cxt->iterations) {
187 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
188 cxt->cv.Wait(&cxt->mu);
189 }
190 if (cxt->g0 < cxt->iterations) {
191 int a = cxt->g0 + 1;
192 cxt->g0 = a;
193 cxt->cv.SignalAll();
194 target += cxt->threads;
195 }
196 }
197 }
198
TestSignal(TestContext * cxt,int c)199 static void TestSignal(TestContext *cxt, int c) {
200 CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads";
201 int target = c;
202 absl::MutexLock l(&cxt->mu);
203 cxt->mu.AssertHeld();
204 while (cxt->g0 < cxt->iterations) {
205 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
206 cxt->cv.Wait(&cxt->mu);
207 }
208 if (cxt->g0 < cxt->iterations) {
209 int a = cxt->g0 + 1;
210 cxt->g0 = a;
211 cxt->cv.Signal();
212 target += cxt->threads;
213 }
214 }
215 }
216
TestCVTimeout(TestContext * cxt,int c)217 static void TestCVTimeout(TestContext *cxt, int c) {
218 int target = c;
219 absl::MutexLock l(&cxt->mu);
220 cxt->mu.AssertHeld();
221 while (cxt->g0 < cxt->iterations) {
222 while (cxt->g0 != target && cxt->g0 != cxt->iterations) {
223 cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
224 }
225 if (cxt->g0 < cxt->iterations) {
226 int a = cxt->g0 + 1;
227 cxt->g0 = a;
228 cxt->cv.SignalAll();
229 target += cxt->threads;
230 }
231 }
232 }
233
G0GE2(TestContext * cxt)234 static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
235
// Timed-wait test with per-thread roles selected by `c`.  When use_cv is
// true, timed waits use CondVar::WaitWithTimeout; otherwise
// Mutex::AwaitWithTimeout on a never-true condition.  Thread 0 waits out a
// series of timeouts and checks elapsed wall time against generous bounds;
// thread 1 waits out a single 500ms timeout; all other threads wait until
// g0 >= 2 and then increment g0.
static void TestTime(TestContext *cxt, int c, bool use_cv) {
  CHECK_EQ(cxt->iterations, 1) << "TestTime should only use 1 iteration";
  CHECK_GT(cxt->threads, 2) << "TestTime should use more than 2 threads";
  const bool kFalse = false;
  absl::Condition false_cond(&kFalse);  // never becomes true
  absl::Condition g0ge2(G0GE2, cxt);    // true once g0 >= 2
  if (c == 0) {
    absl::MutexLock l(&cxt->mu);

    // Wait out a 1s timeout; the condition never fires, so the wait must
    // take roughly 1s (bounds are loose to tolerate scheduling jitter).
    absl::Time start = absl::Now();
    if (use_cv) {
      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
    } else {
      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
          << "TestTime failed";
    }
    absl::Duration elapsed = absl::Now() - start;
    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
        << "TestTime failed";
    CHECK_EQ(cxt->g0, 1) << "TestTime failed";

    // Second 1s timeout, then bump g0 to 2, releasing the c >= 2 threads.
    start = absl::Now();
    if (use_cv) {
      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
    } else {
      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
          << "TestTime failed";
    }
    elapsed = absl::Now() - start;
    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
        << "TestTime failed";
    cxt->g0++;
    if (use_cv) {
      cxt->cv.Signal();
    }

    // Longer 4s timeout; by the time it expires at least one other thread
    // must have run (g0 >= 3).
    start = absl::Now();
    if (use_cv) {
      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
    } else {
      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)))
          << "TestTime failed";
    }
    elapsed = absl::Now() - start;
    CHECK(absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0))
        << "TestTime failed";
    CHECK_GE(cxt->g0, 3) << "TestTime failed";

    // Another 1s timeout, then wake any remaining cv waiters.
    start = absl::Now();
    if (use_cv) {
      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
    } else {
      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
          << "TestTime failed";
    }
    elapsed = absl::Now() - start;
    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
        << "TestTime failed";
    if (use_cv) {
      cxt->cv.SignalAll();
    }

    // Final 1s timeout; afterwards every thread must have incremented g0.
    start = absl::Now();
    if (use_cv) {
      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
    } else {
      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
          << "TestTime failed";
    }
    elapsed = absl::Now() - start;
    CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
        << "TestTime failed";
    CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed";

  } else if (c == 1) {
    // Thread 1: wait out a single 500ms timeout, then count itself in g0.
    absl::MutexLock l(&cxt->mu);
    const absl::Time start = absl::Now();
    if (use_cv) {
      cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
    } else {
      CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)))
          << "TestTime failed";
    }
    const absl::Duration elapsed = absl::Now() - start;
    CHECK(absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9))
        << "TestTime failed";
    cxt->g0++;
  } else if (c == 2) {
    // Thread 2: timed wait for g0 >= 2 with a timeout long enough that the
    // condition should fire first.
    absl::MutexLock l(&cxt->mu);
    if (use_cv) {
      while (cxt->g0 < 2) {
        cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
      }
    } else {
      CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)))
          << "TestTime failed";
    }
    cxt->g0++;
  } else {
    // Remaining threads: untimed wait for g0 >= 2, then count themselves.
    absl::MutexLock l(&cxt->mu);
    if (use_cv) {
      while (cxt->g0 < 2) {
        cxt->cv.Wait(&cxt->mu);
      }
    } else {
      cxt->mu.Await(g0ge2);
    }
    cxt->g0++;
  }
}
346
TestMuTime(TestContext * cxt,int c)347 static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); }
348
TestCVTime(TestContext * cxt,int c)349 static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
350
EndTest(int * c0,int * c1,absl::Mutex * mu,absl::CondVar * cv,const std::function<void (int)> & cb)351 static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
352 const std::function<void(int)> &cb) {
353 mu->Lock();
354 int c = (*c0)++;
355 mu->Unlock();
356 cb(c);
357 absl::MutexLock l(mu);
358 (*c1)++;
359 cv->Signal();
360 }
361
362 // Code common to RunTest() and RunTestWithInvariantDebugging().
RunTestCommon(TestContext * cxt,void (* test)(TestContext * cxt,int),int threads,int iterations,int operations)363 static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
364 int threads, int iterations, int operations) {
365 absl::Mutex mu2;
366 absl::CondVar cv2;
367 int c0 = 0;
368 int c1 = 0;
369 cxt->g0 = 0;
370 cxt->g1 = 0;
371 cxt->iterations = iterations;
372 cxt->threads = threads;
373 absl::synchronization_internal::ThreadPool tp(threads);
374 for (int i = 0; i != threads; i++) {
375 tp.Schedule(std::bind(
376 &EndTest, &c0, &c1, &mu2, &cv2,
377 std::function<void(int)>(std::bind(test, cxt, std::placeholders::_1))));
378 }
379 mu2.Lock();
380 while (c1 != threads) {
381 cv2.Wait(&mu2);
382 }
383 mu2.Unlock();
384 return cxt->g0;
385 }
386
387 // Basis for the parameterized tests configured below.
RunTest(void (* test)(TestContext * cxt,int),int threads,int iterations,int operations)388 static int RunTest(void (*test)(TestContext *cxt, int), int threads,
389 int iterations, int operations) {
390 TestContext cxt;
391 return RunTestCommon(&cxt, test, threads, iterations, operations);
392 }
393
// Like RunTest(), but sets an invariant on the tested Mutex and
// verifies that the invariant check happened. The invariant function
// will be passed the TestContext* as its arg and must call
// SetInvariantChecked(true);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
                                         int threads, int iterations,
                                         int operations,
                                         void (*invariant)(void *)) {
  // Invariant debugging is enabled only for the duration of the run.
  ScopedInvariantDebugging scoped_debugging;
  SetInvariantChecked(false);
  TestContext cxt;
  cxt.mu.EnableInvariantDebugging(invariant, &cxt);
  int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
  // The runtime must have invoked `invariant` at least once.
  CHECK(GetInvariantChecked()) << "Invariant not checked";
  return ret;
}
#endif
412
413 // --------------------------------------------------------
414 // Test for fix of bug in TryRemove()
415 struct TimeoutBugStruct {
416 absl::Mutex mu;
417 bool a;
418 int a_waiter_count;
419 };
420
WaitForA(TimeoutBugStruct * x)421 static void WaitForA(TimeoutBugStruct *x) {
422 x->mu.LockWhen(absl::Condition(&x->a));
423 x->a_waiter_count--;
424 x->mu.Unlock();
425 }
426
NoAWaiters(TimeoutBugStruct * x)427 static bool NoAWaiters(TimeoutBugStruct *x) { return x->a_waiter_count == 0; }
428
// Test that a CondVar.Wait(&mutex) can un-block a call to mutex.Await() in
// another thread.
TEST(Mutex, CondVarWaitSignalsAwait) {
  // Use a struct so the lock annotations apply.
  struct {
    absl::Mutex barrier_mu;
    bool barrier ABSL_GUARDED_BY(barrier_mu) = false;  // thread A has started

    absl::Mutex release_mu;
    bool release ABSL_GUARDED_BY(release_mu) = false;  // lets thread A finish
    absl::CondVar released_cv;
  } state;

  auto pool = CreateDefaultPool();

  // Thread A. Sets barrier, waits for release using Mutex::Await, then
  // signals released_cv.
  pool->Schedule([&state] {
    state.release_mu.Lock();

    state.barrier_mu.Lock();
    state.barrier = true;
    state.barrier_mu.Unlock();

    state.release_mu.Await(absl::Condition(&state.release));
    state.released_cv.Signal();
    state.release_mu.Unlock();
  });

  // Wait until thread A holds release_mu and is inside Await().
  state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
  state.barrier_mu.Unlock();
  state.release_mu.Lock();
  // Thread A is now blocked on release by way of Mutex::Await().

  // Set release. Calling released_cv.Wait() should un-block thread A,
  // which will signal released_cv. If not, the test will hang.
  state.release = true;
  state.released_cv.Wait(&state.release_mu);
  state.release_mu.Unlock();
}
469
470 // Test that a CondVar.WaitWithTimeout(&mutex) can un-block a call to
471 // mutex.Await() in another thread.
TEST(Mutex,CondVarWaitWithTimeoutSignalsAwait)472 TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
473 // Use a struct so the lock annotations apply.
474 struct {
475 absl::Mutex barrier_mu;
476 bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
477
478 absl::Mutex release_mu;
479 bool release ABSL_GUARDED_BY(release_mu) = false;
480 absl::CondVar released_cv;
481 } state;
482
483 auto pool = CreateDefaultPool();
484
485 // Thread A. Sets barrier, waits for release using Mutex::Await, then
486 // signals released_cv.
487 pool->Schedule([&state] {
488 state.release_mu.Lock();
489
490 state.barrier_mu.Lock();
491 state.barrier = true;
492 state.barrier_mu.Unlock();
493
494 state.release_mu.Await(absl::Condition(&state.release));
495 state.released_cv.Signal();
496 state.release_mu.Unlock();
497 });
498
499 state.barrier_mu.LockWhen(absl::Condition(&state.barrier));
500 state.barrier_mu.Unlock();
501 state.release_mu.Lock();
502 // Thread A is now blocked on release by way of Mutex::Await().
503
504 // Set release. Calling released_cv.Wait() should un-block thread A,
505 // which will signal released_cv. If not, the test will hang.
506 state.release = true;
507 EXPECT_TRUE(
508 !state.released_cv.WaitWithTimeout(&state.release_mu, absl::Seconds(10)))
509 << "; Unrecoverable test failure: CondVar::WaitWithTimeout did not "
510 "unblock the absl::Mutex::Await call in another thread.";
511
512 state.release_mu.Unlock();
513 }
514
// Test for regression of a bug in loop of TryRemove()
TEST(Mutex, MutexTimeoutBug) {
  auto tp = CreateDefaultPool();

  TimeoutBugStruct x;
  x.a = false;
  x.a_waiter_count = 2;
  tp->Schedule(std::bind(&WaitForA, &x));
  tp->Schedule(std::bind(&WaitForA, &x));
  absl::SleepFor(absl::Seconds(1));  // Allow first two threads to hang.
  // The skip field of the second will point to the first because there are
  // only two.

  // Now cause a thread waiting on an always-false to time out
  // This would deadlock when the bug was present.
  bool always_false = false;
  x.mu.LockWhenWithTimeout(absl::Condition(&always_false),
                           absl::Milliseconds(500));

  // if we get here, the bug is not present. Cleanup the state.

  x.a = true;                                    // wakeup the two waiters on A
  x.mu.Await(absl::Condition(&NoAWaiters, &x));  // wait for them to exit
  x.mu.Unlock();
}
540
// Parameterized fixture for the Mutex::Fer() deadlock regression test below.
// The three low bits of the test parameter select read vs. write locking for
// each waiter and whether the CondVar signal happens with mu unlocked.
struct CondVarWaitDeadlock : testing::TestWithParam<int> {
  absl::Mutex mu;
  absl::CondVar cv;
  bool cond1 = false;    // wakes Waiter1
  bool cond2 = false;    // wakes Waiter2
  bool read_lock1;       // Waiter1 uses a reader lock
  bool read_lock2;       // Waiter2 uses a reader lock
  bool signal_unlocked;  // signal cv after releasing mu

  CondVarWaitDeadlock() {
    read_lock1 = GetParam() & (1 << 0);
    read_lock2 = GetParam() & (1 << 1);
    signal_unlocked = GetParam() & (1 << 2);
  }

  // Blocks on cv until cond1 is set, holding mu in the configured mode.
  void Waiter1() {
    if (read_lock1) {
      mu.ReaderLock();
      while (!cond1) {
        cv.Wait(&mu);
      }
      mu.ReaderUnlock();
    } else {
      mu.Lock();
      while (!cond1) {
        cv.Wait(&mu);
      }
      mu.Unlock();
    }
  }

  // Blocks directly on mu (not cv) until cond2 is set.
  void Waiter2() {
    if (read_lock2) {
      mu.ReaderLockWhen(absl::Condition(&cond2));
      mu.ReaderUnlock();
    } else {
      mu.LockWhen(absl::Condition(&cond2));
      mu.Unlock();
    }
  }
};
582
// Test for a deadlock bug in Mutex::Fer().
// The sequence of events that lead to the deadlock is:
// 1. waiter1 blocks on cv in read mode (mu bits = 0).
// 2. waiter2 blocks on mu in either mode (mu bits = kMuWait).
// 3. main thread locks mu, sets cond1, unlocks mu (mu bits = kMuWait).
// 4. main thread signals on cv and this eventually calls Mutex::Fer().
// Currently Fer wakes waiter1 since mu bits = kMuWait (mutex is unlocked).
// Before the bug fix Fer neither woke waiter1 nor queued it on mutex,
// which resulted in deadlock.
TEST_P(CondVarWaitDeadlock, Test) {
  // One single-thread pool per waiter; resetting a pool joins its thread.
  auto waiter1 = CreatePool(1);
  auto waiter2 = CreatePool(1);
  waiter1->Schedule([this] { this->Waiter1(); });
  waiter2->Schedule([this] { this->Waiter2(); });

  // Wait while threads block (best-effort is fine).
  absl::SleepFor(absl::Milliseconds(100));

  // Wake condwaiter.
  mu.Lock();
  cond1 = true;
  if (signal_unlocked) {
    mu.Unlock();
    cv.Signal();
  } else {
    cv.Signal();
    mu.Unlock();
  }
  waiter1.reset();  // "join" waiter1

  // Wake waiter.
  mu.Lock();
  cond2 = true;
  mu.Unlock();
  waiter2.reset();  // "join" waiter2
}
619
// Run the deadlock test over all 8 combinations of (read_lock1, read_lock2,
// signal_unlocked) encoded in the parameter's low three bits.
INSTANTIATE_TEST_SUITE_P(CondVarWaitDeadlockTest, CondVarWaitDeadlock,
                         ::testing::Range(0, 8),
                         ::testing::PrintToStringParamName());
623
624 // --------------------------------------------------------
625 // Test for fix of bug in DequeueAllWakeable()
626 // Bug was that if there was more than one waiting reader
627 // and all should be woken, the most recently blocked one
628 // would not be.
629
630 struct DequeueAllWakeableBugStruct {
631 absl::Mutex mu;
632 absl::Mutex mu2; // protects all fields below
633 int unfinished_count; // count of unfinished readers; under mu2
634 bool done1; // unfinished_count == 0; under mu2
635 int finished_count; // count of finished readers, under mu2
636 bool done2; // finished_count == 0; under mu2
637 };
638
639 // Test for regression of a bug in loop of DequeueAllWakeable()
AcquireAsReader(DequeueAllWakeableBugStruct * x)640 static void AcquireAsReader(DequeueAllWakeableBugStruct *x) {
641 x->mu.ReaderLock();
642 x->mu2.Lock();
643 x->unfinished_count--;
644 x->done1 = (x->unfinished_count == 0);
645 x->mu2.Unlock();
646 // make sure that both readers acquired mu before we release it.
647 absl::SleepFor(absl::Seconds(2));
648 x->mu.ReaderUnlock();
649
650 x->mu2.Lock();
651 x->finished_count--;
652 x->done2 = (x->finished_count == 0);
653 x->mu2.Unlock();
654 }
655
// Test for regression of a bug in loop of DequeueAllWakeable()
TEST(Mutex, MutexReaderWakeupBug) {
  auto tp = CreateDefaultPool();

  DequeueAllWakeableBugStruct x;
  x.unfinished_count = 2;
  x.done1 = false;
  x.finished_count = 2;
  x.done2 = false;
  x.mu.Lock();  // acquire mu exclusively
  // queue two thread that will block on reader locks on x.mu
  tp->Schedule(std::bind(&AcquireAsReader, &x));
  tp->Schedule(std::bind(&AcquireAsReader, &x));
  absl::SleepFor(absl::Seconds(1));  // give time for reader threads to block
  x.mu.Unlock();                     // wake them up

  // both readers should finish promptly
  EXPECT_TRUE(
      x.mu2.LockWhenWithTimeout(absl::Condition(&x.done1), absl::Seconds(10)));
  x.mu2.Unlock();

  EXPECT_TRUE(
      x.mu2.LockWhenWithTimeout(absl::Condition(&x.done2), absl::Seconds(10)));
  x.mu2.Unlock();
}
681
// State for the LockWhen test: mu1 guards the condition the waiter blocks
// on; mu2 guards the flag showing the waiter has evaluated its condition.
struct LockWhenTestStruct {
  absl::Mutex mu1;
  bool cond = false;  // under mu1

  absl::Mutex mu2;
  bool waiting = false;  // under mu2
};
689
LockWhenTestIsCond(LockWhenTestStruct * s)690 static bool LockWhenTestIsCond(LockWhenTestStruct *s) {
691 s->mu2.Lock();
692 s->waiting = true;
693 s->mu2.Unlock();
694 return s->cond;
695 }
696
LockWhenTestWaitForIsCond(LockWhenTestStruct * s)697 static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) {
698 s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
699 s->mu1.Unlock();
700 }
701
// LockWhen should evaluate its condition and block until it becomes true.
TEST(Mutex, LockWhen) {
  LockWhenTestStruct s;

  // Start the waiter and wait until it has evaluated its condition at least
  // once (s.waiting is set inside the predicate).
  std::thread t(LockWhenTestWaitForIsCond, &s);
  s.mu2.LockWhen(absl::Condition(&s.waiting));
  s.mu2.Unlock();

  // Make the waiter's condition true; it should now acquire mu1 and exit.
  s.mu1.Lock();
  s.cond = true;
  s.mu1.Unlock();

  t.join();
}
715
// Conditional lock guards (ReaderMutexLock/WriterMutexLock with a Condition)
// should block construction until the condition holds.
TEST(Mutex, LockWhenGuard) {
  absl::Mutex mu;
  int n = 30;
  bool done = false;

  // We don't inline the lambda because the conversion is ambiguous in MSVC.
  bool (*cond_eq_10)(int *) = [](int *p) { return *p == 10; };
  bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; };

  // Reader guard: waits for n to reach exactly 10.
  std::thread t1([&mu, &n, &done, cond_eq_10]() {
    absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n));
    done = true;
  });

  // Ten writer guards: each waits for n < 10 and increments it once.
  std::thread t2[10];
  for (std::thread &t : t2) {
    t = std::thread([&mu, &n, cond_lt_10]() {
      absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n));
      ++n;
    });
  }

  // Drop n to 0, releasing the writers; they raise n back to 10, which then
  // releases the reader.
  {
    absl::MutexLock lock(&mu);
    n = 0;
  }

  for (std::thread &t : t2) t.join();
  t1.join();

  EXPECT_TRUE(done);
  EXPECT_EQ(n, 10);
}
749
750 // --------------------------------------------------------
751 // The following test requires Mutex::ReaderLock to be a real shared
752 // lock, which is not the case in all builds.
753 #if !defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
754
755 // Test for fix of bug in UnlockSlow() that incorrectly decremented the reader
756 // count when putting a thread to sleep waiting for a false condition when the
757 // lock was not held.
758
759 // For this bug to strike, we make a thread wait on a free mutex with no
760 // waiters by causing its wakeup condition to be false. Then the
761 // next two acquirers must be readers. The bug causes the lock
762 // to be released when one reader unlocks, rather than both.
763
764 struct ReaderDecrementBugStruct {
765 bool cond; // to delay first thread (under mu)
766 int done; // reference count (under mu)
767 absl::Mutex mu;
768
769 bool waiting_on_cond; // under mu2
770 bool have_reader_lock; // under mu2
771 bool complete; // under mu2
772 absl::Mutex mu2; // > mu
773 };
774
775 // L >= mu, L < mu_waiting_on_cond
IsCond(void * v)776 static bool IsCond(void *v) {
777 ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
778 x->mu2.Lock();
779 x->waiting_on_cond = true;
780 x->mu2.Unlock();
781 return x->cond;
782 }
783
784 // L >= mu
AllDone(void * v)785 static bool AllDone(void *v) {
786 ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v);
787 return x->done == 0;
788 }
789
790 // L={}
WaitForCond(ReaderDecrementBugStruct * x)791 static void WaitForCond(ReaderDecrementBugStruct *x) {
792 absl::Mutex dummy;
793 absl::MutexLock l(&dummy);
794 x->mu.LockWhen(absl::Condition(&IsCond, x));
795 x->done--;
796 x->mu.Unlock();
797 }
798
799 // L={}
GetReadLock(ReaderDecrementBugStruct * x)800 static void GetReadLock(ReaderDecrementBugStruct *x) {
801 x->mu.ReaderLock();
802 x->mu2.Lock();
803 x->have_reader_lock = true;
804 x->mu2.Await(absl::Condition(&x->complete));
805 x->mu2.Unlock();
806 x->mu.ReaderUnlock();
807 x->mu.Lock();
808 x->done--;
809 x->mu.Unlock();
810 }
811
// Test for reader counter being decremented incorrectly by waiter
// with false condition.
TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
  ReaderDecrementBugStruct x;
  x.cond = false;
  x.waiting_on_cond = false;
  x.have_reader_lock = false;
  x.complete = false;
  x.done = 2;  // initial ref count

  // Run WaitForCond() and wait for it to sleep
  std::thread thread1(WaitForCond, &x);
  x.mu2.LockWhen(absl::Condition(&x.waiting_on_cond));
  x.mu2.Unlock();

  // Run GetReadLock(), and wait for it to get the read lock
  std::thread thread2(GetReadLock, &x);
  x.mu2.LockWhen(absl::Condition(&x.have_reader_lock));
  x.mu2.Unlock();

  // Get the reader lock ourselves, and release it.
  x.mu.ReaderLock();
  x.mu.ReaderUnlock();

  // The lock should be held in read mode by GetReadLock().
  // If we have the bug, the lock will be free.
  x.mu.AssertReaderHeld();

  // Wake up all the threads.
  x.mu2.Lock();
  x.complete = true;
  x.mu2.Unlock();

  // TODO(delesley): turn on analysis once lock upgrading is supported.
  // (This call upgrades the lock from shared to exclusive.)
  x.mu.Lock();
  x.cond = true;
  x.mu.Await(absl::Condition(&AllDone, &x));
  x.mu.Unlock();

  thread1.join();
  thread2.join();
}
855 #endif // !ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE
856
// Test that we correctly handle the situation when a lock is
// held and then destroyed (w/o unlocking).
#ifdef ABSL_HAVE_THREAD_SANITIZER
// TSAN reports errors when locked Mutexes are destroyed.
TEST(Mutex, DISABLED_LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#else
TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#endif
  for (int i = 0; i != 10; i++) {
    // Create, lock and destroy 10 locks.
    const int kNumLocks = 10;
    auto mu = absl::make_unique<absl::Mutex[]>(kNumLocks);
    for (int j = 0; j != kNumLocks; j++) {
      if ((j % 2) == 0) {
        mu[j].WriterLock();  // even-indexed locks destroyed write-locked
      } else {
        mu[j].ReaderLock();  // odd-indexed locks destroyed read-locked
      }
    }
    // `mu` goes out of scope here, destroying all 10 still-locked Mutexes.
  }
}
878
// Free-function predicates over int* (non-const pointee), used to exercise
// Condition's function-pointer constructors.
bool Equals42(int *n) { return 42 == *n; }
bool Equals43(int *n) { return 43 == *n; }

// The same predicates, but taking a pointer to const.
bool ConstEquals42(const int *n) { return 42 == *n; }
bool ConstEquals43(const int *n) { return 43 == *n; }

// Function templates taking pointers. Note it's possible for `T` to be
// deduced as non-const or const, which creates the potential for ambiguity,
// but which the implementation is careful to avoid.
template <typename T>
bool TemplateEquals42(T *n) {
  return 42 == *n;
}
template <typename T>
bool TemplateEquals43(T *n) {
  return 43 == *n;
}
898
// Condition should accept function pointers across every valid const
// combination of parameter and argument, and reject the invalid one.
TEST(Mutex, FunctionPointerCondition) {
  // Some arguments.
  int x = 42;
  const int const_x = 42;

  // Parameter non-const, argument non-const.
  EXPECT_TRUE(absl::Condition(Equals42, &x).Eval());
  EXPECT_FALSE(absl::Condition(Equals43, &x).Eval());

  // Parameter const, argument non-const.
  EXPECT_TRUE(absl::Condition(ConstEquals42, &x).Eval());
  EXPECT_FALSE(absl::Condition(ConstEquals43, &x).Eval());

  // Parameter const, argument const.
  EXPECT_TRUE(absl::Condition(ConstEquals42, &const_x).Eval());
  EXPECT_FALSE(absl::Condition(ConstEquals43, &const_x).Eval());

  // Parameter type deduced, argument non-const.
  EXPECT_TRUE(absl::Condition(TemplateEquals42, &x).Eval());
  EXPECT_FALSE(absl::Condition(TemplateEquals43, &x).Eval());

  // Parameter type deduced, argument const.
  EXPECT_TRUE(absl::Condition(TemplateEquals42, &const_x).Eval());
  EXPECT_FALSE(absl::Condition(TemplateEquals43, &const_x).Eval());

  // Parameter non-const, argument const is not well-formed.
  EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(Equals42),
                                      decltype(&const_x)>::value));
  // Validate use of is_constructible by contrasting to a well-formed case.
  EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(ConstEquals42),
                                     decltype(&const_x)>::value));
}
931
// Example base and derived class for use in predicates and test below. Not a
// particularly realistic example, but it suffices for testing purposes.
struct Base {
  explicit Base(int v) : value(v) {}
  int value;
};
struct Derived : Base {
  explicit Derived(int v) : Base(v) {}
};

// Predicates taking a pointer to non-const `Base`.
bool BaseEquals42(Base *b) { return 42 == b->value; }
bool BaseEquals43(Base *b) { return 43 == b->value; }

// Predicates taking a pointer to const `Base`.
bool ConstBaseEquals42(const Base *b) { return 42 == b->value; }
bool ConstBaseEquals43(const Base *b) { return 43 == b->value; }
949
// Condition should allow a Derived* argument where the predicate takes a
// Base* (implicit derived-to-base conversion), but never the reverse.
TEST(Mutex, FunctionPointerConditionWithDerivedToBaseConversion) {
  // Some arguments.
  Derived derived(42);
  const Derived const_derived(42);

  // Parameter non-const base, argument derived non-const.
  EXPECT_TRUE(absl::Condition(BaseEquals42, &derived).Eval());
  EXPECT_FALSE(absl::Condition(BaseEquals43, &derived).Eval());

  // Parameter const base, argument derived non-const.
  EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &derived).Eval());
  EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &derived).Eval());

  // Parameter const base, argument derived const.
  EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
  EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());

  // Parameter const base, argument derived const.
  // (NOTE(review): this pair repeats the previous two checks verbatim —
  // looks like a copy-paste duplicate; confirm before removing.)
  EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
  EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());

  // Parameter derived, argument base is not well-formed.
  bool (*derived_pred)(const Derived *) = [](const Derived *) { return true; };
  EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
                                      Base *>::value));
  EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
                                      const Base *>::value));
  // Validate use of is_constructible by contrasting to well-formed cases.
  EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
                                     Derived *>::value));
  EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
                                     const Derived *>::value));
}
983
// A type whose only member is a const method; used below to verify that
// absl::Condition can invoke a const method through a const object pointer.
struct Constable {
  bool WotsAllThisThen() const {
    return true;  // always satisfied; the const qualifier is the point
  }
};
987
// Verifies the Condition(object, pointer-to-member) constructor works with a
// const object and a const member function.
TEST(Mutex, FunctionPointerConditionWithConstMethod) {
  const Constable chapman;
  EXPECT_TRUE(absl::Condition(&chapman, &Constable::WotsAllThisThen).Eval());
}
992
// A functor that accepts any argument list and always returns true.
struct True {
  template <class... Args>
  bool operator()(Args...) const {
    return true;
  }
};

// Inherits operator() from `True`; used to check that absl::Condition finds
// the call operator through a derived type.
struct DerivedTrue : True {};
1001
1002 TEST(Mutex, FunctorCondition) {
1003 { // Variadic
1004 True f;
1005 EXPECT_TRUE(absl::Condition(&f).Eval());
1006 }
1007
1008 { // Inherited
1009 DerivedTrue g;
1010 EXPECT_TRUE(absl::Condition(&g).Eval());
1011 }
1012
1013 { // lambda
1014 int value = 3;
1015 auto is_zero = [&value] { return value == 0; };
1016 absl::Condition c(&is_zero);
1017 EXPECT_FALSE(c.Eval());
1018 value = 0;
1019 EXPECT_TRUE(c.Eval());
1020 }
1021
1022 { // bind
1023 int value = 0;
1024 auto is_positive = std::bind(std::less<int>(), 0, std::cref(value));
1025 absl::Condition c(&is_positive);
1026 EXPECT_FALSE(c.Eval());
1027 value = 1;
1028 EXPECT_TRUE(c.Eval());
1029 }
1030
1031 { // std::function
1032 int value = 3;
1033 std::function<bool()> is_zero = [&value] { return value == 0; };
1034 absl::Condition c(&is_zero);
1035 EXPECT_FALSE(c.Eval());
1036 value = 0;
1037 EXPECT_TRUE(c.Eval());
1038 }
1039 }
1040
1041 TEST(Mutex, ConditionSwap) {
1042 // Ensure that Conditions can be swap'ed.
1043 bool b1 = true;
1044 absl::Condition c1(&b1);
1045 bool b2 = false;
1046 absl::Condition c2(&b2);
1047 EXPECT_TRUE(c1.Eval());
1048 EXPECT_FALSE(c2.Eval());
1049 std::swap(c1, c2);
1050 EXPECT_FALSE(c1.Eval());
1051 EXPECT_TRUE(c2.Eval());
1052 }
1053
1054 // --------------------------------------------------------
1055 // Test for bug with pattern of readers using a condvar. The bug was that if a
1056 // reader went to sleep on a condition variable while one or more other readers
1057 // held the lock, but there were no waiters, the reader count (held in the
1058 // mutex word) would be lost. (This is because Enqueue() had at one time
1059 // always placed the thread on the Mutex queue. Later (CL 4075610), to
1060 // tolerate re-entry into Mutex from a Condition predicate, Enqueue() was
1061 // changed so that it could also place a thread on a condition-variable. This
1062 // introduced the case where Enqueue() returned with an empty queue, and this
1063 // case was handled incorrectly in one place.)
1064
1065 static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv,
1066 int *running) {
1067 std::random_device dev;
1068 std::mt19937 gen(dev());
1069 std::uniform_int_distribution<int> random_millis(0, 15);
1070 mu->ReaderLock();
1071 while (*running == 3) {
1072 absl::SleepFor(absl::Milliseconds(random_millis(gen)));
1073 cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen)));
1074 }
1075 mu->ReaderUnlock();
1076 mu->Lock();
1077 (*running)--;
1078 mu->Unlock();
1079 }
1080
1081 static bool IntIsZero(int *x) { return *x == 0; }
1082
1083 // Test for reader waiting condition variable when there are other readers
1084 // but no waiters.
1085 TEST(Mutex, TestReaderOnCondVar) {
1086 auto tp = CreateDefaultPool();
1087 absl::Mutex mu;
1088 absl::CondVar cv;
1089 int running = 3;
1090 tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
1091 tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running));
1092 absl::SleepFor(absl::Seconds(2));
1093 mu.Lock();
1094 running--;
1095 mu.Await(absl::Condition(&IntIsZero, &running));
1096 mu.Unlock();
1097 }
1098
1099 // --------------------------------------------------------
// Shared state for the AcquireFromCondition test below.
struct AcquireFromConditionStruct {
  absl::Mutex mu0;  // protects value, done
  int value;        // times condition function is called; under mu0
  bool done;        // done with test? under mu0
  absl::Mutex mu1;  // used to attempt to mess up state of mu0
  absl::CondVar cv;  // so the condition function can be invoked from
                     // CondVar::Wait().
};
1108
// Condition predicate for x->mu0 that deliberately acquires an unrelated
// Mutex (x->mu1) as a side effect on its 2nd and 3rd invocations, and
// returns true only on those invocations.
static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
  x->value++;  // count times this function is called

  if (x->value == 2 || x->value == 3) {
    // On the second and third invocation of this function, sleep for 100ms,
    // but with the side-effect of altering the state of a Mutex other than
    // the one for which this is a condition.  The spec now explicitly allows
    // this side effect; previously it was illegal.
    bool always_false = false;
    x->mu1.LockWhenWithTimeout(absl::Condition(&always_false),
                               absl::Milliseconds(100));
    x->mu1.Unlock();
  }
  CHECK_LT(x->value, 4) << "should not be invoked a fourth time";

  // We arrange for the condition to return true on only the 2nd and 3rd calls.
  return x->value == 2 || x->value == 3;
}
1127
// Blocks until ConditionWithAcquire(x) returns true, then records completion
// in x->done.  Runs on a pool thread in the AcquireFromCondition test.
static void WaitForCond2(AcquireFromConditionStruct *x) {
  // wait for cond0 to become true
  x->mu0.LockWhen(absl::Condition(&ConditionWithAcquire, x));
  x->done = true;
  x->mu0.Unlock();
}
1134
// Test for Condition whose function acquires other Mutexes.
TEST(Mutex, AcquireFromCondition) {
  auto tp = CreateDefaultPool();

  AcquireFromConditionStruct x;
  x.value = 0;
  x.done = false;
  tp->Schedule(
      std::bind(&WaitForCond2, &x));  // run WaitForCond2() in a thread T
  // T will hang because the first invocation of ConditionWithAcquire() will
  // return false.
  absl::SleepFor(absl::Milliseconds(500));  // allow T time to hang

  x.mu0.Lock();
  x.cv.WaitWithTimeout(&x.mu0, absl::Milliseconds(500));  // wake T
  // T will be woken because the Wait() will call ConditionWithAcquire()
  // for the second time, and it will return true.

  x.mu0.Unlock();

  // T will then acquire the lock and recheck its own condition.
  // It will find the condition true, as this is the third invocation,
  // but the use of another Mutex by the calling function will
  // cause the old mutex implementation to think that the outer
  // LockWhen() has timed out because the inner LockWhenWithTimeout() did.
  // T will then check the condition a fourth time because it finds a
  // timeout occurred.  This should not happen in the new
  // implementation that allows the Condition function to use Mutexes.

  // It should also succeed, even though the Condition function
  // is being invoked from CondVar::Wait, and thus this thread
  // is conceptually waiting both on the condition variable, and on mu1.

  x.mu0.LockWhen(absl::Condition(&x.done));
  x.mu0.Unlock();
}
1171
// Exercises deadlock-detector bookkeeping: ForgetDeadlockInfo() may be called
// with the lock held, and the mutexes can be reacquired afterwards without a
// (spurious) deadlock report.  Lock order is consistent throughout, so kAbort
// mode must not fire.
TEST(Mutex, DeadlockDetector) {
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);

  // check that we can call ForgetDeadlockInfo() on a lock with the lock held
  absl::Mutex m1;
  absl::Mutex m2;
  absl::Mutex m3;
  absl::Mutex m4;

  m1.Lock();  // m1 gets ID1
  m2.Lock();  // m2 gets ID2
  m3.Lock();  // m3 gets ID3
  m3.Unlock();
  m2.Unlock();
  // m1 still held
  m1.ForgetDeadlockInfo();  // m1 loses ID
  m2.Lock();                // m2 gets ID2
  m3.Lock();                // m3 gets ID3
  m4.Lock();                // m4 gets ID4
  m3.Unlock();
  m2.Unlock();
  m4.Unlock();
  m1.Unlock();
}
1196
// Bazel has a test "warning" file that programs can write to if the
// test should pass with a warning. This class disables the warning
// file until it goes out of scope, by clearing the environment variable
// naming it and restoring the variable in the destructor.
class ScopedDisableBazelTestWarnings {
 public:
  ScopedDisableBazelTestWarnings() {
#ifdef _WIN32
    char file[MAX_PATH];
    DWORD len = GetEnvironmentVariableA(kVarName, file, sizeof(file));
    // GetEnvironmentVariableA returns 0 when the variable is unset (or the
    // lookup fails); in that case `file` is uninitialized and must not be
    // read.  The previous check (`< sizeof(file)` alone) read it anyway.
    if (len > 0 && len < sizeof(file)) {
      warnings_output_file_ = file;
      SetEnvironmentVariableA(kVarName, nullptr);
    }
#else
    const char *file = getenv(kVarName);
    if (file != nullptr) {
      warnings_output_file_ = file;
      unsetenv(kVarName);
    }
#endif
  }

  ~ScopedDisableBazelTestWarnings() {
    if (!warnings_output_file_.empty()) {
#ifdef _WIN32
      SetEnvironmentVariableA(kVarName, warnings_output_file_.c_str());
#else
      // Overwrite flag is 0: do not clobber a value set while we were active.
      setenv(kVarName, warnings_output_file_.c_str(), 0);
#endif
    }
  }

 private:
  static const char kVarName[];
  std::string warnings_output_file_;
};
const char ScopedDisableBazelTestWarnings::kVarName[] =
    "TEST_WARNINGS_OUTPUT_FILE";
1234
#ifdef ABSL_HAVE_THREAD_SANITIZER
// This test intentionally creates deadlocks to test the deadlock detector.
TEST(Mutex, DISABLED_DeadlockDetectorBazelWarning) {
#else
TEST(Mutex, DeadlockDetectorBazelWarning) {
#endif
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);

  // Cause deadlock detection to detect something, if it's
  // compiled in and enabled. But turn off the bazel warning.
  ScopedDisableBazelTestWarnings disable_bazel_test_warnings;

  // Establish lock order mu0 -> mu1, then deliberately violate it below.
  absl::Mutex mu0;
  absl::Mutex mu1;
  bool got_mu0 = mu0.TryLock();
  mu1.Lock();  // acquire mu1 while holding mu0
  if (got_mu0) {
    mu0.Unlock();
  }
  if (mu0.TryLock()) {  // try lock shouldn't cause deadlock detector to fire
    mu0.Unlock();
  }
  mu0.Lock();  // acquire mu0 while holding mu1; should get one deadlock
               // report here
  mu0.Unlock();
  mu1.Unlock();

  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
1264
1265 TEST(Mutex, DeadlockDetectorLongCycle) {
1266 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
1267
1268 // This test generates a warning if it passes, and crashes otherwise.
1269 // Cause bazel to ignore the warning.
1270 ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
1271
1272 // Check that we survive a deadlock with a lock cycle.
1273 std::vector<absl::Mutex> mutex(100);
1274 for (size_t i = 0; i != mutex.size(); i++) {
1275 mutex[i].Lock();
1276 mutex[(i + 1) % mutex.size()].Lock();
1277 mutex[i].Unlock();
1278 mutex[(i + 1) % mutex.size()].Unlock();
1279 }
1280
1281 absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
1282 }
1283
// This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
// annotation-based static thread-safety analysis is not currently
// predicate-aware and cannot tell if the two for-loops that acquire and
// release the locks have the same predicates.
TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
  // Stress test: Here we create a large number of locks and use all of them.
  // If a deadlock detector keeps a full graph of lock acquisition order,
  // it will likely be too slow for this test to pass.
  const int n_locks = 1 << 17;
  auto array_of_locks = absl::make_unique<absl::Mutex[]>(n_locks);
  // Acquisition pattern: a sliding window of up to 5 locks, always taken in
  // ascending index order, so no deadlock is ever actually possible.
  for (int i = 0; i < n_locks; i++) {
    int end = std::min(n_locks, i + 5);
    // acquire and then release locks i, i+1, ..., i+4
    for (int j = i; j < end; j++) {
      array_of_locks[j].Lock();
    }
    for (int j = i; j < end; j++) {
      array_of_locks[j].Unlock();
    }
  }
}
1305
#ifdef ABSL_HAVE_THREAD_SANITIZER
// TSAN reports errors when locked Mutexes are destroyed.
TEST(Mutex, DISABLED_DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#else
TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
#endif
  // Test a scenario where a cached deadlock graph node id in the
  // list of held locks is not invalidated when the corresponding
  // mutex is deleted.
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
  // Mutex that will be destroyed while being held.  Raw new/delete is
  // intentional here: the test needs to control the exact destruction point.
  absl::Mutex *a = new absl::Mutex;
  // Other mutexes needed by test
  absl::Mutex b, c;

  // Hold mutex.
  a->Lock();

  // Force deadlock id assignment by acquiring another lock.
  b.Lock();
  b.Unlock();

  // Delete the mutex. The Mutex destructor tries to remove held locks,
  // but the attempt isn't foolproof.  It can fail if:
  // (a) Deadlock detection is currently disabled.
  // (b) The destruction is from another thread.
  // We exploit (a) by temporarily disabling deadlock detection.
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kIgnore);
  delete a;
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);

  // Now acquire another lock which will force a deadlock id assignment.
  // We should end up getting assigned the same deadlock id that was
  // freed up when "a" was deleted, which will cause a spurious deadlock
  // report if the held lock entry for "a" was not invalidated.
  c.Lock();
  c.Unlock();
}
1344
1345 // --------------------------------------------------------
1346 // Test for timeouts/deadlines on condition waits that are specified using
1347 // absl::Duration and absl::Time. For each waiting function we test with
1348 // a timeout/deadline that has already expired/passed, one that is infinite
1349 // and so never expires/passes, and one that will expire/pass in the near
1350 // future.
1351
// The scheduling-delay slop that DelayIsWithinBounds() tolerates for
// positive expected delays.
static absl::Duration TimeoutTestAllowedSchedulingDelay() {
  // Note: we use a function here because Microsoft Visual Studio fails to
  // properly initialize constexpr static absl::Duration variables.
  return absl::Milliseconds(150);
}
1357
// Returns true if `actual_delay` is close enough to `expected_delay` to pass
// the timeouts/deadlines test. Otherwise, logs warnings and returns false.
ABSL_MUST_USE_RESULT
static bool DelayIsWithinBounds(absl::Duration expected_delay,
                                absl::Duration actual_delay) {
  bool pass = true;
  // Do not allow the observed delay to be less than expected. This may occur
  // in practice due to clock skew or when the synchronization primitives use a
  // different clock than absl::Now(), but these cases should be handled by
  // the retry mechanism in each TimeoutTest.
  if (actual_delay < expected_delay) {
    LOG(WARNING) << "Actual delay " << actual_delay
                 << " was too short, expected " << expected_delay
                 << " (difference " << actual_delay - expected_delay << ")";
    pass = false;
  }
  // If the expected delay is <= zero then allow a small error tolerance, since
  // we do not expect context switches to occur during test execution.
  // Otherwise, thread scheduling delays may be substantial in rare cases, so
  // tolerate up to TimeoutTestAllowedSchedulingDelay() of error.
  absl::Duration tolerance = expected_delay <= absl::ZeroDuration()
                                 ? absl::Milliseconds(10)
                                 : TimeoutTestAllowedSchedulingDelay();
  if (actual_delay > expected_delay + tolerance) {
    LOG(WARNING) << "Actual delay " << actual_delay
                 << " was too long, expected " << expected_delay
                 << " (difference " << actual_delay - expected_delay << ")";
    pass = false;
  }
  return pass;
}
1389
// Parameters for TimeoutTest, below.
struct TimeoutTestParam {
  // The file and line number (used for logging purposes only).
  const char *from_file;
  int from_line;

  // Should the absolute deadline API based on absl::Time be tested? If false,
  // the relative deadline API based on absl::Duration is tested.
  bool use_absolute_deadline;

  // The deadline/timeout used when calling the API being tested
  // (e.g. Mutex::LockWhenWithDeadline).  A negative value means the deadline
  // is already in the past; an infinite value means it never expires.
  absl::Duration wait_timeout;

  // The delay before the condition will be set true by the test code. If zero
  // or negative, the condition is set true immediately (before calling the API
  // being tested). Otherwise, if infinite, the condition is never set true.
  // Otherwise a closure is scheduled for the future that sets the condition
  // true.
  absl::Duration satisfy_condition_delay;

  // The expected result of the condition after the call to the API being
  // tested. Generally `true` means the condition was true when the API returns,
  // `false` indicates an expected timeout.
  bool expected_result;

  // The expected delay before the API under test returns. This is inherently
  // flaky, so some slop is allowed (see `DelayIsWithinBounds` above), and the
  // test keeps trying indefinitely until this constraint passes.
  absl::Duration expected_delay;
};
1421
1422 // Print a `TimeoutTestParam` to a debug log.
1423 std::ostream &operator<<(std::ostream &os, const TimeoutTestParam ¶m) {
1424 return os << "from: " << param.from_file << ":" << param.from_line
1425 << " use_absolute_deadline: "
1426 << (param.use_absolute_deadline ? "true" : "false")
1427 << " wait_timeout: " << param.wait_timeout
1428 << " satisfy_condition_delay: " << param.satisfy_condition_delay
1429 << " expected_result: "
1430 << (param.expected_result ? "true" : "false")
1431 << " expected_delay: " << param.expected_delay;
1432 }
1433
1434 // Like `thread::Executor::ScheduleAt` except:
1435 // a) Delays zero or negative are executed immediately in the current thread.
1436 // b) Infinite delays are never scheduled.
1437 // c) Calls this test's `ScheduleAt` helper instead of using `pool` directly.
1438 static void RunAfterDelay(absl::Duration delay,
1439 absl::synchronization_internal::ThreadPool *pool,
1440 const std::function<void()> &callback) {
1441 if (delay <= absl::ZeroDuration()) {
1442 callback(); // immediate
1443 } else if (delay != absl::InfiniteDuration()) {
1444 ScheduleAfter(pool, delay, callback);
1445 }
1446 }
1447
// Fixture for the value-parameterized timeout/deadline tests below.
class TimeoutTest : public ::testing::Test,
                    public ::testing::WithParamInterface<TimeoutTestParam> {};
1450
1451 std::vector<TimeoutTestParam> MakeTimeoutTestParamValues() {
1452 // The `finite` delay is a finite, relatively short, delay. We make it larger
1453 // than our allowed scheduling delay (slop factor) to avoid confusion when
1454 // diagnosing test failures. The other constants here have clear meanings.
1455 const absl::Duration finite = 3 * TimeoutTestAllowedSchedulingDelay();
1456 const absl::Duration never = absl::InfiniteDuration();
1457 const absl::Duration negative = -absl::InfiniteDuration();
1458 const absl::Duration immediate = absl::ZeroDuration();
1459
1460 // Every test case is run twice; once using the absolute deadline API and once
1461 // using the relative timeout API.
1462 std::vector<TimeoutTestParam> values;
1463 for (bool use_absolute_deadline : {false, true}) {
1464 // Tests with a negative timeout (deadline in the past), which should
1465 // immediately return current state of the condition.
1466
1467 // The condition is already true:
1468 values.push_back(TimeoutTestParam{
1469 __FILE__, __LINE__, use_absolute_deadline,
1470 negative, // wait_timeout
1471 immediate, // satisfy_condition_delay
1472 true, // expected_result
1473 immediate, // expected_delay
1474 });
1475
1476 // The condition becomes true, but the timeout has already expired:
1477 values.push_back(TimeoutTestParam{
1478 __FILE__, __LINE__, use_absolute_deadline,
1479 negative, // wait_timeout
1480 finite, // satisfy_condition_delay
1481 false, // expected_result
1482 immediate // expected_delay
1483 });
1484
1485 // The condition never becomes true:
1486 values.push_back(TimeoutTestParam{
1487 __FILE__, __LINE__, use_absolute_deadline,
1488 negative, // wait_timeout
1489 never, // satisfy_condition_delay
1490 false, // expected_result
1491 immediate // expected_delay
1492 });
1493
1494 // Tests with an infinite timeout (deadline in the infinite future), which
1495 // should only return when the condition becomes true.
1496
1497 // The condition is already true:
1498 values.push_back(TimeoutTestParam{
1499 __FILE__, __LINE__, use_absolute_deadline,
1500 never, // wait_timeout
1501 immediate, // satisfy_condition_delay
1502 true, // expected_result
1503 immediate // expected_delay
1504 });
1505
1506 // The condition becomes true before the (infinite) expiry:
1507 values.push_back(TimeoutTestParam{
1508 __FILE__, __LINE__, use_absolute_deadline,
1509 never, // wait_timeout
1510 finite, // satisfy_condition_delay
1511 true, // expected_result
1512 finite, // expected_delay
1513 });
1514
1515 // Tests with a (small) finite timeout (deadline soon), with the condition
1516 // becoming true both before and after its expiry.
1517
1518 // The condition is already true:
1519 values.push_back(TimeoutTestParam{
1520 __FILE__, __LINE__, use_absolute_deadline,
1521 never, // wait_timeout
1522 immediate, // satisfy_condition_delay
1523 true, // expected_result
1524 immediate // expected_delay
1525 });
1526
1527 // The condition becomes true before the expiry:
1528 values.push_back(TimeoutTestParam{
1529 __FILE__, __LINE__, use_absolute_deadline,
1530 finite * 2, // wait_timeout
1531 finite, // satisfy_condition_delay
1532 true, // expected_result
1533 finite // expected_delay
1534 });
1535
1536 // The condition becomes true, but the timeout has already expired:
1537 values.push_back(TimeoutTestParam{
1538 __FILE__, __LINE__, use_absolute_deadline,
1539 finite, // wait_timeout
1540 finite * 2, // satisfy_condition_delay
1541 false, // expected_result
1542 finite // expected_delay
1543 });
1544
1545 // The condition never becomes true:
1546 values.push_back(TimeoutTestParam{
1547 __FILE__, __LINE__, use_absolute_deadline,
1548 finite, // wait_timeout
1549 never, // satisfy_condition_delay
1550 false, // expected_result
1551 finite // expected_delay
1552 });
1553 }
1554 return values;
1555 }
1556
// Instantiate `TimeoutTest` for every parameter set produced by
// `MakeTimeoutTestParamValues()`.
INSTANTIATE_TEST_SUITE_P(All, TimeoutTest,
                         testing::ValuesIn(MakeTimeoutTestParamValues()));
1560
// Exercises Mutex::AwaitWithDeadline / AwaitWithTimeout against the
// parameterized timeout/condition combinations.
TEST_P(TimeoutTest, Await) {
  const TimeoutTestParam params = GetParam();
  LOG(INFO) << "Params: " << params;

  // Because this test asserts bounds on scheduling delays it is flaky. To
  // compensate it loops forever until it passes. Failures express as test
  // timeouts, in which case the test log can be used to diagnose the issue.
  for (int attempt = 1;; ++attempt) {
    LOG(INFO) << "Attempt " << attempt;

    absl::Mutex mu;
    bool value = false;  // condition value (under mu)

    // Arrange for `value` to become true per the test parameters.
    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
        CreateDefaultPool();
    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
      absl::MutexLock l(&mu);
      value = true;
    });

    absl::MutexLock lock(&mu);
    absl::Time start_time = absl::Now();
    absl::Condition cond(&value);
    bool result =
        params.use_absolute_deadline
            ? mu.AwaitWithDeadline(cond, start_time + params.wait_timeout)
            : mu.AwaitWithTimeout(cond, params.wait_timeout);
    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
      EXPECT_EQ(params.expected_result, result);
      break;
    }
  }
}
1594
// Exercises Mutex::LockWhenWithDeadline / LockWhenWithTimeout against the
// parameterized timeout/condition combinations.
TEST_P(TimeoutTest, LockWhen) {
  const TimeoutTestParam params = GetParam();
  LOG(INFO) << "Params: " << params;

  // Because this test asserts bounds on scheduling delays it is flaky. To
  // compensate it loops forever until it passes. Failures express as test
  // timeouts, in which case the test log can be used to diagnose the issue.
  for (int attempt = 1;; ++attempt) {
    LOG(INFO) << "Attempt " << attempt;

    absl::Mutex mu;
    bool value = false;  // condition value (under mu)

    // Arrange for `value` to become true per the test parameters.
    std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
        CreateDefaultPool();
    RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
      absl::MutexLock l(&mu);
      value = true;
    });

    absl::Time start_time = absl::Now();
    absl::Condition cond(&value);
    // Both branches return holding the lock, whatever the result.
    bool result =
        params.use_absolute_deadline
            ? mu.LockWhenWithDeadline(cond, start_time + params.wait_timeout)
            : mu.LockWhenWithTimeout(cond, params.wait_timeout);
    mu.Unlock();

    if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
      EXPECT_EQ(params.expected_result, result);
      break;
    }
  }
}
1629
1630 TEST_P(TimeoutTest, ReaderLockWhen) {
1631 const TimeoutTestParam params = GetParam();
1632 LOG(INFO) << "Params: " << params;
1633
1634 // Because this test asserts bounds on scheduling delays it is flaky. To
1635 // compensate it loops forever until it passes. Failures express as test
1636 // timeouts, in which case the test log can be used to diagnose the issue.
1637 for (int attempt = 0;; ++attempt) {
1638 LOG(INFO) << "Attempt " << attempt;
1639
1640 absl::Mutex mu;
1641 bool value = false; // condition value (under mu)
1642
1643 std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
1644 CreateDefaultPool();
1645 RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
1646 absl::MutexLock l(&mu);
1647 value = true;
1648 });
1649
1650 absl::Time start_time = absl::Now();
1651 bool result =
1652 params.use_absolute_deadline
1653 ? mu.ReaderLockWhenWithDeadline(absl::Condition(&value),
1654 start_time + params.wait_timeout)
1655 : mu.ReaderLockWhenWithTimeout(absl::Condition(&value),
1656 params.wait_timeout);
1657 mu.ReaderUnlock();
1658
1659 if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
1660 EXPECT_EQ(params.expected_result, result);
1661 break;
1662 }
1663 }
1664 }
1665
1666 TEST_P(TimeoutTest, Wait) {
1667 const TimeoutTestParam params = GetParam();
1668 LOG(INFO) << "Params: " << params;
1669
1670 // Because this test asserts bounds on scheduling delays it is flaky. To
1671 // compensate it loops forever until it passes. Failures express as test
1672 // timeouts, in which case the test log can be used to diagnose the issue.
1673 for (int attempt = 0;; ++attempt) {
1674 LOG(INFO) << "Attempt " << attempt;
1675
1676 absl::Mutex mu;
1677 bool value = false; // condition value (under mu)
1678 absl::CondVar cv; // signals a change of `value`
1679
1680 std::unique_ptr<absl::synchronization_internal::ThreadPool> pool =
1681 CreateDefaultPool();
1682 RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] {
1683 absl::MutexLock l(&mu);
1684 value = true;
1685 cv.Signal();
1686 });
1687
1688 absl::MutexLock lock(&mu);
1689 absl::Time start_time = absl::Now();
1690 absl::Duration timeout = params.wait_timeout;
1691 absl::Time deadline = start_time + timeout;
1692 while (!value) {
1693 if (params.use_absolute_deadline ? cv.WaitWithDeadline(&mu, deadline)
1694 : cv.WaitWithTimeout(&mu, timeout)) {
1695 break; // deadline/timeout exceeded
1696 }
1697 timeout = deadline - absl::Now(); // recompute
1698 }
1699 bool result = value; // note: `mu` is still held
1700
1701 if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) {
1702 EXPECT_EQ(params.expected_result, result);
1703 break;
1704 }
1705 }
1706 }
1707
TEST(Mutex, Logging) {
  // Allow user to look at logging output.  Exercises each logged operation
  // (lock/unlock, reader lock, cv wait/signal) on debug-logged objects; there
  // are no assertions — the test passes if nothing crashes.
  absl::Mutex logged_mutex;
  logged_mutex.EnableDebugLog("fido_mutex");
  absl::CondVar logged_cv;
  logged_cv.EnableDebugLog("rover_cv");
  logged_mutex.Lock();
  logged_cv.WaitWithTimeout(&logged_mutex, absl::Milliseconds(20));
  logged_mutex.Unlock();
  logged_mutex.ReaderLock();
  logged_mutex.ReaderUnlock();
  logged_mutex.Lock();
  logged_mutex.Unlock();
  logged_cv.Signal();
  logged_cv.SignalAll();
}
1724
TEST(Mutex, LoggingAddressReuse) {
  // Repeatedly re-create a Mutex with debug logging at the same address.
  // The invariant callback checks the alive flag registered for the current
  // incarnation; a stale SynchEvent from a prior incarnation at the same
  // address would observe a false flag.
  ScopedInvariantDebugging scoped_debugging;
  alignas(absl::Mutex) char storage[sizeof(absl::Mutex)];
  auto invariant =
      +[](void *alive) { EXPECT_TRUE(*static_cast<bool *>(alive)); };
  constexpr size_t kIters = 10;
  bool alive[kIters] = {};
  for (size_t i = 0; i < kIters; ++i) {
    absl::Mutex *mu = new (storage) absl::Mutex;
    alive[i] = true;
    mu->EnableDebugLog("Mutex");
    mu->EnableInvariantDebugging(invariant, &alive[i]);
    mu->Lock();
    mu->Unlock();
    mu->~Mutex();
    alive[i] = false;
  }
}
1744
// NOTE(review): the test name has a typo ("Bankrupcy" for "Bankruptcy"); it
// is kept as-is because renaming a test breaks --gtest_filter invocations.
TEST(Mutex, LoggingBankrupcy) {
  // Test the case with too many live Mutexes with debug logging.
  ScopedInvariantDebugging scoped_debugging;
  std::vector<absl::Mutex> mus(1 << 20);
  for (auto &mu : mus) {
    mu.EnableDebugLog("Mutex");
  }
}
1753
TEST(Mutex, SynchEventRace) {
  // Regression test for a false TSan race report in
  // EnableInvariantDebugging/EnableDebugLog related to SynchEvent reuse.
  // Five threads each churn through many short-lived debug-enabled Mutexes.
  ScopedInvariantDebugging scoped_debugging;
  std::vector<std::thread> threads;
  for (size_t i = 0; i < 5; i++) {
    threads.emplace_back([&] {
      for (size_t j = 0; j < (1 << 17); j++) {
        {
          absl::Mutex mu;
          mu.EnableInvariantDebugging([](void *) {}, nullptr);
          mu.Lock();
          mu.Unlock();
        }
        {
          absl::Mutex mu;
          mu.EnableDebugLog("Mutex");
        }
      }
    });
  }
  for (auto &thread : threads) {
    thread.join();
  }
}
1779
1780 // --------------------------------------------------------
1781
1782 // Generate the vector of thread counts for tests parameterized on thread count.
1783 static std::vector<int> AllThreadCountValues() {
1784 if (kExtendedTest) {
1785 return {2, 4, 8, 10, 16, 20, 24, 30, 32};
1786 }
1787 return {2, 4, 10};
1788 }
1789
// A test fixture parameterized by thread count; GetParam() yields one of the
// values from AllThreadCountValues().
class MutexVariableThreadCountTest : public ::testing::TestWithParam<int> {};
1792
// Instantiate the above with AllThreadCountValues().
INSTANTIATE_TEST_SUITE_P(ThreadCounts, MutexVariableThreadCountTest,
                         ::testing::ValuesIn(AllThreadCountValues()),
                         ::testing::PrintToStringParamName());
1797
// Reduces iterations by some factor for slow platforms
// (determined empirically).
static int ScaleIterations(int x) {
  // ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE is set in the implementation
  // of Mutex that uses either std::mutex or pthread_mutex_t. Use
  // these as keys to determine the slow implementation.
#if defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE)
  constexpr int kSlowdownFactor = 10;
#else
  constexpr int kSlowdownFactor = 1;
#endif
  return x / kSlowdownFactor;
}
1810
// Stress-tests plain Lock/Unlock across the parameterized thread count.
TEST_P(MutexVariableThreadCountTest, Mutex) {
  int threads = GetParam();
  int iterations = ScaleIterations(10000000) / threads;
  int operations = threads * iterations;
  EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  // Invariant debugging is slow, so cap iterations for the instrumented run.
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
                                          operations, CheckSumG0G1),
            operations);
#endif
}
1824
// Stress-tests TryLock across the parameterized thread count.
TEST_P(MutexVariableThreadCountTest, Try) {
  int threads = GetParam();
  int iterations = 1000000 / threads;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
  // Invariant debugging is slow, so cap iterations for the instrumented run.
  iterations = std::min(iterations, 10);
  operations = threads * iterations;
  EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
                                          operations, CheckSumG0G1),
            operations);
#endif
}
1838
// Runs the TestR20ms scenario across the parameterized thread count.
// The expected final value is 0 — presumably TestR20ms leaves its shared
// counter unchanged; see its definition elsewhere in this file.
TEST_P(MutexVariableThreadCountTest, R20ms) {
  int threads = GetParam();
  int iterations = 100;
  int operations = iterations * threads;
  EXPECT_EQ(RunTest(&TestR20ms, threads, iterations, operations), 0);
}
1845
1846 TEST_P(MutexVariableThreadCountTest, RW) {
1847 int threads = GetParam();
1848 int iterations = ScaleIterations(20000000) / threads;
1849 int operations = iterations * threads;
1850 EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
1851 #if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
1852 iterations = std::min(iterations, 10);
1853 operations = threads * iterations;
1854 EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
1855 operations, CheckSumG0G1),
1856 operations / 2);
1857 #endif
1858 }
1859
1860 TEST_P(MutexVariableThreadCountTest, Await) {
1861 int threads = GetParam();
1862 int iterations = ScaleIterations(500000);
1863 int operations = iterations;
1864 EXPECT_EQ(RunTest(&TestAwait, threads, iterations, operations), operations);
1865 }
1866
1867 TEST_P(MutexVariableThreadCountTest, SignalAll) {
1868 int threads = GetParam();
1869 int iterations = 200000 / threads;
1870 int operations = iterations;
1871 EXPECT_EQ(RunTest(&TestSignalAll, threads, iterations, operations),
1872 operations);
1873 }
1874
1875 TEST(Mutex, Signal) {
1876 int threads = 2; // TestSignal must use two threads
1877 int iterations = 200000;
1878 int operations = iterations;
1879 EXPECT_EQ(RunTest(&TestSignal, threads, iterations, operations), operations);
1880 }
1881
1882 TEST(Mutex, Timed) {
1883 int threads = 10; // Use a fixed thread count of 10
1884 int iterations = 1000;
1885 int operations = iterations;
1886 EXPECT_EQ(RunTest(&TestCVTimeout, threads, iterations, operations),
1887 operations);
1888 }
1889
1890 TEST(Mutex, CVTime) {
1891 int threads = 10; // Use a fixed thread count of 10
1892 int iterations = 1;
1893 EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), threads * iterations);
1894 }
1895
1896 TEST(Mutex, MuTime) {
1897 int threads = 10; // Use a fixed thread count of 10
1898 int iterations = 1;
1899 EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations);
1900 }
1901
1902 TEST(Mutex, SignalExitedThread) {
1903 // The test may expose a race when Mutex::Unlock signals a thread
1904 // that has already exited.
1905 #if defined(__wasm__) || defined(__asmjs__)
1906 constexpr int kThreads = 1; // OOMs under WASM
1907 #else
1908 constexpr int kThreads = 100;
1909 #endif
1910 std::vector<std::thread> top;
1911 for (unsigned i = 0; i < 2 * std::thread::hardware_concurrency(); i++) {
1912 top.emplace_back([&]() {
1913 for (int i = 0; i < kThreads; i++) {
1914 absl::Mutex mu;
1915 std::thread t([&]() {
1916 mu.Lock();
1917 mu.Unlock();
1918 });
1919 mu.Lock();
1920 mu.Unlock();
1921 t.join();
1922 }
1923 });
1924 }
1925 for (auto &th : top) th.join();
1926 }
1927
1928 TEST(Mutex, WriterPriority) {
1929 absl::Mutex mu;
1930 bool wrote = false;
1931 std::atomic<bool> saw_wrote{false};
1932 auto readfunc = [&]() {
1933 for (size_t i = 0; i < 10; ++i) {
1934 absl::ReaderMutexLock lock(&mu);
1935 if (wrote) {
1936 saw_wrote = true;
1937 break;
1938 }
1939 absl::SleepFor(absl::Seconds(1));
1940 }
1941 };
1942 std::thread t1(readfunc);
1943 absl::SleepFor(absl::Milliseconds(500));
1944 std::thread t2(readfunc);
1945 // Note: this test guards against a bug that was related to an uninit
1946 // PerThreadSynch::priority, so the writer intentionally runs on a new thread.
1947 std::thread t3([&]() {
1948 // The writer should be able squeeze between the two alternating readers.
1949 absl::MutexLock lock(&mu);
1950 wrote = true;
1951 });
1952 t1.join();
1953 t2.join();
1954 t3.join();
1955 EXPECT_TRUE(saw_wrote.load());
1956 }
1957
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
TEST(Mutex, CondVarPriority) {
  // A regression test for a bug in condition variable wait morphing,
  // which resulted in the waiting thread getting priority of the waking thread.
  int err = 0;
  sched_param param;
  param.sched_priority = 7;
  // Probe on a throwaway thread whether we can raise scheduling priority;
  // if not, the rest of the test is meaningless.
  std::thread test([&]() {
    err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  });
  test.join();
  if (err) {
    // Setting priority usually requires special privileges.
    GTEST_SKIP() << "failed to set priority: " << strerror(err);
  }
  absl::Mutex mu;
  absl::CondVar cv;
  bool locked = false;
  bool notified = false;
  bool waiting = false;
  bool morph = false;
  std::thread th([&]() {
    // This thread runs at elevated priority; the main thread does not.
    EXPECT_EQ(0, pthread_setschedparam(pthread_self(), SCHED_FIFO, &param));
    mu.Lock();
    locked = true;
    mu.Await(absl::Condition(&notified));
    mu.Unlock();
    // After the Await above, this thread's own recorded priority should
    // match the elevated priority we set.
    EXPECT_EQ(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
                  ->per_thread_synch.priority,
              param.sched_priority);
    mu.Lock();
    mu.Await(absl::Condition(&waiting));
    morph = true;
    absl::SleepFor(absl::Seconds(1));
    cv.Signal();
    mu.Unlock();
  });
  mu.Lock();
  mu.Await(absl::Condition(&locked));
  notified = true;
  mu.Unlock();
  mu.Lock();
  waiting = true;
  while (!morph) {
    cv.Wait(&mu);
  }
  mu.Unlock();
  th.join();
  // The main thread waited on cv and was signaled by the high-priority
  // thread; its recorded priority must NOT have morphed into the waker's.
  EXPECT_NE(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()
                ->per_thread_synch.priority,
            param.sched_priority);
}
#endif
2011
2012 TEST(Mutex, LockWhenWithTimeoutResult) {
2013 // Check various corner cases for Await/LockWhen return value
2014 // with always true/always false conditions.
2015 absl::Mutex mu;
2016 const bool kAlwaysTrue = true, kAlwaysFalse = false;
2017 const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
2018 EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
2019 mu.Unlock();
2020 EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
2021 EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
2022 EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
2023 std::thread th1([&]() {
2024 EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
2025 mu.Unlock();
2026 });
2027 std::thread th2([&]() {
2028 EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
2029 mu.Unlock();
2030 });
2031 absl::SleepFor(absl::Milliseconds(100));
2032 mu.Unlock();
2033 th1.join();
2034 th2.join();
2035 }
2036
2037 } // namespace
2038