/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <future>
#include <vector>

#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/scopeguard.h>
#include <android-base/silent_death_test.h>
#include <android-base/strings.h>
#include <android-base/test_utils.h>

#include "private/bionic_constants.h"
#include "private/bionic_time_conversions.h"
#include "SignalUtils.h"
#include "utils.h"

using pthread_DeathTest = SilentDeathTest;

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = android::base::make_scope_guard([&keys] {
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, nullptr)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // gtest uses pthread keys, so the number of keys we can allocate here is
  // less than PTHREAD_KEYS_MAX.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, nullptr);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns nullptr.
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  AssertChildExited(pid, 99);

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));

  size_t stack_size = 640 * 1024;
  void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
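  // Fill the stack with 0xff so that, if the key's slot were mistakenly read
  // from this dirty stack memory, pthread_getspecific would return ~0 rather
  // than nullptr.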
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* FnWithStackFrame(void*) {
  int x;
  *const_cast<volatile int*>(&x) = 1;
  return nullptr;
}

TEST(pthread, pthread_heap_allocated_stack) {
  SKIP_WITH_HWASAN; // TODO(b/148982147): Re-enable when fixed.

  size_t stack_size = 640 * 1024;
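  // The stack passed to pthread_attr_setstack must be suitably aligned, so
  // allocate it with page alignment.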
  std::unique_ptr<char[]> stack(new (std::align_val_t(getpagesize())) char[stack_size]);
  memset(stack.get(), '\xff', stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack.get(), stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, FnWithStackFrame, nullptr));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So this tests whether the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

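// Keeps a thread spinning on a shared flag, so tests can hold a thread alive
// until they explicitly call UnSpin() (or the helper is destroyed).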
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  ~SpinFunctionHelper() {
    UnSpin();
  }

  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return nullptr;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter if spin_flag_ is shared between several tests, because it
// is always set to false after each test. Any thread looping on spin_flag_
// will see it become false at some point.
std::atomic<bool> SpinFunctionHelper::spin_flag_;

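// Joins the thread passed in as the argument and returns pthread_join's result.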
static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), nullptr));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

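// Creates and immediately joins a thread, leaving t as a stale handle to a
// thread that has already exited.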
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
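  // Ask for the largest page-aligned stack size possible; no system can
  // actually allocate that much, so pthread_create should fail with EAGAIN.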
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, nullptr));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), nullptr));
}

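// Scaffolding for http://code.google.com/p/android/issues/detail?id=37410:
// the main thread calls pthread_exit while another thread is joining it. The
// mutex ensures the child thread is running before the main thread exits.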
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, nullptr));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, nullptr, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(nullptr);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Unlocking data->mutex will cause the main thread to exit, invalidating *data. Save the handle.
    pthread_t main_thread = data->main_thread;

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(main_thread, nullptr);

    return nullptr;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.
TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset64_t wait_set;
  sigfillset64(&wait_set);
  return reinterpret_cast<void*>(sigwait64(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, nullptr));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, nullptr));
}

TEST(pthread, pthread_sigmask64_SIGRTMIN) {
  // Check that SIGRTMIN isn't blocked.
  sigset64_t original_set;
  sigemptyset64(&original_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember64(&original_set, SIGRTMIN));

  // Block SIGRTMIN.
  sigset64_t set;
  sigemptyset64(&set);
  sigaddset64(&set, SIGRTMIN);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, &set, nullptr));

  // Check that SIGRTMIN is blocked.
  sigset64_t final_set;
  sigemptyset64(&final_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
  // ...and that sigprocmask64 agrees with pthread_sigmask64.
  sigemptyset64(&final_set);
  ASSERT_EQ(0, sigprocmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));

  // Spawn a thread that calls sigwait64 and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGRTMIN.
  pthread_kill(signal_thread, SIGRTMIN);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGRTMIN, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask64(SIG_SETMASK, &original_set, nullptr));
}

static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
  ASSERT_EQ(0, pthread_setname_np(t, "short"));
  char name[32];
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("short", name);

  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("123456789012345", name);

  ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));

  // The passed-in buffer should be at least 16 bytes.
  ASSERT_EQ(0, pthread_getname_np(t, name, 16));
  ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
}

TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
  test_pthread_setname_np__pthread_getname_np(pthread_self());
}

TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

// http://b/28051133: a kernel misfeature means that you can't change the
// name of another thread if you've set PR_SET_DUMPABLE to 0.
TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);

  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"),
               "invalid pthread_t (.*) passed to pthread_setname_np");
}

TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
}

TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)),
               "invalid pthread_t (.*) passed to pthread_getname_np");
}

TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
  pthread_t null_thread = 0;

  char name[64];
  EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_kill__exited_thread) {
  static std::promise<pid_t> tid_promise;
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              [](void*) -> void* {
                                tid_promise.set_value(gettid());
                                return nullptr;
                              },
                              nullptr));

  pid_t tid = tid_promise.get_future().get();
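  // Wait until the kernel has reaped the thread: probing the tid with signal 0
  // via tgkill fails with ESRCH once the thread is gone.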
  while (TEMP_FAILURE_RETRY(syscall(__NR_tgkill, getpid(), tid, 0)) != -1) {
    continue;
  }
  ASSERT_ERRNO(ESRCH);

  ASSERT_EQ(ESRCH, pthread_kill(thread, 0));
}

TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread),
               "invalid pthread_t (.*) passed to pthread_detach");
}

TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_detach(null_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c),
               "invalid pthread_t (.*) passed to pthread_getcpuclockid");
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
  pthread_t null_thread = 0;
  clockid_t c;
  EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
}

TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param),
               "invalid pthread_t (.*) passed to pthread_getschedparam");
}

TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param),
               "invalid pthread_t (.*) passed to pthread_setschedparam");
}

TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy = 0;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedprio__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setschedprio(dead_thread, 123),
               "invalid pthread_t (.*) passed to pthread_setschedprio");
}

TEST_F(pthread_DeathTest, pthread_setschedprio__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_setschedprio(null_thread, 123));
}

TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, nullptr),
               "invalid pthread_t (.*) passed to pthread_join");
}

TEST_F(pthread_DeathTest, pthread_join__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_join(null_thread, nullptr));
}

TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_kill(dead_thread, 0),
               "invalid pthread_t (.*) passed to pthread_kill");
}

TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, nullptr));
    ASSERT_EQ(0, pthread_join(t, nullptr));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

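// These helpers create a thread with the given attributes and report back, via
// pthread_getattr_np, the guard/stack size that pthread_create actually used.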
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

TEST(pthread, pthread_attr_setguardsize_tiny) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(static_cast<unsigned long>(getpagesize()), GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_reasonable) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);
  ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough but not a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
  ASSERT_EQ(roundup(32 * 1024U + 1, getpagesize()), GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_enormous) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Larger than the stack itself. (Historically we mistakenly carved
  // the guard out of the stack itself, rather than adding it after the
  // end.)
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024*1024U, guard_size);
  ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

#if !defined(ANDROID_HOST_MUSL)
  // musl doesn't have pthread_rwlockattr_setkind_np
  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }
#endif

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}

TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, nullptr));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, nullptr));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

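// Shared state for the rwlock wakeup tests: the helper thread advances
// progress from LOCK_INITIALIZED to LOCK_WAITING before blocking, and the main
// thread sets LOCK_RELEASED just before unlocking so the helper can verify it
// was woken by the unlock.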
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
  clockid_t clock;
};

static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedwrlock(lock, &ts);
  });
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedwrlock_monotonic_np(lock, &ts); });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer_clockwait) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockwrlock(lock, CLOCK_MONOTONIC, &ts);
  });

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, &ts);
  });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedrdlock(lock, &ts);
  });
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedrdlock_monotonic_np(lock, &ts); });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_clockwait) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_MONOTONIC, &ts);
  });

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, &ts);
  });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

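// Checks that a blocked timed lock times out (ETIMEDOUT) rather than acquiring
// the lock, and that malformed timespecs (tv_nsec outside [0, NS_PER_S)) fail
// with EINVAL.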
static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
}

static void pthread_rwlock_timedrdlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedrdlock);
}

TEST(pthread, pthread_rwlock_timedrdlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedrdlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockrdlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif  // __BIONIC__
}

static void pthread_rwlock_timedwrlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedwrlock);
}

TEST(pthread, pthread_rwlock_timedwrlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedwrlock_monotonic_np);
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockwrlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else   // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif  // __BIONIC__
}

#if !defined(ANDROID_HOST_MUSL)
// musl doesn't have pthread_rwlockattr_setkind_np
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};
#endif

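// With PTHREAD_RWLOCK_PREFER_READER_NP, a new reader can acquire the lock even
// while a writer is queued; with PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,
// a queued writer blocks new readers until it has run.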
TEST(pthread,pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP)1340 TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
1341 #if !defined(ANDROID_HOST_MUSL)
1342   RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
1343   ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));
1344 
1345   pthread_t writer_thread;
1346   std::atomic<pid_t> writer_tid;
1347   helper.CreateWriterThread(writer_thread, writer_tid);
1348   WaitUntilThreadSleep(writer_tid);
1349 
1350   pthread_t reader_thread;
1351   std::atomic<pid_t> reader_tid;
1352   helper.CreateReaderThread(reader_thread, reader_tid);
1353   ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
1354 
1355   ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
1356   ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
1357 #else
1358   GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
1359 #endif
1360 }
1361 
TEST(pthread,pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)1362 TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
1363 #if !defined(ANDROID_HOST_MUSL)
1364   RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
1365   ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));
1366 
1367   pthread_t writer_thread;
1368   std::atomic<pid_t> writer_tid;
1369   helper.CreateWriterThread(writer_thread, writer_tid);
1370   WaitUntilThreadSleep(writer_tid);
1371 
1372   pthread_t reader_thread;
1373   std::atomic<pid_t> reader_tid;
1374   helper.CreateReaderThread(reader_thread, reader_tid);
1375   WaitUntilThreadSleep(reader_tid);
1376 
1377   ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
1378   ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
1379   ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
1380 #else
1381   GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
1382 #endif
1383 }
1384 
1385 static int g_once_fn_call_count = 0;
1386 static void OnceFn() {
1387   ++g_once_fn_call_count;
1388 }
1389 
1390 TEST(pthread, pthread_once_smoke) {
1391   pthread_once_t once_control = PTHREAD_ONCE_INIT;
1392   ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
1393   ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
1394   ASSERT_EQ(1, g_once_fn_call_count);
1395 }
1396 
1397 static std::string pthread_once_1934122_result = "";
1398 
1399 static void Routine2() {
1400   pthread_once_1934122_result += "2";
1401 }
1402 
1403 static void Routine1() {
1404   pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
1405   pthread_once_1934122_result += "1";
1406   pthread_once(&once_control_2, &Routine2);
1407 }
1408 
1409 TEST(pthread, pthread_once_1934122) {
1410   // Very old versions of Android couldn't call pthread_once from a
1411   // pthread_once init routine. http://b/1934122.
1412   pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
1413   ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
1414   ASSERT_EQ("12", pthread_once_1934122_result);
1415 }
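
// [Editor's sketch, not from the original file] A minimal example of the
// canonical pthread_once use case the two tests above exercise: thread-safe
// lazy initialization. All names here are hypothetical.
static pthread_once_t g_example_once = PTHREAD_ONCE_INIT;
static int* g_example_state = nullptr;

static void ExampleInitState() {
  // Runs exactly once, no matter how many threads race into ExampleGetState().
  g_example_state = new int(42);
}

static int* ExampleGetState() {
  pthread_once(&g_example_once, ExampleInitState);
  return g_example_state;
}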
1416 
1417 static int g_atfork_prepare_calls = 0;
1418 static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
1419 static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
1420 static int g_atfork_parent_calls = 0;
1421 static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
1422 static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
1423 static int g_atfork_child_calls = 0;
1424 static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
1425 static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }
1426 
1427 TEST(pthread, pthread_atfork_smoke_fork) {
1428   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
1429   ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
1430 
1431   g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
1432   pid_t pid = fork();
1433   ASSERT_NE(-1, pid) << strerror(errno);
1434 
1435   // Child and parent calls are made in the order they were registered.
1436   if (pid == 0) {
1437     ASSERT_EQ(12, g_atfork_child_calls);
1438     _exit(0);
1439   }
1440   ASSERT_EQ(12, g_atfork_parent_calls);
1441 
1442   // Prepare calls are made in the reverse order.
1443   ASSERT_EQ(21, g_atfork_prepare_calls);
1444   AssertChildExited(pid, 0);
1445 }
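
// [Editor's sketch, not from the original file] The ordering asserted above is
// what makes the classic pthread_atfork locking pattern work: prepare handlers
// run in reverse registration order (locks taken back-to-front), while parent
// and child handlers run in registration order (locks released front-to-back).
// A hedged example with hypothetical names:
static pthread_mutex_t g_example_lib_lock = PTHREAD_MUTEX_INITIALIZER;

static void ExampleRegisterAtForkHandlers() {
  pthread_atfork([]() { pthread_mutex_lock(&g_example_lib_lock); },    // prepare
                 []() { pthread_mutex_unlock(&g_example_lib_lock); },  // parent
                 []() { pthread_mutex_unlock(&g_example_lib_lock); }); // child
}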
1446 
1447 TEST(pthread, pthread_atfork_smoke_vfork) {
1448   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
1449   ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
1450 
1451   g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
1452   pid_t pid = vfork();
1453   ASSERT_NE(-1, pid) << strerror(errno);
1454 
1455   // atfork handlers are not called.
1456   if (pid == 0) {
1457     ASSERT_EQ(0, g_atfork_child_calls);
1458     _exit(0);
1459   }
1460   ASSERT_EQ(0, g_atfork_parent_calls);
1461   ASSERT_EQ(0, g_atfork_prepare_calls);
1462   AssertChildExited(pid, 0);
1463 }
1464 
1465 TEST(pthread, pthread_atfork_smoke__Fork) {
1466 #if defined(__BIONIC__)
1467   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
1468   ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
1469 
1470   g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
1471   pid_t pid = _Fork();
1472   ASSERT_NE(-1, pid) << strerror(errno);
1473 
1474   // atfork handlers are not called.
1475   if (pid == 0) {
1476     ASSERT_EQ(0, g_atfork_child_calls);
1477     _exit(0);
1478   }
1479   ASSERT_EQ(0, g_atfork_parent_calls);
1480   ASSERT_EQ(0, g_atfork_prepare_calls);
1481   AssertChildExited(pid, 0);
1482 #endif
1483 }
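
// [Editor's note, not from the original file] Taken together, the three smoke
// tests above document the contract: fork() runs the registered atfork
// handlers, while vfork() and _Fork() skip them -- which is part of what makes
// _Fork() usable in contexts (such as after a signal) where arbitrary atfork
// handlers cannot safely run.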
1484 
1485 TEST(pthread, pthread_attr_getscope) {
1486   pthread_attr_t attr;
1487   ASSERT_EQ(0, pthread_attr_init(&attr));
1488 
1489   int scope;
1490   ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
1491   ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
1492 }
1493 
1494 TEST(pthread, pthread_condattr_init) {
1495   pthread_condattr_t attr;
1496   pthread_condattr_init(&attr);
1497 
1498   clockid_t clock;
1499   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1500   ASSERT_EQ(CLOCK_REALTIME, clock);
1501 
1502   int pshared;
1503   ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1504   ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
1505 }
1506 
1507 TEST(pthread, pthread_condattr_setclock) {
1508   pthread_condattr_t attr;
1509   pthread_condattr_init(&attr);
1510 
1511   ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
1512   clockid_t clock;
1513   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1514   ASSERT_EQ(CLOCK_REALTIME, clock);
1515 
1516   ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1517   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1518   ASSERT_EQ(CLOCK_MONOTONIC, clock);
1519 
1520   ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
1521 }
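
// [Editor's sketch, not from the original file] Why the condattr clock tested
// above matters: pthread_cond_timedwait interprets its absolute deadline
// against the clock set on the condattr, so a CLOCK_MONOTONIC condvar is
// immune to wall-clock (CLOCK_REALTIME) adjustments. This assumes `cond` was
// initialized with pthread_condattr_setclock(&attr, CLOCK_MONOTONIC).
static int ExampleWaitUpToOneSecond(pthread_cond_t* cond, pthread_mutex_t* mutex) {
  timespec deadline;
  clock_gettime(CLOCK_MONOTONIC, &deadline);
  deadline.tv_sec += 1;  // Absolute deadline: now + 1s on the condvar's clock.
  return pthread_cond_timedwait(cond, mutex, &deadline);  // 0 or ETIMEDOUT.
}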
1522 
1523 TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
1524 #if defined(__BIONIC__)
1525   pthread_condattr_t attr;
1526   pthread_condattr_init(&attr);
1527 
1528   ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1529   ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
1530 
1531   pthread_cond_t cond_var;
1532   ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));
1533 
1534   ASSERT_EQ(0, pthread_cond_signal(&cond_var));
1535   ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));
1536 
1537   attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
1538   clockid_t clock;
1539   ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1540   ASSERT_EQ(CLOCK_MONOTONIC, clock);
1541   int pshared;
1542   ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1543   ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
1544 #else  // !defined(__BIONIC__)
1545   GTEST_SKIP() << "bionic-only test";
1546 #endif  // !defined(__BIONIC__)
1547 }
1548 
1549 class pthread_CondWakeupTest : public ::testing::Test {
1550  protected:
1551   pthread_mutex_t mutex;
1552   pthread_cond_t cond;
1553 
1554   enum Progress {
1555     INITIALIZED,
1556     WAITING,
1557     SIGNALED,
1558     FINISHED,
1559   };
1560   std::atomic<Progress> progress;
1561   pthread_t thread;
1562   timespec ts;
1563   std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;
1564 
1565  protected:
1566   void SetUp() override {
1567     ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
1568   }
1569 
1570   void InitCond(clockid_t clock=CLOCK_REALTIME) {
1571     pthread_condattr_t attr;
1572     ASSERT_EQ(0, pthread_condattr_init(&attr));
1573     ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
1574     ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
1575     ASSERT_EQ(0, pthread_condattr_destroy(&attr));
1576   }
1577 
1578   void StartWaitingThread(
1579       std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
1580     progress = INITIALIZED;
1581     this->wait_function = wait_function;
1582     ASSERT_EQ(0, pthread_create(&thread, nullptr, reinterpret_cast<void* (*)(void*)>(WaitThreadFn),
1583                                 this));
1584     while (progress != WAITING) {
1585       usleep(5000);
1586     }
1587     usleep(5000);
1588   }
1589 
1590   void RunTimedTest(
1591       clockid_t clock,
1592       std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* timeout)>
1593           wait_function) {
1594     ASSERT_EQ(0, clock_gettime(clock, &ts));
1595     ts.tv_sec += 1;
1596 
1597     StartWaitingThread([&wait_function, this](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1598       return wait_function(cond, mutex, &ts);
1599     });
1600 
1601     progress = SIGNALED;
1602     ASSERT_EQ(0, pthread_cond_signal(&cond));
1603   }
1604 
1605   void RunTimedTest(clockid_t clock, std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex,
1606                                                        clockid_t clock, const timespec* timeout)>
1607                                          wait_function) {
1608     RunTimedTest(clock, [clock, &wait_function](pthread_cond_t* cond, pthread_mutex_t* mutex,
1609                                                 const timespec* timeout) {
1610       return wait_function(cond, mutex, clock, timeout);
1611     });
1612   }
1613 
1614   void TearDown() override {
1615     ASSERT_EQ(0, pthread_join(thread, nullptr));
1616     ASSERT_EQ(FINISHED, progress);
1617     ASSERT_EQ(0, pthread_cond_destroy(&cond));
1618     ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
1619   }
1620 
1621  private:
1622   static void WaitThreadFn(pthread_CondWakeupTest* test) {
1623     ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
1624     test->progress = WAITING;
1625     while (test->progress == WAITING) {
1626       ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
1627     }
1628     ASSERT_EQ(SIGNALED, test->progress);
1629     test->progress = FINISHED;
1630     ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
1631   }
1632 };
1633 
1634 TEST_F(pthread_CondWakeupTest, signal_wait) {
1635   InitCond();
1636   StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1637     return pthread_cond_wait(cond, mutex);
1638   });
1639   progress = SIGNALED;
1640   ASSERT_EQ(0, pthread_cond_signal(&cond));
1641 }
1642 
1643 TEST_F(pthread_CondWakeupTest, broadcast_wait) {
1644   InitCond();
1645   StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
1646     return pthread_cond_wait(cond, mutex);
1647   });
1648   progress = SIGNALED;
1649   ASSERT_EQ(0, pthread_cond_broadcast(&cond));
1650 }
1651 
1652 TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
1653   InitCond(CLOCK_REALTIME);
1654   RunTimedTest(CLOCK_REALTIME, pthread_cond_timedwait);
1655 }
1656 
1657 TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
1658   InitCond(CLOCK_MONOTONIC);
1659   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait);
1660 }
1661 
1662 TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC_np) {
1663 #if defined(__BIONIC__)
1664   InitCond(CLOCK_REALTIME);
1665   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
1666 #else   // __BIONIC__
1667   GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
1668 #endif  // __BIONIC__
1669 }
1670 
1671 TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_monotonic) {
1672 #if defined(__BIONIC__)
1673   InitCond(CLOCK_MONOTONIC);
1674   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
1675 #else   // __BIONIC__
1676   GTEST_SKIP() << "pthread_cond_clockwait not available";
1677 #endif  // __BIONIC__
1678 }
1679 
1680 TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_realtime) {
1681 #if defined(__BIONIC__)
1682   InitCond(CLOCK_MONOTONIC);
1683   RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
1684 #else   // __BIONIC__
1685   GTEST_SKIP() << "pthread_cond_clockwait not available";
1686 #endif  // __BIONIC__
1687 }
1688 
1689 TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_monotonic) {
1690 #if defined(__BIONIC__)
1691   InitCond(CLOCK_REALTIME);
1692   RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
1693 #else   // __BIONIC__
1694   GTEST_SKIP() << "pthread_cond_clockwait not available";
1695 #endif  // __BIONIC__
1696 }
1697 
1698 TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_realtime) {
1699 #if defined(__BIONIC__)
1700   InitCond(CLOCK_REALTIME);
1701   RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
1702 #else   // __BIONIC__
1703   GTEST_SKIP() << "pthread_cond_clockwait not available";
1704 #endif  // __BIONIC__
1705 }
1706 
1707 static void pthread_cond_timedwait_timeout_helper(bool init_monotonic, clockid_t clock,
1708                                                   int (*wait_function)(pthread_cond_t* __cond,
1709                                                                        pthread_mutex_t* __mutex,
1710                                                                        const timespec* __timeout)) {
1711   pthread_mutex_t mutex;
1712   ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
1713   pthread_cond_t cond;
1714 
1715   if (init_monotonic) {
1716     pthread_condattr_t attr;
1717     pthread_condattr_init(&attr);
1718 
1719     ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1720     clockid_t clock;
1721     ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1722     ASSERT_EQ(CLOCK_MONOTONIC, clock);
1723 
1724     ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
1725   } else {
1726     ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
1727   }
1728   ASSERT_EQ(0, pthread_mutex_lock(&mutex));
1729 
1730   timespec ts;
1731   ASSERT_EQ(0, clock_gettime(clock, &ts));
1732   ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
1733   ts.tv_nsec = -1;
1734   ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
1735   ts.tv_nsec = NS_PER_S;
1736   ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
1737   ts.tv_nsec = NS_PER_S - 1;
1738   ts.tv_sec = -1;
1739   ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
1740   ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
1741 }
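
// [Editor's sketch, not from the original file] The helper above encodes the
// timespec validity rule: tv_nsec must lie in [0, NS_PER_S), otherwise the
// wait functions return EINVAL; a deadline in the past (even one with a
// negative tv_sec) is valid and simply times out immediately.
static bool ExampleTimespecIsValid(const timespec& ts) {
  return ts.tv_nsec >= 0 && ts.tv_nsec < NS_PER_S;
}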
1742 
1743 TEST(pthread, pthread_cond_timedwait_timeout) {
1744   pthread_cond_timedwait_timeout_helper(false, CLOCK_REALTIME, pthread_cond_timedwait);
1745 }
1746 
1747 TEST(pthread, pthread_cond_timedwait_monotonic_np_timeout) {
1748 #if defined(__BIONIC__)
1749   pthread_cond_timedwait_timeout_helper(false, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
1750   pthread_cond_timedwait_timeout_helper(true, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
1751 #else   // __BIONIC__
1752   GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
1753 #endif  // __BIONIC__
1754 }
1755 
1756 TEST(pthread, pthread_cond_clockwait_timeout) {
1757 #if defined(__BIONIC__)
1758   pthread_cond_timedwait_timeout_helper(
1759       false, CLOCK_MONOTONIC,
1760       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1761         return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
1762       });
1763   pthread_cond_timedwait_timeout_helper(
1764       true, CLOCK_MONOTONIC,
1765       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1766         return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
1767       });
1768   pthread_cond_timedwait_timeout_helper(
1769       false, CLOCK_REALTIME,
1770       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1771         return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
1772       });
1773   pthread_cond_timedwait_timeout_helper(
1774       true, CLOCK_REALTIME,
1775       [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
1776         return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
1777       });
1778 #else   // __BIONIC__
1779   GTEST_SKIP() << "pthread_cond_clockwait not available";
1780 #endif  // __BIONIC__
1781 }
1782 
1783 TEST(pthread, pthread_cond_clockwait_invalid) {
1784 #if defined(__BIONIC__)
1785   pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
1786   pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
1787   timespec ts;
1788   EXPECT_EQ(EINVAL, pthread_cond_clockwait(&cond, &mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
1789 
1790 #else   // __BIONIC__
1791   GTEST_SKIP() << "pthread_cond_clockwait not available";
1792 #endif  // __BIONIC__
1793 }
1794 
1795 TEST(pthread, pthread_attr_getstack__main_thread) {
1796   // This test is only meaningful for the main thread, so make sure we're running on it!
1797   ASSERT_EQ(getpid(), syscall(__NR_gettid));
1798 
1799   // Get the main thread's attributes.
1800   pthread_attr_t attributes;
1801   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1802 
1803   // Check that we correctly report that the main thread has no guard page.
1804   size_t guard_size;
1805   ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
1806   ASSERT_EQ(0U, guard_size); // The main thread has no guard page.
1807 
1808   // Get the stack base and the stack size (both ways).
1809   void* stack_base;
1810   size_t stack_size;
1811   ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1812   size_t stack_size2;
1813   ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1814 
1815   // The two methods of asking for the stack size should agree.
1816   EXPECT_EQ(stack_size, stack_size2);
1817 
1818 #if defined(__BIONIC__)
1819   // Find stack in /proc/self/maps using a pointer to the stack.
1820   //
1821   // We do not use the "[stack]" label because in a native-bridge environment it is not
1822   // guaranteed to point to the right stack. A native bridge implementation may
1823   // keep a separate stack for the guest code.
1824   void* maps_stack_hi = nullptr;
1825   std::vector<map_record> maps;
1826   ASSERT_TRUE(Maps::parse_maps(&maps));
1827   uintptr_t stack_address = reinterpret_cast<uintptr_t>(untag_address(&maps_stack_hi));
1828   for (const auto& map : maps) {
1829     if (map.addr_start <= stack_address && map.addr_end > stack_address) {
1830       maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
1831       break;
1832     }
1833   }
1834 
1835   // The high address of the /proc/self/maps stack region should equal stack_base + stack_size.
1836   // Remember that the stack grows down (and is mapped in on demand), so the low address of the
1837   // region isn't very interesting.
1838   EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);
1839 
1840   // The stack size should correspond to RLIMIT_STACK.
1841   rlimit rl;
1842   ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
1843   uint64_t original_rlim_cur = rl.rlim_cur;
1844   if (rl.rlim_cur == RLIM_INFINITY) {
1845     rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
1846   }
1847   EXPECT_EQ(rl.rlim_cur, stack_size);
1848 
1849   auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
1850     rl.rlim_cur = original_rlim_cur;
1851     ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1852   });
1853 
1854   //
1855   // What if RLIMIT_STACK is smaller than the stack's current extent?
1856   //
1857   rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
1858   rl.rlim_max = RLIM_INFINITY;
1859   ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1860 
1861   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1862   ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1863   ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1864 
1865   EXPECT_EQ(stack_size, stack_size2);
1866   ASSERT_EQ(1024U, stack_size);
1867 
1868   //
1869   // What if RLIMIT_STACK isn't a whole number of pages?
1870   //
1871   rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
1872   rl.rlim_max = RLIM_INFINITY;
1873   ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
1874 
1875   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
1876   ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
1877   ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));
1878 
1879   EXPECT_EQ(stack_size, stack_size2);
1880   ASSERT_EQ(6666U, stack_size);
1881 #endif
1882 }
1883 
1884 struct GetStackSignalHandlerArg {
1885   volatile bool done;
1886   void* signal_stack_base;
1887   size_t signal_stack_size;
1888   void* main_stack_base;
1889   size_t main_stack_size;
1890 };
1891 
1892 static GetStackSignalHandlerArg getstack_signal_handler_arg;
1893 
1894 static void getstack_signal_handler(int sig) {
1895   ASSERT_EQ(SIGUSR1, sig);
1896   // Use sleep() to encourage the kernel to switch the current thread out, provoking the error.
1897   sleep(1);
1898   pthread_attr_t attr;
1899   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
1900   void* stack_base;
1901   size_t stack_size;
1902   ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));
1903 
1904   // Verify that the stack used by the signal handler is the alternate stack just registered.
1905   ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
1906   ASSERT_LT(static_cast<void*>(untag_address(&attr)),
1907             static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
1908                 getstack_signal_handler_arg.signal_stack_size);
1909 
1910   // Verify that the main thread's stack, as obtained in the signal handler, is correct.
1911   ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
1912   ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);
1913 
1914   getstack_signal_handler_arg.done = true;
1915 }
1916 
1917 // The previous code obtained the main thread's stack by reading the entry in
1918 // /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
1919 // relies on sp0 in the task state segment (TSS) to label the stack map with [stack]. If the kernel
1920 // switches a process while the main thread is in an alternate stack, then the kernel will label
1921 // the wrong map with [stack]. This test verifies that when the above situation happens, the main
1922 // thread's stack is found correctly.
1923 TEST(pthread, pthread_attr_getstack_in_signal_handler) {
1924   // This test is only meaningful for the main thread, so make sure we're running on it!
1925   ASSERT_EQ(getpid(), syscall(__NR_gettid));
1926 
1927   const size_t sig_stack_size = 16 * 1024;
1928   void* sig_stack = mmap(nullptr, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
1929                          -1, 0);
1930   ASSERT_NE(MAP_FAILED, sig_stack);
1931   stack_t ss;
1932   ss.ss_sp = sig_stack;
1933   ss.ss_size = sig_stack_size;
1934   ss.ss_flags = 0;
1935   stack_t oss;
1936   ASSERT_EQ(0, sigaltstack(&ss, &oss));
1937 
1938   pthread_attr_t attr;
1939   ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
1940   void* main_stack_base;
1941   size_t main_stack_size;
1942   ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));
1943 
1944   ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
1945   getstack_signal_handler_arg.done = false;
1946   getstack_signal_handler_arg.signal_stack_base = sig_stack;
1947   getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
1948   getstack_signal_handler_arg.main_stack_base = main_stack_base;
1949   getstack_signal_handler_arg.main_stack_size = main_stack_size;
1950   kill(getpid(), SIGUSR1);
1951   ASSERT_EQ(true, getstack_signal_handler_arg.done);
1952 
1953   ASSERT_EQ(0, sigaltstack(&oss, nullptr));
1954   ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
1955 }
1956 
1957 static void pthread_attr_getstack_18908062_helper(void*) {
1958   char local_variable;
1959   pthread_attr_t attributes;
1960   pthread_getattr_np(pthread_self(), &attributes);
1961   void* stack_base;
1962   size_t stack_size;
1963   pthread_attr_getstack(&attributes, &stack_base, &stack_size);
1964 
1965   // Test whether &local_variable is in [stack_base, stack_base + stack_size).
1966   ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
1967   ASSERT_LT(untag_address(&local_variable), reinterpret_cast<char*>(stack_base) + stack_size);
1968 }
1969 
1970 // Check whether something on the stack is in the range
1971 // [stack_base, stack_base + stack_size). See b/18908062.
1972 TEST(pthread, pthread_attr_getstack_18908062) {
1973   pthread_t t;
1974   ASSERT_EQ(0, pthread_create(&t, nullptr,
1975             reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
1976             nullptr));
1977   ASSERT_EQ(0, pthread_join(t, nullptr));
1978 }
1979 
1980 #if defined(__BIONIC__)
1981 static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;
1982 
1983 static void* pthread_gettid_np_helper(void* arg) {
1984   *reinterpret_cast<pid_t*>(arg) = gettid();
1985 
1986   // Wait for our parent to call pthread_gettid_np on us before exiting.
1987   pthread_mutex_lock(&pthread_gettid_np_mutex);
1988   pthread_mutex_unlock(&pthread_gettid_np_mutex);
1989   return nullptr;
1990 }
1991 #endif
1992 
1993 TEST(pthread, pthread_gettid_np) {
1994 #if defined(__BIONIC__)
1995   ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));
1996 
1997   // Ensure the other thread doesn't exit until after we've called
1998   // pthread_gettid_np on it.
1999   pthread_mutex_lock(&pthread_gettid_np_mutex);
2000 
2001   pid_t t_gettid_result;
2002   pthread_t t;
2003   pthread_create(&t, nullptr, pthread_gettid_np_helper, &t_gettid_result);
2004 
2005   pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
2006 
2007   // Release the other thread and wait for it to exit.
2008   pthread_mutex_unlock(&pthread_gettid_np_mutex);
2009   ASSERT_EQ(0, pthread_join(t, nullptr));
2010 
2011   ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
2012 #else
2013   GTEST_SKIP() << "pthread_gettid_np not available";
2014 #endif
2015 }
2016 
2017 static size_t cleanup_counter = 0;
2018 
2019 static void AbortCleanupRoutine(void*) {
2020   abort();
2021 }
2022 
2023 static void CountCleanupRoutine(void*) {
2024   ++cleanup_counter;
2025 }
2026 
2027 static void PthreadCleanupTester() {
2028   pthread_cleanup_push(CountCleanupRoutine, nullptr);
2029   pthread_cleanup_push(CountCleanupRoutine, nullptr);
2030   pthread_cleanup_push(AbortCleanupRoutine, nullptr);
2031 
2032   pthread_cleanup_pop(0); // Pop the abort without executing it.
2033   pthread_cleanup_pop(1); // Pop one count while executing it.
2034   ASSERT_EQ(1U, cleanup_counter);
2035   // Exit while the other count is still on the cleanup stack.
2036   pthread_exit(nullptr);
2037 
2038   // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
2039   pthread_cleanup_pop(0);
2040 }
2041 
2042 static void* PthreadCleanupStartRoutine(void*) {
2043   PthreadCleanupTester();
2044   return nullptr;
2045 }
2046 
2047 TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
2048   pthread_t t;
2049   ASSERT_EQ(0, pthread_create(&t, nullptr, PthreadCleanupStartRoutine, nullptr));
2050   ASSERT_EQ(0, pthread_join(t, nullptr));
2051   ASSERT_EQ(2U, cleanup_counter);
2052 }
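
// [Editor's sketch, not from the original file] The balance rule tested above
// exists because pthread_cleanup_push and pthread_cleanup_pop are macros that
// open and close a scope, so each pair must sit in the same block. A hedged
// usage example reusing CountCleanupRoutine from above:
static void ExampleCleanupUsage() {
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  // ... work that may call pthread_exit() or hit a cancellation point ...
  pthread_cleanup_pop(1);  // On the normal path, pop and run the routine too.
}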
2053 
2054 TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
2055   ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
2056 }
2057 
2058 TEST(pthread, pthread_mutexattr_gettype) {
2059   pthread_mutexattr_t attr;
2060   ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2061 
2062   int attr_type;
2063 
2064   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
2065   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
2066   ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);
2067 
2068   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
2069   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
2070   ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);
2071 
2072   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
2073   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
2074   ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
2075 
2076   ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
2077 }
2078 
2079 TEST(pthread, pthread_mutexattr_protocol) {
2080   pthread_mutexattr_t attr;
2081   ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2082 
2083   int protocol;
2084   ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
2085   ASSERT_EQ(PTHREAD_PRIO_NONE, protocol);
2086   for (size_t repeat = 0; repeat < 2; ++repeat) {
2087     for (int set_protocol : {PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT}) {
2088       ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, set_protocol));
2089       ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
2090       ASSERT_EQ(protocol, set_protocol);
2091     }
2092   }
2093 }
2094 
2095 struct PthreadMutex {
2096   pthread_mutex_t lock;
2097 
2098   explicit PthreadMutex(int mutex_type, int protocol = PTHREAD_PRIO_NONE) {
2099     init(mutex_type, protocol);
2100   }
2101 
2102   ~PthreadMutex() {
2103     destroy();
2104   }
2105 
2106  private:
2107   void init(int mutex_type, int protocol) {
2108     pthread_mutexattr_t attr;
2109     ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2110     ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
2111     ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, protocol));
2112     ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
2113     ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
2114   }
2115 
2116   void destroy() {
2117     ASSERT_EQ(0, pthread_mutex_destroy(&lock));
2118   }
2119 
2120   DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
2121 };
2122 
2123 static int UnlockFromAnotherThread(pthread_mutex_t* mutex) {
2124   pthread_t thread;
2125   pthread_create(&thread, nullptr, [](void* mutex_voidp) -> void* {
2126     pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(mutex_voidp);
2127     intptr_t result = pthread_mutex_unlock(mutex);
2128     return reinterpret_cast<void*>(result);
2129   }, mutex);
2130   void* result;
2131   EXPECT_EQ(0, pthread_join(thread, &result));
2132   return reinterpret_cast<intptr_t>(result);
2133 };
2134 
2135 static void TestPthreadMutexLockNormal(int protocol) {
2136   PthreadMutex m(PTHREAD_MUTEX_NORMAL, protocol);
2137 
2138   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2139   if (protocol == PTHREAD_PRIO_INHERIT) {
2140     ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2141   }
2142   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2143   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2144   ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
2145   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2146 }
2147 
2148 static void TestPthreadMutexLockErrorCheck(int protocol) {
2149   PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, protocol);
2150 
2151   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2152   ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2153   ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
2154   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2155   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2156   if (protocol == PTHREAD_PRIO_NONE) {
2157     ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
2158   } else {
2159     ASSERT_EQ(EDEADLK, pthread_mutex_trylock(&m.lock));
2160   }
2161   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2162   ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
2163 }
2164 
2165 static void TestPthreadMutexLockRecursive(int protocol) {
2166   PthreadMutex m(PTHREAD_MUTEX_RECURSIVE, protocol);
2167 
2168   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2169   ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2170   ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2171   ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
2172   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2173   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2174   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2175   ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
2176   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2177   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2178   ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
2179 }
2180 
2181 TEST(pthread, pthread_mutex_lock_NORMAL) {
2182   TestPthreadMutexLockNormal(PTHREAD_PRIO_NONE);
2183 }
2184 
2185 TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
2186   TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_NONE);
2187 }
2188 
2189 TEST(pthread, pthread_mutex_lock_RECURSIVE) {
2190   TestPthreadMutexLockRecursive(PTHREAD_PRIO_NONE);
2191 }
2192 
2193 TEST(pthread, pthread_mutex_lock_pi) {
2194   TestPthreadMutexLockNormal(PTHREAD_PRIO_INHERIT);
2195   TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_INHERIT);
2196   TestPthreadMutexLockRecursive(PTHREAD_PRIO_INHERIT);
2197 }
2198 
2199 TEST(pthread, pthread_mutex_pi_count_limit) {
2200 #if defined(__BIONIC__) && !defined(__LP64__)
2201   // Bionic only supports 65536 pi mutexes in 32-bit programs.
2202   pthread_mutexattr_t attr;
2203   ASSERT_EQ(0, pthread_mutexattr_init(&attr));
2204   ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT));
2205   std::vector<pthread_mutex_t> mutexes(65536);
2206   // Test if we can use 65536 pi mutexes at the same time.
2207   // Run 2 times to check if freed pi mutexes can be recycled.
2208   for (int repeat = 0; repeat < 2; ++repeat) {
2209     for (auto& m : mutexes) {
2210       ASSERT_EQ(0, pthread_mutex_init(&m, &attr));
2211     }
2212     pthread_mutex_t m;
2213     ASSERT_EQ(ENOMEM, pthread_mutex_init(&m, &attr));
2214     for (auto& m : mutexes) {
2215       ASSERT_EQ(0, pthread_mutex_lock(&m));
2216     }
2217     for (auto& m : mutexes) {
2218       ASSERT_EQ(0, pthread_mutex_unlock(&m));
2219     }
2220     for (auto& m : mutexes) {
2221       ASSERT_EQ(0, pthread_mutex_destroy(&m));
2222     }
2223   }
2224   ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
2225 #else
2226   GTEST_SKIP() << "pi mutex count not limited to 64Ki";
2227 #endif
2228 }
2229 
2230 TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
2231   pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
2232   PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
2233   ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
2234   pthread_mutex_destroy(&lock_normal);
2235 
2236 #if !defined(ANDROID_HOST_MUSL)
2237   // musl doesn't support PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP or
2238   // PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP.
2239   pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
2240   PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
2241   ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
2242   pthread_mutex_destroy(&lock_errorcheck);
2243 
2244   pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
2245   PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
2246   ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
2247   ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
2248 #endif
2249 }
2250 
2251 class MutexWakeupHelper {
2252  private:
2253   PthreadMutex m;
2254   enum Progress {
2255     LOCK_INITIALIZED,
2256     LOCK_WAITING,
2257     LOCK_RELEASED,
2258     LOCK_ACCESSED
2259   };
2260   std::atomic<Progress> progress;
2261   std::atomic<pid_t> tid;
2262 
2263   static void thread_fn(MutexWakeupHelper* helper) {
2264     helper->tid = gettid();
2265     ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
2266     helper->progress = LOCK_WAITING;
2267 
2268     ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
2269     ASSERT_EQ(LOCK_RELEASED, helper->progress);
2270     ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
2271 
2272     helper->progress = LOCK_ACCESSED;
2273   }
2274 
2275  public:
2276   explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
2277   }
2278 
2279   void test() {
2280     ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2281     progress = LOCK_INITIALIZED;
2282     tid = 0;
2283 
2284     pthread_t thread;
2285     ASSERT_EQ(0, pthread_create(&thread, nullptr,
2286       reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));
2287 
2288     WaitUntilThreadSleep(tid);
2289     ASSERT_EQ(LOCK_WAITING, progress);
2290 
2291     progress = LOCK_RELEASED;
2292     ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2293 
2294     ASSERT_EQ(0, pthread_join(thread, nullptr));
2295     ASSERT_EQ(LOCK_ACCESSED, progress);
2296   }
2297 };
2298 
2299 TEST(pthread, pthread_mutex_NORMAL_wakeup) {
2300   MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
2301   helper.test();
2302 }
2303 
2304 TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
2305   MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
2306   helper.test();
2307 }
2308 
2309 TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
2310   MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
2311   helper.test();
2312 }
2313 
2314 static int GetThreadPriority(pid_t tid) {
2315   // sched_getparam() returns the static priority of a thread, which can't reflect a thread's
2316   // priority after priority inheritance. So read /proc/<pid>/stat to get the dynamic priority.
2317   std::string filename = android::base::StringPrintf("/proc/%d/stat", tid);
2318   std::string content;
2319   int result = INT_MAX;
2320   if (!android::base::ReadFileToString(filename, &content)) {
2321     return result;
2322   }
2323   std::vector<std::string> strs = android::base::Split(content, " ");
2324   if (strs.size() < 18) {
2325     return result;
2326   }
2327   if (!android::base::ParseInt(strs[17], &result)) {
2328     return INT_MAX;
2329   }
2330   return result;
2331 }
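
// [Editor's note, not from the original file] GetThreadPriority() reads field
// 18 of /proc/<tid>/stat. For SCHED_OTHER threads the kernel reports that
// dynamic priority as 20 + nice, which is why the tests below expect 20 at
// nice 0 and 21 after setpriority(PRIO_PROCESS, tid, 1). With
// PTHREAD_PRIO_INHERIT, a lock holder blocked on by a higher-priority waiter
// temporarily reports the waiter's (numerically lower) priority.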
2332 
2333 class PIMutexWakeupHelper {
2334 private:
2335   PthreadMutex m;
2336   int protocol;
2337   enum Progress {
2338     LOCK_INITIALIZED,
2339     LOCK_CHILD_READY,
2340     LOCK_WAITING,
2341     LOCK_RELEASED,
2342   };
2343   std::atomic<Progress> progress;
2344   std::atomic<pid_t> main_tid;
2345   std::atomic<pid_t> child_tid;
2346   PthreadMutex start_thread_m;
2347 
2348   static void thread_fn(PIMutexWakeupHelper* helper) {
2349     helper->child_tid = gettid();
2350     ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
2351     ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));
2352     ASSERT_EQ(21, GetThreadPriority(gettid()));
2353     ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
2354     helper->progress = LOCK_CHILD_READY;
2355     ASSERT_EQ(0, pthread_mutex_lock(&helper->start_thread_m.lock));
2356 
2357     ASSERT_EQ(0, pthread_mutex_unlock(&helper->start_thread_m.lock));
2358     WaitUntilThreadSleep(helper->main_tid);
2359     ASSERT_EQ(LOCK_WAITING, helper->progress);
2360 
2361     if (helper->protocol == PTHREAD_PRIO_INHERIT) {
2362       ASSERT_EQ(20, GetThreadPriority(gettid()));
2363     } else {
2364       ASSERT_EQ(21, GetThreadPriority(gettid()));
2365     }
2366     helper->progress = LOCK_RELEASED;
2367     ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
2368   }
2369 
2370 public:
2371   explicit PIMutexWakeupHelper(int mutex_type, int protocol)
2372       : m(mutex_type, protocol), protocol(protocol), start_thread_m(PTHREAD_MUTEX_NORMAL) {
2373   }
2374 
2375   void test() {
2376     ASSERT_EQ(0, pthread_mutex_lock(&start_thread_m.lock));
2377     main_tid = gettid();
2378     ASSERT_EQ(20, GetThreadPriority(main_tid));
2379     progress = LOCK_INITIALIZED;
2380     child_tid = 0;
2381 
2382     pthread_t thread;
2383     ASSERT_EQ(0, pthread_create(&thread, nullptr,
2384               reinterpret_cast<void* (*)(void*)>(PIMutexWakeupHelper::thread_fn), this));
2385 
2386     WaitUntilThreadSleep(child_tid);
2387     ASSERT_EQ(LOCK_CHILD_READY, progress);
2388     ASSERT_EQ(0, pthread_mutex_unlock(&start_thread_m.lock));
2389     progress = LOCK_WAITING;
2390     ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
2391 
2392     ASSERT_EQ(LOCK_RELEASED, progress);
2393     ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2394     ASSERT_EQ(0, pthread_join(thread, nullptr));
2395   }
2396 };
2397 
2398 TEST(pthread, pthread_mutex_pi_wakeup) {
2399   for (int type : {PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK}) {
2400     for (int protocol : {PTHREAD_PRIO_INHERIT}) {
2401       PIMutexWakeupHelper helper(type, protocol);
2402       helper.test();
2403     }
2404   }
2405 }
2406 
2407 TEST(pthread, pthread_mutex_owner_tid_limit) {
2408 #if defined(__BIONIC__) && !defined(__LP64__)
2409   FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
2410   ASSERT_TRUE(fp != nullptr);
2411   long pid_max;
2412   ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
2413   fclose(fp);
2414   // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
2415   ASSERT_LE(pid_max, 65536);
2416 #else
2417   GTEST_SKIP() << "pthread_mutex supports 32-bit tid";
2418 #endif
2419 }
2420 
2421 static void pthread_mutex_timedlock_helper(clockid_t clock,
2422                                            int (*lock_function)(pthread_mutex_t* __mutex,
2423                                                                 const timespec* __timeout)) {
2424   pthread_mutex_t m;
2425   ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
2426 
2427   // If the mutex is already locked, pthread_mutex_timedlock should time out.
2428   ASSERT_EQ(0, pthread_mutex_lock(&m));
2429 
2430   timespec ts;
2431   ASSERT_EQ(0, clock_gettime(clock, &ts));
2432   ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
2433   ts.tv_nsec = -1;
2434   ASSERT_EQ(EINVAL, lock_function(&m, &ts));
2435   ts.tv_nsec = NS_PER_S;
2436   ASSERT_EQ(EINVAL, lock_function(&m, &ts));
2437   ts.tv_nsec = NS_PER_S - 1;
2438   ts.tv_sec = -1;
2439   ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
2440 
2441   // Check we wait long enough for the lock before timing out...
2442 
2443   // What time is it before we start?
2444   ASSERT_EQ(0, clock_gettime(clock, &ts));
2445   const int64_t start_ns = to_ns(ts);
2446   // Add a second to get deadline, and wait until we time out.
2447   ts.tv_sec += 1;
2448   ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
2449 
2450   // What time is it now we've timed out?
2451   timespec ts2;
2452   clock_gettime(clock, &ts2);
2453   const int64_t end_ns = to_ns(ts2);
2454 
2455   // The timedlock must have waited at least 1 second before returning.
2456   ASSERT_GE(end_ns - start_ns, NS_PER_S);
2457 
2458   // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
2459   ASSERT_EQ(0, pthread_mutex_unlock(&m));
2460   ASSERT_EQ(0, clock_gettime(clock, &ts));
2461   ts.tv_sec += 1;
2462   ASSERT_EQ(0, lock_function(&m, &ts));
2463 
2464   ASSERT_EQ(0, pthread_mutex_unlock(&m));
2465   ASSERT_EQ(0, pthread_mutex_destroy(&m));
2466 }
2467 
2468 TEST(pthread, pthread_mutex_timedlock) {
2469   pthread_mutex_timedlock_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
2470 }
2471 
2472 TEST(pthread, pthread_mutex_timedlock_monotonic_np) {
2473 #if defined(__BIONIC__)
2474   pthread_mutex_timedlock_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
2475 #else   // __BIONIC__
2476   GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
2477 #endif  // __BIONIC__
2478 }
2479 
2480 TEST(pthread, pthread_mutex_clocklock_MONOTONIC) {
2481 #if defined(__BIONIC__)
2482   pthread_mutex_timedlock_helper(
2483       CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2484         return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
2485       });
2486 #else   // __BIONIC__
2487   GTEST_SKIP() << "pthread_mutex_clocklock not available";
2488 #endif  // __BIONIC__
2489 }
2490 
2491 TEST(pthread, pthread_mutex_clocklock_REALTIME) {
2492 #if defined(__BIONIC__)
2493   pthread_mutex_timedlock_helper(
2494       CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2495         return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
2496       });
2497 #else   // __BIONIC__
2498   GTEST_SKIP() << "pthread_mutex_clocklock not available";
2499 #endif  // __BIONIC__
2500 }
2501 
2502 static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
2503                                               int (*lock_function)(pthread_mutex_t* __mutex,
2504                                                                    const timespec* __timeout)) {
2505   PthreadMutex m(PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_INHERIT);
2506 
2507   timespec ts;
2508   clock_gettime(clock, &ts);
2509   const int64_t start_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
2510 
2511   // Add a second to get the deadline.
2512   ts.tv_sec += 1;
2513 
2514   ASSERT_EQ(0, lock_function(&m.lock, &ts));
2515 
2516   struct ThreadArgs {
2517     clockid_t clock;
2518     int (*lock_function)(pthread_mutex_t* __mutex, const timespec* __timeout);
2519     PthreadMutex& m;
2520   };
2521 
2522   ThreadArgs thread_args = {
2523     .clock = clock,
2524     .lock_function = lock_function,
2525     .m = m,
2526   };
2527 
2528   auto ThreadFn = [](void* arg) -> void* {
2529     auto args = static_cast<ThreadArgs*>(arg);
2530     timespec ts;
2531     clock_gettime(args->clock, &ts);
2532     ts.tv_sec += 1;
2533     intptr_t result = args->lock_function(&args->m.lock, &ts);
2534     return reinterpret_cast<void*>(result);
2535   };
2536 
2537   pthread_t thread;
2538   ASSERT_EQ(0, pthread_create(&thread, nullptr, ThreadFn, &thread_args));
2539   void* result;
2540   ASSERT_EQ(0, pthread_join(thread, &result));
2541   ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));
2542 
2543   // The timedlock must have waited at least 1 second before returning.
2544   clock_gettime(clock, &ts);
2545   const int64_t end_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
2546   ASSERT_GT(end_ns - start_ns, NS_PER_S);
2547 
2548   ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
2549 }
2550 
2551 TEST(pthread, pthread_mutex_timedlock_pi) {
2552   pthread_mutex_timedlock_pi_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
2553 }
2554 
2555 TEST(pthread, pthread_mutex_timedlock_monotonic_np_pi) {
2556 #if defined(__BIONIC__)
2557   pthread_mutex_timedlock_pi_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
2558 #else   // __BIONIC__
2559   GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
2560 #endif  // __BIONIC__
2561 }
2562 
2563 TEST(pthread, pthread_mutex_clocklock_pi) {
2564 #if defined(__BIONIC__)
2565   pthread_mutex_timedlock_pi_helper(
2566       CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2567         return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
2568       });
2569   pthread_mutex_timedlock_pi_helper(
2570       CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
2571         return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
2572       });
2573 #else   // __BIONIC__
2574   GTEST_SKIP() << "pthread_mutex_clocklock not available";
2575 #endif  // __BIONIC__
2576 }
2577 
2578 TEST(pthread, pthread_mutex_clocklock_invalid) {
2579 #if defined(__BIONIC__)
2580   pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
2581   timespec ts;
2582   EXPECT_EQ(EINVAL, pthread_mutex_clocklock(&mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
2583 #else   // __BIONIC__
2584   GTEST_SKIP() << "pthread_mutex_clocklock not available";
2585 #endif  // __BIONIC__
2586 }
2587 
2588 TEST_F(pthread_DeathTest, pthread_mutex_using_destroyed_mutex) {
2589 #if defined(__BIONIC__)
2590   pthread_mutex_t m;
2591   ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
2592   ASSERT_EQ(0, pthread_mutex_destroy(&m));
2593   ASSERT_EXIT(pthread_mutex_lock(&m), ::testing::KilledBySignal(SIGABRT),
2594               "pthread_mutex_lock called on a destroyed mutex");
2595   ASSERT_EXIT(pthread_mutex_unlock(&m), ::testing::KilledBySignal(SIGABRT),
2596               "pthread_mutex_unlock called on a destroyed mutex");
2597   ASSERT_EXIT(pthread_mutex_trylock(&m), ::testing::KilledBySignal(SIGABRT),
2598               "pthread_mutex_trylock called on a destroyed mutex");
2599   timespec ts;
2600   ASSERT_EXIT(pthread_mutex_timedlock(&m, &ts), ::testing::KilledBySignal(SIGABRT),
2601               "pthread_mutex_timedlock called on a destroyed mutex");
2602   ASSERT_EXIT(pthread_mutex_timedlock_monotonic_np(&m, &ts), ::testing::KilledBySignal(SIGABRT),
2603               "pthread_mutex_timedlock_monotonic_np called on a destroyed mutex");
2604   ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_MONOTONIC, &ts), ::testing::KilledBySignal(SIGABRT),
2605               "pthread_mutex_clocklock called on a destroyed mutex");
2606   ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_REALTIME, &ts), ::testing::KilledBySignal(SIGABRT),
2607               "pthread_mutex_clocklock called on a destroyed mutex");
2608   ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_PROCESS_CPUTIME_ID, &ts),
2609               ::testing::KilledBySignal(SIGABRT),
2610               "pthread_mutex_clocklock called on a destroyed mutex");
2611   ASSERT_EXIT(pthread_mutex_destroy(&m), ::testing::KilledBySignal(SIGABRT),
2612               "pthread_mutex_destroy called on a destroyed mutex");
2613 #else
2614   GTEST_SKIP() << "bionic-only test";
2615 #endif
2616 }
2617 
2618 class StrictAlignmentAllocator {
2619  public:
2620   void* allocate(size_t size, size_t alignment) {
2621     char* p = new char[size + alignment * 2];
2622     allocated_array.push_back(p);
2623     while (!is_strict_aligned(p, alignment)) {
2624       ++p;
2625     }
2626     return p;
2627   }
2628 
2629   ~StrictAlignmentAllocator() {
2630     for (const auto& p : allocated_array) {
2631       delete[] p;
2632     }
2633   }
2634 
2635  private:
2636   bool is_strict_aligned(char* p, size_t alignment) {
2637     return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
2638   }
2639 
2640   std::vector<char*> allocated_array;
2641 };
2642 
2643 TEST(pthread, pthread_types_allow_four_bytes_alignment) {
2644 #if defined(__BIONIC__)
2645   // For binary compatibility with old versions, we need to allow 4-byte aligned data for pthread types.
2646   StrictAlignmentAllocator allocator;
2647   pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
2648                              allocator.allocate(sizeof(pthread_mutex_t), 4));
2649   ASSERT_EQ(0, pthread_mutex_init(mutex, nullptr));
2650   ASSERT_EQ(0, pthread_mutex_lock(mutex));
2651   ASSERT_EQ(0, pthread_mutex_unlock(mutex));
2652   ASSERT_EQ(0, pthread_mutex_destroy(mutex));
2653 
2654   pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
2655                            allocator.allocate(sizeof(pthread_cond_t), 4));
2656   ASSERT_EQ(0, pthread_cond_init(cond, nullptr));
2657   ASSERT_EQ(0, pthread_cond_signal(cond));
2658   ASSERT_EQ(0, pthread_cond_broadcast(cond));
2659   ASSERT_EQ(0, pthread_cond_destroy(cond));
2660 
2661   pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
2662                                allocator.allocate(sizeof(pthread_rwlock_t), 4));
2663   ASSERT_EQ(0, pthread_rwlock_init(rwlock, nullptr));
2664   ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
2665   ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
2666   ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
2667   ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
2668   ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));
2669 
2670 #else
2671   GTEST_SKIP() << "bionic-only test";
2672 #endif
2673 }
2674 
2675 TEST(pthread, pthread_mutex_lock_null_32) {
2676 #if defined(__BIONIC__) && !defined(__LP64__)
2677   // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
2678   // EINVAL in that case: http://b/19995172.
2679   //
2680   // We decorate the public definition with _Nonnull so that people recompiling
2681   // their code will get a warning and might fix their bug, but we need to pass
2682   // NULL here to test that we remain compatible.
2683   pthread_mutex_t* null_value = nullptr;
2684   ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
2685 #else
2686   GTEST_SKIP() << "32-bit bionic-only test";
2687 #endif
2688 }
2689 
2690 TEST(pthread, pthread_mutex_unlock_null_32) {
2691 #if defined(__BIONIC__) && !defined(__LP64__)
2692   // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
2693   // EINVAL in that case: http://b/19995172.
2694   //
2695   // We decorate the public definition with _Nonnull so that people recompiling
2696   // their code will get a warning and might fix their bug, but we need to pass
2697   // NULL here to test that we remain compatible.
2698   pthread_mutex_t* null_value = nullptr;
2699   ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
2700 #else
2701   GTEST_SKIP() << "32-bit bionic-only test";
2702 #endif
2703 }
2704 
2705 TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
2706 #if defined(__BIONIC__) && defined(__LP64__)
2707   pthread_mutex_t* null_value = nullptr;
2708   ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
2709 #else
2710   GTEST_SKIP() << "64-bit bionic-only test";
2711 #endif
2712 }
2713 
TEST_F(pthread_DeathTest,pthread_mutex_unlock_null_64)2714 TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
2715 #if defined(__BIONIC__) && defined(__LP64__)
2716   pthread_mutex_t* null_value = nullptr;
2717   ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
2718 #else
2719   GTEST_SKIP() << "64-bit bionic-only test";
2720 #endif
2721 }

extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);
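
// A minimal sketch of the kind of callback FrameCounter (defined elsewhere in
// this test suite) is assumed to be: it counts unwound frames through `arg`.
// The name and body here are hypothetical and deliberately unused.
[[maybe_unused]] static _Unwind_Reason_Code FrameCounterSketch(_Unwind_Context*, void* arg) {
  ++*static_cast<int*>(arg);  // One more frame unwound.
  return _URC_NO_REASON;      // Keep unwinding to the next frame.
}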

static volatile bool signal_handler_on_altstack_done;

__attribute__((__noinline__))
static void signal_handler_backtrace() {
  // Check if we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
}

__attribute__((__noinline__))
static void signal_handler_logging() {
  // Check if we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

__attribute__((__noinline__))
static void signal_handler_snprintf() {
  // Check if we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
  char buf[PATH_MAX + 2048];
  ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
}

static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  signal_handler_backtrace();
  signal_handler_logging();
  signal_handler_snprintf();
}

TEST(pthread, big_enough_signal_stack) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}
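
// For contrast, a minimal sketch (hypothetical and unused) of how a thread
// would install an explicit alternate signal stack with sigaltstack(2).
// bionic already gives each thread a signal stack big enough for the handler
// above, which is what the test verifies.
[[maybe_unused]] static void InstallAltStackSketch() {
  static char stack_memory[64 * 1024];  // Arbitrary size, comfortably above MINSIGSTKSZ.
  stack_t ss = {};
  ss.ss_sp = stack_memory;
  ss.ss_size = sizeof(stack_memory);
  sigaltstack(&ss, nullptr);
}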

TEST(pthread, pthread_barrierattr_smoke) {
  pthread_barrierattr_t attr;
  ASSERT_EQ(0, pthread_barrierattr_init(&attr));
  int pshared;
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
  ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
  ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
}
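
// A minimal sketch (hypothetical and unused) of what PTHREAD_PROCESS_SHARED
// is for: placing a barrier in a MAP_SHARED mapping so that related processes
// can synchronize on it. Error handling is elided and the count of 2 is
// arbitrary.
[[maybe_unused]] static pthread_barrier_t* SharedBarrierSketch() {
  void* mem = mmap(nullptr, sizeof(pthread_barrier_t), PROT_READ | PROT_WRITE,
                   MAP_ANONYMOUS | MAP_SHARED, -1, 0);
  if (mem == MAP_FAILED) return nullptr;
  pthread_barrierattr_t attr;
  pthread_barrierattr_init(&attr);
  pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
  pthread_barrier_t* barrier = static_cast<pthread_barrier_t*>(mem);
  pthread_barrier_init(barrier, &attr, 2);
  pthread_barrierattr_destroy(&attr);
  return barrier;
}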

struct BarrierTestHelperData {
  size_t thread_count;
  pthread_barrier_t barrier;
  std::atomic<int> finished_mask;
  std::atomic<int> serial_thread_count;
  size_t iteration_count;
  std::atomic<size_t> finished_iteration_count;

  BarrierTestHelperData(size_t thread_count, size_t iteration_count)
      : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
        iteration_count(iteration_count), finished_iteration_count(0) {
  }
};

struct BarrierTestHelperArg {
  int id;
  BarrierTestHelperData* data;
};

static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  for (size_t i = 0; i < arg->data->iteration_count; ++i) {
    int result = pthread_barrier_wait(&arg->data->barrier);
    if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
      arg->data->serial_thread_count++;
    } else {
      ASSERT_EQ(0, result);
    }
    // The last thread to set its bit in finished_mask checks the invariants
    // for this iteration and resets the per-iteration state.
    int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
    mask |= 1 << arg->id;
    if (mask == ((1 << arg->data->thread_count) - 1)) {
      ASSERT_EQ(1, arg->data->serial_thread_count);
      arg->data->finished_iteration_count++;
      arg->data->finished_mask = 0;
      arg->data->serial_thread_count = 0;
    }
  }
}

TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
  ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
  std::vector<pthread_t> threads(data.thread_count);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].id = i;
    args[i].data = &data;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
  ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
}

struct BarrierDestroyTestArg {
  std::atomic<int> tid;
  pthread_barrier_t* barrier;
};

static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
}

TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierDestroyTestArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify that the barrier can be destroyed directly after pthread_barrier_wait().
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}

struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};

void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    int result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
    // A second wait stops any thread from starting the next iteration (and
    // overwriting the array) before every thread has checked this one.
    result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}

TEST(pthread, pthread_barrier_check_ordering) {
  const size_t THREAD_COUNT = 4;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
  size_t array[THREAD_COUNT];
  std::vector<pthread_t> threads(THREAD_COUNT);
  std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    args[i].barrier = &barrier;
    args[i].array = array;
    args[i].array_length = THREAD_COUNT;
    args[i].id = i;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
                                &args[i]));
  }
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
}

TEST(pthread, pthread_barrier_init_zero_count) {
  pthread_barrier_t barrier;
  ASSERT_EQ(EINVAL, pthread_barrier_init(&barrier, nullptr, 0));
}

TEST(pthread, pthread_spinlock_smoke) {
  pthread_spinlock_t lock;
  ASSERT_EQ(0, pthread_spin_init(&lock, 0));
  ASSERT_EQ(0, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_lock(&lock));
  ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_destroy(&lock));
}
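
// A minimal usage sketch (hypothetical and unused): a spinlock guarding a
// shared counter. Spinlocks busy-wait, so they only make sense for critical
// sections short enough that sleeping would cost more than spinning.
[[maybe_unused]] static void SpinlockUsageSketch() {
  pthread_spinlock_t lock;
  pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
  int counter = 0;
  pthread_spin_lock(&lock);
  ++counter;  // Keep the critical section short: waiters burn CPU.
  pthread_spin_unlock(&lock);
  pthread_spin_destroy(&lock);
}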

TEST(pthread, pthread_attr_getdetachstate__pthread_attr_setdetachstate) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_DETACHED, state);

  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);

  ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
}
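
// A minimal usage sketch (hypothetical and unused): a thread created in the
// PTHREAD_CREATE_DETACHED state releases its resources when it exits and
// must never be passed to pthread_join.
[[maybe_unused]] static void DetachedThreadSketch() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t t;
  pthread_create(&t, &attr, [](void*) -> void* { return nullptr; }, nullptr);
  pthread_attr_destroy(&attr);
  // No pthread_join(t, ...): the thread cleans up after itself.
}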

TEST(pthread, pthread_create__mmap_failures) {
  // After a thread is successfully created, native_bridge might need more memory to run it.
  SKIP_WITH_NATIVE_BRIDGE;

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

  const auto kPageSize = sysconf(_SC_PAGE_SIZE);

  // Use up all the VMAs. By default this is 64Ki (though some will already be in use).
  // Alternating the protection stops the kernel from merging adjacent mappings into one VMA.
  std::vector<void*> pages;
  pages.reserve(64 * 1024);
  int prot = PROT_NONE;
  while (true) {
    void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) break;
    pages.push_back(page);
    prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
  }

  // Try creating threads, freeing up a page each time we fail.
  size_t EAGAIN_count = 0;
  size_t i = 0;
  for (; i < pages.size(); ++i) {
    pthread_t t;
    int status = pthread_create(&t, &attr, IdFn, nullptr);
    if (status != EAGAIN) break;
    ++EAGAIN_count;
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }

  // Creating a thread uses at least three VMAs: the combined stack and TLS, and a guard on each
  // side. So we should have seen at least three failures.
  ASSERT_GE(EAGAIN_count, 3U);

  for (; i < pages.size(); ++i) {
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }
}

TEST(pthread, pthread_setschedparam) {
  sched_param p = { .sched_priority = INT_MIN };
  ASSERT_EQ(EINVAL, pthread_setschedparam(pthread_self(), INT_MIN, &p));
}

TEST(pthread, pthread_setschedprio) {
  ASSERT_EQ(EINVAL, pthread_setschedprio(pthread_self(), INT_MIN));
}

TEST(pthread, pthread_attr_getinheritsched__pthread_attr_setinheritsched) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_INHERIT_SCHED, state);

  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);

  ASSERT_EQ(EINVAL, pthread_attr_setinheritsched(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
}

TEST(pthread, pthread_attr_setinheritsched__PTHREAD_INHERIT_SCHED__PTHREAD_EXPLICIT_SCHED) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  // If we set invalid scheduling attributes but choose to inherit, everything's fine...
  sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) + 1 };
  ASSERT_EQ(0, pthread_attr_setschedparam(&attr, &param));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));

#if defined(__LP64__)
  // If we ask to use them, though, we'll see a failure...
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(EINVAL, pthread_create(&t, &attr, IdFn, nullptr));
#else
  // For backwards compatibility with broken apps, we just ignore failures
  // to set scheduler attributes on LP32.
#endif
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_INHERIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_EXPLICIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_OTHER));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_OTHER, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched__takes_effect_despite_SCHED_RESET_ON_FORK) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);

TEST(pthread, run_on_all_threads) {
#if defined(__BIONIC__)
  // Create a thread that churns thread creation and destruction in the
  // background, so that android_run_on_all_threads races against threads
  // starting and exiting.
  pthread_t t;
  ASSERT_EQ(
      0, pthread_create(
             &t, nullptr,
             [](void*) -> void* {
               pthread_attr_t detached;
               if (pthread_attr_init(&detached) != 0 ||
                   pthread_attr_setdetachstate(&detached, PTHREAD_CREATE_DETACHED) != 0) {
                 return reinterpret_cast<void*>(errno);
               }

               for (int i = 0; i != 1000; ++i) {
                 pthread_t t1, t2;
                 if (pthread_create(
                         &t1, &detached, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_create(
                         &t2, nullptr, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_join(t2, nullptr) != 0) {
                   return reinterpret_cast<void*>(errno);
                 }
               }

               if (pthread_attr_destroy(&detached) != 0) {
                 return reinterpret_cast<void*>(errno);
               }
               return nullptr;
             },
             nullptr));

  for (int i = 0; i != 1000; ++i) {
    ASSERT_TRUE(android_run_on_all_threads([](void* arg) { return arg == nullptr; }, nullptr));
  }

  void* retval;
  ASSERT_EQ(0, pthread_join(t, &retval));
  ASSERT_EQ(nullptr, retval);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(pthread, pthread_getaffinity_np_failure) {
  // Trivial test of the errno-preserving/returning behavior.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
  errno = 0;
  ASSERT_EQ(EINVAL, pthread_getaffinity_np(pthread_self(), 0, nullptr));
  ASSERT_ERRNO(0);
#pragma clang diagnostic pop
}

TEST(pthread, pthread_getaffinity) {
  cpu_set_t set;
  CPU_ZERO(&set);
  ASSERT_EQ(0, pthread_getaffinity_np(pthread_self(), sizeof(set), &set));
  ASSERT_GT(CPU_COUNT(&set), 0);
}

TEST(pthread, pthread_setaffinity_np_failure) {
  // Trivial test of the errno-preserving/returning behavior.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
  errno = 0;
  ASSERT_EQ(EINVAL, pthread_setaffinity_np(pthread_self(), 0, nullptr));
  ASSERT_ERRNO(0);
#pragma clang diagnostic pop
}

TEST(pthread, pthread_setaffinity) {
  cpu_set_t set;
  CPU_ZERO(&set);
  ASSERT_EQ(0, pthread_getaffinity_np(pthread_self(), sizeof(set), &set));
  // It's hard to make any more general claim than this,
  // but it ought to be safe to ask for the same affinity you already have.
  ASSERT_EQ(0, pthread_setaffinity_np(pthread_self(), sizeof(set), &set));
}
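
// A minimal sketch (hypothetical and unused) of a stronger use of
// pthread_setaffinity_np: pinning the calling thread to the first CPU in its
// currently allowed set. Returns 0 on success or an errno-style value.
[[maybe_unused]] static int PinToFirstAllowedCpuSketch() {
  cpu_set_t current;
  CPU_ZERO(&current);
  int rc = pthread_getaffinity_np(pthread_self(), sizeof(current), &current);
  if (rc != 0) return rc;
  for (int cpu = 0; cpu < CPU_SETSIZE; ++cpu) {
    if (CPU_ISSET(cpu, &current)) {
      cpu_set_t target;
      CPU_ZERO(&target);
      CPU_SET(cpu, &target);
      return pthread_setaffinity_np(pthread_self(), sizeof(target), &target);
    }
  }
  return EINVAL;  // No CPU found in the current mask; shouldn't happen.
}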
3176