/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <atomic>
#include <string>
#include <vector>

#include "private/bionic_macros.h"
#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}
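
// Illustrative sketch (not from the original test file): pthread_key_create's second argument
// is an optional destructor; at thread exit the library calls it on each thread's non-NULL
// value. The names below are hypothetical and the block is compiled out.
#if 0
static pthread_key_t g_example_key;

static void ExampleKeyDestructor(void* value) {
  free(value);  // Runs once per exiting thread that left a non-NULL value in the slot.
}

static void* ExampleKeyThreadFn(void*) {
  // Each thread sees its own slot; the destructor reclaims the allocation at thread exit.
  pthread_setspecific(g_example_key, malloc(128));
  return NULL;
}

static void ExampleKeyUsage() {
  pthread_key_create(&g_example_key, ExampleKeyDestructor);
  pthread_t t;
  pthread_create(&t, NULL, ExampleKeyThreadFn, NULL);
  pthread_join(t, NULL);
  pthread_key_delete(g_example_key);
}
#endif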

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  ~SpinFunctionHelper() {
    UnSpin();
  }

  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }

  static volatile bool spin_flag_;
};

// It doesn't matter if spin_flag_ is used in several tests,
// because it is always set to false after each test. Each thread
// that loops on spin_flag_ will see it become false at some point.
volatile bool SpinFunctionHelper::spin_flag_ = false;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}
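
// Illustrative sketch (not from the original test file): a thread should be either joined or
// detached exactly once; after pthread_detach the library reclaims the thread's resources when
// it exits, and a later pthread_join is an error, as asserted above. Hypothetical helper:
#if 0
static void ExampleFireAndForget(void* (*fn)(void*), void* arg) {
  pthread_t t;
  if (pthread_create(&t, NULL, fn, arg) == 0) {
    pthread_detach(t);  // Nobody will join this thread; it cleans up after itself on exit.
  }
}
#endif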

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}

struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}
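
// Illustrative sketch (not from the original test file): the pattern exercised above is the
// usual way to handle signals synchronously; block the signal with pthread_sigmask (new threads
// inherit the mask) and dedicate one thread to sigwait. Names are hypothetical.
#if 0
static void* ExampleSignalWaiter(void*) {
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  int sig = 0;
  sigwait(&set, &sig);  // Blocks until SIGUSR1 is delivered.
  return reinterpret_cast<void*>(static_cast<uintptr_t>(sig));
}

static void ExampleSignalThreadSetup() {
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  pthread_sigmask(SIG_BLOCK, &set, NULL);  // Block in this thread; the waiter inherits the mask.

  pthread_t waiter;
  pthread_create(&waiter, NULL, ExampleSignalWaiter, NULL);
  pthread_kill(waiter, SIGUSR1);
  pthread_join(waiter, NULL);
}
#endif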

TEST(pthread, pthread_setname_np__too_long) {
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
}

TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}

TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}

TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after the thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_detach_no_leak) {
  size_t initial_bytes = 0;
  // Run this loop more than once since the first loop causes some memory
  // to be allocated permanently. Run an extra loop to help catch any subtle
  // memory leaks.
  for (size_t loop = 0; loop < 3; loop++) {
    // Set the initial bytes on the second loop since the memory in use
    // should have stabilized.
    if (loop == 1) {
      initial_bytes = mallinfo().uordblks;
    }

    pthread_attr_t attr;
    ASSERT_EQ(0, pthread_attr_init(&attr));
    ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

    std::vector<pthread_t> threads;
    for (size_t i = 0; i < 32; ++i) {
      pthread_t t;
      ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
      threads.push_back(t);
    }

    for (size_t i = 0; i < 32; ++i) {
      ASSERT_EQ(0, pthread_detach(threads[i])) << i;
    }
  }

  size_t final_bytes = mallinfo().uordblks;
  int leaked_bytes = (final_bytes - initial_bytes);

  ASSERT_EQ(0, leaked_bytes);
}
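
// Illustrative sketch (not from the original test file): the leak check above compares
// mallinfo().uordblks snapshots, which count the bytes currently in allocated chunks, so the
// difference between two snapshots approximates a heap-usage delta. Hypothetical helper:
#if 0
static long ExampleHeapDelta(void (*fn)()) {
  size_t before = mallinfo().uordblks;
  fn();
  size_t after = mallinfo().uordblks;
  return static_cast<long>(after) - static_cast<long>(before);  // > 0 hints at a leak.
}
#endif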

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __GLIBC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __GLIBC__
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

#ifdef __BIONIC__
  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
#endif

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
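
// Illustrative sketch (not from the original test file): the usual division of labor for a
// rwlock is many concurrent readers and a single exclusive writer. Names are hypothetical.
#if 0
static pthread_rwlock_t g_example_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static int g_example_value = 0;

static int ExampleRead() {
  pthread_rwlock_rdlock(&g_example_rwlock);  // Shared: other readers may hold it concurrently.
  int result = g_example_value;
  pthread_rwlock_unlock(&g_example_rwlock);
  return result;
}

static void ExampleWrite(int value) {
  pthread_rwlock_wrlock(&g_example_rwlock);  // Exclusive: waits for readers to drain.
  g_example_value = value;
  pthread_rwlock_unlock(&g_example_rwlock);
}
#endif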

struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
};

static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
    usleep(5000);
  }
  usleep(5000);
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
    usleep(5000);
  }
  usleep(5000);
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}
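
// Illustrative sketch (not from the original test file): pthread_once is the standard tool for
// thread-safe lazy initialization; however many threads race through the same control word, the
// init routine runs exactly once. Names are hypothetical.
#if 0
static pthread_once_t g_example_once = PTHREAD_ONCE_INIT;
static FILE* g_example_log = NULL;

static void ExampleInitLog() {
  g_example_log = fopen("/dev/null", "w");  // Executed once, no matter how many callers race.
}

static FILE* ExampleGetLog() {
  pthread_once(&g_example_once, ExampleInitLog);
  return g_example_log;
}
#endif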

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }

TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(0x12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0x12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(0x21, g_atfork_prepare_calls);
}
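
// Illustrative sketch (not from the original test file): the prepare/parent/child ordering
// checked above lets a library take its locks before fork() and release them on both sides, so
// the child never inherits a lock held by a thread that no longer exists. Names are hypothetical.
#if 0
static pthread_mutex_t g_example_fork_lock = PTHREAD_MUTEX_INITIALIZER;

static void ExamplePrepare() { pthread_mutex_lock(&g_example_fork_lock); }    // Last registered runs first.
static void ExampleParent() { pthread_mutex_unlock(&g_example_fork_lock); }   // Registration order.
static void ExampleChild() { pthread_mutex_unlock(&g_example_fork_lock); }    // Registration order.

static void ExampleRegisterAtFork() {
  pthread_atfork(ExamplePrepare, ExampleParent, ExampleChild);
}
#endif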

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}
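
// Illustrative sketch (not from the original test file): choosing CLOCK_MONOTONIC on the
// condattr makes pthread_cond_timedwait interpret its absolute deadline against the monotonic
// clock, so wall-clock adjustments can't shorten or lengthen the wait. Names are hypothetical.
#if 0
static void ExampleMonotonicTimedWait(pthread_mutex_t* mutex, const bool* done) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  pthread_cond_t cond;
  pthread_cond_init(&cond, &attr);

  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);  // Same clock the condattr selected.
  ts.tv_sec += 1;                       // Absolute deadline: one second from now.

  pthread_mutex_lock(mutex);
  while (!*done) {
    if (pthread_cond_timedwait(&cond, mutex, &ts) == ETIMEDOUT) break;
  }
  pthread_mutex_unlock(mutex);

  pthread_cond_destroy(&cond);
  pthread_condattr_destroy(&attr);
}
#endif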

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else  // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif  // !defined(__BIONIC__)
}

class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}

TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}
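
// Illustrative sketch (not from the original test file): pthread_mutex_timedlock expects an
// absolute CLOCK_REALTIME deadline, so callers typically build one from "now plus a relative
// delay", keeping tv_nsec normalized. Hypothetical helper:
#if 0
static void ExampleDeadlineFromNow(timespec* ts, long delay_ms) {
  clock_gettime(CLOCK_REALTIME, ts);
  ts->tv_sec += delay_ms / 1000;
  ts->tv_nsec += (delay_ms % 1000) * 1000000;
  if (ts->tv_nsec >= 1000000000) {  // Keep tv_nsec in [0, 1e9).
    ts->tv_nsec -= 1000000000;
    ts->tv_sec += 1;
  }
}
#endif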

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // What if RLIMIT_STACK is smaller than the stack's current extent?
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  // What if RLIMIT_STACK isn't a whole number of pages?
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check whether something on the stack is in the range
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL,
            reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
            NULL));
  pthread_join(t, NULL);
}

#if defined(__BIONIC__)
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();
  return NULL;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  pthread_join(t, NULL);

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
  pthread_join(t, NULL);
  ASSERT_EQ(2U, cleanup_counter);
}
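
// Illustrative sketch (not from the original test file): outside of tests, the push/pop pair is
// mostly used to guarantee a resource is released even if the thread exits or is cancelled while
// holding it, which is exactly the situation PthreadCleanupTester simulates. Names are hypothetical.
#if 0
static void ExampleUnlockCleanup(void* arg) {
  pthread_mutex_unlock(static_cast<pthread_mutex_t*>(arg));
}

static void ExampleCriticalSection(pthread_mutex_t* mutex) {
  pthread_mutex_lock(mutex);
  pthread_cleanup_push(ExampleUnlockCleanup, mutex);
  // ... work that may call pthread_exit() or hit a cancellation point ...
  pthread_cleanup_pop(1);  // Must balance the push; 1 == run the handler (unlock) now.
}
#endif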

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

struct PthreadMutex {
  pthread_mutex_t lock;

  PthreadMutex(int mutex_type) {
    init(mutex_type);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

TEST(pthread, pthread_mutex_lock_NORMAL) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
}

class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;

  static void thread_fn(MutexWakeupHelper* helper) {
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    while (progress != LOCK_WAITING) {
      usleep(5000);
    }
    usleep(5000);
    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_LOG_(INFO) << "This test does nothing as 32-bit tids are supported by pthread_mutex.\n";
#endif
}

class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (auto& p : allocated_array) {
      delete[] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}