/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#include <atomic>
#include <vector>

#include "private/bionic_macros.h"
#include "private/ScopeGuard.h"
#include "BionicDeathTest.h"
#include "ScopedSignalHandler.h"

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns NULL.
  ASSERT_EQ(NULL, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

static void* IdFn(void* arg) {
  return arg;
}

class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static volatile bool spin_flag_;
};

// It doesn't matter if spin_flag_ is shared between several tests, because it
// is always set to false after each test. Any thread looping on spin_flag_
// will eventually see it become false.
volatile bool SpinFunctionHelper::spin_flag_ = false;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}

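// Uses pthread_getattr_np to check whether thread `t` currently has the
// expected detach state.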
static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

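// Creates a short-lived thread and joins it, so `t` refers to a thread that
// has already exited. Used by the *__no_such_thread tests.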
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}

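// State for the regression test below: the main thread calls pthread_exit()
// while another thread is blocked joining it.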
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset_t wait_set;
  sigfillset(&wait_set);
  return reinterpret_cast<void*>(sigwait(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}

TEST(pthread, pthread_setname_np__too_long) {
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
}

TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}

TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}

TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}

TEST(pthread, pthread_detach_no_leak) {
  size_t initial_bytes = 0;
  // Run this loop more than once since the first loop causes some memory
  // to be allocated permanently. Run an extra loop to help catch any subtle
  // memory leaks.
  for (size_t loop = 0; loop < 3; loop++) {
    // Set the initial bytes on the second loop since the memory in use
    // should have stabilized.
    if (loop == 1) {
      initial_bytes = mallinfo().uordblks;
    }

    pthread_attr_t attr;
    ASSERT_EQ(0, pthread_attr_init(&attr));
    ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

    std::vector<pthread_t> threads;
    for (size_t i = 0; i < 32; ++i) {
      pthread_t t;
      ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
      threads.push_back(t);
    }

    sleep(1);

    for (size_t i = 0; i < 32; ++i) {
      ASSERT_EQ(0, pthread_detach(threads[i])) << i;
    }
  }

  size_t final_bytes = mallinfo().uordblks;
  int leaked_bytes = (final_bytes - initial_bytes);

  ASSERT_EQ(0, leaked_bytes);
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}

TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}

TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}

TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}

TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}

TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

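// The following helpers create a thread with the given attributes and report
// the guard size or stack size that the new thread actually observes via
// pthread_getattr_np.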
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return NULL;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, NULL);
  return result;
}

TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

#ifdef __BIONIC__
  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
#endif

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

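// Shared state for the rwlock wakeup tests below: a worker thread advances
// `progress` as it blocks on and then acquires the lock, so the main thread
// can tell that the blocked acquisition really woke up.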
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
};

static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
    usleep(5000);
  }
  usleep(5000);
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
    usleep(5000);
  }
  usleep(5000);
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }

TEST(pthread, pthread_atfork_smoke) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  int pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(0x12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0x12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(0x21, g_atfork_prepare_calls);
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif // !defined(__BIONIC__)
}

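// Fixture for the condition-variable wakeup tests: a worker thread waits on
// `cond` while the main test signals or broadcasts it, with `progress`
// tracking the handoff.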
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

 protected:
  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}

TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}

TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, NULL));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_nsec += 1;
  ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check whether something on the stack is in the range of
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
    NULL));
  pthread_join(t, NULL);
}

#if defined(__BIONIC__)
static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();
  return NULL;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  pthread_join(t, NULL);

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(CountCleanupRoutine, NULL);
  pthread_cleanup_push(AbortCleanupRoutine, NULL);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(NULL);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return NULL;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
  pthread_join(t, NULL);
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

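// Small RAII wrapper that initializes a pthread_mutex_t of the requested type
// and destroys it when the wrapper goes out of scope.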
struct PthreadMutex {
  pthread_mutex_t lock;

  PthreadMutex(int mutex_type) {
    init(mutex_type);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

TEST(pthread, pthread_mutex_lock_NORMAL) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
}

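// Checks that a thread blocked in pthread_mutex_lock() is woken up once the
// owning thread unlocks, for each mutex type.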
class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;

  static void thread_fn(MutexWakeupHelper* helper) {
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, NULL,
      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    while (progress != LOCK_WAITING) {
      usleep(5000);
    }
    usleep(5000);
    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != NULL);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // The current pthread_mutex implementation uses 16 bits to represent the owner tid.
  // Change the implementation if we need to support values higher than 65535.
  ASSERT_LE(pid_max, 65536);
}

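// Hands out pointers that are aligned to `alignment` but deliberately not to
// `2 * alignment`, so tests can check that pthread types don't require
// stricter alignment than advertised.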
class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (auto& p : allocated_array) {
      delete [] p;
    }
  }

 private:
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
      allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
      allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
      allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}