/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include <gtest/gtest.h>
18
Dmitriy Ivanovea295f62014-11-20 20:47:02 -080019#include <dlfcn.h>
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070020#include <errno.h>
Elliott Hughes5b9310e2013-10-02 16:59:05 -070021#include <inttypes.h>
Elliott Hughesb95cf0d2013-07-15 14:51:07 -070022#include <limits.h>
Elliott Hughes04620a32014-03-07 17:59:05 -080023#include <malloc.h>
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070024#include <pthread.h>
Christopher Ferrisf04935c2013-12-20 18:43:21 -080025#include <signal.h>
Yabin Cui140f3672015-02-03 10:32:00 -080026#include <stdio.h>
Elliott Hughes70b24b12013-11-15 11:51:07 -080027#include <sys/mman.h>
Elliott Hughes57b7a612014-08-25 17:26:50 -070028#include <sys/syscall.h>
Narayan Kamath51e6cb32014-03-03 15:38:51 +000029#include <time.h>
Elliott Hughes4d014e12012-09-07 16:47:54 -070030#include <unistd.h>
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070031
Yabin Cui08ee8d22015-02-11 17:04:36 -080032#include <atomic>
Yabin Cuif7969852015-04-02 17:47:48 -070033#include <regex>
Yabin Cuib5845722015-03-16 22:46:42 -070034#include <vector>
Yabin Cui08ee8d22015-02-11 17:04:36 -080035
Yabin Cuif7969852015-04-02 17:47:48 -070036#include <base/file.h>
37#include <base/stringprintf.h>
38
Yabin Cui17393b02015-03-21 15:08:25 -070039#include "private/bionic_macros.h"
40#include "private/ScopeGuard.h"
41#include "BionicDeathTest.h"
42#include "ScopedSignalHandler.h"
43
Yabin Cuif7969852015-04-02 17:47:48 -070044extern "C" pid_t gettid();
45
Elliott Hughesbfeab1b2012-09-05 17:47:37 -070046TEST(pthread, pthread_key_create) {
47 pthread_key_t key;
48 ASSERT_EQ(0, pthread_key_create(&key, NULL));
49 ASSERT_EQ(0, pthread_key_delete(key));
50 // Can't delete a key that's already been deleted.
51 ASSERT_EQ(EINVAL, pthread_key_delete(key));
52}
Elliott Hughes4d014e12012-09-07 16:47:54 -070053
// The advertised key limit must meet the POSIX minimum.
TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}
Elliott Hughes718a5b52014-01-28 17:02:03 -080058
Yabin Cui6c238f22014-12-11 20:50:41 -080059TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
Dan Albertc4bcc752014-09-30 11:48:24 -070060 int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
Yabin Cui6c238f22014-12-11 20:50:41 -080061 ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
Dan Albertc4bcc752014-09-30 11:48:24 -070062}
63
// Allocates many keys at once, gives each a distinct value, and checks that
// every key still reads back its own value (keys don't alias each other).
TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many keys.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  // Delete every created key even if an ASSERT bails out of the test early.
  auto scope_guard = make_scope_guard([&keys]{
    for (auto key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  // Walk backwards: keys[i] must still hold i; delete each key as we go
  // (the scope guard then has nothing left to clean up).
  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}
91
// Creating keys until failure must stop with EAGAIN at or before
// PTHREAD_KEYS_MAX total keys.
TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, NULL);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (auto key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}
118
Elliott Hughesebb770f2014-06-25 13:46:46 -0700119TEST(pthread, pthread_key_delete) {
120 void* expected = reinterpret_cast<void*>(1234);
121 pthread_key_t key;
122 ASSERT_EQ(0, pthread_key_create(&key, NULL));
123 ASSERT_EQ(0, pthread_setspecific(key, expected));
124 ASSERT_EQ(expected, pthread_getspecific(key));
125 ASSERT_EQ(0, pthread_key_delete(key));
126 // After deletion, pthread_getspecific returns NULL.
127 ASSERT_EQ(NULL, pthread_getspecific(key));
128 // And you can't use pthread_setspecific with the deleted key.
129 ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
130}
131
// TLS values survive fork: the child (single-threaded after fork) inherits
// the forking thread's key/value pairs, and the parent's values are intact
// after the child exits.
TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    // _exit (not exit) so the child skips atexit handlers/flushes.
    _exit(99);
  }

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  ASSERT_TRUE(WIFEXITED(status));
  ASSERT_EQ(99, WEXITSTATUS(status));

  // The parent's TLS value is untouched by the child.
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}
156
// Thread body for pthread_key_dirty: |arg| points at a pthread_key_t; returns
// whatever value this thread's TLS slot for that key currently holds.
static void* DirtyKeyFn(void* key) {
  pthread_key_t* tls_key = reinterpret_cast<pthread_key_t*>(key);
  return pthread_getspecific(*tls_key);
}
160
// A new thread must read NULL for a fresh key even when the memory backing
// its stack/TLS was dirty beforehand (pre-filled with 0xff here).
TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, NULL));

  size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  // Dirty the whole stack so any uninitialized TLS slot would read as ~0.
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}
184
// Bionic-only: a zero-initialized (never-created) static key must be treated
// as invalid rather than aliasing a real key.
TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug is about a static/global pthread key being used before creation.
  // So here tests if the static/global default value 0 can be detected as invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_LOG_(INFO) << "This test tests bionic pthread key implementation detail.\n";
#endif
}
197
// Trivial thread body: returns its argument unchanged (identity function).
static void* IdFn(void* arg) {
  return arg;
}
201
// Hands out a pthread-compatible thread function that busy-waits until the
// helper is told to stop (or destroyed). Used by tests that need a thread
// that is reliably still running.
//
// Fix: the cross-thread stop flag was a `volatile bool`, which is a data
// race (volatile is not a synchronization primitive). std::atomic<bool>
// (<atomic> is already included in this file) provides the intended
// semantics with defined behavior.
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }
  ~SpinFunctionHelper() {
    UnSpin();
  }
  // Returns the thread entry point; threads running it loop until UnSpin().
  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  // Releases every thread currently looping in SpinFn.
  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return NULL;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter if spin_flag_ is used in several tests,
// because it is always set to false after each test. Each thread
// looping on spin_flag_ will find it becomes false at some time.
std::atomic<bool> SpinFunctionHelper::spin_flag_(false);
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400230
// Thread body: joins the pthread_t smuggled through |arg| and returns
// pthread_join's error code encoded as a pointer (0 on success).
static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), NULL));
}
234
Sergey Melnikov10ce9692012-10-26 14:06:43 +0400235static void AssertDetached(pthread_t t, bool is_detached) {
236 pthread_attr_t attr;
237 ASSERT_EQ(0, pthread_getattr_np(t, &attr));
238 int detach_state;
239 ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
240 pthread_attr_destroy(&attr);
241 ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
242}
243
// Creates a short-lived thread and joins it, leaving |t| holding the id of a
// thread that no longer exists — used by the *__no_such_thread tests.
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, NULL));
  ASSERT_EQ(0, pthread_join(t, NULL));
}
248
Elliott Hughes4d014e12012-09-07 16:47:54 -0700249TEST(pthread, pthread_create) {
250 void* expected_result = reinterpret_cast<void*>(123);
251 // Can we create a thread?
252 pthread_t t;
253 ASSERT_EQ(0, pthread_create(&t, NULL, IdFn, expected_result));
254 // If we join, do we get the expected value back?
255 void* result;
256 ASSERT_EQ(0, pthread_join(t, &result));
257 ASSERT_EQ(expected_result, result);
258}
259
// An unsatisfiable resource request makes pthread_create fail with EAGAIN.
TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  // Request (nearly) the entire address space as stack, rounded down to a
  // page boundary so the size itself is valid.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, NULL));
}
268
// Once a running thread is detached it can no longer be joined.
TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));
}
282
// Detaching a thread that someone is already joining: bionic rejects it with
// EINVAL; glibc accepts it as a no-op. Either way the pending join completes.
TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
Elliott Hughes14f19592012-10-29 10:19:44 -0700309
// Joining yourself would deadlock, so it must fail with EDEADLK.
TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), NULL));
}
Elliott Hughes4f251be2012-11-01 16:33:29 -0700313
// Scenario for http://code.google.com/p/android/issues/detail?id=37410:
// a secondary thread joins the main thread after the main thread has called
// pthread_exit. The process should still exit cleanly with status 0.
// The mutex is used purely as a "child is running" handshake.
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, NULL));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, NULL, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(NULL);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(data->main_thread, NULL);

    return NULL;
  }
};
Elliott Hughes4f251be2012-11-01 16:33:29 -0700348
// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.

class pthread_DeathTest : public BionicDeathTest {};

TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  // The forked child runs TestBug37410::main() and must exit with status 0.
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}
Elliott Hughesc5d028f2013-01-10 14:42:14 -0800358
// Thread body: blocks in sigwait() on the full signal set, stores the signal
// it receives through |arg| (an int*), and returns sigwait's result encoded
// as a pointer.
static void* SignalHandlerFn(void* arg) {
  sigset_t all_signals;
  sigfillset(&all_signals);
  int* received = reinterpret_cast<int*>(arg);
  return reinterpret_cast<void*>(sigwait(&all_signals, received));
}
364
// End-to-end check of pthread_sigmask: block SIGUSR1, verify both
// pthread_sigmask and sigprocmask observe it, deliver it to a sigwait-ing
// thread, then restore the original mask.
TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, NULL));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, NULL, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, NULL, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, NULL));
}
Elliott Hughes5e3fc432013-02-11 16:36:48 -0800405
// Thread names longer than the kernel's limit are rejected with ERANGE.
TEST(pthread, pthread_setname_np__too_long) {
  ASSERT_EQ(ERANGE, pthread_setname_np(pthread_self(), "this name is far too long for linux"));
}
409
// A thread can rename itself.
TEST(pthread, pthread_setname_np__self) {
  ASSERT_EQ(0, pthread_setname_np(pthread_self(), "short 1"));
}
413
// One thread can rename another still-running thread.
TEST(pthread, pthread_setname_np__other) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));
  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
}
421
// Renaming a thread that has already exited fails (ENOENT here, since the
// name is set via procfs and the task entry is gone).
TEST(pthread, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  // Call pthread_setname_np after thread has already exited.
  ASSERT_EQ(ENOENT, pthread_setname_np(dead_thread, "short 3"));
}
Elliott Hughes9d23e042013-02-15 19:21:51 -0800429
TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}
434
// An out-of-range signal number is rejected with EINVAL.
TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}
438
// SIGALRM handler that re-raises SIGALRM (once) from inside the handler.
// The static count limits recursion to a single re-entry.
static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}
447
// pthread_kill must be usable from inside a signal handler.
TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}
452
// Detaching an already-exited (joined) thread fails with ESRCH.
TEST(pthread, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_detach(dead_thread));
}
459
// Detached threads that have already exited must not leak heap memory.
// Heap usage is sampled with mallinfo().uordblks before and after.
TEST(pthread, pthread_detach_no_leak) {
  size_t initial_bytes = 0;
  // Run this loop more than once since the first loop causes some memory
  // to be allocated permanently. Run an extra loop to help catch any subtle
  // memory leaks.
  for (size_t loop = 0; loop < 3; loop++) {
    // Set the initial bytes on the second loop since the memory in use
    // should have stabilized.
    if (loop == 1) {
      initial_bytes = mallinfo().uordblks;
    }

    pthread_attr_t attr;
    ASSERT_EQ(0, pthread_attr_init(&attr));
    ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));

    std::vector<pthread_t> threads;
    for (size_t i = 0; i < 32; ++i) {
      pthread_t t;
      ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, NULL));
      threads.push_back(t);
    }

    // Give the threads time to exit before we detach them.
    sleep(1);

    for (size_t i = 0; i < 32; ++i) {
      ASSERT_EQ(0, pthread_detach(threads[i])) << i;
    }
  }

  size_t final_bytes = mallinfo().uordblks;
  int leaked_bytes = (final_bytes - initial_bytes);

  ASSERT_EQ(0, leaked_bytes);
}
495
// The CPU-time clock returned by pthread_getcpuclockid for a running thread
// must be readable with clock_gettime.
TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spinhelper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, NULL, spinhelper.GetFunction(), NULL));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
}
507
// pthread_getcpuclockid on an exited thread fails with ESRCH.
TEST(pthread, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  ASSERT_EQ(ESRCH, pthread_getcpuclockid(dead_thread, &c));
}
515
// pthread_getschedparam on an exited thread fails with ESRCH.
TEST(pthread, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_getschedparam(dead_thread, &policy, &param));
}
524
// pthread_setschedparam on an exited thread fails with ESRCH.
TEST(pthread, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  ASSERT_EQ(ESRCH, pthread_setschedparam(dead_thread, policy, &param));
}
533
// Joining an already-joined thread fails with ESRCH.
TEST(pthread, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_join(dead_thread, NULL));
}
540
// pthread_kill (even with signal 0) on an exited thread fails with ESRCH.
TEST(pthread, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  ASSERT_EQ(ESRCH, pthread_kill(dead_thread, 0));
}
msg5550f020d12013-06-06 14:59:28 -0400547
// Only one thread may join a given thread; a second concurrent join fails,
// but the first join still completes normally.
TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spinhelper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, NULL, spinhelper.GetFunction(), NULL));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, NULL, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, NULL));

  spinhelper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700569
TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  // Many iterations to give the race a chance to manifest.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 64*1024;
    void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, NULL));
    ASSERT_EQ(0, pthread_join(t, NULL));
    // Unmapping the stack immediately after join is what triggers the bug.
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}
587
// Thread body: reports the calling thread's actual guard size through |arg|
// (a size_t*), as observed via pthread_getattr_np.
static void* GetActualGuardSizeFn(void* arg) {
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_t attr;
  pthread_getattr_np(pthread_self(), &attr);
  pthread_attr_getguardsize(&attr, out);
  return NULL;
}
594
595static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
596 size_t result;
597 pthread_t t;
598 pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
Elliott Hughes34c987a2014-09-22 16:01:26 -0700599 pthread_join(t, NULL);
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700600 return result;
601}
602
// Thread body: reports the calling thread's actual stack size through |arg|
// (a size_t*), as observed via pthread_getattr_np.
static void* GetActualStackSizeFn(void* arg) {
  size_t* out = reinterpret_cast<size_t*>(arg);
  pthread_attr_t attr;
  pthread_getattr_np(pthread_self(), &attr);
  pthread_attr_getstacksize(&attr, out);
  return NULL;
}
609
610static size_t GetActualStackSize(const pthread_attr_t& attributes) {
611 size_t result;
612 pthread_t t;
613 pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
Elliott Hughes34c987a2014-09-22 16:01:26 -0700614 pthread_join(t, NULL);
Elliott Hughesb95cf0d2013-07-15 14:51:07 -0700615 return result;
616}
617
// The attribute stores the requested guard size verbatim; rounding to page
// granularity happens later, in pthread_create.
TEST(pthread, pthread_attr_setguardsize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default guard size.
  size_t default_guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &default_guard_size));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(4096U, GetActualGuardSize(attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);

  // Large enough but not a multiple of the page size; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
}
643
// The attribute stores the requested stack size verbatim (rejecting sizes
// below PTHREAD_STACK_MIN); alignment handling differs between bionic
// (rounds up, per POSIX) and glibc (rounds down — a documented glibc bug).
TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}
Elliott Hughesc3f11402013-10-30 14:40:09 -0700676
Yabin Cui76615da2015-03-17 14:22:09 -0700677TEST(pthread, pthread_rwlockattr_smoke) {
678 pthread_rwlockattr_t attr;
679 ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
680
681 int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
682 for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
683 ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
684 int pshared;
685 ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
686 ASSERT_EQ(pshared_value_array[i], pshared);
687 }
688
689 int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
690 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
691 for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
692 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
693 int kind;
694 ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
695 ASSERT_EQ(kind_array[i], kind);
696 }
697
698 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
699}
700
// A default pthread_rwlock_init must produce the same bit pattern as the
// static PTHREAD_RWLOCK_INITIALIZER.
TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
  pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
  pthread_rwlock_t lock2;
  ASSERT_EQ(0, pthread_rwlock_init(&lock2, NULL));
  ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
}
707
// Single-threaded smoke test covering rdlock/wrlock/trylock combinations and
// the EDEADLK self-deadlock cases.
TEST(pthread, pthread_rwlock_smoke) {
  pthread_rwlock_t l;
  ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));

  // Single read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Multiple read lock
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Write lock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock: while write-held, further try-locks must fail with EBUSY.
  ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try reader lock: read try-locks nest, but a write try-lock must fail.
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // Try writer lock after unlock
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "read after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  // EDEADLK in "write after write"
  ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
  ASSERT_EQ(0, pthread_rwlock_unlock(&l));

  ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}
755
Yabin Cuif7969852015-04-02 17:47:48 -0700756static void WaitUntilThreadSleep(std::atomic<pid_t>& pid) {
757 while (pid == 0) {
758 usleep(1000);
759 }
760 std::string filename = android::base::StringPrintf("/proc/%d/stat", pid.load());
761 std::regex regex {R"(\s+S\s+)"};
762
763 while (true) {
764 std::string content;
765 ASSERT_TRUE(android::base::ReadFileToString(filename, &content));
766 if (std::regex_search(content, regex)) {
767 break;
768 }
769 usleep(1000);
770 }
771}
772
Yabin Cui08ee8d22015-02-11 17:04:36 -0800773struct RwlockWakeupHelperArg {
774 pthread_rwlock_t lock;
775 enum Progress {
776 LOCK_INITIALIZED,
777 LOCK_WAITING,
778 LOCK_RELEASED,
779 LOCK_ACCESSED
780 };
781 std::atomic<Progress> progress;
Yabin Cuif7969852015-04-02 17:47:48 -0700782 std::atomic<pid_t> tid;
Yabin Cui08ee8d22015-02-11 17:04:36 -0800783};
784
// Helper thread body: while the main thread holds a read lock, verify a
// write lock can't be taken, then block in wrlock until the reader releases.
static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  // The main thread holds a read lock, so the write lock isn't available yet.
  ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock));
  // We must only have woken after the main thread advanced to LOCK_RELEASED.
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}
797
// Verifies that releasing a read lock wakes a thread blocked in wrlock.
TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
  // Wait until the helper is actually blocked inside pthread_rwlock_wrlock.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  // Release the read lock; this should wake the blocked writer.
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
818
// Helper thread body: while the main thread holds the write lock, verify a
// read lock can't be taken, then block in rdlock until the writer releases.
static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  // The main thread holds the write lock, so a read lock isn't available yet.
  ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock));
  // We must only have woken after the main thread advanced to LOCK_RELEASED.
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}
831
// Verifies that releasing the write lock wakes a thread blocked in rdlock.
TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, NULL,
    reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
  // Wait until the helper is actually blocked inside pthread_rwlock_rdlock.
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  // Release the write lock; this should wake the blocked reader.
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, NULL));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}
852
Yabin Cui76615da2015-03-17 14:22:09 -0700853class RwlockKindTestHelper {
854 private:
855 struct ThreadArg {
856 RwlockKindTestHelper* helper;
857 std::atomic<pid_t>& tid;
858
859 ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
860 : helper(helper), tid(tid) { }
861 };
862
863 public:
864 pthread_rwlock_t lock;
865
866 public:
867 RwlockKindTestHelper(int kind_type) {
868 InitRwlock(kind_type);
869 }
870
871 ~RwlockKindTestHelper() {
872 DestroyRwlock();
873 }
874
875 void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
876 tid = 0;
877 ThreadArg* arg = new ThreadArg(this, tid);
878 ASSERT_EQ(0, pthread_create(&thread, NULL,
879 reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
880 }
881
882 void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
883 tid = 0;
884 ThreadArg* arg = new ThreadArg(this, tid);
885 ASSERT_EQ(0, pthread_create(&thread, NULL,
886 reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
887 }
888
889 private:
890 void InitRwlock(int kind_type) {
891 pthread_rwlockattr_t attr;
892 ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
893 ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
894 ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
895 ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
896 }
897
898 void DestroyRwlock() {
899 ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
900 }
901
902 static void WriterThreadFn(ThreadArg* arg) {
903 arg->tid = gettid();
904
905 RwlockKindTestHelper* helper = arg->helper;
906 ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
907 ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
908 delete arg;
909 }
910
911 static void ReaderThreadFn(ThreadArg* arg) {
912 arg->tid = gettid();
913
914 RwlockKindTestHelper* helper = arg->helper;
915 ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
916 ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
917 delete arg;
918 }
919};
920
// With PTHREAD_RWLOCK_PREFER_READER_NP, a new reader can still acquire the
// lock even while a writer is already blocked waiting for it: the reader
// thread is expected to finish (join) before we release our read lock.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  // The writer blocks behind our read lock.
  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // A reader is NOT blocked by the waiting writer.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
}
938
// With PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, a new reader must queue
// behind a waiting writer: here both the writer and the reader block until we
// release our read lock.
TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  // The writer blocks behind our read lock.
  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  // The reader also blocks, because a writer is already waiting.
  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, NULL));
  ASSERT_EQ(0, pthread_join(reader_thread, NULL));
}
957
Elliott Hughes1728b232014-05-14 10:02:03 -0700958static int g_once_fn_call_count = 0;
Elliott Hughesc3f11402013-10-30 14:40:09 -0700959static void OnceFn() {
Elliott Hughes1728b232014-05-14 10:02:03 -0700960 ++g_once_fn_call_count;
Elliott Hughesc3f11402013-10-30 14:40:09 -0700961}
962
963TEST(pthread, pthread_once_smoke) {
964 pthread_once_t once_control = PTHREAD_ONCE_INIT;
965 ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
966 ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
Elliott Hughes1728b232014-05-14 10:02:03 -0700967 ASSERT_EQ(1, g_once_fn_call_count);
Elliott Hughesc3f11402013-10-30 14:40:09 -0700968}
969
Elliott Hughes3694ec62014-05-14 11:46:08 -0700970static std::string pthread_once_1934122_result = "";
971
972static void Routine2() {
973 pthread_once_1934122_result += "2";
974}
975
976static void Routine1() {
977 pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
978 pthread_once_1934122_result += "1";
979 pthread_once(&once_control_2, &Routine2);
980}
981
TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  // Both routines ran, in nesting order.
  ASSERT_EQ("12", pthread_once_1934122_result);
}
989
Elliott Hughes1728b232014-05-14 10:02:03 -0700990static int g_atfork_prepare_calls = 0;
Dmitriy Ivanovea295f62014-11-20 20:47:02 -0800991static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
992static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
Elliott Hughes1728b232014-05-14 10:02:03 -0700993static int g_atfork_parent_calls = 0;
Dmitriy Ivanovea295f62014-11-20 20:47:02 -0800994static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
995static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
Elliott Hughes1728b232014-05-14 10:02:03 -0700996static int g_atfork_child_calls = 0;
Dmitriy Ivanovea295f62014-11-20 20:47:02 -0800997static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
998static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }
Elliott Hughesc3f11402013-10-30 14:40:09 -0700999
Dmitriy Ivanov00e37812014-11-20 16:53:47 -08001000TEST(pthread, pthread_atfork_smoke) {
Dmitriy Ivanovcb0443c2015-03-16 14:15:46 -07001001 ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
1002 ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));
Elliott Hughesc3f11402013-10-30 14:40:09 -07001003
Dmitriy Ivanovcb0443c2015-03-16 14:15:46 -07001004 int pid = fork();
1005 ASSERT_NE(-1, pid) << strerror(errno);
Elliott Hughesc3f11402013-10-30 14:40:09 -07001006
Dmitriy Ivanovcb0443c2015-03-16 14:15:46 -07001007 // Child and parent calls are made in the order they were registered.
1008 if (pid == 0) {
Dmitriy Ivanovea295f62014-11-20 20:47:02 -08001009 ASSERT_EQ(12, g_atfork_child_calls);
Dmitriy Ivanovcb0443c2015-03-16 14:15:46 -07001010 _exit(0);
1011 }
Dmitriy Ivanovea295f62014-11-20 20:47:02 -08001012 ASSERT_EQ(12, g_atfork_parent_calls);
Elliott Hughesc3f11402013-10-30 14:40:09 -07001013
Dmitriy Ivanovcb0443c2015-03-16 14:15:46 -07001014 // Prepare calls are made in the reverse order.
Dmitriy Ivanovea295f62014-11-20 20:47:02 -08001015 ASSERT_EQ(21, g_atfork_prepare_calls);
1016 int status;
1017 ASSERT_EQ(pid, waitpid(pid, &status, 0));
1018}
1019
// Additional handlers (digits 3 and 4) used by the dlclose test below; they
// share the same decimal call-log globals as handlers 1 and 2.
static void AtForkPrepare3() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 3; }
static void AtForkPrepare4() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 4; }

static void AtForkParent3() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 3; }
static void AtForkParent4() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 4; }

static void AtForkChild3() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 3; }
static void AtForkChild4() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 4; }
1028
// Checks that atfork handlers registered by (and through) a shared library
// are removed when that library is dlclose()d: handlers 2 and 3 live in
// libtest_pthread_atfork.so, so after dlclose only 1 and 4 should fire.
TEST(pthread, pthread_atfork_with_dlclose) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));

  void* handle = dlopen("libtest_pthread_atfork.so", RTLD_NOW | RTLD_LOCAL);
  ASSERT_TRUE(handle != nullptr) << dlerror();
  typedef int (*fn_t)(void (*)(void), void (*)(void), void (*)(void));
  fn_t fn = reinterpret_cast<fn_t>(dlsym(handle, "proxy_pthread_atfork"));
  ASSERT_TRUE(fn != nullptr) << dlerror();
  // the library registers 2 additional atfork handlers in a constructor
  ASSERT_EQ(0, fn(AtForkPrepare2, AtForkParent2, AtForkChild2));
  ASSERT_EQ(0, fn(AtForkPrepare3, AtForkParent3, AtForkChild3));

  ASSERT_EQ(0, pthread_atfork(AtForkPrepare4, AtForkParent4, AtForkChild4));

  int pid = fork();

  ASSERT_NE(-1, pid) << strerror(errno);

  // All four handler sets are still registered: child/parent in registration
  // order (1234), prepare in reverse order (4321).
  if (pid == 0) {
    ASSERT_EQ(1234, g_atfork_child_calls);
    _exit(0);
  }

  ASSERT_EQ(1234, g_atfork_parent_calls);
  ASSERT_EQ(4321, g_atfork_prepare_calls);

  // Unloading the library should unregister the handlers it registered (2, 3).
  EXPECT_EQ(0, dlclose(handle));
  g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;

  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));

  pid = fork();

  ASSERT_NE(-1, pid) << strerror(errno);

  // Only handlers 1 and 4 remain after the dlclose.
  if (pid == 0) {
    ASSERT_EQ(14, g_atfork_child_calls);
    _exit(0);
  }

  ASSERT_EQ(14, g_atfork_parent_calls);
  ASSERT_EQ(41, g_atfork_prepare_calls);

  ASSERT_EQ(pid, waitpid(pid, &status, 0));
}
1075
1076TEST(pthread, pthread_attr_getscope) {
1077 pthread_attr_t attr;
1078 ASSERT_EQ(0, pthread_attr_init(&attr));
1079
1080 int scope;
1081 ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
1082 ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
1083}
Narayan Kamath51e6cb32014-03-03 15:38:51 +00001084
1085TEST(pthread, pthread_condattr_init) {
1086 pthread_condattr_t attr;
1087 pthread_condattr_init(&attr);
1088
1089 clockid_t clock;
1090 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1091 ASSERT_EQ(CLOCK_REALTIME, clock);
1092
1093 int pshared;
1094 ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
1095 ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
1096}
1097
1098TEST(pthread, pthread_condattr_setclock) {
1099 pthread_condattr_t attr;
1100 pthread_condattr_init(&attr);
1101
1102 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
1103 clockid_t clock;
1104 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1105 ASSERT_EQ(CLOCK_REALTIME, clock);
1106
1107 ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
1108 ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
1109 ASSERT_EQ(CLOCK_MONOTONIC, clock);
1110
1111 ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
1112}
1113
// Bionic stores the condattr flags (clock, pshared) in the first word of the
// cond's private state; signalling/broadcasting must not clobber them.
// This inspects bionic internals, so it's bionic-only.
TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  // Read the flags word back out of the cond's private state and check the
  // original attributes are still intact.
  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // !defined(__BIONIC__)
  GTEST_LOG_(INFO) << "This tests a bionic implementation detail.\n";
#endif // !defined(__BIONIC__)
}
1139
// Fixture for condvar wakeup tests: SetUp starts a thread that waits on
// |cond| until |progress| leaves WAITING; the test body wakes it with either
// pthread_cond_signal or pthread_cond_broadcast.
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  // Checkpoints the fixture and the waiter thread advance through, in order.
  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;

 protected:
  virtual void SetUp() {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL));
    ASSERT_EQ(0, pthread_cond_init(&cond, NULL));
    progress = INITIALIZED;
    ASSERT_EQ(0,
      pthread_create(&thread, NULL, reinterpret_cast<void* (*)(void*)>(WaitThreadFn), this));
  }

  virtual void TearDown() {
    ASSERT_EQ(0, pthread_join(thread, NULL));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

  // Busy-waits (with sleeps) until |progress| reaches |expected_progress|,
  // then sleeps a little longer to give the waiter time to block in
  // pthread_cond_wait.
  void SleepUntilProgress(Progress expected_progress) {
    while (progress != expected_progress) {
      usleep(5000);
    }
    usleep(5000);
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    // Loop to tolerate spurious wakeups: only exit once SIGNALED is set.
    while (test->progress == WAITING) {
      ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};
1189
// The single waiter should be woken by pthread_cond_signal.
TEST_F(pthread_CondWakeupTest, signal) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_signal(&cond);
}
1195
// The single waiter should also be woken by pthread_cond_broadcast.
TEST_F(pthread_CondWakeupTest, broadcast) {
  SleepUntilProgress(WAITING);
  progress = SIGNALED;
  pthread_cond_broadcast(&cond);
}
Elliott Hughes0e714a52014-03-03 16:42:47 -08001201
1202TEST(pthread, pthread_mutex_timedlock) {
1203 pthread_mutex_t m;
1204 ASSERT_EQ(0, pthread_mutex_init(&m, NULL));
1205
1206 // If the mutex is already locked, pthread_mutex_timedlock should time out.
1207 ASSERT_EQ(0, pthread_mutex_lock(&m));
1208
1209 timespec ts;
1210 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1211 ts.tv_nsec += 1;
1212 ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts));
1213
1214 // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
1215 ASSERT_EQ(0, pthread_mutex_unlock(&m));
1216
1217 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1218 ts.tv_nsec += 1;
1219 ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts));
1220
1221 ASSERT_EQ(0, pthread_mutex_unlock(&m));
1222 ASSERT_EQ(0, pthread_mutex_destroy(&m));
1223}
Elliott Hughes57b7a612014-08-25 17:26:50 -07001224
// Checks that the main thread's pthread_attr_getstack agrees with
// /proc/self/maps and tracks RLIMIT_STACK, including non-page-aligned and
// shrunken limits.
TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

  // What does /proc/self/maps' [stack] line say?
  void* maps_stack_hi = NULL;
  FILE* fp = fopen("/proc/self/maps", "r");
  ASSERT_TRUE(fp != NULL);
  char line[BUFSIZ];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t lo, hi;
    char name[10];
    sscanf(line, "%" PRIxPTR "-%" PRIxPTR " %*4s %*x %*x:%*x %*d %10s", &lo, &hi, name);
    if (strcmp(name, "[stack]") == 0) {
      maps_stack_hi = reinterpret_cast<void*>(hi);
      break;
    }
  }
  fclose(fp);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
#if defined(__BIONIC__)
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
#endif
  EXPECT_EQ(rl.rlim_cur, stack_size);

  // Restore the original soft limit when the test ends, even on failure.
  // NOTE(review): rlim_max is not restored; the changes below raise it to
  // RLIM_INFINITY permanently for this process.
  auto guard = make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  // The high address of the /proc/self/maps [stack] region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = rl.rlim_max = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = rl.rlim_max = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
}
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001313
Yabin Cui917d3902015-01-08 12:32:42 -08001314static void pthread_attr_getstack_18908062_helper(void*) {
1315 char local_variable;
1316 pthread_attr_t attributes;
1317 pthread_getattr_np(pthread_self(), &attributes);
1318 void* stack_base;
1319 size_t stack_size;
1320 pthread_attr_getstack(&attributes, &stack_base, &stack_size);
1321
1322 // Test whether &local_variable is in [stack_base, stack_base + stack_size).
1323 ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
1324 ASSERT_LT(&local_variable, reinterpret_cast<char*>(stack_base) + stack_size);
1325}
1326
1327// Check whether something on stack is in the range of
1328// [stack_base, stack_base + stack_size). see b/18908062.
1329TEST(pthread, pthread_attr_getstack_18908062) {
1330 pthread_t t;
1331 ASSERT_EQ(0, pthread_create(&t, NULL,
1332 reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
1333 NULL));
1334 pthread_join(t, NULL);
1335}
1336
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001337#if defined(__BIONIC__)
1338static void* pthread_gettid_np_helper(void* arg) {
1339 *reinterpret_cast<pid_t*>(arg) = gettid();
1340 return NULL;
1341}
1342#endif
1343
1344TEST(pthread, pthread_gettid_np) {
1345#if defined(__BIONIC__)
1346 ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));
1347
1348 pid_t t_gettid_result;
1349 pthread_t t;
1350 pthread_create(&t, NULL, pthread_gettid_np_helper, &t_gettid_result);
1351
1352 pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);
1353
Elliott Hughes34c987a2014-09-22 16:01:26 -07001354 pthread_join(t, NULL);
Elliott Hughes8fb639c2014-09-12 14:43:07 -07001355
1356 ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
1357#else
1358 GTEST_LOG_(INFO) << "This test does nothing.\n";
1359#endif
1360}
Elliott Hughes34c987a2014-09-22 16:01:26 -07001361
// Counts how many cleanup handlers actually executed.
static size_t cleanup_counter = 0;

// A handler that must never run; aborts the test process if it does.
static void AbortCleanupRoutine(void*) {
  abort();
}

// A handler that records that it ran.
static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}
1371
Derek Xue41996952014-09-25 11:05:32 +01001372static void PthreadCleanupTester() {
Elliott Hughes34c987a2014-09-22 16:01:26 -07001373 pthread_cleanup_push(CountCleanupRoutine, NULL);
1374 pthread_cleanup_push(CountCleanupRoutine, NULL);
1375 pthread_cleanup_push(AbortCleanupRoutine, NULL);
1376
1377 pthread_cleanup_pop(0); // Pop the abort without executing it.
1378 pthread_cleanup_pop(1); // Pop one count while executing it.
1379 ASSERT_EQ(1U, cleanup_counter);
1380 // Exit while the other count is still on the cleanup stack.
1381 pthread_exit(NULL);
1382
1383 // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
1384 pthread_cleanup_pop(0);
1385}
1386
Derek Xue41996952014-09-25 11:05:32 +01001387static void* PthreadCleanupStartRoutine(void*) {
Elliott Hughes34c987a2014-09-22 16:01:26 -07001388 PthreadCleanupTester();
1389 return NULL;
1390}
1391
1392TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
1393 pthread_t t;
1394 ASSERT_EQ(0, pthread_create(&t, NULL, PthreadCleanupStartRoutine, NULL));
1395 pthread_join(t, NULL);
1396 ASSERT_EQ(2U, cleanup_counter);
1397}
Derek Xue41996952014-09-25 11:05:32 +01001398
// Pins the (implementation-defined) choice that the default mutex type is
// PTHREAD_MUTEX_NORMAL.
TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}
1402
1403TEST(pthread, pthread_mutexattr_gettype) {
1404 pthread_mutexattr_t attr;
1405 ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1406
1407 int attr_type;
1408
1409 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
1410 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1411 ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);
1412
1413 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
1414 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1415 ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);
1416
1417 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
1418 ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
1419 ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001420
1421 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
1422}
1423
Yabin Cui17393b02015-03-21 15:08:25 -07001424struct PthreadMutex {
1425 pthread_mutex_t lock;
1426
1427 PthreadMutex(int mutex_type) {
1428 init(mutex_type);
1429 }
1430
1431 ~PthreadMutex() {
1432 destroy();
1433 }
1434
1435 private:
1436 void init(int mutex_type) {
1437 pthread_mutexattr_t attr;
1438 ASSERT_EQ(0, pthread_mutexattr_init(&attr));
1439 ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
1440 ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
1441 ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
1442 }
1443
1444 void destroy() {
1445 ASSERT_EQ(0, pthread_mutex_destroy(&lock));
1446 }
1447
1448 DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
1449};
Derek Xue41996952014-09-25 11:05:32 +01001450
1451TEST(pthread, pthread_mutex_lock_NORMAL) {
Yabin Cui17393b02015-03-21 15:08:25 -07001452 PthreadMutex m(PTHREAD_MUTEX_NORMAL);
Derek Xue41996952014-09-25 11:05:32 +01001453
Yabin Cui17393b02015-03-21 15:08:25 -07001454 ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
1455 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
Derek Xue41996952014-09-25 11:05:32 +01001456}
1457
// An ERRORCHECK mutex detects self-deadlock (EDEADLK), failed trylock on a
// held mutex (EBUSY), and unlock of an unowned mutex (EPERM).
TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  // The mutex is no longer held, so unlocking again is an error.
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}
1469
// A RECURSIVE mutex can be locked repeatedly by its owner (including via
// trylock) and requires a matching number of unlocks; unlocking an unowned
// mutex still fails with EPERM.
TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  // The lock count is back to zero, so another unlock is an error.
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}
1481
1482TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
1483 pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
1484 PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
1485 ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
1486 pthread_mutex_destroy(&lock_normal);
1487
1488 pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
1489 PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
1490 ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
1491 pthread_mutex_destroy(&lock_errorcheck);
1492
1493 pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
1494 PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
1495 ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
1496 ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
Derek Xue41996952014-09-25 11:05:32 +01001497}
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001498class MutexWakeupHelper {
1499 private:
Yabin Cui17393b02015-03-21 15:08:25 -07001500 PthreadMutex m;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001501 enum Progress {
1502 LOCK_INITIALIZED,
1503 LOCK_WAITING,
1504 LOCK_RELEASED,
1505 LOCK_ACCESSED
1506 };
1507 std::atomic<Progress> progress;
Yabin Cuif7969852015-04-02 17:47:48 -07001508 std::atomic<pid_t> tid;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001509
1510 static void thread_fn(MutexWakeupHelper* helper) {
Yabin Cuif7969852015-04-02 17:47:48 -07001511 helper->tid = gettid();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001512 ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
1513 helper->progress = LOCK_WAITING;
1514
Yabin Cui17393b02015-03-21 15:08:25 -07001515 ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001516 ASSERT_EQ(LOCK_RELEASED, helper->progress);
Yabin Cui17393b02015-03-21 15:08:25 -07001517 ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001518
1519 helper->progress = LOCK_ACCESSED;
1520 }
1521
1522 public:
Yabin Cui17393b02015-03-21 15:08:25 -07001523 MutexWakeupHelper(int mutex_type) : m(mutex_type) {
1524 }
1525
1526 void test() {
1527 ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001528 progress = LOCK_INITIALIZED;
Yabin Cuif7969852015-04-02 17:47:48 -07001529 tid = 0;
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001530
1531 pthread_t thread;
1532 ASSERT_EQ(0, pthread_create(&thread, NULL,
1533 reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));
1534
Yabin Cuif7969852015-04-02 17:47:48 -07001535 WaitUntilThreadSleep(tid);
1536 ASSERT_EQ(LOCK_WAITING, progress);
1537
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001538 progress = LOCK_RELEASED;
Yabin Cui17393b02015-03-21 15:08:25 -07001539 ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001540
1541 ASSERT_EQ(0, pthread_join(thread, NULL));
1542 ASSERT_EQ(LOCK_ACCESSED, progress);
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001543 }
1544};
1545
1546TEST(pthread, pthread_mutex_NORMAL_wakeup) {
Yabin Cui17393b02015-03-21 15:08:25 -07001547 MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
1548 helper.test();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001549}
1550
1551TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
Yabin Cui17393b02015-03-21 15:08:25 -07001552 MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
1553 helper.test();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001554}
1555
1556TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
Yabin Cui17393b02015-03-21 15:08:25 -07001557 MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
1558 helper.test();
Yabin Cui5b8e7cd2015-03-04 17:36:59 -08001559}
1560
Yabin Cui140f3672015-02-03 10:32:00 -08001561TEST(pthread, pthread_mutex_owner_tid_limit) {
Yabin Cuie69c2452015-02-13 16:21:25 -08001562#if defined(__BIONIC__) && !defined(__LP64__)
Yabin Cui140f3672015-02-03 10:32:00 -08001563 FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
1564 ASSERT_TRUE(fp != NULL);
1565 long pid_max;
1566 ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
1567 fclose(fp);
Yabin Cuie69c2452015-02-13 16:21:25 -08001568 // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
Yabin Cui140f3672015-02-03 10:32:00 -08001569 ASSERT_LE(pid_max, 65536);
Yabin Cuie69c2452015-02-13 16:21:25 -08001570#else
1571 GTEST_LOG_(INFO) << "This test does nothing as 32-bit tid is supported by pthread_mutex.\n";
1572#endif
Yabin Cui140f3672015-02-03 10:32:00 -08001573}
Yabin Cuib5845722015-03-16 22:46:42 -07001574
class StrictAlignmentAllocator {
 public:
  // Returns a pointer into a fresh allocation that is aligned to |alignment|
  // but deliberately NOT aligned to |alignment| * 2, so callers can check
  // that the weakest permitted alignment really is sufficient.
  void* allocate(size_t size, size_t alignment) {
    // Over-allocate by two alignment units so the strictly-aligned address
    // (at most alignment * 2 - 1 bytes in) still leaves |size| usable bytes.
    char* base = new char[size + alignment * 2];
    allocated_array.push_back(base);
    // Advance to the first address that is an odd multiple of |alignment|,
    // i.e. addr % (alignment * 2) == alignment.
    uintptr_t addr = reinterpret_cast<uintptr_t>(base);
    uintptr_t period = alignment * 2;
    uintptr_t offset = (alignment + period - addr % period) % period;
    return base + offset;
  }

  // Frees every allocation handed out by allocate().
  ~StrictAlignmentAllocator() {
    for (auto& base : allocated_array) {
      delete[] base;
    }
  }

 private:
  std::vector<char*> allocated_array;  // original (unadjusted) base pointers
};
1599
TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old version, we need to allow 4-byte aligned data for pthread types.
  StrictAlignmentAllocator allocator;
  // Place each pthread type at an address that is 4-byte but not 8-byte
  // aligned, then smoke-test its basic operations at that address.
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
                             allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, NULL));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
                           allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, NULL));
  // signal/broadcast with no waiters is legal and must still succeed.
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
                               allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, NULL));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_LOG_(INFO) << "This test tests bionic implementation details.";
#endif
}