Prevent using statically allocated pthread keys before creation.

Bug: 19993460

Change-Id: I244dea7f5df3c8384f88aa48d635348fafc9cbaf
diff --git a/libc/bionic/pthread_key.cpp b/libc/bionic/pthread_key.cpp
index 65e0879..6d77afa 100644
--- a/libc/bionic/pthread_key.cpp
+++ b/libc/bionic/pthread_key.cpp
@@ -57,8 +57,15 @@
   return seq & (1 << SEQ_KEY_IN_USE_BIT);
 }
 
+#define KEY_VALID_FLAG (1 << 31)
+
+static_assert(sizeof(pthread_key_t) == sizeof(int) && static_cast<pthread_key_t>(-1) < 0,
+              "pthread_key_t should be typedef to int");
+
 static inline bool KeyInValidRange(pthread_key_t key) {
-  return key >= 0 && key < BIONIC_PTHREAD_KEY_COUNT;
+  // Bit 31 (KEY_VALID_FLAG) set makes key negative, so this single signed comparison
+  // accepts exactly the keys that have the flag set and an index below BIONIC_PTHREAD_KEY_COUNT.
+  return (key < (KEY_VALID_FLAG | BIONIC_PTHREAD_KEY_COUNT));
 }
 
 // Called from pthread_exit() to remove all pthread keys. This must call the destructor of
@@ -114,7 +121,7 @@
     while (!SeqOfKeyInUse(seq)) {
       if (atomic_compare_exchange_weak(&key_map[i].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
         atomic_store(&key_map[i].key_destructor, reinterpret_cast<uintptr_t>(key_destructor));
-        *key = i;
+        *key = i | KEY_VALID_FLAG;
         return 0;
       }
     }
@@ -127,9 +134,10 @@
 // responsibility of the caller to properly dispose of the corresponding data
 // and resources, using any means it finds suitable.
 int pthread_key_delete(pthread_key_t key) {
-  if (!KeyInValidRange(key)) {
+  if (__predict_false(!KeyInValidRange(key))) {
     return EINVAL;
   }
+  key &= ~KEY_VALID_FLAG;
   // Increase seq to invalidate values in all threads.
   uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
   if (SeqOfKeyInUse(seq)) {
@@ -141,9 +149,10 @@
 }
 
 void* pthread_getspecific(pthread_key_t key) {
-  if (!KeyInValidRange(key)) {
+  if (__predict_false(!KeyInValidRange(key))) {
     return NULL;
   }
+  key &= ~KEY_VALID_FLAG;
   uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
   pthread_key_data_t* data = &(__get_thread()->key_data[key]);
   // It is user's responsibility to synchornize between the creation and use of pthread keys,
@@ -151,16 +160,19 @@
   if (__predict_true(SeqOfKeyInUse(seq) && data->seq == seq)) {
     return data->data;
   }
+  // We arrive here when the current thread holds the seq of a deleted pthread key.
+  // The cached data belongs to that deleted key and should be cleared.
   data->data = NULL;
   return NULL;
 }
 
 int pthread_setspecific(pthread_key_t key, const void* ptr) {
-  if (!KeyInValidRange(key)) {
+  if (__predict_false(!KeyInValidRange(key))) {
     return EINVAL;
   }
+  key &= ~KEY_VALID_FLAG;
   uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
-  if (SeqOfKeyInUse(seq)) {
+  if (__predict_true(SeqOfKeyInUse(seq))) {
     pthread_key_data_t* data = &(__get_thread()->key_data[key]);
     data->seq = seq;
     data->data = const_cast<void*>(ptr);