Revert^4 "Implement LockSupport.park with a futex"
This reverts commit 13f4d9631db981ae5008073db4df9c6a934fc9f3.
Reason for revert: Fixing ThreadStress timeouts.
Change-Id: I9f8c3f56eccfadc3751049731e8d427873cc7841
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index e021b77..a739c2d 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -31,8 +31,10 @@
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "art_field-inl.h"
#include "native_util.h"
#include "scoped_fast_native_object_access-inl.h"
+#include "well_known_classes.h"
namespace art {
@@ -504,6 +506,33 @@
std::atomic_thread_fence(std::memory_order_seq_cst);
}
+static void Unsafe_park(JNIEnv* env, jobject, jboolean isAbsolute, jlong time) {
+ ScopedObjectAccess soa(env);
+ Thread::Current()->Park(isAbsolute, time);
+}
+
+static void Unsafe_unpark(JNIEnv* env, jobject, jobject jthread) {
+ art::ScopedFastNativeObjectAccess soa(env);
+ if (jthread == nullptr || !env->IsInstanceOf(jthread, WellKnownClasses::java_lang_Thread)) {
+ ThrowIllegalArgumentException("Argument to unpark() was not a Thread");
+ return;
+ }
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
+ if (thread != nullptr) {
+ thread->Unpark();
+ } else {
+ // If thread is null, that means that either the thread is not started yet,
+ // or the thread has already terminated. Setting the field to true will be
+ // respected when the thread does start, and is harmless if the thread has
+ // already terminated.
+ ArtField* unparked =
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+ // JNI must use non transactional mode.
+ unparked->SetBoolean<false>(soa.Decode<mirror::Object>(jthread), JNI_TRUE);
+ }
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Unsafe, compareAndSwapInt, "(Ljava/lang/Object;JII)Z"),
FAST_NATIVE_METHOD(Unsafe, compareAndSwapLong, "(Ljava/lang/Object;JJJ)Z"),
@@ -546,6 +575,8 @@
FAST_NATIVE_METHOD(Unsafe, putShort, "(Ljava/lang/Object;JS)V"),
FAST_NATIVE_METHOD(Unsafe, putFloat, "(Ljava/lang/Object;JF)V"),
FAST_NATIVE_METHOD(Unsafe, putDouble, "(Ljava/lang/Object;JD)V"),
+ FAST_NATIVE_METHOD(Unsafe, unpark, "(Ljava/lang/Object;)V"),
+ NATIVE_METHOD(Unsafe, park, "(ZJ)V"),
// Each of the getFoo variants are overloaded with a call that operates
// directively on a native pointer.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index dda3b82..66e852a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -44,6 +44,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/atomic.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/file_utils.h"
@@ -285,6 +286,116 @@
<< "No deoptimization context for thread " << *this;
}
+enum {
+ kPermitAvailable = 0, // Incrementing consumes the permit
+ kNoPermit = 1, // Incrementing marks as waiter waiting
+ kNoPermitWaiterWaiting = 2
+};
+
+void Thread::Park(bool is_absolute, int64_t time) {
+ DCHECK(this == Thread::Current());
+#if ART_USE_FUTEXES
+ // Consume the permit, or mark as waiting. This cannot cause park_state to go
+ // outside of its valid range (0, 1, 2), because in all cases where 2 is
+ // assigned it is set back to 1 before returning, and this method cannot run
+ // concurrently with itself since it operates on the current thread.
+ int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed);
+ if (old_state == kNoPermit) {
+ // no permit was available. block thread until later.
+ // TODO: Call to signal jvmti here
+ int result = 0;
+ if (!is_absolute && time == 0) {
+ // Thread.getState() is documented to return waiting for untimed parks.
+ ScopedThreadSuspension sts(this, ThreadState::kWaiting);
+ DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_PRIVATE,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ /* timeout */ nullptr,
+ nullptr,
+ 0);
+ } else if (time > 0) {
+ // Only actually suspend and futex_wait if we're going to wait for some
+ // positive amount of time - the kernel will reject negative times with
+ // EINVAL, and a zero time will just noop.
+
+ // Thread.getState() is documented to return timed wait for timed parks.
+ ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
+ DCHECK_EQ(NumberOfHeldMutexes(), 0u);
+ timespec timespec;
+ if (is_absolute) {
+ // Time is millis when scheduled for an absolute time
+ timespec.tv_nsec = (time % 1000) * 1000000;
+ timespec.tv_sec = time / 1000;
+ // This odd looking pattern is recommended by futex documentation to
+ // wait until an absolute deadline, with otherwise identical behavior to
+ // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
+ // correct time when the system clock changes.
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ &timespec,
+ nullptr,
+ FUTEX_BITSET_MATCH_ANY);
+ } else {
+ // Time is nanos when scheduled for a relative time
+ timespec.tv_sec = time / 1000000000;
+ timespec.tv_nsec = time % 1000000000;
+ result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAIT_PRIVATE,
+ /* sleep if val = */ kNoPermitWaiterWaiting,
+ &timespec,
+ nullptr,
+ 0);
+ }
+ }
+ if (result == -1) {
+ switch (errno) {
+ case EAGAIN:
+ case ETIMEDOUT:
+ case EINTR: break; // park() is allowed to spuriously return
+ default: PLOG(FATAL) << "Failed to park";
+ }
+ }
+ // Mark as no longer waiting, and consume permit if there is one.
+ tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
+ // TODO: Call to signal jvmti here
+ } else {
+ // the fetch_add has consumed the permit. immediately return.
+ DCHECK_EQ(old_state, kPermitAvailable);
+ }
+#else
+ #pragma clang diagnostic push
+ #pragma clang diagnostic warning "-W#warnings"
+ #warning "LockSupport.park/unpark implemented as noops without FUTEX support."
+ #pragma clang diagnostic pop
+ UNUSED(is_absolute, time);
+ UNIMPLEMENTED(WARNING);
+ sched_yield();
+#endif
+}
+
+void Thread::Unpark() {
+#if ART_USE_FUTEXES
+ // Set permit available; will be consumed either by fetch_add (when the thread
+ // tries to park) or store (when the parked thread is woken up)
+ if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed)
+ == kNoPermitWaiterWaiting) {
+ int result = futex(tls32_.park_state_.Address(),
+ FUTEX_WAKE_PRIVATE,
+ /* number of waiters = */ 1,
+ nullptr,
+ nullptr,
+ 0);
+ if (result == -1) {
+ PLOG(FATAL) << "Failed to unpark";
+ }
+ }
+#else
+ UNIMPLEMENTED(WARNING);
+#endif
+}
+
void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
sf, type, tlsPtr_.stacked_shadow_frame_record);
@@ -489,6 +600,22 @@
runtime->GetRuntimeCallbacks()->ThreadStart(self);
+ // Unpark ourselves if the java peer was unparked before it started (see
+ // b/28845097#comment49 for more information)
+
+ ArtField* unparkedField = jni::DecodeArtField(
+ WellKnownClasses::java_lang_Thread_unparkedBeforeStart);
+ bool should_unpark = false;
+ {
+ // Hold the lock here, so that if another thread calls unpark before the thread starts
+ // we don't observe the unparkedBeforeStart field before the unparker writes to it,
+ // which could cause a lost unpark.
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE;
+ }
+ if (should_unpark) {
+ self->Unpark();
+ }
// Invoke the 'run' method of our java.lang.Thread.
ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
jmethodID mid = WellKnownClasses::java_lang_Thread_run;
@@ -2133,6 +2260,9 @@
tls32_.state_and_flags.as_struct.flags = 0;
tls32_.state_and_flags.as_struct.state = kNative;
tls32_.interrupted.store(false, std::memory_order_relaxed);
+ // Initialize with no permit; if the java Thread was unparked before being
+ // started, it will unpark itself before calling into java code.
+ tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
std::fill(tlsPtr_.rosalloc_runs,
tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
@@ -2449,12 +2579,15 @@
}
void Thread::Interrupt(Thread* self) {
- MutexLock mu(self, *wait_mutex_);
- if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
- return;
+ {
+ MutexLock mu(self, *wait_mutex_);
+ if (tls32_.interrupted.load(std::memory_order_seq_cst)) {
+ return;
+ }
+ tls32_.interrupted.store(true, std::memory_order_seq_cst);
+ NotifyLocked(self);
}
- tls32_.interrupted.store(true, std::memory_order_seq_cst);
- NotifyLocked(self);
+ Unpark();
}
void Thread::Notify() {
diff --git a/runtime/thread.h b/runtime/thread.h
index 941867c..b304cef 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -581,6 +581,11 @@
return poison_object_cookie_;
}
+ // Parking for 0ns of relative time means an untimed park, negative (though
+ // should be handled in java code) returns immediately
+ void Park(bool is_absolute, int64_t time) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Unpark();
+
private:
void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
@@ -1543,6 +1548,8 @@
// Thread "interrupted" status; stays raised until queried or thrown.
Atomic<bool32_t> interrupted;
+ AtomicInteger park_state_;
+
// True if the thread is allowed to access a weak ref (Reference::GetReferent() and system
// weaks) and to potentially mark an object alive/gray. This is used for concurrent reference
// processing of the CC collector only. This is thread local so that we can enable/disable weak
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 206418f..94faa62 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -128,6 +128,7 @@
jfieldID WellKnownClasses::java_lang_Thread_name;
jfieldID WellKnownClasses::java_lang_Thread_priority;
jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
+jfieldID WellKnownClasses::java_lang_Thread_unparkedBeforeStart;
jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_ngroups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
@@ -376,6 +377,7 @@
java_lang_Thread_name = CacheField(env, java_lang_Thread, false, "name", "Ljava/lang/String;");
java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
+ java_lang_Thread_unparkedBeforeStart = CacheField(env, java_lang_Thread, false, "unparkedBeforeStart", "Z");
java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "[Ljava/lang/ThreadGroup;");
java_lang_ThreadGroup_ngroups = CacheField(env, java_lang_ThreadGroup, false, "ngroups", "I");
java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index ce5ab1d..8c85228 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -137,6 +137,7 @@
static jfieldID java_lang_Thread_name;
static jfieldID java_lang_Thread_priority;
static jfieldID java_lang_Thread_nativePeer;
+ static jfieldID java_lang_Thread_unparkedBeforeStart;
static jfieldID java_lang_ThreadGroup_groups;
static jfieldID java_lang_ThreadGroup_ngroups;
static jfieldID java_lang_ThreadGroup_mainThreadGroup;