Global lock levels.
Introduce the notion of the mutators/GC holding a shared-exclusive
(aka reader-writer) lock. Introduce globally ordered locks that
annotalysis can check statically at compile time, and add locking
attributes to methods.
More subtly, remove the heap_lock_ and split it into various locks
that are held for shorter periods (so that work doesn't get blocked).
Remove the buggy Dalvik-style thread transitions. Make the GC use CMS
in all cases when concurrent collection is enabled. Fix a bug where
suspend counts, rather than debug suspend counts, were sent to JDWP.
Move the PathClassLoader to WellKnownClasses. In the debugger,
refactor calls that send a request and possibly suspend. Break apart
the different VmWait thread states. Move identity hash code
computation to a shared method.
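
To illustrate the new locking attributes (a sketch of the pattern that
recurs throughout the diff below, not an exhaustive list of annotated
methods), a runtime support entry point now declares the lock its
caller must hold, so annotalysis can verify the requirement at compile
time; FinishCalleeSaveFrameSetup pairs the static attribute with a
runtime AssertSharedHeld() check:

  // SHARED_LOCKS_REQUIRED tells annotalysis that callers must hold the
  // mutator lock in shared (reader) mode before entering this runtime
  // support routine; violations are reported at compile time.
  extern "C" Object* artAllocObjectFromCode(uint32_t type_idx, Method* method,
                                            Thread* self, Method** sp)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
    return AllocObjectFromCode(type_idx, method, self, false);
  }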
Change-Id: Icdbfc3ce3fcccd14341860ac7305d8e97b51f5c6
diff --git a/src/oat/jni/arm/calling_convention_arm.cc b/src/oat/jni/arm/calling_convention_arm.cc
index 75c0380..e06a583 100644
--- a/src/oat/jni/arm/calling_convention_arm.cc
+++ b/src/oat/jni/arm/calling_convention_arm.cc
@@ -53,48 +53,27 @@
return ReturnRegisterForShorty(GetShorty());
}
-// Managed runtime calling convention
+ManagedRegister ArmJniCallingConvention::IntReturnRegister() {
+ return ArmManagedRegister::FromCoreRegister(R0);
+}
-std::vector<ManagedRegister> ArmManagedRuntimeCallingConvention::entry_spills_;
+// Managed runtime calling convention
ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
return ArmManagedRegister::FromCoreRegister(R0);
}
bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
- return itr_slots_ < 3;
+ return false; // Everything moved to stack on entry.
}
bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
- if (itr_slots_ < 2) {
- return false;
- } else if (itr_slots_ > 2) {
- return true;
- } else {
- // handle funny case of a long/double straddling registers and the stack
- return IsParamALongOrDouble(itr_args_);
- }
+ return true;
}
-static const Register kManagedArgumentRegisters[] = {
- R1, R2, R3
-};
ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
- CHECK(IsCurrentParamInRegister());
- if (IsParamALongOrDouble(itr_args_)) {
- if (itr_slots_ == 0) {
- return ArmManagedRegister::FromRegisterPair(R1_R2);
- } else if (itr_slots_ == 1) {
- return ArmManagedRegister::FromRegisterPair(R2_R3);
- } else {
- // This is a long/double split between registers and the stack
- return ArmManagedRegister::FromCoreRegister(
- kManagedArgumentRegisters[itr_slots_]);
- }
- } else {
- return
- ArmManagedRegister::FromCoreRegister(kManagedArgumentRegisters[itr_slots_]);
- }
+ LOG(FATAL) << "Should not reach here";
+ return ManagedRegister::NoRegister();
}
FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
@@ -103,15 +82,26 @@
FrameOffset(displacement_.Int32Value() + // displacement
kPointerSize + // Method*
(itr_slots_ * kPointerSize)); // offset into in args
- if (itr_slots_ == 2) {
- // the odd spanning case, bump the offset to skip the first half of the
- // input which is in a register
- CHECK(IsCurrentParamInRegister());
- result = FrameOffset(result.Int32Value() + 4);
- }
return result;
}
+const std::vector<ManagedRegister>& ArmManagedRuntimeCallingConvention::EntrySpills() {
+ // We spill the argument registers on ARM to free them up for scratch use, we then assume
+ // all arguments are on the stack.
+ if (entry_spills_.size() == 0) {
+ size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+ if (num_spills > 0) {
+ entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1));
+ if (num_spills > 1) {
+ entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2));
+ if (num_spills > 2) {
+ entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3));
+ }
+ }
+ }
+ }
+ return entry_spills_;
+}
// JNI calling convention
ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
@@ -165,11 +155,6 @@
kStackAlignment);
}
-// Will reg be crushed by an outgoing argument?
-bool ArmJniCallingConvention::IsMethodRegisterClobberedPreCall() {
- return true; // The method register R0 is always clobbered by the JNIEnv
-}
-
// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
// in even register numbers and stack slots
void ArmJniCallingConvention::Next() {
diff --git a/src/oat/jni/arm/calling_convention_arm.h b/src/oat/jni/arm/calling_convention_arm.h
index 527ffa1..b536b6b 100644
--- a/src/oat/jni/arm/calling_convention_arm.h
+++ b/src/oat/jni/arm/calling_convention_arm.h
@@ -36,12 +36,10 @@
virtual bool IsCurrentParamOnStack();
virtual ManagedRegister CurrentParamRegister();
virtual FrameOffset CurrentParamStackOffset();
- virtual const std::vector<ManagedRegister>& EntrySpills() {
- DCHECK(entry_spills_.empty());
- return entry_spills_;
- }
+ virtual const std::vector<ManagedRegister>& EntrySpills();
+
private:
- static std::vector<ManagedRegister> entry_spills_;
+ std::vector<ManagedRegister> entry_spills_;
DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
};
@@ -52,6 +50,7 @@
virtual ~ArmJniCallingConvention() {}
// Calling convention
virtual ManagedRegister ReturnRegister();
+ virtual ManagedRegister IntReturnRegister();
virtual ManagedRegister InterproceduralScratchRegister();
// JNI calling convention
virtual void Next(); // Override default behavior for AAPCS
@@ -65,7 +64,6 @@
virtual uint32_t FpSpillMask() const {
return 0; // Floats aren't spilled in JNI down call
}
- virtual bool IsMethodRegisterClobberedPreCall();
virtual bool IsCurrentParamInRegister();
virtual bool IsCurrentParamOnStack();
virtual ManagedRegister CurrentParamRegister();
diff --git a/src/oat/jni/calling_convention.h b/src/oat/jni/calling_convention.h
index ae6c7ed..121d1f8 100644
--- a/src/oat/jni/calling_convention.h
+++ b/src/oat/jni/calling_convention.h
@@ -41,9 +41,9 @@
return result;
}
- // Register that holds result of this method
+ // Register that holds result of this method invocation.
virtual ManagedRegister ReturnRegister() = 0;
- // Register reserved for scratch usage during procedure calls
+ // Register reserved for scratch usage during procedure calls.
virtual ManagedRegister InterproceduralScratchRegister() = 0;
// Offset of Method within the frame
@@ -224,6 +224,8 @@
// Location where the return value of a call can be squirreled if another
// call is made following the native call
FrameOffset ReturnValueSaveLocation() const;
+ // Register that holds result if it is integer.
+ virtual ManagedRegister IntReturnRegister() = 0;
// Callee save registers to spill prior to native code (which may clobber)
virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const = 0;
@@ -232,10 +234,6 @@
virtual uint32_t CoreSpillMask() const = 0;
virtual uint32_t FpSpillMask() const = 0;
- // Returns true if the method register will have been clobbered during argument
- // set up
- virtual bool IsMethodRegisterClobberedPreCall() = 0;
-
// An extra scratch register live after the call
virtual ManagedRegister ReturnScratchRegister() const = 0;
diff --git a/src/oat/jni/jni_compiler.cc b/src/oat/jni/jni_compiler.cc
index 4916072..a9a1bca 100644
--- a/src/oat/jni/jni_compiler.cc
+++ b/src/oat/jni/jni_compiler.cc
@@ -36,31 +36,336 @@
namespace art {
-static void ChangeThreadState(Assembler* jni_asm, ThreadState new_state,
- ManagedRegister scratch, ManagedRegister return_reg,
- FrameOffset return_save_location,
- size_t return_size) {
- /*
- * This code mirrors that of Thread::SetState where detail is given on why
- * barriers occur when they do.
- */
- if (new_state == kRunnable) {
- /*
- * Change our status to kRunnable. The transition requires
- * that we check for pending suspension, because the runtime considers
- * us to be "asleep" in all other states, and another thread could
- * be performing a GC now.
- */
- __ StoreImmediateToThread(Thread::StateOffset(), kRunnable, scratch);
- __ MemoryBarrier(scratch);
- __ SuspendPoll(scratch, return_reg, return_save_location, return_size);
- } else {
- /*
- * Not changing to kRunnable. No additional work required.
- */
- __ MemoryBarrier(scratch);
- __ StoreImmediateToThread(Thread::StateOffset(), new_state, scratch);
+static void CopyParameter(Assembler* jni_asm,
+ ManagedRuntimeCallingConvention* mr_conv,
+ JniCallingConvention* jni_conv,
+ size_t frame_size, size_t out_arg_size);
+static void SetNativeParameter(Assembler* jni_asm,
+ JniCallingConvention* jni_conv,
+ ManagedRegister in_reg);
+
+// Generate the JNI bridge for the given method, general contract:
+// - Arguments are in the managed runtime format, either on stack or in
+// registers, a reference to the method object is supplied as part of this
+// convention.
+//
+CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler,
+ uint32_t access_flags, uint32_t method_idx,
+ const DexFile& dex_file) {
+ const bool is_native = (access_flags & kAccNative) != 0;
+ CHECK(is_native);
+ const bool is_static = (access_flags & kAccStatic) != 0;
+ const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
+ const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+ InstructionSet instruction_set = compiler.GetInstructionSet();
+ if (instruction_set == kThumb2) {
+ instruction_set = kArm;
}
+ // Calling conventions used to iterate over parameters to method
+ UniquePtr<JniCallingConvention> jni_conv(
+ JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+ UniquePtr<ManagedRuntimeCallingConvention> mr_conv(
+ ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+
+ // Assembler that holds generated instructions
+ UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
+ bool should_disassemble = false;
+
+ // Offsets into data structures
+ // TODO: if cross compiling these offsets are for the host not the target
+ const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
+ const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
+ const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
+
+ // 1. Build the frame saving all callee saves
+ const size_t frame_size(jni_conv->FrameSize());
+ const std::vector<ManagedRegister>& callee_save_regs = jni_conv->CalleeSaveRegisters();
+ __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+
+ // 2. Set up the StackIndirectReferenceTable
+ mr_conv->ResetIterator(FrameOffset(frame_size));
+ jni_conv->ResetIterator(FrameOffset(0));
+ __ StoreImmediateToFrame(jni_conv->SirtNumRefsOffset(),
+ jni_conv->ReferenceCount(),
+ mr_conv->InterproceduralScratchRegister());
+ __ CopyRawPtrFromThread(jni_conv->SirtLinkOffset(),
+ Thread::TopSirtOffset(),
+ mr_conv->InterproceduralScratchRegister());
+ __ StoreStackOffsetToThread(Thread::TopSirtOffset(),
+ jni_conv->SirtOffset(),
+ mr_conv->InterproceduralScratchRegister());
+
+ // 3. Place incoming reference arguments into SIRT
+ jni_conv->Next(); // Skip JNIEnv*
+ // 3.5. Create Class argument for static methods out of passed method
+ if (is_static) {
+ FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+ // Check sirt offset is within frame
+ CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+ __ LoadRef(jni_conv->InterproceduralScratchRegister(),
+ mr_conv->MethodRegister(), Method::DeclaringClassOffset());
+ __ VerifyObject(jni_conv->InterproceduralScratchRegister(), false);
+ __ StoreRef(sirt_offset, jni_conv->InterproceduralScratchRegister());
+ jni_conv->Next(); // in SIRT so move to next argument
+ }
+ while (mr_conv->HasNext()) {
+ CHECK(jni_conv->HasNext());
+ bool ref_param = jni_conv->IsCurrentParamAReference();
+ CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+ // References need placing in SIRT and the entry value passing
+ if (ref_param) {
+ // Compute SIRT entry, note null is placed in the SIRT but its boxed value
+ // must be NULL
+ FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+ // Check SIRT offset is within frame and doesn't run into the saved segment state
+ CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+ CHECK_NE(sirt_offset.Uint32Value(),
+ jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
+ bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+ bool input_on_stack = mr_conv->IsCurrentParamOnStack();
+ CHECK(input_in_reg || input_on_stack);
+
+ if (input_in_reg) {
+ ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+ __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
+ __ StoreRef(sirt_offset, in_reg);
+ } else if (input_on_stack) {
+ FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+ __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
+ __ CopyRef(sirt_offset, in_off,
+ mr_conv->InterproceduralScratchRegister());
+ }
+ }
+ mr_conv->Next();
+ jni_conv->Next();
+ }
+
+ // 4. Write out the end of the quick frames.
+ __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset());
+ __ StoreImmediateToThread(Thread::TopOfManagedStackPcOffset(), 0,
+ mr_conv->InterproceduralScratchRegister());
+
+ // 5. Move frame down to allow space for out going args.
+ const size_t out_arg_size = jni_conv->OutArgSize();
+ __ IncreaseFrameSize(out_arg_size);
+
+
+ // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
+ // can occur. The result is the saved JNI local state that is restored by the exit call. We
+ // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
+ // arguments.
+ uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+ : ENTRYPOINT_OFFSET(pJniMethodStart);
+ jni_conv->ResetIterator(FrameOffset(out_arg_size));
+ FrameOffset locked_object_sirt_offset(0);
+ if (is_synchronized) {
+ // Pass object for locking.
+ jni_conv->Next(); // Skip JNIEnv.
+ locked_object_sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+ jni_conv->ResetIterator(FrameOffset(out_arg_size));
+ if (jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+ __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ mr_conv->InterproceduralScratchRegister(),
+ false);
+ } else {
+ ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+ __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ ManagedRegister::NoRegister(), false);
+ }
+ jni_conv->Next();
+ }
+ if (jni_conv->IsCurrentParamInRegister()) {
+ __ GetCurrentThread(jni_conv->CurrentParamRegister());
+ __ Call(jni_conv->CurrentParamRegister(), Offset(jni_start),
+ jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ GetCurrentThread(jni_conv->CurrentParamStackOffset(),
+ jni_conv->InterproceduralScratchRegister());
+ __ Call(ThreadOffset(jni_start), jni_conv->InterproceduralScratchRegister());
+ }
+ if (is_synchronized) { // Check for exceptions from monitor enter.
+ __ ExceptionPoll(jni_conv->InterproceduralScratchRegister(), out_arg_size);
+ }
+ FrameOffset saved_cookie_offset = jni_conv->SavedLocalReferenceCookieOffset();
+ __ Store(saved_cookie_offset, jni_conv->IntReturnRegister(), 4);
+
+ // 7. Iterate over arguments placing values from managed calling convention in
+ // to the convention required for a native call (shuffling). For references
+ // place an index/pointer to the reference after checking whether it is
+ // NULL (which must be encoded as NULL).
+ // Note: we do this prior to materializing the JNIEnv* and static's jclass to
+ // give as many free registers for the shuffle as possible
+ mr_conv->ResetIterator(FrameOffset(frame_size+out_arg_size));
+ uint32_t args_count = 0;
+ while (mr_conv->HasNext()) {
+ args_count++;
+ mr_conv->Next();
+ }
+
+ // Do a backward pass over arguments, so that the generated code will be "mov
+ // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
+ // TODO: A reverse iterator to improve readability.
+ for (uint32_t i = 0; i < args_count; ++i) {
+ mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
+ jni_conv->ResetIterator(FrameOffset(out_arg_size));
+ jni_conv->Next(); // Skip JNIEnv*.
+ if (is_static) {
+ jni_conv->Next(); // Skip Class for now.
+ }
+ // Skip to the argument we're interested in.
+ for (uint32_t j = 0; j < args_count - i - 1; ++j) {
+ mr_conv->Next();
+ jni_conv->Next();
+ }
+ CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size, out_arg_size);
+ }
+ if (is_static) {
+ // Create argument for Class
+ mr_conv->ResetIterator(FrameOffset(frame_size+out_arg_size));
+ jni_conv->ResetIterator(FrameOffset(out_arg_size));
+ jni_conv->Next(); // Skip JNIEnv*
+ FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+ if (jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+ __ CreateSirtEntry(out_off, sirt_offset,
+ mr_conv->InterproceduralScratchRegister(),
+ false);
+ } else {
+ ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+ __ CreateSirtEntry(out_reg, sirt_offset,
+ ManagedRegister::NoRegister(), false);
+ }
+ }
+
+ // 8. Create 1st argument, the JNI environment ptr.
+ jni_conv->ResetIterator(FrameOffset(out_arg_size));
+ // Register that will hold local indirect reference table
+ if (jni_conv->IsCurrentParamInRegister()) {
+ ManagedRegister jni_env = jni_conv->CurrentParamRegister();
+ DCHECK(!jni_env.Equals(jni_conv->InterproceduralScratchRegister()));
+ __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset());
+ } else {
+ FrameOffset jni_env = jni_conv->CurrentParamStackOffset();
+ __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset(),
+ jni_conv->InterproceduralScratchRegister());
+ }
+
+ // 9. Plant call to native code associated with method.
+ __ Call(jni_conv->MethodStackOffset(), Method::NativeMethodOffset(),
+ mr_conv->InterproceduralScratchRegister());
+
+ // 10. Fix differences in result widths.
+ if (instruction_set == kX86) {
+ if (jni_conv->GetReturnType() == Primitive::kPrimByte ||
+ jni_conv->GetReturnType() == Primitive::kPrimShort) {
+ __ SignExtend(jni_conv->ReturnRegister(),
+ Primitive::ComponentSize(jni_conv->GetReturnType()));
+ } else if (jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
+ jni_conv->GetReturnType() == Primitive::kPrimChar) {
+ __ ZeroExtend(jni_conv->ReturnRegister(),
+ Primitive::ComponentSize(jni_conv->GetReturnType()));
+ }
+ }
+
+ // 11. Save return value
+ bool reference_return = jni_conv->IsReturnAReference();
+ FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation();
+ if (jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+ CHECK_LT(return_save_location.Uint32Value(), frame_size+out_arg_size);
+ __ Store(return_save_location, jni_conv->ReturnRegister(), jni_conv->SizeOfReturnValue());
+ }
+
+ // 12. Call into JNI method end possibly passing a returned reference, the method and the current
+ // thread.
+ {
+ // Modify iterator for call, important offsets were saved above.
+ size_t jni_end_arg_count = 0;
+ if (reference_return) { jni_end_arg_count++; }
+ if (is_synchronized) { jni_end_arg_count++; }
+ const char* jni_end_shorty = jni_end_arg_count == 0 ? "I"
+ : (jni_end_arg_count == 1 ? "II" : "III");
+ jni_conv.reset(JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty,
+ instruction_set));
+ // Ensure out arguments will fit in space taken before (we expect this due to stack alignment).
+ size_t jni_end_out_arg_size = jni_conv->OutArgSize();
+ CHECK_LE(jni_end_out_arg_size, out_arg_size);
+ jni_conv->ResetIterator(FrameOffset(jni_end_out_arg_size));
+ }
+ uintptr_t jni_end;
+ if (reference_return) {
+ // Pass result.
+ jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
+ : ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
+ SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_conv->ReturnRegister());
+ jni_conv->Next();
+ } else {
+ jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
+ : ENTRYPOINT_OFFSET(pJniMethodEnd);
+ }
+ // Pass saved local reference state.
+ if (jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+ __ Copy(out_off, saved_cookie_offset, jni_conv->InterproceduralScratchRegister(), 4);
+ } else {
+ ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+ __ Load(out_reg, saved_cookie_offset, 4);
+ }
+ jni_conv->Next();
+ if (is_synchronized) {
+ // Pass object for unlocking.
+ if (jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+ __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ jni_conv->InterproceduralScratchRegister(),
+ false);
+ } else {
+ ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+ __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ ManagedRegister::NoRegister(), false);
+ }
+ jni_conv->Next();
+ }
+ if (jni_conv->IsCurrentParamInRegister()) {
+ __ GetCurrentThread(jni_conv->CurrentParamRegister());
+ __ Call(jni_conv->CurrentParamRegister(), Offset(jni_end),
+ jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ GetCurrentThread(jni_conv->CurrentParamStackOffset(),
+ jni_conv->InterproceduralScratchRegister());
+ __ Call(ThreadOffset(jni_end), jni_conv->InterproceduralScratchRegister());
+ }
+
+ // 13. Reload return value
+ if (jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+ __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue());
+ }
+
+ // 14. Move frame up now we're done with the out arg space.
+ __ DecreaseFrameSize(out_arg_size);
+
+ // 15. Process pending exceptions from JNI call or monitor exit.
+ __ ExceptionPoll(jni_conv->InterproceduralScratchRegister(), 0);
+
+ // 16. Remove activation - no need to restore callee save registers because we didn't clobber
+ // them.
+ __ RemoveFrame(frame_size, std::vector<ManagedRegister>());
+
+ // 17. Finalize code generation
+ __ EmitSlowPaths();
+ size_t cs = __ CodeSize();
+ std::vector<uint8_t> managed_code(cs);
+ MemoryRegion code(&managed_code[0], managed_code.size());
+ __ FinalizeInstructions(code);
+ if (should_disassemble) {
+ UniquePtr<Disassembler> disassembler(Disassembler::Create(instruction_set));
+ disassembler->Dump(LOG(INFO), &managed_code[0], &managed_code[managed_code.size()]);
+ }
+ return new CompiledMethod(instruction_set,
+ managed_code,
+ frame_size,
+ jni_conv->CoreSpillMask(),
+ jni_conv->FpSpillMask());
}
// Copy a single parameter from the managed to the JNI calling convention
@@ -166,441 +471,6 @@
}
}
-static bool IsRegisterPair(InstructionSet instruction_set, ManagedRegister r) {
- return ((instruction_set == kArm && r.AsArm().IsRegisterPair()) ||
- (instruction_set == kX86 && r.AsX86().IsRegisterPair()));
-}
-
-// Generate the JNI bridge for the given method, general contract:
-// - Arguments are in the managed runtime format, either on stack or in
-// registers, a reference to the method object is supplied as part of this
-// convention.
-//
-CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler,
- uint32_t access_flags, uint32_t method_idx,
- const DexFile& dex_file) {
- const bool is_native = (access_flags & kAccNative) != 0;
- CHECK(is_native);
- const bool is_static = (access_flags & kAccStatic) != 0;
- const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
- const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
- InstructionSet instruction_set = compiler.GetInstructionSet();
- if (instruction_set == kThumb2) {
- instruction_set = kArm;
- }
- // Calling conventions used to iterate over parameters to method
- UniquePtr<JniCallingConvention> jni_conv(
- JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
- UniquePtr<ManagedRuntimeCallingConvention> mr_conv(
- ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
-
- // Assembler that holds generated instructions
- UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
- bool should_disassemble = false;
-
- // Offsets into data structures
- // TODO: if cross compiling these offsets are for the host not the target
- const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
- const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
- const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
-
- // 1. Build the frame saving all callee saves
- const size_t frame_size(jni_conv->FrameSize());
- const std::vector<ManagedRegister>& callee_save_regs = jni_conv->CalleeSaveRegisters();
- __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
-
- // 2. Set up the StackIndirectReferenceTable
- mr_conv->ResetIterator(FrameOffset(frame_size));
- jni_conv->ResetIterator(FrameOffset(0));
- __ StoreImmediateToFrame(jni_conv->SirtNumRefsOffset(),
- jni_conv->ReferenceCount(),
- mr_conv->InterproceduralScratchRegister());
- __ CopyRawPtrFromThread(jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset(),
- mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread(Thread::TopSirtOffset(),
- jni_conv->SirtOffset(),
- mr_conv->InterproceduralScratchRegister());
-
- // 3. Place incoming reference arguments into SIRT
- jni_conv->Next(); // Skip JNIEnv*
- // 3.5. Create Class argument for static methods out of passed method
- if (is_static) {
- FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- // Check sirt offset is within frame
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
- __ LoadRef(jni_conv->InterproceduralScratchRegister(),
- mr_conv->MethodRegister(), Method::DeclaringClassOffset());
- __ VerifyObject(jni_conv->InterproceduralScratchRegister(), false);
- __ StoreRef(sirt_offset, jni_conv->InterproceduralScratchRegister());
- jni_conv->Next(); // in SIRT so move to next argument
- }
- while (mr_conv->HasNext()) {
- CHECK(jni_conv->HasNext());
- bool ref_param = jni_conv->IsCurrentParamAReference();
- CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
- // References need placing in SIRT and the entry value passing
- if (ref_param) {
- // Compute SIRT entry, note null is placed in the SIRT but its boxed value
- // must be NULL
- FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- // Check SIRT offset is within frame and doesn't run into the saved segment state
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
- CHECK_NE(sirt_offset.Uint32Value(),
- jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
- bool input_in_reg = mr_conv->IsCurrentParamInRegister();
- bool input_on_stack = mr_conv->IsCurrentParamOnStack();
- CHECK(input_in_reg || input_on_stack);
-
- if (input_in_reg) {
- ManagedRegister in_reg = mr_conv->CurrentParamRegister();
- __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
- __ StoreRef(sirt_offset, in_reg);
- } else if (input_on_stack) {
- FrameOffset in_off = mr_conv->CurrentParamStackOffset();
- __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
- __ CopyRef(sirt_offset, in_off,
- mr_conv->InterproceduralScratchRegister());
- }
- }
- mr_conv->Next();
- jni_conv->Next();
- }
-
- // 4. Transition from being in managed to native code. Save the top_of_managed_stack_
- // so that the managed stack can be crawled while in native code. Clear the corresponding
- // PC value that has no meaning for the this frame.
- __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset());
- __ StoreImmediateToThread(Thread::TopOfManagedStackPcOffset(), 0,
- mr_conv->InterproceduralScratchRegister());
- ChangeThreadState(jni_asm.get(), kNative,
- mr_conv->InterproceduralScratchRegister(),
- ManagedRegister::NoRegister(), FrameOffset(0), 0);
-
- // 5. Move frame down to allow space for out going args. Do for as short a
- // time as possible to aid profiling..
- const size_t out_arg_size = jni_conv->OutArgSize();
- __ IncreaseFrameSize(out_arg_size);
-
- // 6. Acquire lock for synchronized methods.
- if (is_synchronized) {
- // Compute arguments in registers to preserve
- mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
- std::vector<ManagedRegister> live_argument_regs;
- std::vector<size_t> live_argument_regs_size;
- while (mr_conv->HasNext()) {
- if (mr_conv->IsCurrentParamInRegister()) {
- live_argument_regs.push_back(mr_conv->CurrentParamRegister());
- live_argument_regs_size.push_back(mr_conv->CurrentParamSize());
- }
- mr_conv->Next();
- }
-
- // Copy arguments to preserve to callee save registers
- CHECK_LE(live_argument_regs.size(), callee_save_regs.size());
- for (size_t in = 0, out = 0; in < live_argument_regs.size(); ++in) {
- size_t size = live_argument_regs_size.at(in);
- if (IsRegisterPair(instruction_set, live_argument_regs.at(in))) {
- CHECK_EQ(instruction_set, kArm);
- arm::ArmManagedRegister pair(live_argument_regs.at(in).AsArm());
- arm::Register lo(pair.AsRegisterPairLow());
- arm::Register hi(pair.AsRegisterPairHigh());
- __ Move(callee_save_regs.at(out++), arm::ArmManagedRegister::FromCoreRegister(lo), size / 2);
- __ Move(callee_save_regs.at(out++), arm::ArmManagedRegister::FromCoreRegister(hi), size / 2);
- } else {
- __ Move(callee_save_regs.at(out++), live_argument_regs.at(in), size);
- }
- }
-
- // Get SIRT entry for 1st argument (jclass or this) to be 1st argument to
- // monitor enter
- mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- jni_conv->Next(); // Skip JNIEnv*
- if (is_static) {
- FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- if (jni_conv->IsCurrentParamOnStack()) {
- FrameOffset out_off = jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, sirt_offset,
- mr_conv->InterproceduralScratchRegister(),
- false);
- } else {
- ManagedRegister out_reg = jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, sirt_offset,
- ManagedRegister::NoRegister(), false);
- }
- } else {
- CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size,
- out_arg_size);
- }
-
- // Generate JNIEnv* in place and leave a copy in jni_fns_register
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- ManagedRegister jni_fns_register =
- jni_conv->InterproceduralScratchRegister();
- __ LoadRawPtrFromThread(jni_fns_register, Thread::JniEnvOffset());
- SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_fns_register);
-
- // Call JNIEnv->MonitorEnter(object)
- __ LoadRawPtr(jni_fns_register, jni_fns_register, functions);
- __ Call(jni_fns_register, monitor_enter,
- jni_conv->InterproceduralScratchRegister());
-
- // Check for exceptions
- __ ExceptionPoll(jni_conv->InterproceduralScratchRegister());
-
- // Restore live arguments
- for (size_t in = 0, out = 0; out < live_argument_regs.size(); ++out) {
- size_t size = live_argument_regs_size.at(out);
- if (IsRegisterPair(instruction_set, live_argument_regs.at(out))) {
- CHECK_EQ(instruction_set, kArm);
- arm::ArmManagedRegister pair(live_argument_regs.at(out).AsArm());
- arm::Register lo(pair.AsRegisterPairLow());
- arm::Register hi(pair.AsRegisterPairHigh());
- __ Move(arm::ArmManagedRegister::FromCoreRegister(lo), callee_save_regs.at(in++), size / 2);
- __ Move(arm::ArmManagedRegister::FromCoreRegister(hi), callee_save_regs.at(in++), size / 2);
- } else {
- __ Move(live_argument_regs.at(out), callee_save_regs.at(in++), size);
- }
- }
- }
-
- // 7. Iterate over arguments placing values from managed calling convention in
- // to the convention required for a native call (shuffling). For references
- // place an index/pointer to the reference after checking whether it is
- // NULL (which must be encoded as NULL).
- // Note: we do this prior to materializing the JNIEnv* and static's jclass to
- // give as many free registers for the shuffle as possible
- mr_conv->ResetIterator(FrameOffset(frame_size+out_arg_size));
- uint32_t args_count = 0;
- while (mr_conv->HasNext()) {
- args_count++;
- mr_conv->Next();
- }
-
- // Do a backward pass over arguments, so that the generated code will be "mov
- // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
- // TODO: A reverse iterator to improve readability.
- for (uint32_t i = 0; i < args_count; ++i) {
- mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- jni_conv->Next(); // Skip JNIEnv*
- if (is_static) {
- jni_conv->Next(); // Skip Class for now
- }
- for (uint32_t j = 0; j < args_count - i - 1; ++j) {
- mr_conv->Next();
- jni_conv->Next();
- }
- CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size, out_arg_size);
- }
-
- if (is_static) {
- // Create argument for Class
- mr_conv->ResetIterator(FrameOffset(frame_size+out_arg_size));
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- jni_conv->Next(); // Skip JNIEnv*
- FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- if (jni_conv->IsCurrentParamOnStack()) {
- FrameOffset out_off = jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, sirt_offset,
- mr_conv->InterproceduralScratchRegister(),
- false);
- } else {
- ManagedRegister out_reg = jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, sirt_offset,
- ManagedRegister::NoRegister(), false);
- }
- }
- // 8. Create 1st argument, the JNI environment ptr and save the top of the local reference table
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- // Register that will hold local indirect reference table
- if (jni_conv->IsCurrentParamInRegister()) {
- ManagedRegister jni_env = jni_conv->CurrentParamRegister();
- DCHECK(!jni_env.Equals(jni_conv->InterproceduralScratchRegister()));
- __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset());
- // Frame[saved_local_ref_cookie_offset] = env->local_ref_cookie
- __ Copy(jni_conv->SavedLocalReferenceCookieOffset(),
- jni_env, JNIEnvExt::LocalRefCookieOffset(),
- jni_conv->InterproceduralScratchRegister(), 4);
- // env->local_ref_cookie = env->locals.segment_state
- __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(),
- jni_env, JNIEnvExt::SegmentStateOffset(),
- jni_conv->InterproceduralScratchRegister(), 4);
- } else {
- FrameOffset jni_env = jni_conv->CurrentParamStackOffset();
- __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset(),
- jni_conv->InterproceduralScratchRegister());
- // Frame[saved_local_ref_cookie_offset] = env->local_ref_cookie
- __ Copy(jni_conv->SavedLocalReferenceCookieOffset(),
- jni_env, JNIEnvExt::LocalRefCookieOffset(),
- jni_conv->InterproceduralScratchRegister(), 4);
- // env->local_ref_cookie = env->locals.segment_state
- __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(),
- jni_env, JNIEnvExt::SegmentStateOffset(),
- jni_conv->InterproceduralScratchRegister(), 4);
- }
-
- // 9. Plant call to native code associated with method
- if (!jni_conv->IsMethodRegisterClobberedPreCall()) {
- // Method register shouldn't have been crushed by setting up outgoing
- // arguments
- __ Call(mr_conv->MethodRegister(), Method::NativeMethodOffset(),
- mr_conv->InterproceduralScratchRegister());
- } else {
- __ Call(jni_conv->MethodStackOffset(), Method::NativeMethodOffset(),
- mr_conv->InterproceduralScratchRegister());
- }
-
- // 10. Release lock for synchronized methods.
- if (is_synchronized) {
- mr_conv->ResetIterator(FrameOffset(frame_size+out_arg_size));
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- jni_conv->Next(); // Skip JNIEnv*
- // Save return value
- FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation();
- if (jni_conv->SizeOfReturnValue() != 0) {
- FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation();
- CHECK_LT(return_save_location.Uint32Value(), frame_size+out_arg_size);
- __ Store(return_save_location, jni_conv->ReturnRegister(),
- jni_conv->SizeOfReturnValue());
- }
- // Get SIRT entry for 1st argument
- if (is_static) {
- FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- if (jni_conv->IsCurrentParamOnStack()) {
- FrameOffset out_off = jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, sirt_offset,
- mr_conv->InterproceduralScratchRegister(),
- false);
- } else {
- ManagedRegister out_reg = jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, sirt_offset,
- ManagedRegister::NoRegister(), false);
- }
- } else {
- CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size,
- out_arg_size);
- }
- // Generate JNIEnv* in place and leave a copy in jni_env_register
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- ManagedRegister jni_env_register =
- jni_conv->InterproceduralScratchRegister();
- __ LoadRawPtrFromThread(jni_env_register, Thread::JniEnvOffset());
- SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_env_register);
- // Call JNIEnv->MonitorExit(object)
- __ LoadRawPtr(jni_env_register, jni_env_register, functions);
- __ Call(jni_env_register, monitor_exit,
- jni_conv->InterproceduralScratchRegister());
- // Reload return value
- if (jni_conv->SizeOfReturnValue() != 0) {
- __ Load(jni_conv->ReturnRegister(), return_save_location,
- jni_conv->SizeOfReturnValue());
- }
- }
-
- // 11. Release outgoing argument area
- __ DecreaseFrameSize(out_arg_size);
- mr_conv->ResetIterator(FrameOffset(frame_size));
- jni_conv->ResetIterator(FrameOffset(0));
-
- // 12. Transition from being in native to managed code, possibly entering a
- // safepoint
- // Don't clobber result
- CHECK(!jni_conv->InterproceduralScratchRegister().Equals(jni_conv->ReturnRegister()));
- // Location to preserve result on slow path, ensuring its within the frame
- FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation();
- CHECK(return_save_location.Uint32Value() < frame_size ||
- jni_conv->SizeOfReturnValue() == 0);
- ChangeThreadState(jni_asm.get(), kRunnable,
- jni_conv->InterproceduralScratchRegister(),
- jni_conv->ReturnRegister(), return_save_location,
- jni_conv->SizeOfReturnValue());
-
- // 13. Place result in correct register possibly loading from indirect
- // reference table
- if (jni_conv->IsReturnAReference()) {
- __ IncreaseFrameSize(out_arg_size);
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
-
- jni_conv->Next(); // Skip Thread* argument
- // Pass result as arg2
- SetNativeParameter(jni_asm.get(), jni_conv.get(),
- jni_conv->ReturnRegister());
-
- // Pass Thread*
- jni_conv->ResetIterator(FrameOffset(out_arg_size));
- if (jni_conv->IsCurrentParamInRegister()) {
- __ GetCurrentThread(jni_conv->CurrentParamRegister());
- __ Call(jni_conv->CurrentParamRegister(),
- Offset(ENTRYPOINT_OFFSET(pDecodeJObjectInThread)),
- jni_conv->InterproceduralScratchRegister());
- } else {
- __ GetCurrentThread(jni_conv->CurrentParamStackOffset(),
- jni_conv->InterproceduralScratchRegister());
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pDecodeJObjectInThread)),
- jni_conv->InterproceduralScratchRegister());
- }
-
- __ DecreaseFrameSize(out_arg_size);
- jni_conv->ResetIterator(FrameOffset(0));
- } else if (instruction_set == kX86) {
- if (jni_conv->GetReturnType() == Primitive::kPrimByte ||
- jni_conv->GetReturnType() == Primitive::kPrimShort) {
- __ SignExtend(jni_conv->ReturnRegister(), Primitive::ComponentSize(jni_conv->GetReturnType()));
- } else if (jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
- jni_conv->GetReturnType() == Primitive::kPrimChar) {
- __ ZeroExtend(jni_conv->ReturnRegister(), Primitive::ComponentSize(jni_conv->GetReturnType()));
- }
- }
- DCHECK_EQ(mr_conv->SizeOfReturnValue(), jni_conv->SizeOfReturnValue());
- __ Move(mr_conv->ReturnRegister(), jni_conv->ReturnRegister(), mr_conv->SizeOfReturnValue());
-
- // 14. Restore segment state and remove SIRT from thread
- {
- ManagedRegister jni_env = jni_conv->InterproceduralScratchRegister();
- __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset());
- // env->locals.segment_state = env->local_ref_cookie
- __ Copy(jni_env, JNIEnvExt::SegmentStateOffset(),
- jni_env, JNIEnvExt::LocalRefCookieOffset(),
- jni_conv->ReturnScratchRegister(), 4);
- // env->local_ref_cookie = Frame[saved_local_ref_cookie_offset]
- __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(),
- jni_conv->SavedLocalReferenceCookieOffset(),
- jni_conv->ReturnScratchRegister(), 4);
- }
- __ CopyRawPtrToThread(Thread::TopSirtOffset(), jni_conv->SirtLinkOffset(),
- jni_conv->InterproceduralScratchRegister());
-
- // 15. Check for pending exception and forward if there
- __ ExceptionPoll(jni_conv->InterproceduralScratchRegister());
-
- // 16. Remove activation
- if (is_synchronized) {
- __ RemoveFrame(frame_size, callee_save_regs);
- } else {
- // no need to restore callee save registers because we didn't
- // clobber them while locking the monitor.
- __ RemoveFrame(frame_size, std::vector<ManagedRegister>());
- }
-
- // 17. Finalize code generation
- __ EmitSlowPaths();
- size_t cs = __ CodeSize();
- std::vector<uint8_t> managed_code(cs);
- MemoryRegion code(&managed_code[0], managed_code.size());
- __ FinalizeInstructions(code);
- if (should_disassemble) {
- UniquePtr<Disassembler> disassembler(Disassembler::Create(instruction_set));
- disassembler->Dump(LOG(INFO), &managed_code[0], &managed_code[managed_code.size()]);
- }
- return new CompiledMethod(instruction_set,
- managed_code,
- frame_size,
- jni_conv->CoreSpillMask(),
- jni_conv->FpSpillMask());
-}
-
} // namespace art
extern "C" art::CompiledMethod* ArtJniCompileMethod(art::Compiler& compiler,
diff --git a/src/oat/jni/x86/calling_convention_x86.cc b/src/oat/jni/x86/calling_convention_x86.cc
index 1cd849c..90c050c 100644
--- a/src/oat/jni/x86/calling_convention_x86.cc
+++ b/src/oat/jni/x86/calling_convention_x86.cc
@@ -61,6 +61,10 @@
return ReturnRegisterForShorty(GetShorty(), true);
}
+ManagedRegister X86JniCallingConvention::IntReturnRegister() {
+ return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
// Managed runtime calling convention
ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
@@ -131,10 +135,6 @@
return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
}
-bool X86JniCallingConvention::IsMethodRegisterClobberedPreCall() {
- return IsSynchronized(); // Monitor enter crushes the method register
-}
-
bool X86JniCallingConvention::IsCurrentParamInRegister() {
return false; // Everything is passed by stack.
}
@@ -149,15 +149,17 @@
}
FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
- return FrameOffset(displacement_.Int32Value() - OutArgSize() +
- (itr_slots_ * kPointerSize));
+ return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kPointerSize));
}
size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
size_t static_args = IsStatic() ? 1 : 0; // count jclass
// regular argument parameters and this
size_t param_args = NumArgs() + NumLongOrDoubleArgs();
- return static_args + param_args + 2; // count JNIEnv* and return pc (pushed after Method*)
+ // count JNIEnv* and return pc (pushed after Method*)
+ size_t total_args = static_args + param_args + 2;
+ return total_args;
+
}
} // namespace x86
diff --git a/src/oat/jni/x86/calling_convention_x86.h b/src/oat/jni/x86/calling_convention_x86.h
index 959a37f..5116a46 100644
--- a/src/oat/jni/x86/calling_convention_x86.h
+++ b/src/oat/jni/x86/calling_convention_x86.h
@@ -49,6 +49,7 @@
virtual ~X86JniCallingConvention() {}
// Calling convention
virtual ManagedRegister ReturnRegister();
+ virtual ManagedRegister IntReturnRegister();
virtual ManagedRegister InterproceduralScratchRegister();
// JNI calling convention
virtual size_t FrameSize();
@@ -61,7 +62,6 @@
virtual uint32_t FpSpillMask() const {
return 0;
}
- virtual bool IsMethodRegisterClobberedPreCall();
virtual bool IsCurrentParamInRegister();
virtual bool IsCurrentParamOnStack();
virtual ManagedRegister CurrentParamRegister();
diff --git a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
index 37454fd..1aa069e 100644
--- a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
+++ b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
@@ -60,8 +60,17 @@
extern "C" void art_handle_fill_data_from_code(void*, void*);
// JNI entrypoints.
-extern Object* DecodeJObjectInThread(Thread* thread, jobject obj);
extern void* FindNativeMethod(Thread* thread);
+extern uint32_t JniMethodStart(Thread* self);
+extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self);
+extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self);
+extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked,
+ Thread* self);
+extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
+ Thread* self);
+extern Object* JniMethodEndWithReferenceSynchronized(jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked, Thread* self);
// Lock entrypoints.
extern "C" void art_lock_object_from_code(void*);
@@ -182,8 +191,13 @@
points->pHandleFillArrayDataFromCode = art_handle_fill_data_from_code;
// JNI
- points->pDecodeJObjectInThread = DecodeJObjectInThread;
points->pFindNativeMethod = FindNativeMethod;
+ points->pJniMethodStart = JniMethodStart;
+ points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ points->pJniMethodEnd = JniMethodEnd;
+ points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ points->pJniMethodEndWithReference = JniMethodEndWithReference;
+ points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
// Locks
points->pLockObjectFromCode = art_lock_object_from_code;
diff --git a/src/oat/runtime/arm/stub_arm.cc b/src/oat/runtime/arm/stub_arm.cc
index 5a20748..d3c94a8 100644
--- a/src/oat/runtime/arm/stub_arm.cc
+++ b/src/oat/runtime/arm/stub_arm.cc
@@ -17,6 +17,7 @@
#include "jni_internal.h"
#include "oat/utils/arm/assembler_arm.h"
#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat/runtime/stub.h"
#include "object.h"
#include "stack_indirect_reference_table.h"
diff --git a/src/oat/runtime/callee_save_frame.h b/src/oat/runtime/callee_save_frame.h
index 1509553..14ba046 100644
--- a/src/oat/runtime/callee_save_frame.h
+++ b/src/oat/runtime/callee_save_frame.h
@@ -23,9 +23,11 @@
class Method;
-// Place a special frame at the TOS that will save the callee saves for the given type
-static void FinishCalleeSaveFrameSetup(Thread* self, Method** sp, Runtime::CalleeSaveType type) {
- // Be aware the store below may well stomp on an incoming argument
+// Place a special frame at the TOS that will save the callee saves for the given type.
+static void FinishCalleeSaveFrameSetup(Thread* self, Method** sp, Runtime::CalleeSaveType type)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+ // Be aware the store below may well stomp on an incoming argument.
+ GlobalSynchronization::mutator_lock_->AssertSharedHeld();
*sp = Runtime::Current()->GetCalleeSaveMethod(type);
self->SetTopOfStack(sp, 0);
self->VerifyStack();
diff --git a/src/oat/runtime/oat_support_entrypoints.h b/src/oat/runtime/oat_support_entrypoints.h
index a235e4f..39d9eab 100644
--- a/src/oat/runtime/oat_support_entrypoints.h
+++ b/src/oat/runtime/oat_support_entrypoints.h
@@ -72,8 +72,14 @@
void (*pHandleFillArrayDataFromCode)(void*, void*);
// JNI
- Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj);
void* (*pFindNativeMethod)(Thread* thread);
+ uint32_t (*pJniMethodStart)(Thread*);
+ uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self);
+ void (*pJniMethodEnd)(uint32_t cookie, Thread* self);
+ void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self);
+ Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self);
+ Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie,
+ jobject locked, Thread* self);
// Locks
void (*pLockObjectFromCode)(void*);
diff --git a/src/oat/runtime/stub.h b/src/oat/runtime/stub.h
new file mode 100644
index 0000000..5d8b37d
--- /dev/null
+++ b/src/oat/runtime/stub.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_OAT_RUNTIME_STUB_H_
+#define ART_SRC_OAT_RUNTIME_OAT_RUNTIME_STUB_H_
+
+#include "runtime.h"
+
+namespace art {
+
+namespace arm {
+ ByteArray* CreateAbstractMethodErrorStub()
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+ ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+ ByteArray* CreateJniDlsymLookupStub()
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+}
+
+namespace x86 {
+ ByteArray* CreateAbstractMethodErrorStub()
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+ ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+ ByteArray* CreateJniDlsymLookupStub()
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
+}
+
+} // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_OAT_RUNTIME_STUB_H_
diff --git a/src/oat/runtime/support_alloc.cc b/src/oat/runtime/support_alloc.cc
index d9394d2..4a03f98 100644
--- a/src/oat/runtime/support_alloc.cc
+++ b/src/oat/runtime/support_alloc.cc
@@ -20,39 +20,45 @@
namespace art {
extern "C" Object* artAllocObjectFromCode(uint32_t type_idx, Method* method,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return AllocObjectFromCode(type_idx, method, self, false);
}
extern "C" Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, Method* method,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return AllocObjectFromCode(type_idx, method, self, true);
}
extern "C" Array* artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return AllocArrayFromCode(type_idx, method, component_count, self, false);
}
extern "C" Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method,
int32_t component_count,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return AllocArrayFromCode(type_idx, method, component_count, self, true);
}
extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method,
- int32_t component_count, Thread* self, Method** sp) {
+ int32_t component_count, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false);
}
extern "C" Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method,
int32_t component_count,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true);
}
diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc
index 139239f..ea083f1 100644
--- a/src/oat/runtime/support_cast.cc
+++ b/src/oat/runtime/support_cast.cc
@@ -20,14 +20,16 @@
namespace art {
// Assignable test for code, won't throw. Null and equality tests already performed
-extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class) {
+extern "C" uint32_t artIsAssignableFromCode(const Class* klass, const Class* ref_class)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
DCHECK(klass != NULL);
DCHECK(ref_class != NULL);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
}
// Check whether it is safe to cast one class to the other, throw exception and return -1 on failure
-extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, Method** sp) {
+extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
DCHECK(a->IsClass()) << PrettyClass(a);
DCHECK(b->IsClass()) << PrettyClass(b);
if (LIKELY(b->IsAssignableFrom(a))) {
@@ -45,7 +47,8 @@
// Tests whether 'element' can be assigned into an array of type 'array_class'.
// Returns 0 on success and -1 if an exception is pending.
extern "C" int artCanPutArrayElementFromCode(const Object* element, const Class* array_class,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
DCHECK(array_class != NULL);
// element can't be NULL as we catch this is screened in runtime_support
Class* element_class = element->GetClass();
diff --git a/src/oat/runtime/support_debug.cc b/src/oat/runtime/support_debug.cc
index ef6e0b1..9968043 100644
--- a/src/oat/runtime/support_debug.cc
+++ b/src/oat/runtime/support_debug.cc
@@ -25,13 +25,15 @@
* method entry and offset 0 within the method, we'll use an offset of -1
* to denote method entry.
*/
-extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, Method** sp) {
+extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
Dbg::UpdateDebugger(dex_pc, self);
}
// Temporary debugging hook for compiler.
-extern void DebugMe(Method* method, uint32_t info) {
+extern void DebugMe(Method* method, uint32_t info)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
LOG(INFO) << "DebugMe";
if (method != NULL) {
LOG(INFO) << PrettyMethod(method);
diff --git a/src/oat/runtime/support_dexcache.cc b/src/oat/runtime/support_dexcache.cc
index 49e038d..8e7c2ad 100644
--- a/src/oat/runtime/support_dexcache.cc
+++ b/src/oat/runtime/support_dexcache.cc
@@ -20,7 +20,8 @@
namespace art {
extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Method* referrer,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
@@ -29,7 +30,8 @@
}
extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* referrer, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return ResolveVerifyAndClinit(type_idx, referrer, self, false, false);
@@ -37,7 +39,8 @@
extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
const Method* referrer, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
@@ -45,7 +48,8 @@
}
extern "C" String* artResolveStringFromCode(Method* referrer, int32_t string_idx,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
return ResolveStringFromCode(referrer, string_idx);
}
diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc
index 77fe618..99e3a94 100644
--- a/src/oat/runtime/support_field.cc
+++ b/src/oat/runtime/support_field.cc
@@ -22,7 +22,8 @@
namespace art {
extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* referrer,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int32_t));
if (LIKELY(field != NULL)) {
return field->Get32(NULL);
@@ -36,7 +37,8 @@
}
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* referrer,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int64_t));
if (LIKELY(field != NULL)) {
return field->Get64(NULL);
@@ -50,7 +52,8 @@
}
extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const Method* referrer,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, false, false, sizeof(Object*));
if (LIKELY(field != NULL)) {
return field->GetObj(NULL);
@@ -64,7 +67,8 @@
}
extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj,
- const Method* referrer, Thread* self, Method** sp) {
+ const Method* referrer, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int32_t));
if (LIKELY(field != NULL && obj != NULL)) {
return field->Get32(obj);
@@ -82,7 +86,8 @@
}
extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj,
- const Method* referrer, Thread* self, Method** sp) {
+ const Method* referrer, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int64_t));
if (LIKELY(field != NULL && obj != NULL)) {
return field->Get64(obj);
@@ -100,7 +105,8 @@
}
extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj,
- const Method* referrer, Thread* self, Method** sp) {
+ const Method* referrer, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, false, false, sizeof(Object*));
if (LIKELY(field != NULL && obj != NULL)) {
return field->GetObj(obj);
@@ -118,7 +124,8 @@
}
extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
- const Method* referrer, Thread* self, Method** sp) {
+ const Method* referrer, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int32_t));
if (LIKELY(field != NULL)) {
field->Set32(NULL, new_value);
@@ -134,7 +141,8 @@
}
extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer,
- uint64_t new_value, Thread* self, Method** sp) {
+ uint64_t new_value, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int64_t));
if (LIKELY(field != NULL)) {
field->Set64(NULL, new_value);
@@ -150,7 +158,8 @@
}
extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value,
- const Method* referrer, Thread* self, Method** sp) {
+ const Method* referrer, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, false, true, sizeof(Object*));
if (LIKELY(field != NULL)) {
if (LIKELY(!FieldHelper(field).IsPrimitiveType())) {
@@ -168,7 +177,8 @@
}
extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_t new_value,
- const Method* referrer, Thread* self, Method** sp) {
+ const Method* referrer, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int32_t));
if (LIKELY(field != NULL && obj != NULL)) {
field->Set32(obj, new_value);
@@ -188,7 +198,8 @@
}
extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_t new_value,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Method* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly);
Method* referrer = sp[callee_save->GetFrameSizeInBytes() / sizeof(Method*)];
Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int64_t));
@@ -211,7 +222,8 @@
}
extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, Object* obj, Object* new_value,
- const Method* referrer, Thread* self, Method** sp) {
+ const Method* referrer, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, false, true, sizeof(Object*));
if (LIKELY(field != NULL && obj != NULL)) {
field->SetObj(obj, new_value);
diff --git a/src/oat/runtime/support_fillarray.cc b/src/oat/runtime/support_fillarray.cc
index 7227f6b..8561bd8 100644
--- a/src/oat/runtime/support_fillarray.cc
+++ b/src/oat/runtime/support_fillarray.cc
@@ -37,7 +37,8 @@
*/
extern "C" int artHandleFillArrayDataFromCode(Array* array,
const Instruction::ArrayDataPayload* payload,
- Thread* self, Method** sp) {
+ Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
if (UNLIKELY(array == NULL)) {
diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc
index 4669688..9c7b3a2 100644
--- a/src/oat/runtime/support_invoke.cc
+++ b/src/oat/runtime/support_invoke.cc
@@ -20,7 +20,8 @@
namespace art {
static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method* caller_method,
- Thread* self, Method** sp, bool access_check, InvokeType type) {
+ Thread* self, Method** sp, bool access_check, InvokeType type)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
if (UNLIKELY(method == NULL)) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
@@ -54,14 +55,16 @@
// See comments in runtime_support_asm.S
extern "C" uint64_t artInvokeInterfaceTrampoline(uint32_t method_idx, Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, false, kInterface);
}
extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface);
}
@@ -69,28 +72,32 @@
extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect);
}
extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic);
}
extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper);
}
extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
Object* this_object,
Method* caller_method, Thread* self,
- Method** sp) {
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual);
}
diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc
index cfa1a11..bbff673 100644
--- a/src/oat/runtime/support_jni.cc
+++ b/src/oat/runtime/support_jni.cc
@@ -16,20 +16,23 @@
#include "object.h"
#include "object_utils.h"
+#include "scoped_thread_state_change.h"
#include "thread.h"
namespace art {
// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
-extern void* FindNativeMethod(Thread* self) {
+extern void* FindNativeMethod(Thread* self) LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_) {
+ GlobalSynchronization::mutator_lock_->AssertNotHeld(); // We come here as Native.
DCHECK(Thread::Current() == self);
+ ScopedObjectAccess soa(self);
- Method* method = const_cast<Method*>(self->GetCurrentMethod());
+ Method* method = self->GetCurrentMethod();
DCHECK(method != NULL);
// Lookup symbol address for method, on failure we'll return NULL with an
// exception set, otherwise we return the address of the method we found.
- void* native_code = self->GetJniEnv()->vm->FindCodeForNativeMethod(method);
+ void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
if (native_code == NULL) {
DCHECK(self->IsExceptionPending());
return NULL;
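ScopedObjectAccess from the new scoped_thread_state_change.h replaces ScopedJniThreadState. It is assumed to be an RAII helper that makes the calling thread Runnable (taking a share of mutator_lock_) for the duration of the scope and reverts the transition on destruction; a simplified sketch, not the actual class:

  // Simplified sketch of the assumed ScopedObjectAccess behaviour.
  class ScopedObjectAccess {
   public:
    explicit ScopedObjectAccess(Thread* self) : self_(self) {
      // Become Runnable: acquire a share of GlobalSynchronization::mutator_lock_.
      self_->TransitionFromSuspendedToRunnable();
    }
    ~ScopedObjectAccess() {
      // Back to Native: release the share so GC/suspension can gain exclusive access.
      self_->TransitionFromRunnableToSuspended(kNative);
    }
    template<typename T> T AddLocalReference(Object* obj) const;  // plus Decode/Vm helpers
   private:
    Thread* const self_;
  };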
@@ -40,23 +43,61 @@
}
}
-// Return value helper for jobject return types, used for JNI return values.
-extern Object* DecodeJObjectInThread(Thread* self, jobject java_object) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- Object* o = self->DecodeJObject(java_object);
- if (o == NULL || !self->GetJniEnv()->check_jni) {
- return o;
- }
+// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_.
+extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {
+ JNIEnvExt* env = self->GetJniEnv();
+ uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ env->local_ref_cookie = env->locals.GetSegmentState();
+ self->TransitionFromRunnableToSuspended(kNative);
+ return saved_local_ref_cookie;
+}
+extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self)
+ UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {
+ self->DecodeJObject(to_lock)->MonitorEnter(self);
+ return JniMethodStart(self);
+}
+
+static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) {
+ JNIEnvExt* env = self->GetJniEnv();
+ env->locals.SetSegmentState(env->local_ref_cookie);
+ env->local_ref_cookie = saved_local_ref_cookie;
+ self->PopSirt();
+}
+
+static void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_)
+ UNLOCK_FUNCTION(monitor_lock_) {
+ // Save any pending exception over monitor exit call.
+ Throwable* saved_exception = NULL;
+ if (UNLIKELY(self->IsExceptionPending())) {
+ saved_exception = self->GetException();
+ self->ClearException();
+ }
+ // Decode locked object and unlock, before popping local references.
+ self->DecodeJObject(locked)->MonitorExit(self);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ LOG(FATAL) << "Synchronized JNI code returning with an exception:\n"
+ << saved_exception->Dump()
+ << "\nEncountered second exception during implicit MonitorExit:\n"
+ << self->GetException()->Dump();
+ }
+ // Restore pending exception.
+ if (saved_exception != NULL) {
+ self->SetException(saved_exception);
+ }
+}
+
+static void CheckReferenceResult(Object* o, Thread* self)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+ if (o == NULL) {
+ return;
+ }
if (o == kInvalidIndirectRefObject) {
JniAbortF(NULL, "invalid reference returned from %s",
PrettyMethod(self->GetCurrentMethod()).c_str());
}
-
- // Make sure that the result is an instance of the type this
- // method was expected to return.
+ // Make sure that the result is an instance of the type this method was expected to return.
Method* m = self->GetCurrentMethod();
MethodHelper mh(m);
Class* return_type = mh.GetReturnType();
@@ -65,7 +106,53 @@
JniAbortF(NULL, "attempt to return an instance of %s from %s",
PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str());
}
+}
+extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self)
+ SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, Thread* self)
+ SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ Object* o = self->DecodeJObject(result); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
+ return o;
+}
+
+extern Object* JniMethodEndWithReferenceSynchronized(jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked, Thread* self)
+ SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ Object* o = self->DecodeJObject(result);
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
return o;
}
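The JniMethodStart/JniMethodEnd* pairs above replace DecodeJObjectInThread and are the calls a generated JNI stub is expected to bracket the native invocation with. A hedged sketch of that sequence for a non-synchronized, object-returning method (CallObjectReturningNative and native_fn are illustrative names):

  // Sketch of the expected entry/exit protocol, not the generated assembly.
  Object* CallObjectReturningNative(Thread* self, jobject (*native_fn)(JNIEnv*, jobject),
                                    jobject receiver) {
    // Transition Runnable -> Native and remember the local reference cookie.
    uint32_t cookie = JniMethodStart(self);
    // Native code runs without a share of the mutator lock held.
    jobject result = native_fn(self->GetJniEnv(), receiver);
    // Transition back to Runnable; the result is decoded before local refs are popped.
    return JniMethodEndWithReference(result, cookie, self);
  }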
@@ -77,7 +164,8 @@
*arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
}
-extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) {
+extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
DCHECK(Thread::Current() == self);
// TODO: this code is specific to ARM
// On entry the stack pointed by sp is:
diff --git a/src/oat/runtime/support_locks.cc b/src/oat/runtime/support_locks.cc
index 30fc567..9d44e55 100644
--- a/src/oat/runtime/support_locks.cc
+++ b/src/oat/runtime/support_locks.cc
@@ -19,14 +19,16 @@
namespace art {
-extern "C" int artUnlockObjectFromCode(Object* obj, Thread* self, Method** sp) {
+extern "C" int artUnlockObjectFromCode(Object* obj, Thread* self, Method** sp)
+ UNLOCK_FUNCTION(monitor_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
DCHECK(obj != NULL); // Assumed to have been checked before entry
// MonitorExit may throw exception
return obj->MonitorExit(self) ? 0 /* Success */ : -1 /* Failure */;
}
-extern "C" void artLockObjectFromCode(Object* obj, Thread* thread, Method** sp) {
+extern "C" void artLockObjectFromCode(Object* obj, Thread* thread, Method** sp)
+ EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
DCHECK(obj != NULL); // Assumed to have been checked before entry
obj->MonitorEnter(thread); // May block
diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc
index 83d2265..972779d 100644
--- a/src/oat/runtime/support_proxy.cc
+++ b/src/oat/runtime/support_proxy.cc
@@ -18,7 +18,7 @@
#include "object_utils.h"
#include "reflection.h"
#include "runtime_support.h"
-#include "scoped_jni_thread_state.h"
+#include "scoped_thread_state_change.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -43,7 +43,8 @@
// reference arguments (so they survive GC) and create a boxed argument array. Finally we invoke
// the invocation handler which is a field within the proxy object receiver.
extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver,
- Thread* self, byte* stack_args) {
+ Thread* self, byte* stack_args)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
// Register the top of the managed stack
Method** proxy_sp = reinterpret_cast<Method**>(stack_args - SP_OFFSET_IN_BYTES);
DCHECK_EQ(*proxy_sp, proxy_method);
@@ -51,11 +52,11 @@
DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), FRAME_SIZE_IN_BYTES);
// Start new JNI local reference state
JNIEnvExt* env = self->GetJniEnv();
- ScopedJniThreadState ts(env);
+ ScopedObjectAccessUnchecked soa(env);
ScopedJniEnvLocalRefState env_state(env);
// Create local ref. copies of proxy method and the receiver
- jobject rcvr_jobj = ts.AddLocalReference<jobject>(receiver);
- jobject proxy_method_jobj = ts.AddLocalReference<jobject>(proxy_method);
+ jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+ jobject proxy_method_jobj = soa.AddLocalReference<jobject>(proxy_method);
// Placing into local references incoming arguments from the caller's register arguments,
// replacing original Object* with jobject
@@ -74,7 +75,7 @@
while (cur_arg < args_in_regs && param_index < num_params) {
if (proxy_mh.IsParamAReference(param_index)) {
Object* obj = *reinterpret_cast<Object**>(stack_args + (cur_arg * kPointerSize));
- jobject jobj = ts.AddLocalReference<jobject>(obj);
+ jobject jobj = soa.AddLocalReference<jobject>(obj);
*reinterpret_cast<jobject*>(stack_args + (cur_arg * kPointerSize)) = jobj;
}
cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 2 : 1);
@@ -85,7 +86,7 @@
while (param_index < num_params) {
if (proxy_mh.IsParamAReference(param_index)) {
Object* obj = *reinterpret_cast<Object**>(stack_args + (cur_arg * kPointerSize));
- jobject jobj = ts.AddLocalReference<jobject>(obj);
+ jobject jobj = soa.AddLocalReference<jobject>(obj);
*reinterpret_cast<jobject*>(stack_args + (cur_arg * kPointerSize)) = jobj;
}
cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 2 : 1);
@@ -104,13 +105,13 @@
CHECK(self->IsExceptionPending());
return;
}
- args_jobj[2].l = ts.AddLocalReference<jobjectArray>(args);
+ args_jobj[2].l = soa.AddLocalReference<jobjectArray>(args);
}
// Convert proxy method into expected interface method
Method* interface_method = proxy_method->FindOverriddenMethod();
DCHECK(interface_method != NULL);
DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- args_jobj[1].l = ts.AddLocalReference<jobject>(interface_method);
+ args_jobj[1].l = soa.AddLocalReference<jobject>(interface_method);
// Box arguments
cur_arg = 0; // reset stack location to read to start
// reset index, will index into param type array which doesn't include the receiver
diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc
index 3f6bc8f..013f885 100644
--- a/src/oat/runtime/support_stubs.cc
+++ b/src/oat/runtime/support_stubs.cc
@@ -23,7 +23,7 @@
#if defined(ART_USE_LLVM_COMPILER)
#include "nth_caller_visitor.h"
#endif
-#include "scoped_jni_thread_state.h"
+#include "scoped_thread_state_change.h"
// Architecture specific assembler helper to deliver exception.
extern "C" void art_deliver_exception_from_code(void*);
@@ -33,7 +33,8 @@
#if !defined(ART_USE_LLVM_COMPILER)
// Lazily resolve a method. Called by stub code.
const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp, Thread* thread,
- Runtime::TrampolineType type) {
+ Runtime::TrampolineType type)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
#if defined(__arm__)
// On entry the stack pointed by sp is:
// | argN | |
@@ -82,7 +83,7 @@
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs);
// Start new JNI local reference state
JNIEnvExt* env = thread->GetJniEnv();
- ScopedJniThreadState ts(env);
+ ScopedObjectAccessUnchecked soa(env);
ScopedJniEnvLocalRefState env_state(env);
// Compute details about the called method (avoid GCs)
@@ -147,7 +148,7 @@
// If we thought we had fewer than 3 arguments in registers, account for the receiver
args_in_regs++;
}
- ts.AddLocalReference<jobject>(obj);
+ soa.AddLocalReference<jobject>(obj);
}
size_t shorty_index = 1; // skip return value
// Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip
@@ -157,7 +158,7 @@
shorty_index++;
if (c == 'L') {
Object* obj = reinterpret_cast<Object*>(regs[cur_arg]);
- ts.AddLocalReference<jobject>(obj);
+ soa.AddLocalReference<jobject>(obj);
}
cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
}
@@ -168,7 +169,7 @@
shorty_index++;
if (c == 'L') {
Object* obj = reinterpret_cast<Object*>(regs[cur_arg]);
- ts.AddLocalReference<jobject>(obj);
+ soa.AddLocalReference<jobject>(obj);
}
cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
}
@@ -308,7 +309,8 @@
#if !defined(ART_USE_LLVM_COMPILER)
// Called by the AbstractMethodError. Called by stub code.
-extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp) {
+extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;",
"abstract method \"%s\"", PrettyMethod(method).c_str());
diff --git a/src/oat/runtime/support_thread.cc b/src/oat/runtime/support_thread.cc
index 6cd595b..32284bb 100644
--- a/src/oat/runtime/support_thread.cc
+++ b/src/oat/runtime/support_thread.cc
@@ -20,15 +20,18 @@
namespace art {
-void CheckSuspendFromCode(Thread* thread) {
- // Called when thread->suspend_count_ != 0
- Runtime::Current()->GetThreadList()->FullSuspendCheck(thread);
+void CheckSuspendFromCode(Thread* thread)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
+ // Called when thread->suspend_count_ != 0 on JNI return. JNI method acts as callee-save frame.
+ thread->VerifyStack();
+ thread->FullSuspendCheck();
}
-extern "C" void artTestSuspendFromCode(Thread* thread, Method** sp) {
+extern "C" void artTestSuspendFromCode(Thread* thread, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
// Called when suspend count check value is 0 and thread->suspend_count_ != 0
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
- Runtime::Current()->GetThreadList()->FullSuspendCheck(thread);
+ thread->FullSuspendCheck();
}
} // namespace art
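Both suspend hooks now call Thread::FullSuspendCheck instead of going through the ThreadList. Under the shared/exclusive mutator lock, a full suspend check is assumed to be just a round trip through the suspended state, which gives a pending exclusive holder (GC, debugger suspension) a chance to run; roughly:

  // Hedged sketch of the assumed Thread::FullSuspendCheck behaviour.
  void Thread::FullSuspendCheck() {
    // Release our share of the mutator lock; an exclusive requester may now proceed.
    TransitionFromRunnableToSuspended(kSuspended);
    // Re-acquire a share, blocking while suspend_count_ != 0.
    TransitionFromSuspendedToRunnable();
  }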
diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc
index 31cf7d9..4fa2387 100644
--- a/src/oat/runtime/support_throw.cc
+++ b/src/oat/runtime/support_throw.cc
@@ -23,13 +23,15 @@
namespace art {
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
-extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, Method** sp) {
+extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
thread->DeliverException();
}
// Called by generated call to throw an exception.
-extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, Method** sp) {
+extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
/*
* exception may be NULL, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
@@ -47,7 +49,8 @@
}
// Called by generated call to throw a NPE exception.
-extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) {
+extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
uint32_t dex_pc;
Method* throw_method = self->GetCurrentMethod(&dex_pc);
@@ -56,21 +59,24 @@
}
// Called by generated call to throw an arithmetic divide by zero exception.
-extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) {
+extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero");
thread->DeliverException();
}
// Called by generated call to throw an array index out of bounds exception.
-extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, Method** sp) {
+extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"length=%d; index=%d", limit, index);
thread->DeliverException();
}
-extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) {
+extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
// Remove extra entry pushed onto second stack during method tracing.
if (Runtime::Current()->IsMethodTracingActive()) {
@@ -83,7 +89,8 @@
thread->DeliverException();
}
-extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp) {
+extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
Method* method = self->GetCurrentMethod();
self->ThrowNewException("Ljava/lang/NoSuchMethodError;",
@@ -91,7 +98,9 @@
self->DeliverException();
}
-extern "C" void artThrowVerificationErrorFromCode(int32_t kind, int32_t ref, Thread* self, Method** sp) {
+extern "C" void artThrowVerificationErrorFromCode(int32_t kind, int32_t ref, Thread* self,
+ Method** sp)
+ SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
Method* method = self->GetCurrentMethod();
ThrowVerificationError(self, method, kind, ref);
diff --git a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
index a28a898..e52569d 100644
--- a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
+++ b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
@@ -59,8 +59,17 @@
extern "C" void art_handle_fill_data_from_code(void*, void*);
// JNI entrypoints.
-extern Object* DecodeJObjectInThread(Thread* thread, jobject obj);
extern void* FindNativeMethod(Thread* thread);
+extern uint32_t JniMethodStart(Thread* self);
+extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self);
+extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self);
+extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked,
+ Thread* self);
+extern Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
+ Thread* self);
+extern Object* JniMethodEndWithReferenceSynchronized(jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked, Thread* self);
// Lock entrypoints.
extern "C" void art_lock_object_from_code(void*);
@@ -153,8 +162,13 @@
points->pHandleFillArrayDataFromCode = art_handle_fill_data_from_code;
// JNI
- points->pDecodeJObjectInThread = DecodeJObjectInThread;
points->pFindNativeMethod = FindNativeMethod;
+ points->pJniMethodStart = JniMethodStart;
+ points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ points->pJniMethodEnd = JniMethodEnd;
+ points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ points->pJniMethodEndWithReference = JniMethodEndWithReference;
+ points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
// Locks
points->pLockObjectFromCode = art_lock_object_from_code;
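The pJniMethod* assignments above imply matching function-pointer slots in the shared entry-point table (oat_support_entrypoints.h); a sketch of the assumed fields, mirroring the extern declarations earlier in this file:

  // Hedged sketch: assumed additions to the entry-point table consumed by generated code.
  uint32_t (*pJniMethodStart)(Thread* self);
  uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self);
  void (*pJniMethodEnd)(uint32_t cookie, Thread* self);
  void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self);
  Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self);
  Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie,
                                                    jobject locked, Thread* self);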
diff --git a/src/oat/runtime/x86/stub_x86.cc b/src/oat/runtime/x86/stub_x86.cc
index a9db314..74e0f39 100644
--- a/src/oat/runtime/x86/stub_x86.cc
+++ b/src/oat/runtime/x86/stub_x86.cc
@@ -16,6 +16,7 @@
#include "jni_internal.h"
#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat/runtime/stub.h"
#include "oat/utils/x86/assembler_x86.h"
#include "object.h"
#include "stack_indirect_reference_table.h"
diff --git a/src/oat/utils/arm/assembler_arm.cc b/src/oat/utils/arm/assembler_arm.cc
index 55b6187..de665dd 100644
--- a/src/oat/utils/arm/assembler_arm.cc
+++ b/src/oat/utils/arm/assembler_arm.cc
@@ -1440,10 +1440,9 @@
const std::vector<ManagedRegister>& callee_save_regs,
const std::vector<ManagedRegister>& entry_spills) {
CHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_EQ(entry_spills.size(), 0u);
CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
- // Push callee saves and link register
+ // Push callee saves and link register.
RegList push_list = 1 << LR;
size_t pushed_values = 1;
for (size_t i = 0; i < callee_save_regs.size(); i++) {
@@ -1453,13 +1452,19 @@
}
PushList(push_list);
- // Increase frame to required size
+ // Increase frame to required size.
CHECK_GT(frame_size, pushed_values * kPointerSize); // Must be at least space to push Method*
size_t adjust = frame_size - (pushed_values * kPointerSize);
IncreaseFrameSize(adjust);
- // Write out Method*
+ // Write out Method*.
StoreToOffset(kStoreWord, R0, SP, 0);
+
+ // Write out entry spills.
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
+ StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
+ }
}
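With BuildFrame now taking entry spills, a code generator is expected to pass the convention's EntrySpills() so the R1-R3 argument registers land in the in-argument slots just above the new frame. A hedged usage sketch (EmitManagedPrologue is an illustrative helper, not existing code):

  // Hedged sketch: how a generator is assumed to drive the updated BuildFrame.
  static void EmitManagedPrologue(ArmAssembler* jni_asm,
                                  ArmManagedRuntimeCallingConvention* mr_conv,
                                  size_t frame_size,
                                  const std::vector<ManagedRegister>& callee_save_regs) {
    // Entry spills are computed lazily: R1-R3, plus an extra slot per long/double argument.
    const std::vector<ManagedRegister>& entry_spills = mr_conv->EntrySpills();
    // Pushes callee saves + LR, grows the frame, stores Method* at [SP], then stores each
    // spill at SP + frame_size + kPointerSize + i * kPointerSize.
    jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, entry_spills);
  }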
void ArmAssembler::RemoveFrame(size_t frame_size,
@@ -1891,9 +1896,9 @@
#undef __
}
-void ArmAssembler::ExceptionPoll(ManagedRegister mscratch) {
+void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
ArmManagedRegister scratch = mscratch.AsArm();
- ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch);
+ ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
buffer_.EnqueueSlowPath(slow);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
TR, Thread::ExceptionOffset().Int32Value());
@@ -1905,7 +1910,9 @@
ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
#define __ sp_asm->
__ Bind(&entry_);
-
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
// Pass exception object as argument
// Don't care about preserving R0 as this call won't return
__ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
diff --git a/src/oat/utils/arm/assembler_arm.h b/src/oat/utils/arm/assembler_arm.h
index edfaf30..2410bac 100644
--- a/src/oat/utils/arm/assembler_arm.h
+++ b/src/oat/utils/arm/assembler_arm.h
@@ -564,7 +564,7 @@
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- virtual void ExceptionPoll(ManagedRegister scratch);
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
private:
void EmitType01(Condition cond,
@@ -650,10 +650,13 @@
// Slowpath entered when Thread::Current()->_exception is non-null
class ArmExceptionSlowPath : public SlowPath {
public:
- explicit ArmExceptionSlowPath(ArmManagedRegister scratch) : scratch_(scratch) {}
+ explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {
+ }
virtual void Emit(Assembler *sp_asm);
private:
const ArmManagedRegister scratch_;
+ const size_t stack_adjust_;
};
// Slowpath entered when Thread::Current()->_suspend_count is non-zero
diff --git a/src/oat/utils/assembler.h b/src/oat/utils/assembler.h
index dabd321..68108e7 100644
--- a/src/oat/utils/assembler.h
+++ b/src/oat/utils/assembler.h
@@ -446,7 +446,7 @@
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- virtual void ExceptionPoll(ManagedRegister scratch) = 0;
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
virtual ~Assembler() {}
diff --git a/src/oat/utils/x86/assembler_x86.cc b/src/oat/utils/x86/assembler_x86.cc
index b7f0c1f..78f2b57 100644
--- a/src/oat/utils/x86/assembler_x86.cc
+++ b/src/oat/utils/x86/assembler_x86.cc
@@ -1862,8 +1862,8 @@
#undef __
}
-void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/) {
- X86ExceptionSlowPath* slow = new X86ExceptionSlowPath();
+void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0));
j(kNotEqual, slow->Entry());
@@ -1874,6 +1874,9 @@
#define __ sp_asm->
__ Bind(&entry_);
// Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
// Pass exception as argument in EAX
__ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
__ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException)));
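ExceptionPoll now carries the amount of extra stack the emitter pushed after BuildFrame (for example out-going argument space), so the slow path can drop it before delivering the exception. A hedged usage sketch (EmitExceptionPoll and main_out_arg_size are illustrative names):

  // Hedged sketch: poll with the current extra stack so the slow path can unwind it.
  static void EmitExceptionPoll(Assembler* jni_asm, ManagedRegister scratch,
                                size_t main_out_arg_size) {
    // The slow path emits DecreaseFrameSize(main_out_arg_size) before tail-calling
    // pDeliverException, restoring the BuildFrame layout first.
    jni_asm->ExceptionPoll(scratch, main_out_arg_size);
  }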
diff --git a/src/oat/utils/x86/assembler_x86.h b/src/oat/utils/x86/assembler_x86.h
index c8edf44..7291211 100644
--- a/src/oat/utils/x86/assembler_x86.h
+++ b/src/oat/utils/x86/assembler_x86.h
@@ -598,7 +598,7 @@
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- virtual void ExceptionPoll(ManagedRegister scratch);
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
private:
inline void EmitUint8(uint8_t value);
@@ -650,8 +650,10 @@
// Slowpath entered when Thread::Current()->_exception is non-null
class X86ExceptionSlowPath : public SlowPath {
public:
- X86ExceptionSlowPath() {}
+ explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
virtual void Emit(Assembler *sp_asm);
+ private:
+ const size_t stack_adjust_;
};
// Slowpath entered when Thread::Current()->_suspend_count is non-zero