Revert "Revert "Add implicit null and stack checks for x86""
Fixes the x86_64 cross-compile issue. Removes the command line options
and the system property used to select implicit checks; these are now hard-coded.
This reverts commit 3d14eb620716e92c21c4d2c2d11a95be53319791.
Change-Id: I5404473b5aaf1a9c68b7181f5952cb174d93a90d
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 7f5cf0c..3774b32 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -412,6 +412,7 @@
LOCAL_STATIC_LIBRARIES := libziparchive libz
else # host
LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
+ LOCAL_SHARED_LIBRARIES += libsigchain
LOCAL_LDLIBS += -ldl -lpthread
ifeq ($$(HOST_OS),linux)
LOCAL_LDLIBS += -lrt
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 2a82129..e22c56e 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -46,9 +46,10 @@
return instr_size;
}
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->arm_sp);
VLOG(signals) << "sp: " << *out_sp;
@@ -114,7 +115,7 @@
uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
uint16_t checkinst2 = 0x6800;
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->arm_pc);
uint8_t* ptr1 = ptr2 - 4;
@@ -178,7 +179,7 @@
// to the overflow region below the protected region.
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
VLOG(signals) << "sigcontext: " << std::hex << sc;
@@ -205,7 +206,7 @@
}
// We know this is a stack overflow. We need to move the sp to the overflow region
- // the exists below the protected region. Determine the address of the next
+ // that exists below the protected region. Determine the address of the next
// available valid address below the protected region.
uintptr_t prevsp = sp;
sp = pregion;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 74c3023..34eede6 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 1ecd7d9..5a64a69 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 7c1980e..f62200a 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -21,6 +21,10 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "thread.h"
+#include "thread-inl.h"
//
@@ -29,19 +33,294 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_test_suspend();
+
+// From the x86 disassembler...
+enum SegmentPrefix {
+ kCs = 0x2e,
+ kSs = 0x36,
+ kDs = 0x3e,
+ kEs = 0x26,
+ kFs = 0x64,
+ kGs = 0x65,
+};
+
+// Get the size of an instruction in bytes.
+static uint32_t GetInstructionSize(uint8_t* pc) {
+ uint8_t* instruction_start = pc;
+ bool have_prefixes = true;
+ bool two_byte = false;
+
+ // Skip all the prefixes.
+ do {
+ switch (*pc) {
+ // Group 1 - lock and repeat prefixes:
+ case 0xF0:
+ case 0xF2:
+ case 0xF3:
+ // Group 2 - segment override prefixes:
+ case kCs:
+ case kSs:
+ case kDs:
+ case kEs:
+ case kFs:
+ case kGs:
+ // Group 3 - operand size override:
+ case 0x66:
+ // Group 4 - address size override:
+ case 0x67:
+ break;
+ default:
+ have_prefixes = false;
+ break;
+ }
+ if (have_prefixes) {
+ pc++;
+ }
+ } while (have_prefixes);
+
+#if defined(__x86_64__)
+ // Skip the REX prefix if present.
+ if (*pc >= 0x40 && *pc <= 0x4F) {
+ ++pc;
+ }
+#endif
+
+ // Check for known instructions.
+ uint32_t known_length = 0;
+ switch (*pc) {
+ case 0x83: // cmp [r + v], b: 4 byte instruction
+ known_length = 4;
+ break;
+ }
+
+ if (known_length > 0) {
+ VLOG(signals) << "known instruction with length " << known_length;
+ return known_length;
+ }
+
+ // Unknown instruction, work out length.
+
+ // Work out if we have a ModR/M byte.
+ uint8_t opcode = *pc++;
+ if (opcode == 0xf) {
+ two_byte = true;
+ opcode = *pc++;
+ }
+
+ bool has_modrm = false; // Is ModR/M byte present?
+ uint8_t hi = opcode >> 4; // Opcode high nybble.
+ uint8_t lo = opcode & 0b1111; // Opcode low nybble.
+
+ // From the Intel opcode tables.
+ if (two_byte) {
+ has_modrm = true; // TODO: all of these?
+ } else if (hi < 4) {
+ has_modrm = lo < 4 || (lo >= 8 && lo <= 0xb);
+ } else if (hi == 6) {
+ has_modrm = lo == 3 || lo == 9 || lo == 0xb;
+ } else if (hi == 8) {
+ has_modrm = lo != 0xd;
+ } else if (hi == 0xc) {
+ has_modrm = lo == 1 || lo == 2 || lo == 6 || lo == 7;
+ } else if (hi == 0xd) {
+ has_modrm = lo < 4;
+ } else if (hi == 0xf) {
+ has_modrm = lo == 6 || lo == 7;
+ }
+
+ if (has_modrm) {
+ uint8_t modrm = *pc++;
+ uint8_t mod = (modrm >> 6) & 0b11;
+ uint8_t rm = modrm & 0b111; // A SIB byte follows when rm == 4.
+ switch (mod) {
+ case 0:
+ if (rm == 4) {
+ // SIB byte, no displacement.
+ pc += 1;
+ } else if (rm == 5) {
+ // 4 byte displacement, no base register.
+ pc += 4;
+ }
+ break;
+ case 1:
+ if (rm == 4) {
+ // SIB + 1 byte displacement.
+ pc += 2;
+ } else {
+ pc += 1;
+ }
+ break;
+ case 2:
+ // 4 byte displacement, plus a SIB byte when rm == 4.
+ pc += (rm == 4) ? 5 : 4;
+ break;
+ case 3:
+ break;
+ }
+ }
+
+ VLOG(signals) << "calculated X86 instruction size is " << (pc - instruction_start);
+ return pc - instruction_start;
+}
+
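Illustrative aside (not part of the patch): as a sanity check on the decoder logic above, here is a minimal standalone sketch that hand-decodes the two instruction forms the handlers below rely on. SimpleLength is a hypothetical, trimmed-down stand-in for GetInstructionSize, covering only these encodings.

```cpp
#include <cassert>
#include <cstdint>

// Trimmed decoder: an optional fs segment prefix, one opcode byte, a ModR/M
// byte, and an optional displacement. No REX handling (x86-32 only).
static uint32_t SimpleLength(const uint8_t* pc) {
  const uint8_t* start = pc;
  if (*pc == 0x64) ++pc;                     // Skip an fs: segment prefix.
  ++pc;                                      // Opcode byte.
  uint8_t modrm = *pc++;                     // Both forms have a ModR/M byte.
  uint8_t mod = modrm >> 6;
  uint8_t rm = modrm & 7;
  if (mod == 0 && rm == 5) pc += 4;          // disp32, no base register.
  if (mod == 1) pc += (rm == 4) ? 2 : 1;     // SIB + disp8, or just disp8.
  return pc - start;
}

int main() {
  const uint8_t test_eax[] = {0x85, 0x00};   // test eax, [eax] (null check)
  const uint8_t mov_fs[] = {0x64, 0x8B, 0x05, 0x8C, 0x00, 0x00, 0x00};
  assert(SimpleLength(test_eax) == 2);       // opcode + ModR/M
  assert(SimpleLength(mov_fs) == 7);         // prefix + opcode + ModR/M + disp32
  return 0;
}
```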
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ *out_sp = static_cast<uintptr_t>(uc->uc_mcontext.gregs[REG_ESP]);
+ VLOG(signals) << "sp: " << std::hex << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However it's in EAX.
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86));
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(uc->uc_mcontext.gregs[REG_EAX]);
+ } else {
+ // The method is at the top of the stack.
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]);
+ }
+
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->uc_mcontext.gregs[REG_EIP]);
+ VLOG(signals) << HexDump(pc, 32, true, "PC ");
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ *out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
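Illustrative aside (not part of the patch): the EAX-versus-stack decision above can be hard to follow out of context. This toy sketch, with a made-up constant standing in for GetStackOverflowReservedBytes(kX86), shows the test that distinguishes an implicit stack-overflow fault from an ordinary one.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for GetStackOverflowReservedBytes(kX86).
static const uintptr_t kReserved = 32 * 1024;

// On a stack overflow the implicit check faults at exactly sp - kReserved,
// and the stack itself is unreadable, so the ArtMethod* must come from EAX
// instead of sp[0].
static bool IsImplicitStackOverflow(uintptr_t fault_addr, uintptr_t sp) {
  return fault_addr == sp - kReserved;
}

int main() {
  uintptr_t sp = 0x40000;
  printf("%s\n", IsImplicitStackOverflow(sp - kReserved, sp)
                     ? "method from EAX" : "method from sp[0]");
  printf("%s\n", IsImplicitStackOverflow(0x0, sp)  // e.g. a null check fault
                     ? "method from EAX" : "method from sp[0]");
  return 0;
}
```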
bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->uc_mcontext.gregs[REG_EIP]);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->uc_mcontext.gregs[REG_ESP]);
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ // We need to arrange for the signal handler to return to the null pointer
+ // exception generator. The return address must be the address of the
+ // next instruction (this instruction + instruction size). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uint32_t retaddr = reinterpret_cast<uint32_t>(pc + instr_size);
+ uint32_t* next_sp = reinterpret_cast<uint32_t*>(sp - 4);
+ *next_sp = retaddr;
+ uc->uc_mcontext.gregs[REG_ESP] = reinterpret_cast<uint32_t>(next_sp);
+
+ uc->uc_mcontext.gregs[REG_EIP] =
+ reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
+}
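Illustrative aside (not part of the patch): the push-and-redirect dance above is subtle, since returning from the signal handler must look exactly like a call into the exception thrower. A toy model follows; FakeRegs and all values are made up and do not reflect the real ucontext layout.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

struct FakeRegs { uint32_t eip; uint32_t esp; };  // Stand-in for gregs[].

// Push the address of the instruction after the faulting one, then point
// EIP at the handler entry point, so "returning" from the fault calls it.
static void Redirect(FakeRegs* regs, uint8_t* stack, uint32_t insn_size,
                     uint32_t entry_point) {
  uint32_t retaddr = regs->eip + insn_size;   // Next instruction.
  regs->esp -= 4;                             // Reserve a stack slot.
  memcpy(stack + regs->esp, &retaddr, sizeof(retaddr));
  regs->eip = entry_point;
}

int main() {
  uint8_t stack[64] = {};
  FakeRegs regs = {0x1000, sizeof(stack)};    // Fake PC; SP at stack top.
  Redirect(&regs, stack, 2, 0x2000);          // 2-byte faulting instruction.
  uint32_t ret;
  memcpy(&ret, stack + regs.esp, sizeof(ret));
  printf("eip=0x%x esp=%u retaddr=0x%x\n", regs.eip, regs.esp, ret);
  return 0;
}
```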
+
+// A suspend check is done using the following instruction sequence:
+// 0xf720f1df: 648B058C000000 mov eax, fs:[0x8c] ; suspend_trigger
+// .. some intervening instructions.
+// 0xf720f1e6: 8500 test eax, [eax]
+
+// The offset from fs is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault.
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the mov eax, fs:[xxx]
+ // where xxx is the offset of the suspend trigger.
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
+
+ VLOG(signals) << "Checking for suspension point";
+ uint8_t checkinst1[] = {0x64, 0x8b, 0x05, static_cast<uint8_t>(trigger & 0xff),
+ static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
+ uint8_t checkinst2[] = {0x85, 0x00};
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->uc_mcontext.gregs[REG_EIP]);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->uc_mcontext.gregs[REG_ESP]);
+
+ if (pc[0] != checkinst2[0] || pc[1] != checkinst2[1]) {
+ // Second instruction is not correct (test eax,[eax]).
+ VLOG(signals) << "Not a suspension point";
+ return false;
+ }
+
+ // The first instruction can be a little further up the instruction stream
+ // due to load hoisting in the compiler.
+ uint8_t* limit = pc - 100; // Compiler will hoist to a max of 20 instructions.
+ uint8_t* ptr = pc - sizeof(checkinst1);
+ bool found = false;
+ while (ptr > limit) {
+ if (memcmp(ptr, checkinst1, sizeof(checkinst1)) == 0) {
+ found = true;
+ break;
+ }
+ ptr -= 1;
+ }
+
+ if (found) {
+ VLOG(signals) << "suspend check match";
+
+ // We need to arrange for the signal handler to return to the test
+ // suspend entry point. The return address must be the address of the
+ // next instruction (this instruction + 2). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uint32_t retaddr = reinterpret_cast<uint32_t>(pc + 2);
+ uint32_t* next_sp = reinterpret_cast<uint32_t*>(sp - 4);
+ *next_sp = retaddr;
+ uc->uc_mcontext.gregs[REG_ESP] = reinterpret_cast<uint32_t>(next_sp);
+
+ uc->uc_mcontext.gregs[REG_EIP] = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ VLOG(signals) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
+ VLOG(signals) << "Not a suspend check match, first instruction mismatch";
return false;
}
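Illustrative aside (not part of the patch): the backward scan above is easy to get wrong at the boundaries. Here is a self-contained sketch of the same idea, searching a window before the faulting "test eax, [eax]" for the hoisted "mov eax, fs:[trigger]" pattern; the buffer contents are made up for the demo.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Scan backwards from the faulting instruction for an n-byte pattern,
// staying within 'window' bytes and within the buffer.
static bool FindPattern(const uint8_t* pc, const uint8_t* buf_start,
                        const uint8_t* pattern, size_t n, size_t window) {
  const uint8_t* limit =
      (static_cast<size_t>(pc - buf_start) > window) ? pc - window : buf_start;
  for (const uint8_t* p = pc - n; p >= limit; --p) {
    if (memcmp(p, pattern, n) == 0) return true;
  }
  return false;
}

int main() {
  const uint8_t mov_fs[] = {0x64, 0x8B, 0x05, 0x8C, 0x00, 0x00, 0x00};
  uint8_t code[32] = {};
  memcpy(code + 3, mov_fs, sizeof(mov_fs));  // Hoisted mov, then filler.
  code[12] = 0x85;                           // test eax, [eax] ...
  code[13] = 0x00;                           // ... faults at code + 12.
  printf("%s\n", FindPattern(code + 12, code, mov_fs, sizeof(mov_fs), 100)
                     ? "suspend check" : "not a suspend check");
  return 0;
}
```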
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
+// The stack overflow check is done using the following instruction:
+// test eax, [esp+ -xxx]
+// where 'xxx' is the size of the overflow area.
+//
+// This is done before any frame is established in the method. The return
+// address for the previous method is on the stack at ESP.
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uintptr_t sp = static_cast<uintptr_t>(uc->uc_mcontext.gregs[REG_ESP]);
+
+ uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86);
+
+ Thread* self = Thread::Current();
+ uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
+ Thread::kStackOverflowProtectedSize;
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ // We know this is a stack overflow. We need to move the sp to the overflow region
+ // that exists below the protected region. Determine the address of the next
+ // available valid address below the protected region.
+ VLOG(signals) << "setting sp to overflow region at " << std::hex << pregion;
+
+ // Since the compiler puts the implicit overflow
+ // check before the callee save instructions, the SP is already pointing to
+ // the previous frame.
+
+ // Tell the stack overflow code where the new stack pointer should be.
+ uc->uc_mcontext.gregs[REG_EAX] = pregion;
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ uc->uc_mcontext.gregs[REG_EIP] = reinterpret_cast<uintptr_t>(
+ art_quick_throw_stack_overflow_from_signal);
+
+ return true;
}
} // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 24b9e46..68f46ad 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -173,6 +173,21 @@
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
+// On entry to this function, EAX contains the ESP value for the overflow region.
+DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
+ // Here, the ESP is above the protected region. We need to create a
+ // callee save frame and then move ESP down to the overflow region.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %esp, %ecx // get current stack pointer
+ mov %eax, %esp // move ESP to the overflow region.
+ PUSH ecx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
+ call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
+ int3 // unreached
+END_FUNCTION art_quick_throw_stack_overflow_from_signal
+
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
diff --git a/runtime/arch/x86_64/fault_handler_x86_64.cc b/runtime/arch/x86_64/fault_handler_x86_64.cc
index 233d3c7..88ae7f3 100644
--- a/runtime/arch/x86_64/fault_handler_x86_64.cc
+++ b/runtime/arch/x86_64/fault_handler_x86_64.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 3112bc0..f99ce07 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -29,9 +29,7 @@
#include "mirror/object-inl.h"
#include "object_utils.h"
#include "scoped_thread_state_change.h"
-#ifdef HAVE_ANDROID_OS
#include "sigchain.h"
-#endif
#include "verify_object-inl.h"
namespace art {
@@ -47,6 +45,7 @@
// Signal handler called on SIGSEGV.
static void art_fault_handler(int sig, siginfo_t* info, void* context) {
+ // std::cout << "handling fault in ART handler\n";
fault_manager.HandleFault(sig, info, context);
}
@@ -55,9 +54,7 @@
}
FaultManager::~FaultManager() {
-#ifdef HAVE_ANDROID_OS
UnclaimSignalChain(SIGSEGV);
-#endif
sigaction(SIGSEGV, &oldaction_, nullptr); // Restore old handler.
}
@@ -72,11 +69,12 @@
#endif
// Set our signal handler now.
- sigaction(SIGSEGV, &action, &oldaction_);
-#ifdef HAVE_ANDROID_OS
+ int e = sigaction(SIGSEGV, &action, &oldaction_);
+ if (e != 0) {
+ VLOG(signals) << "Failed to claim SEGV: " << strerror(errno);
+ }
// Make sure our signal handler is called before any user handlers.
ClaimSignalChain(SIGSEGV, &oldaction_);
-#endif
}
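Illustrative aside (not part of the patch): for readers unfamiliar with the claiming step, this is roughly what a plain SA_SIGINFO registration looks like without the sigchain layer. ART itself routes through ClaimSignalChain; this sketch uses only raw sigaction.

```cpp
#include <cerrno>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static void fault_handler(int sig, siginfo_t* info, void* context) {
  // A real handler must stay async-signal-safe (see the logging caveat above).
  _Exit(1);
}

int main() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  action.sa_sigaction = fault_handler;
  sigemptyset(&action.sa_mask);
  action.sa_flags = SA_SIGINFO | SA_ONSTACK;  // Run on the alternate stack.
  struct sigaction oldaction;
  if (sigaction(SIGSEGV, &action, &oldaction) != 0) {
    fprintf(stderr, "Failed to claim SEGV: %s\n", strerror(errno));
  }
  return 0;
}
```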
void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
@@ -84,8 +82,12 @@
//
// If malloc calls abort, it will be holding its lock.
// If the handler tries to call malloc, it will deadlock.
+
+ // Also, there is only an 8K stack available here so logging can cause memory
+ // overwrite issues if you are unlucky. If you want to enable logging and
+ // are getting crashes, allocate more space for the alternate signal stack.
VLOG(signals) << "Handling fault";
- if (IsInGeneratedCode(context, true)) {
+ if (IsInGeneratedCode(info, context, true)) {
VLOG(signals) << "in generated code, looking for handler";
for (const auto& handler : generated_code_handlers_) {
VLOG(signals) << "invoking Action on handler " << handler;
@@ -101,11 +103,8 @@
}
art_sigsegv_fault();
-#ifdef HAVE_ANDROID_OS
+ // Pass this on to the next handler in the chain, or the default if none.
InvokeUserSignalHandler(sig, info, context);
-#else
- oldaction_.sa_sigaction(sig, info, context);
-#endif
}
void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
@@ -132,7 +131,7 @@
// This function is called within the signal handler. It checks that
// the mutator_lock is held (shared). No annotalysis is done.
-bool FaultManager::IsInGeneratedCode(void* context, bool check_dex_pc) {
+bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool check_dex_pc) {
// We can only be running Java code in the current thread if it
// is in Runnable state.
VLOG(signals) << "Checking for generated code";
@@ -161,7 +160,7 @@
// Get the architecture specific method address and return address. These
// are in architecture specific files in arch/<arch>/fault_handler_<arch>.
- GetMethodAndReturnPCAndSP(context, &method_obj, &return_pc, &sp);
+ GetMethodAndReturnPCAndSP(siginfo, context, &method_obj, &return_pc, &sp);
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
@@ -242,12 +241,12 @@
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- if (manager_->IsInGeneratedCode(context, false)) {
+ if (manager_->IsInGeneratedCode(siginfo, context, false)) {
LOG(ERROR) << "Dumping java stack trace for crash in generated code";
mirror::ArtMethod* method = nullptr;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
- manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp);
+ manager_->GetMethodAndReturnPCAndSP(siginfo, context, &method, &return_pc, &sp);
Thread* self = Thread::Current();
// Inside of generated code, sp[0] is the method, so sp is the frame.
StackReference<mirror::ArtMethod>* frame =
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 026f5b9..71c9977 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -43,9 +43,10 @@
void HandleFault(int sig, siginfo_t* info, void* context);
void AddHandler(FaultHandler* handler, bool generated_code);
void RemoveHandler(FaultHandler* handler);
- void GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+ void GetMethodAndReturnPCAndSP(siginfo_t* siginfo, void* context, mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp);
- bool IsInGeneratedCode(void *context, bool check_dex_pc) NO_THREAD_SAFETY_ANALYSIS;
+ bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
+ NO_THREAD_SAFETY_ANALYSIS;
private:
std::vector<FaultHandler*> generated_code_handlers_;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index e1e133f..a5ade3e 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -258,38 +258,6 @@
verify_ = true;
image_isa_ = kRuntimeISA;
- // Default to explicit checks. Switch off with -implicit-checks:.
- // or setprop dalvik.vm.implicit_checks check1,check2,...
-#ifdef HAVE_ANDROID_OS
- {
- char buf[PROP_VALUE_MAX];
- property_get("dalvik.vm.implicit_checks", buf, "null,stack");
- std::string checks(buf);
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- }
- }
- }
-#else
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
-#endif
-
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -305,6 +273,7 @@
Exit(0);
} else if (StartsWith(option, "-Xbootclasspath:")) {
boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data();
+ LOG(INFO) << "setting boot class path to " << boot_class_path_string_;
} else if (option == "-classpath" || option == "-cp") {
// TODO: support -Djava.class.path
i++;
@@ -573,54 +542,6 @@
if (!ParseUnsignedInteger(option, ':', &profiler_options_.max_stack_depth_)) {
return false;
}
- } else if (StartsWith(option, "-implicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- } else {
- return false;
- }
- }
- } else if (StartsWith(option, "-explicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = 0;
- } else if (val == "null") {
- explicit_checks_ |= kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ |= kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ |= kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else {
- return false;
- }
- }
} else if (StartsWith(option, "-Xcompiler:")) {
if (!ParseStringAfterChar(option, ':', &compiler_executable_)) {
return false;
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index d0f3c12..fe719b1 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -84,11 +84,6 @@
bool verify_;
InstructionSet image_isa_;
- static constexpr uint32_t kExplicitNullCheck = 1;
- static constexpr uint32_t kExplicitSuspendCheck = 2;
- static constexpr uint32_t kExplicitStackOverflowCheck = 4;
- uint32_t explicit_checks_;
-
private:
ParsedOptions() {}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index efa205e..f165ffa 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -140,7 +140,10 @@
suspend_handler_(nullptr),
stack_overflow_handler_(nullptr),
verify_(false),
- target_sdk_version_(0) {
+ target_sdk_version_(0),
+ implicit_null_checks_(false),
+ implicit_so_checks_(false),
+ implicit_suspend_checks_(false) {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
callee_save_methods_[i] = nullptr;
}
@@ -580,41 +583,6 @@
GetInstrumentation()->ForceInterpretOnly();
}
- bool implicit_checks_supported = false;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- implicit_checks_supported = true;
- break;
- default:
- break;
- }
-
- if (!options->interpreter_only_ && implicit_checks_supported &&
- (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
- ParsedOptions::kExplicitNullCheck |
- ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler)) {
- fault_manager.Init();
-
- // These need to be in a specific order. The null point check handler must be
- // after the suspend check and stack overflow check handlers.
- if ((options->explicit_checks_ & ParsedOptions::kExplicitSuspendCheck) == 0) {
- suspend_handler_ = new SuspensionHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitStackOverflowCheck) == 0) {
- stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitNullCheck) == 0) {
- null_pointer_handler_ = new NullPointerHandler(&fault_manager);
- }
-
- if (kEnableJavaStackTraceHandler) {
- new JavaStackTraceHandler(&fault_manager);
- }
- }
-
heap_ = new gc::Heap(options->heap_initial_size_,
options->heap_growth_limit_,
options->heap_min_free_,
@@ -645,6 +613,42 @@
BlockSignals();
InitPlatformSignalHandlers();
+ // Change the implicit checks flags based on runtime architecture.
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ implicit_null_checks_ = true;
+ implicit_so_checks_ = true;
+ break;
+ default:
+ // Keep the defaults.
+ break;
+ }
+
+ if (!options->interpreter_only_ &&
+ (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_)) {
+ fault_manager.Init();
+
+ // These need to be in a specific order. The null pointer check handler must be
+ // after the suspend check and stack overflow check handlers.
+ if (implicit_suspend_checks_) {
+ suspend_handler_ = new SuspensionHandler(&fault_manager);
+ }
+
+ if (implicit_so_checks_) {
+ stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
+ }
+
+ if (implicit_null_checks_) {
+ null_pointer_handler_ = new NullPointerHandler(&fault_manager);
+ }
+
+ if (kEnableJavaStackTraceHandler) {
+ new JavaStackTraceHandler(&fault_manager);
+ }
+ }
+
java_vm_ = new JavaVMExt(this, options.get());
Thread::Startup();
@@ -1216,37 +1220,6 @@
argv->push_back("--compiler-filter=interpret-only");
}
- argv->push_back("--runtime-arg");
- std::string checkstr = "-implicit-checks";
-
- int nchecks = 0;
- char checksep = ':';
-
- if (!ExplicitNullChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "null";
- ++nchecks;
- }
- if (!ExplicitSuspendChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "suspend";
- ++nchecks;
- }
-
- if (!ExplicitStackOverflowChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "stack";
- ++nchecks;
- }
-
- if (nchecks == 0) {
- checkstr += ":none";
- }
- argv->push_back(checkstr);
-
// Make the dex2oat instruction set match that of the launching runtime. If we have multiple
// architecture support, dex2oat may be compiled as a different instruction-set than that
// currently being executed.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f839be1..28f1217 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -596,6 +596,11 @@
// Specifies target SDK version to allow workarounds for certain API levels.
int32_t target_sdk_version_;
+ // Implicit checks flags.
+ bool implicit_null_checks_; // NullPointer checks are implicit.
+ bool implicit_so_checks_; // StackOverflow checks are implicit.
+ bool implicit_suspend_checks_; // Thread suspension checks are implicit.
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7827dfb..e5ccde5 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -231,47 +231,99 @@
return stack_size;
}
+// Global variable to prevent the compiler optimizing away the page reads for the stack.
+byte dont_optimize_this;
+
// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_end_. Just below that
// is the StackOverflow reserved region used when creating the StackOverflow
// exception.
+//
+// There is a little complexity here that deserves a special mention. When running on the
+// host (glibc), the process's main thread's stack is allocated with a special flag
+// to prevent memory being allocated when it's not needed. This flag makes the
+// kernel allocate memory for the stack only as it grows downwards. Because we
+// want to put an mprotected region far away from that at the stack top, we need
+// to make sure the pages for the stack are mapped in before we call mprotect. We do
+// this by reading every page from the stack bottom (highest address) to the stack top.
+// We then madvise this away.
void Thread::InstallImplicitProtection(bool is_main_stack) {
byte* pregion = tlsPtr_.stack_end;
+ byte* stack_lowmem = tlsPtr_.stack_begin;
+ byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&pregion) &
+ ~(kPageSize - 1)); // Page containing current top of stack.
+#ifndef HAVE_ANDROID_OS
+ bool running_on_host = true;
+#else
+ bool running_on_host = false;
+#endif
+
+ if (running_on_host) {
+ // On Host, we need to map in the main stack. This must be done by reading from the
+ // current stack pointer downwards as the stack is mapped using VM_GROWSDOWN
+ // in the kernel. Any access more than a page below the current SP will cause
+ // a segv.
+ if (is_main_stack) {
+ // First we need to unprotect the protected region because this may
+ // be called more than once for a particular stack and we will crash
+ // if we try to read the protected page.
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_READ);
+
+ // Read every page from the high address to the low.
+ for (byte* p = stack_top; p > stack_lowmem; p -= kPageSize) {
+ dont_optimize_this = *p;
+ }
+ }
+ }
+
+ // Check and place a marker word at the lowest usable address in the stack. This
+ // is used to prevent a double protection.
constexpr uint32_t kMarker = 0xdadadada;
uintptr_t *marker = reinterpret_cast<uintptr_t*>(pregion);
if (*marker == kMarker) {
- // The region has already been set up.
+ // The region has already been set up. But on the main stack on the host we have
+ // removed the protected region in order to read the stack memory. We need to put
+ // this back again.
+ if (is_main_stack && running_on_host) {
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_NONE);
+ madvise(stack_lowmem, stack_top - stack_lowmem, MADV_DONTNEED);
+ }
return;
}
// Add marker so that we can detect a second attempt to do this.
*marker = kMarker;
- pregion -= kStackOverflowProtectedSize;
-
- // Touch the pages in the region to map them in. Otherwise mprotect fails. Only
- // need to do this on the main stack. We only need to touch one byte per page.
- if (is_main_stack) {
- byte* start = pregion;
- byte* end = pregion + kStackOverflowProtectedSize;
- while (start < end) {
- *start = static_cast<byte>(0);
- start += kPageSize;
+ if (!running_on_host) {
+ // Running on Android, stacks are mapped cleanly. The protected region for the
+ // main stack just needs to be mapped in. We do this by writing one byte per page.
+ for (byte* p = pregion - kStackOverflowProtectedSize; p < pregion; p += kPageSize) {
+ *p = 0;
}
}
+ pregion -= kStackOverflowProtectedSize;
+
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
+
if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. Reason:"
<< strerror(errno);
}
// Tell the kernel that we won't be needing these pages any more.
+ // NB. madvise will probably write zeroes into the memory (on linux it does).
if (is_main_stack) {
- madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ if (running_on_host) {
+ // On the host, it's the whole stack (minus a page to prevent overwrite of stack top).
+ madvise(stack_lowmem, stack_top - stack_lowmem - kPageSize, MADV_DONTNEED);
+ } else {
+ // On Android, just the protected region.
+ madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ }
}
}
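Illustrative aside (not part of the patch): the touch-then-protect-then-discard pattern above is worth seeing in isolation. This sketch applies it to an anonymous mapping; the sizes are stand-ins for the ART constants, and the real code operates on the thread stack rather than a fresh mmap.

```cpp
#include <cstddef>
#include <cstdint>
#include <sys/mman.h>
#include <unistd.h>

volatile uint8_t dont_optimize_this_example;

// Touch every page top-down (as a VM_GROWSDOWN stack requires), protect a
// guard region at the low end, then tell the kernel to drop the rest.
static void ProtectLowEnd(uint8_t* low, uint8_t* high, size_t guard_size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  for (uint8_t* p = high - page; p >= low; p -= page) {
    dont_optimize_this_example = *p;        // Fault the page in.
  }
  mprotect(low, guard_size, PROT_NONE);     // The protected "red zone".
  madvise(low + guard_size, (high - low) - guard_size, MADV_DONTNEED);
}

int main() {
  const size_t size = 64 * 1024, guard = 16 * 1024;  // Hypothetical sizes.
  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  uint8_t* region = static_cast<uint8_t*>(mem);
  ProtectLowEnd(region, region + size, guard);
  munmap(region, size);
  return 0;
}
```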
@@ -532,13 +584,17 @@
// Install the protected region if we are doing implicit overflow checks.
if (implicit_stack_check) {
if (is_main_thread) {
- // The main thread has a 16K protected region at the bottom. We need
+ size_t guardsize;
+ pthread_attr_t attributes;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, &guardsize), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), "guard size query");
+ // The main thread might have a protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
- constexpr uint32_t kDelta = 16 * KB;
- tlsPtr_.stack_begin += kDelta;
- tlsPtr_.stack_end += kDelta;
- tlsPtr_.stack_size -= kDelta;
+ tlsPtr_.stack_begin += guardsize;
+ tlsPtr_.stack_end += guardsize;
+ tlsPtr_.stack_size -= guardsize;
}
InstallImplicitProtection(is_main_thread);
}
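Illustrative aside (not part of the patch): if the pthread_attr calls above are unfamiliar, a freshly initialized attribute object reports the guard size the C library would apply to a new thread, which is what the patch now uses in place of the hard-coded 16K delta. A minimal host-side query:

```cpp
#include <cstddef>
#include <cstdio>
#include <pthread.h>

int main() {
  pthread_attr_t attributes;
  size_t guardsize = 0;
  pthread_attr_init(&attributes);
  pthread_attr_getguardsize(&attributes, &guardsize);  // Typically one page.
  pthread_attr_destroy(&attributes);
  printf("default guard size: %zu bytes\n", guardsize);
  return 0;
}
```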
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index ee66ccc..518211b 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -35,8 +35,8 @@
void Thread::SetUpAlternateSignalStack() {
// Create and set an alternate signal stack.
stack_t ss;
- ss.ss_sp = new uint8_t[SIGSTKSZ];
- ss.ss_size = SIGSTKSZ;
+ ss.ss_sp = new uint8_t[SIGSTKSZ * 2]; // NB. this is 16K.
+ ss.ss_size = SIGSTKSZ * 2;
ss.ss_flags = 0;
CHECK(ss.ss_sp != NULL);
SigAltStack(&ss, NULL);
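Illustrative aside (not part of the patch): the doubling matters because the fault handlers above now walk stack memory and may log. A standalone equivalent of the setup, using raw sigaltstack rather than ART's SigAltStack wrapper:

```cpp
#include <csignal>
#include <cstdint>

int main() {
  stack_t ss;
  ss.ss_sp = new uint8_t[SIGSTKSZ * 2];  // SIGSTKSZ is 8K on 32-bit Linux.
  ss.ss_size = SIGSTKSZ * 2;
  ss.ss_flags = 0;
  sigaltstack(&ss, nullptr);             // Install the alternate stack.
  return 0;
}
```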
@@ -56,7 +56,7 @@
// Tell the kernel to stop using it.
ss.ss_sp = NULL;
ss.ss_flags = SS_DISABLE;
- ss.ss_size = SIGSTKSZ; // Avoid ENOMEM failure with Mac OS' buggy libc.
+ ss.ss_size = SIGSTKSZ * 2; // Avoid ENOMEM failure with Mac OS' buggy libc.
SigAltStack(&ss, NULL);
// Free it.