Merge "Move explicit GC after we are done initalizing all classes."
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 051cfb6..1823366 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -314,7 +314,7 @@
method_inliner_map_.get(),
compiler_kind, instruction_set,
instruction_set_features,
- true, new CompilerDriver::DescriptorSet,
+ true, new std::set<std::string>,
2, true, true, timer_.get()));
}
// We typically don't generate an image in unit tests; disable this optimization by default.
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 4f8c1d4..c44a116 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -622,11 +622,10 @@
uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
void* llvm_compilation_unit) {
- std::string method_name = PrettyMethod(method_idx, dex_file);
- VLOG(compiler) << "Compiling " << method_name << "...";
+ VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
if (code_item->insns_size_in_code_units_ >= 0x10000) {
LOG(INFO) << "Method size exceeds compiler limits: " << code_item->insns_size_in_code_units_
- << " in " << method_name;
+ << " in " << PrettyMethod(method_idx, dex_file);
return NULL;
}
@@ -658,7 +657,7 @@
cu.compiler_flip_match = false;
bool use_match = !cu.compiler_method_match.empty();
bool match = use_match && (cu.compiler_flip_match ^
- (method_name.find(cu.compiler_method_match) != std::string::npos));
+ (PrettyMethod(method_idx, dex_file).find(cu.compiler_method_match) != std::string::npos));
if (!use_match || match) {
cu.disable_opt = kCompilerOptimizerDisableFlags;
cu.enable_debug = kCompilerDebugFlags;
@@ -669,7 +668,7 @@
if (gVerboseMethods.size() != 0) {
cu.verbose = false;
for (size_t i = 0; i < gVerboseMethods.size(); ++i) {
- if (method_name.find(gVerboseMethods[i])
+ if (PrettyMethod(method_idx, dex_file).find(gVerboseMethods[i])
!= std::string::npos) {
cu.verbose = true;
break;
@@ -711,7 +710,8 @@
class_loader, dex_file);
if (!CanCompileMethod(method_idx, dex_file, cu)) {
- VLOG(compiler) << cu.instruction_set << ": Cannot compile method : " << method_name;
+ VLOG(compiler) << cu.instruction_set << ": Cannot compile method : "
+ << PrettyMethod(method_idx, dex_file);
return nullptr;
}
@@ -719,7 +719,7 @@
std::string skip_message;
if (cu.mir_graph->SkipCompilation(&skip_message)) {
VLOG(compiler) << cu.instruction_set << ": Skipping method : "
- << method_name << " Reason = " << skip_message;
+ << PrettyMethod(method_idx, dex_file) << " Reason = " << skip_message;
return nullptr;
}
@@ -730,7 +730,7 @@
/* For non-leaf methods check if we should skip compilation when the profiler is enabled. */
if (cu.compiler_driver->ProfilePresent()
&& !cu.mir_graph->MethodIsLeaf()
- && cu.mir_graph->SkipCompilationByName(method_name)) {
+ && cu.mir_graph->SkipCompilationByName(PrettyMethod(method_idx, dex_file))) {
return nullptr;
}
@@ -749,7 +749,7 @@
if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
if (cu.arena_stack.PeakBytesAllocated() > 1 * 1024 * 1024) {
MemStats stack_stats(cu.arena_stack.GetPeakStats());
- LOG(INFO) << method_name << " " << Dumpable<MemStats>(stack_stats);
+ LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(stack_stats);
}
}
cu.arena_stack.Reset();
@@ -757,7 +757,8 @@
CompiledMethod* result = NULL;
if (cu.mir_graph->PuntToInterpreter()) {
- VLOG(compiler) << cu.instruction_set << ": Punted method to interpreter: " << method_name;
+ VLOG(compiler) << cu.instruction_set << ": Punted method to interpreter: "
+ << PrettyMethod(method_idx, dex_file);
return nullptr;
}
@@ -768,21 +769,21 @@
cu.NewTimingSplit("Cleanup");
if (result) {
- VLOG(compiler) << cu.instruction_set << ": Compiled " << method_name;
+ VLOG(compiler) << cu.instruction_set << ": Compiled " << PrettyMethod(method_idx, dex_file);
} else {
- VLOG(compiler) << cu.instruction_set << ": Deferred " << method_name;
+ VLOG(compiler) << cu.instruction_set << ": Deferred " << PrettyMethod(method_idx, dex_file);
}
if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
if (cu.arena.BytesAllocated() > (1 * 1024 *1024)) {
MemStats mem_stats(cu.arena.GetMemStats());
- LOG(INFO) << method_name << " " << Dumpable<MemStats>(mem_stats);
+ LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
}
}
if (cu.enable_debug & (1 << kDebugShowSummaryMemoryUsage)) {
LOG(INFO) << "MEMINFO " << cu.arena.BytesAllocated() << " " << cu.mir_graph->GetNumBlocks()
- << " " << method_name;
+ << " " << PrettyMethod(method_idx, dex_file);
}
cu.EndTiming();
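The frontend.cc change drops the eagerly built method_name and calls PrettyMethod() only on the paths that actually print it, so the common non-verbose compile never pays for the string. A minimal sketch of the trade-off, with hypothetical names (PrettyName() stands in for art::PrettyMethod()):

#include <cstdio>
#include <string>

static std::string PrettyName(int method_idx) {
  return "method#" + std::to_string(method_idx);  // assume: expensive formatting
}

static void Compile(int method_idx, bool verbose) {
  // Fast path: no string is ever materialized.
  if (verbose) {
    // Rare diagnostic path: recompute the name on demand.
    std::printf("Compiling %s...\n", PrettyName(method_idx).c_str());
  }
}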
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 6aee563..dee1361 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -172,10 +172,18 @@
bottom_block->first_mir_insn = insn;
bottom_block->last_mir_insn = orig_block->last_mir_insn;
- /* If this block was terminated by a return, the flag needs to go with the bottom block */
+ /* If this block was terminated by a return, a conditional branch, or a throw,
+ * the flags need to go with the bottom block.
+ */
bottom_block->terminated_by_return = orig_block->terminated_by_return;
orig_block->terminated_by_return = false;
+ bottom_block->conditional_branch = orig_block->conditional_branch;
+ orig_block->conditional_branch = false;
+
+ bottom_block->explicit_throw = orig_block->explicit_throw;
+ orig_block->explicit_throw = false;
+
/* Handle the taken path */
bottom_block->taken = orig_block->taken;
if (bottom_block->taken != NullBasicBlockId) {
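The split now moves all three terminator flags, not just terminated_by_return, so later analyses see how the bottom block actually ends. A standalone sketch of the pattern, with a hypothetical BasicBlock:

// Hypothetical BasicBlock: when a block is split in two, the flags describing
// its terminating instruction belong to the half that now contains it.
struct BasicBlock {
  bool terminated_by_return = false;
  bool conditional_branch = false;
  bool explicit_throw = false;
};

static void MoveTerminatorFlags(BasicBlock* orig, BasicBlock* bottom) {
  bottom->terminated_by_return = orig->terminated_by_return;
  orig->terminated_by_return = false;
  bottom->conditional_branch = orig->conditional_branch;
  orig->conditional_branch = false;
  bottom->explicit_throw = orig->explicit_throw;
  orig->explicit_throw = false;
}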
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 23ceb56..c7dd85c 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -692,10 +692,13 @@
// Include the rest of the instructions
bb->last_mir_insn = bb_next->last_mir_insn;
/*
- * If lower-half of pair of blocks to combine contained a return, move the flag
- * to the newly combined block.
+ * If the lower half of the pair of blocks being combined contained
+ * a return, a conditional branch, or an explicit throw,
+ * move the flags to the newly combined block.
*/
bb->terminated_by_return = bb_next->terminated_by_return;
+ bb->conditional_branch = bb_next->conditional_branch;
+ bb->explicit_throw = bb_next->explicit_throw;
/*
* NOTE: we aren't updating all dataflow info here. Should either make sure this pass
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index b133991..4ba3c4b 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -354,13 +354,14 @@
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm);
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm);
NewLIR0(kPseudoMethodEntry);
- const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm) -
- Thread::kStackOverflowSignalReservedBytes;
+ const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm);
bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
+ bool generate_explicit_stack_overflow_check = large_frame ||
+ !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
if (!skip_overflow_check) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ if (generate_explicit_stack_overflow_check) {
if (!large_frame) {
/* Load stack limit */
LockTemp(rs_r12);
@@ -399,7 +400,7 @@
const int spill_size = spill_count * 4;
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ if (generate_explicit_stack_overflow_check) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
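The entry generators now share one decision: an explicit overflow check is emitted when the frame outgrows the reserved usable bytes (an implicit probe could jump past the protected region) or when implicit checks are disabled. A condensed sketch of that logic; the threshold value here is an assumption, the real one comes from stack_checks.h:

#include <cstddef>

static const size_t kLargeFrameSize = 20 * 1024;  // assumed threshold

// A stack check can be skipped only for leaf methods with small frames.
static bool SkipOverflowCheck(bool is_leaf, size_t frame_size) {
  return is_leaf && frame_size < kLargeFrameSize;
}

// When a check is needed, it must be explicit if the frame is larger than the
// reserved region or implicit checks are turned off.
static bool GenerateExplicitCheck(size_t frame_size, size_t reserved_bytes,
                                  bool implicit_checks_enabled) {
  return frame_size > reserved_bytes || !implicit_checks_enabled;
}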
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 28b747b..0538c31 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -329,16 +329,20 @@
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm64);
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm64);
NewLIR0(kPseudoMethodEntry);
+ const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64);
+ const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
+ bool generate_explicit_stack_overflow_check = large_frame ||
+ !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
const int spill_count = num_core_spills_ + num_fp_spills_;
const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf; // SP 16 byte alignment.
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ if (generate_explicit_stack_overflow_check) {
// Load stack limit
LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
} else {
@@ -365,7 +369,7 @@
}
if (!skip_overflow_check) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ if (generate_explicit_stack_overflow_check) {
class StackOverflowSlowPath: public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 4577a4c..e8cb356 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -303,7 +303,7 @@
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kMips);
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kMips);
NewLIR0(kPseudoMethodEntry);
RegStorage check_reg = AllocTemp();
RegStorage new_sp = AllocTemp();
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index f5f8671..996689a 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -219,7 +219,7 @@
* a leaf *and* our frame size < fudge factor.
*/
InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
- const bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, isa);
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
// If we are doing an implicit stack overflow check, perform the load immediately
// before the stack pointer is decremented and anything is saved.
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 24a3fe3..1f5b350 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -159,6 +159,7 @@
bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+ bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
@@ -957,6 +958,9 @@
private:
// The number of vector registers [0..N] reserved by a call to ReserveVectorRegisters
int num_reserved_vector_regs_;
+
+ void SwapBits(RegStorage result_reg, int shift, int32_t value);
+ void SwapBits64(RegStorage result_reg, int shift, int64_t value);
};
} // namespace art
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index fdc46e2..afa2ae2 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1061,6 +1061,83 @@
return true;
}
+void X86Mir2Lir::SwapBits(RegStorage result_reg, int shift, int32_t value) {
+ RegStorage r_temp = AllocTemp();
+ OpRegCopy(r_temp, result_reg);
+ OpRegImm(kOpLsr, result_reg, shift);
+ OpRegImm(kOpAnd, r_temp, value);
+ OpRegImm(kOpAnd, result_reg, value);
+ OpRegImm(kOpLsl, r_temp, shift);
+ OpRegReg(kOpOr, result_reg, r_temp);
+ FreeTemp(r_temp);
+}
+
+void X86Mir2Lir::SwapBits64(RegStorage result_reg, int shift, int64_t value) {
+ RegStorage r_temp = AllocTempWide();
+ OpRegCopy(r_temp, result_reg);
+ OpRegImm(kOpLsr, result_reg, shift);
+ RegStorage r_value = AllocTempWide();
+ LoadConstantWide(r_value, value);
+ OpRegReg(kOpAnd, r_temp, r_value);
+ OpRegReg(kOpAnd, result_reg, r_value);
+ OpRegImm(kOpLsl, r_temp, shift);
+ OpRegReg(kOpOr, result_reg, r_temp);
+ FreeTemp(r_temp);
+ FreeTemp(r_value);
+}
+
+bool X86Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
+ RegLocation rl_src_i = info->args[0];
+ RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg)
+ : LoadValue(rl_src_i, kCoreReg);
+ RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ if (size == k64) {
+ if (cu_->instruction_set == kX86_64) {
+ /* Use one bswap instruction to reverse the byte order first, then use 3 rounds of
+ swapping bits to reverse the bits in a long value x. Using bswap saves instructions
+ compared to the generic luni implementation, which needs 5 rounds of swapping bits.
+ x = bswap x
+ x = (x & 0x5555555555555555) << 1 | (x >> 1) & 0x5555555555555555;
+ x = (x & 0x3333333333333333) << 2 | (x >> 2) & 0x3333333333333333;
+ x = (x & 0x0F0F0F0F0F0F0F0F) << 4 | (x >> 4) & 0x0F0F0F0F0F0F0F0F;
+ */
+ OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
+ SwapBits64(rl_result.reg, 1, 0x5555555555555555);
+ SwapBits64(rl_result.reg, 2, 0x3333333333333333);
+ SwapBits64(rl_result.reg, 4, 0x0f0f0f0f0f0f0f0f);
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+ }
+ RegStorage r_i_low = rl_i.reg.GetLow();
+ if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
+ // The first REV will clobber rl_result.reg.GetLowReg(); save the value in a temp for the
+ // second REV.
+ r_i_low = AllocTemp();
+ OpRegCopy(r_i_low, rl_i.reg);
+ }
+ OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
+ OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
+ if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
+ FreeTemp(r_i_low);
+ }
+ SwapBits(rl_result.reg.GetLow(), 1, 0x55555555);
+ SwapBits(rl_result.reg.GetLow(), 2, 0x33333333);
+ SwapBits(rl_result.reg.GetLow(), 4, 0x0f0f0f0f);
+ SwapBits(rl_result.reg.GetHigh(), 1, 0x55555555);
+ SwapBits(rl_result.reg.GetHigh(), 2, 0x33333333);
+ SwapBits(rl_result.reg.GetHigh(), 4, 0x0f0f0f0f);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
+ SwapBits(rl_result.reg, 1, 0x55555555);
+ SwapBits(rl_result.reg, 2, 0x33333333);
+ SwapBits(rl_result.reg, 4, 0x0f0f0f0f);
+ StoreValue(rl_dest, rl_result);
+ }
+ return true;
+}
+
LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
CHECK(base_of_code_ != nullptr);
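For reference, a plain C++ rendering of the reverse-bits scheme the intrinsic emits: one byte swap followed by three masked swap rounds. This is a sketch, not ART code; __builtin_bswap64 stands in for the generated bswap instruction.

#include <cstdint>
#include <cstdio>

// Full bit reversal = reverse the byte order, then reverse the bits inside
// each byte with three rounds of masked swaps (group widths 1, 2, and 4).
static uint64_t ReverseBits64(uint64_t x) {
  x = __builtin_bswap64(x);  // what the emitted bswap does
  x = ((x & 0x5555555555555555ULL) << 1) | ((x >> 1) & 0x5555555555555555ULL);
  x = ((x & 0x3333333333333333ULL) << 2) | ((x >> 2) & 0x3333333333333333ULL);
  x = ((x & 0x0f0f0f0f0f0f0f0fULL) << 4) | ((x >> 4) & 0x0f0f0f0f0f0f0f0fULL);
  return x;
}

int main() {
  std::printf("%016llx\n", static_cast<unsigned long long>(ReverseBits64(1)));
  return 0;  // prints 8000000000000000
}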
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a7f67e7..a8e6b3c 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -30,7 +30,8 @@
namespace art {
VerificationResults::VerificationResults(const CompilerOptions* compiler_options)
- : verified_methods_lock_("compiler verified methods lock"),
+ : compiler_options_(compiler_options),
+ verified_methods_lock_("compiler verified methods lock"),
verified_methods_(),
rejected_classes_lock_("compiler rejected classes lock"),
rejected_classes_() {
@@ -106,6 +107,9 @@
return true;
}
#endif
+ if (!compiler_options_->IsCompilationEnabled()) {
+ return false;
+ }
// Don't compile class initializers, ever.
if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) {
return false;
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 7fdf767..0e7923f 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -56,6 +56,8 @@
const uint32_t access_flags);
private:
+ const CompilerOptions* const compiler_options_;
+
// Verified methods.
typedef SafeMap<MethodReference, const VerifiedMethod*,
MethodReferenceComparator> VerifiedMethodMap;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index af1fd88..645fc1c 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -328,7 +328,7 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
- bool image, DescriptorSet* image_classes, size_t thread_count,
+ bool image, std::set<std::string>* image_classes, size_t thread_count,
bool dump_stats, bool dump_passes, CumulativeLogger* timer,
std::string profile_file)
: profile_present_(false), compiler_options_(compiler_options),
@@ -678,9 +678,9 @@
static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CompilerDriver::DescriptorSet* image_classes =
- reinterpret_cast<CompilerDriver::DescriptorSet*>(arg);
- image_classes->insert(klass->GetDescriptor());
+ std::set<std::string>* image_classes = reinterpret_cast<std::set<std::string>*>(arg);
+ std::string temp;
+ image_classes->insert(klass->GetDescriptor(&temp));
return true;
}
@@ -750,22 +750,20 @@
CHECK_NE(image_classes_->size(), 0U);
}
-static void MaybeAddToImageClasses(Handle<mirror::Class> c,
- CompilerDriver::DescriptorSet* image_classes)
+static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string>* image_classes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
// Make a copy of the handle so that we don't clobber it doing Assign.
Handle<mirror::Class> klass(hs.NewHandle(c.Get()));
+ std::string temp;
while (!klass->IsObjectClass()) {
- std::string descriptor(klass->GetDescriptor());
- std::pair<CompilerDriver::DescriptorSet::iterator, bool> result =
- image_classes->insert(descriptor);
- if (result.second) {
- VLOG(compiler) << "Adding " << descriptor << " to image classes";
- } else {
- return;
+ const char* descriptor = klass->GetDescriptor(&temp);
+ std::pair<std::set<std::string>::iterator, bool> result = image_classes->insert(descriptor);
+ if (!result.second) { // Previously inserted.
+ break;
}
+ VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs(self);
MaybeAddToImageClasses(hs.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
@@ -1550,13 +1548,23 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(self->IsExceptionPending());
mirror::Throwable* exception = self->GetException(nullptr);
- std::string descriptor = exception->GetClass()->GetDescriptor();
- if (descriptor != "Ljava/lang/IllegalAccessError;" &&
- descriptor != "Ljava/lang/IncompatibleClassChangeError;" &&
- descriptor != "Ljava/lang/InstantiationError;" &&
- descriptor != "Ljava/lang/NoClassDefFoundError;" &&
- descriptor != "Ljava/lang/NoSuchFieldError;" &&
- descriptor != "Ljava/lang/NoSuchMethodError;") {
+ std::string temp;
+ const char* descriptor = exception->GetClass()->GetDescriptor(&temp);
+ const char* expected_exceptions[] = {
+ "Ljava/lang/IllegalAccessError;",
+ "Ljava/lang/IncompatibleClassChangeError;",
+ "Ljava/lang/InstantiationError;",
+ "Ljava/lang/NoClassDefFoundError;",
+ "Ljava/lang/NoSuchFieldError;",
+ "Ljava/lang/NoSuchMethodError;"
+ };
+ bool found = false;
+ for (size_t i = 0; (found == false) && (i < arraysize(expected_exceptions)); ++i) {
+ if (strcmp(descriptor, expected_exceptions[i]) == 0) {
+ found = true;
+ }
+ }
+ if (!found) {
LOG(FATAL) << "Unexpected exeption " << exception->Dump();
}
self->ClearException();
@@ -1904,12 +1912,25 @@
void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
ATRACE_CALL();
- jobject jclass_loader = manager->GetClassLoader();
const DexFile& dex_file = *manager->GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ClassLinker* class_linker = manager->GetClassLinker();
- if (SkipClass(class_linker, jclass_loader, dex_file, manager->GetDexFiles(), class_def)) {
- return;
+ jobject jclass_loader = manager->GetClassLoader();
+ {
+ // Use a scoped object access to perform the quick SkipClass check.
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
+ CHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+ return;
+ }
}
ClassReference ref(&dex_file, class_def_index);
// Skip compiling classes with generic verifier failures since they will still fail at runtime
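The rewritten exception check compares a descriptor against a fixed list with strcmp and arraysize. A self-contained sketch of the same idiom, with a local arraysize standing in for ART's helper:

#include <cstddef>
#include <cstring>

template <typename T, size_t N>
constexpr size_t arraysize(const T (&)[N]) { return N; }

// Returns true if the descriptor names one of the class-loading errors that
// are expected (and swallowed) during ahead-of-time class initialization.
static bool IsExpectedClassLoadError(const char* descriptor) {
  static const char* const kExpected[] = {
      "Ljava/lang/IllegalAccessError;",
      "Ljava/lang/IncompatibleClassChangeError;",
      "Ljava/lang/NoClassDefFoundError;",
  };
  for (size_t i = 0; i < arraysize(kExpected); ++i) {
    if (std::strcmp(descriptor, kExpected[i]) == 0) {
      return true;
    }
  }
  return false;
}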
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2a5cdb9..233c4f8 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -92,8 +92,6 @@
class CompilerDriver {
public:
- typedef std::set<std::string> DescriptorSet;
-
// Create a compiler targeting the requested "instruction_set".
// "image" should be true if image specific optimizations should be
// enabled. "image_classes" lets the compiler know what classes it
@@ -105,7 +103,7 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
- bool image, DescriptorSet* image_classes,
+ bool image, std::set<std::string>* image_classes,
size_t thread_count, bool dump_stats, bool dump_passes,
CumulativeLogger* timer, std::string profile_file = "");
@@ -152,7 +150,7 @@
return image_;
}
- DescriptorSet* GetImageClasses() const {
+ const std::set<std::string>* GetImageClasses() const {
return image_classes_.get();
}
@@ -729,7 +727,7 @@
// If image_ is true, specifies the classes that will be included in
// the image. Note if image_classes_ is NULL, all classes are
// included in the image.
- std::unique_ptr<DescriptorSet> image_classes_;
+ std::unique_ptr<std::set<std::string>> image_classes_;
size_t thread_count_;
uint64_t start_ns_;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 6b23345..3d119bb 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -123,7 +123,7 @@
}
ASSERT_TRUE(compiler_driver_->GetImageClasses() != NULL);
- CompilerDriver::DescriptorSet image_classes(*compiler_driver_->GetImageClasses());
+ std::set<std::string> image_classes(*compiler_driver_->GetImageClasses());
// Need to delete the compiler since it has worker threads which are attached to runtime.
compiler_driver_.reset();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index d102bbc..ba7e13f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -294,7 +294,8 @@
}
bool ImageWriter::IsImageClass(Class* klass) {
- return compiler_driver_.IsImageClass(klass->GetDescriptor().c_str());
+ std::string temp;
+ return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
}
struct NonImageClasses {
@@ -351,7 +352,8 @@
bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
if (!context->image_writer->IsImageClass(klass)) {
- context->non_image_classes->insert(klass->GetDescriptor());
+ std::string temp;
+ context->non_image_classes->insert(klass->GetDescriptor(&temp));
}
return true;
}
@@ -371,14 +373,15 @@
Class* klass = obj->AsClass();
if (!image_writer->IsImageClass(klass)) {
image_writer->DumpImageClasses();
- CHECK(image_writer->IsImageClass(klass)) << klass->GetDescriptor()
+ std::string temp;
+ CHECK(image_writer->IsImageClass(klass)) << klass->GetDescriptor(&temp)
<< " " << PrettyDescriptor(klass);
}
}
}
void ImageWriter::DumpImageClasses() {
- CompilerDriver::DescriptorSet* image_classes = compiler_driver_.GetImageClasses();
+ const std::set<std::string>* image_classes = compiler_driver_.GetImageClasses();
CHECK(image_classes != NULL);
for (const std::string& image_class : *image_classes) {
LOG(INFO) << " " << image_class;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index eccc970..2c954a0 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -265,7 +265,7 @@
codegen_(codegen) {}
void CodeGeneratorARM::GenerateFrameEntry() {
- bool skip_overflow_check = IsLeafMethod() && !IsLargeFrame(GetFrameSize(), InstructionSet::kArm);
+ bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
if (!skip_overflow_check) {
if (kExplicitStackOverflowCheck) {
SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ab53b17..35b8116 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -241,7 +241,7 @@
static const int kFakeReturnRegister = 8;
core_spill_mask_ |= (1 << kFakeReturnRegister);
- bool skip_overflow_check = IsLeafMethod() && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86);
+ bool skip_overflow_check = IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
__ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
RecordPcInfo(0);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e4259f5..c4571ca 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -209,7 +209,7 @@
core_spill_mask_ |= (1 << kFakeReturnRegister);
bool skip_overflow_check = IsLeafMethod()
- && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86_64);
+ && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
__ testq(CpuRegister(RAX), Address(
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index 63adbc2..ce01077 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -33,10 +33,9 @@
// Determine whether a frame is small or large, used in the decision on whether to elide a
// stack overflow check on method entry.
//
-// A frame is considered large when it's either above kLargeFrameSize, or a quarter of the
-// overflow-usable stack space.
-static inline bool IsLargeFrame(size_t size, InstructionSet isa) {
- return size >= kLargeFrameSize || size >= GetStackOverflowReservedBytes(isa) / 4;
+// A frame is considered large when it's at or above kLargeFrameSize.
+static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
+ return size >= kLargeFrameSize;
}
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index ee11575..3f9f007 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -146,7 +146,7 @@
uint8_t length_;
uint8_t encoding_[6];
- explicit Operand(CpuRegister reg) { SetModRM(3, reg); }
+ explicit Operand(CpuRegister reg) : rex_(0), length_(0) { SetModRM(3, reg); }
// Get the operand encoding byte at the given index.
uint8_t encoding_at(int index) const {
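The Operand fix initializes rex_ and length_ in the register constructor so later reads never observe indeterminate values. A reduced sketch of the hazard; SetModRM's body here is illustrative, not the real encoder:

#include <cstdint>

class Operand {
 public:
  // Before the fix, rex_ and length_ were left uninitialized here; any read of
  // them before they were assigned saw indeterminate values.
  explicit Operand(int reg) : rex_(0), length_(0) { SetModRM(3, reg); }

  uint8_t encoding_at(int index) const { return encoding_[index]; }

 private:
  void SetModRM(int mod, int rm) {  // illustrative stand-in
    encoding_[0] = static_cast<uint8_t>((mod << 6) | (rm & 7));
    length_ = 1;
  }
  uint8_t rex_;
  uint8_t length_;
  uint8_t encoding_[6];
};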
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 403cb80..19b37af 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -271,20 +271,20 @@
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- CompilerDriver::DescriptorSet* ReadImageClassesFromFile(const char* image_classes_filename) {
+ std::set<std::string>* ReadImageClassesFromFile(const char* image_classes_filename) {
std::unique_ptr<std::ifstream> image_classes_file(new std::ifstream(image_classes_filename,
std::ifstream::in));
if (image_classes_file.get() == nullptr) {
LOG(ERROR) << "Failed to open image classes file " << image_classes_filename;
return nullptr;
}
- std::unique_ptr<CompilerDriver::DescriptorSet> result(ReadImageClasses(*image_classes_file));
+ std::unique_ptr<std::set<std::string>> result(ReadImageClasses(*image_classes_file));
image_classes_file->close();
return result.release();
}
- CompilerDriver::DescriptorSet* ReadImageClasses(std::istream& image_classes_stream) {
- std::unique_ptr<CompilerDriver::DescriptorSet> image_classes(new CompilerDriver::DescriptorSet);
+ std::set<std::string>* ReadImageClasses(std::istream& image_classes_stream) {
+ std::unique_ptr<std::set<std::string>> image_classes(new std::set<std::string>);
while (image_classes_stream.good()) {
std::string dot;
std::getline(image_classes_stream, dot);
@@ -298,7 +298,7 @@
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- CompilerDriver::DescriptorSet* ReadImageClassesFromZip(const char* zip_filename,
+ std::set<std::string>* ReadImageClassesFromZip(const char* zip_filename,
const char* image_classes_filename,
std::string* error_msg) {
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename, error_msg));
@@ -349,7 +349,7 @@
const std::string& oat_location,
const std::string& bitcode_filename,
bool image,
- std::unique_ptr<CompilerDriver::DescriptorSet>& image_classes,
+ std::unique_ptr<std::set<std::string>>& image_classes,
bool dump_stats,
bool dump_passes,
TimingLogger& timings,
@@ -1276,7 +1276,7 @@
WellKnownClasses::Init(self->GetJniEnv());
// If --image-classes was specified, calculate the full list of classes to include in the image
- std::unique_ptr<CompilerDriver::DescriptorSet> image_classes(nullptr);
+ std::unique_ptr<std::set<std::string>> image_classes(nullptr);
if (image_classes_filename != nullptr) {
std::string error_msg;
if (image_classes_zip_filename != nullptr) {
@@ -1292,7 +1292,7 @@
return EXIT_FAILURE;
}
} else if (image) {
- image_classes.reset(new CompilerDriver::DescriptorSet);
+ image_classes.reset(new std::set<std::string>);
}
std::vector<const DexFile*> dex_files;
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index a0abc9e..d67c169 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -99,9 +99,9 @@
$(eval $(call build-libart-disassembler,target,debug))
endif
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-libart-disassembler,host,ndebug))
endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-libart-disassembler,host,debug))
endif
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 068a450..75bc49b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1165,7 +1165,8 @@
state->stats_.ComputeOutliers(total_size, expansion, method);
}
}
- state->stats_.Update(obj_class->GetDescriptor().c_str(), object_bytes);
+ std::string temp;
+ state->stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
}
std::set<const void*> already_seen_;
diff --git a/patchoat/Android.mk b/patchoat/Android.mk
index 8b6b9ad..1e16096 100644
--- a/patchoat/Android.mk
+++ b/patchoat/Android.mk
@@ -37,9 +37,9 @@
endif
# We always build patchoat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,ndebug))
endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,debug))
endif
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 4371f13..f55d3fb 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -468,10 +468,10 @@
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since
# they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-libart,host,ndebug))
endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-libart,host,debug))
endif
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index ae30aee..330924e 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -24,9 +24,9 @@
// Offset of field Thread::tls32_.thin_lock_thread_id verified in InitCpu
#define THREAD_ID_OFFSET 12
// Offset of field Thread::tlsPtr_.card_table verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
// Offset of field Thread::tlsPtr_.exception verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 116
+#define THREAD_EXCEPTION_OFFSET 124
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
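These constants all shift by 8 because a pointer-sized field was added to Thread ahead of them; ART verifies such assembler-visible offsets against the C++ layout in InitCpu. A generic sketch of why one new field moves every later offset, using a hypothetical layout:

#include <cstddef>
#include <cstdint>

// Hypothetical layout: inserting new_field before card_table pushes card_table
// (and everything after it) 8 bytes further, so the #defines must follow suit.
struct ThreadLayout {
  uint32_t thin_lock_thread_id;  // early fields keep their offsets
  uint32_t padding;
  uint64_t new_field;            // the newly added member
  void* card_table;              // was at offset 8, now at 16
};

static_assert(offsetof(ThreadLayout, card_table) == 16,
              "assembler-visible offset must match the C++ layout");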
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index be28544..28b69ec 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -35,7 +35,7 @@
namespace art {
extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_implicit_suspend();
// Get the size of a thumb2 instruction in bytes.
@@ -194,40 +194,19 @@
uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm);
- Thread* self = reinterpret_cast<Thread*>(sc->arm_r9);
- CHECK_EQ(self, Thread::Current());
- uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
- Thread::kStackOverflowProtectedSize;
-
// Check that the fault address is the value expected for a stack overflow.
if (fault_addr != overflow_addr) {
VLOG(signals) << "Not a stack overflow";
return false;
}
- // We know this is a stack overflow. We need to move the sp to the overflow region
- // that exists below the protected region. Determine the address of the next
- // available valid address below the protected region.
- uintptr_t prevsp = sp;
- sp = pregion;
- VLOG(signals) << "setting sp to overflow region at " << std::hex << sp;
+ VLOG(signals) << "Stack overflow found";
- // Since the compiler puts the implicit overflow
- // check before the callee save instructions, the SP is already pointing to
- // the previous frame.
- VLOG(signals) << "previous frame: " << std::hex << prevsp;
-
- // Now establish the stack pointer for the signal return.
- sc->arm_sp = prevsp;
-
- // Tell the stack overflow code where the new stack pointer should be.
- sc->arm_ip = sp; // aka r12
-
- // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
// The value of LR must be the same as it was when we entered the code that
// caused this fault. This will be inserted into a callee save frame by
- // the function to which this handler returns (art_quick_throw_stack_overflow_from_signal).
- sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+ // the function to which this handler returns (art_quick_throw_stack_overflow).
+ sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
// The kernel will now return to the address in sc->arm_pc.
return true;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 6c63a1a..dd1f04a 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -235,31 +235,6 @@
*/
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
- /*
- * Invoke stack overflow exception from signal handler.
- * On entry:
- * r9: thread
- * sp: address of last known frame
- * r12: address of next valid SP below protected region in stack
- *
- * This is deceptively simple but hides some complexity. It is called in the case of
- * a stack overflow condition during implicit checks. The signal handler has been
- * called by the kernel due to a load from the protected stack region. The handler
- * works out the address of the previous frame and passes this in SP. However there
- * is a piece of memory somewhere below the current SP that is not accessible (the
- * memory that caused the signal). The signal handler works out the next
- * accessible value of SP and passes this in r12. This code then sets up the SP
- * to be this new value and calls the code to create and throw the stack overflow
- * exception.
- */
-ENTRY art_quick_throw_stack_overflow_from_signal
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov r0, r9 @ pass Thread::Current
- mov r1, sp @ pass SP
- mov sp, r12 @ move SP down to below protected region.
- b artThrowStackOverflowFromCode @ artThrowStackOverflowFromCode(Thread*, SP)
-END art_quick_throw_stack_overflow_from_signal
-
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 7f0f56f..a926449 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -30,9 +30,9 @@
// Offset of field Thread::suspend_count_
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::card_table_
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
// Offset of field Thread::exception_
-#define THREAD_EXCEPTION_OFFSET 120
+#define THREAD_EXCEPTION_OFFSET 128
// Offset of field Thread::thin_lock_thread_id_
#define THREAD_ID_OFFSET 12
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 3a7e689..b5948cb 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -27,7 +27,7 @@
#include "thread.h"
#include "thread-inl.h"
-extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_implicit_suspend();
@@ -157,40 +157,19 @@
uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm64);
- Thread* self = reinterpret_cast<Thread*>(sc->regs[art::arm64::TR]);
- CHECK_EQ(self, Thread::Current());
- uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
- Thread::kStackOverflowProtectedSize;
-
// Check that the fault address is the value expected for a stack overflow.
if (fault_addr != overflow_addr) {
VLOG(signals) << "Not a stack overflow";
return false;
}
- // We know this is a stack overflow. We need to move the sp to the overflow region
- // that exists below the protected region. Determine the address of the next
- // available valid address below the protected region.
- uintptr_t prevsp = sp;
- sp = pregion;
- VLOG(signals) << "setting sp to overflow region at " << std::hex << sp;
+ VLOG(signals) << "Stack overflow found";
- // Since the compiler puts the implicit overflow
- // check before the callee save instructions, the SP is already pointing to
- // the previous frame.
- VLOG(signals) << "previous frame: " << std::hex << prevsp;
-
- // Now establish the stack pointer for the signal return.
- sc->sp = prevsp;
-
- // Tell the stack overflow code where the new stack pointer should be.
- sc->regs[art::arm64::IP0] = sp; // aka x16
-
- // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
// The value of LR must be the same as it was when we entered the code that
// caused this fault. This will be inserted into a callee save frame by
- // the function to which this handler returns (art_quick_throw_stack_overflow_from_signal).
- sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+ // the function to which this handler returns (art_quick_throw_stack_overflow).
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
// The kernel will now return to the address in sc->pc.
return true;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 04be4a2..ab9035a 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -435,31 +435,6 @@
*/
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
- /*
- * Invoke stack overflow exception from signal handler.
- * On entry:
- * xSELF: thread
- * SP: address of last known frame
- * IP0: address of next valid SP below protected region in stack
- *
- * This is deceptively simple but hides some complexity. It is called in the case of
- * a stack overflow condition during implicit checks. The signal handler has been
- * called by the kernel due to a load from the protected stack region. The handler
- * works out the address of the previous frame and passes this in SP. However there
- * is a piece of memory somewhere below the current SP that is not accessible (the
- * memory that caused the signal). The signal handler works out the next
- * accessible value of SP and passes this in x16/IP0. This code then sets up the SP
- * to be this new value and calls the code to create and throw the stack overflow
- * exception.
- */
-ENTRY art_quick_throw_stack_overflow_from_signal
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov x0, xSELF // pass Thread::Current
- mov x1, sp // pass SP
- mov sp, xIP0 // move SP down to below protected region.
- b artThrowStackOverflowFromCode // artThrowStackOverflowFromCode(Thread*, SP)
-END art_quick_throw_stack_overflow_from_signal
-
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
@@ -1843,17 +1818,17 @@
ret
.Ldo_memcmp16:
- mov xIP0, x0 // Save x0 and LR. __memcmp16 does not use these temps.
- mov xIP1, xLR // TODO: Codify and check that?
+ mov x14, x0 // Save x0 and LR. __memcmp16 does not use these temps.
+ mov x15, xLR // TODO: Codify and check that?
mov x0, x2
uxtw x2, w3
bl __memcmp16
- mov xLR, xIP1 // Restore LR.
+ mov xLR, x15 // Restore LR.
cmp x0, #0 // Check the memcmp difference.
- csel x0, x0, xIP0, ne // x0 := x0 != 0 ? xIP0(prev x0=length diff) : x1.
+ csel x0, x0, x14, ne // x0 := x0 != 0 ? x14(prev x0=length diff) : x1.
ret
END art_quick_string_compareto
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index 531ed77..c9f5a25 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -20,11 +20,11 @@
#include "asm_support.h"
// Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 148
+#define THREAD_SELF_OFFSET 156
// Offset of field Thread::card_table_ verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 116
+#define THREAD_EXCEPTION_OFFSET 124
// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
#define THREAD_ID_OFFSET 12
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 8b6c9b1..c143c5d 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -28,16 +28,29 @@
#if defined(__APPLE__)
#define ucontext __darwin_ucontext
+
+#if defined(__x86_64__)
+// 64 bit mac build.
+#define CTX_ESP uc_mcontext->__ss.__rsp
+#define CTX_EIP uc_mcontext->__ss.__rip
+#define CTX_EAX uc_mcontext->__ss.__rax
+#define CTX_METHOD uc_mcontext->__ss.__rdi
+#else
+// 32 bit mac build.
#define CTX_ESP uc_mcontext->__ss.__esp
#define CTX_EIP uc_mcontext->__ss.__eip
#define CTX_EAX uc_mcontext->__ss.__eax
#define CTX_METHOD uc_mcontext->__ss.__eax
+#endif
+
#elif defined(__x86_64__)
+// 64 bit linux build.
#define CTX_ESP uc_mcontext.gregs[REG_RSP]
#define CTX_EIP uc_mcontext.gregs[REG_RIP]
#define CTX_EAX uc_mcontext.gregs[REG_RAX]
#define CTX_METHOD uc_mcontext.gregs[REG_RDI]
#else
+// 32 bit linux build.
#define CTX_ESP uc_mcontext.gregs[REG_ESP]
#define CTX_EIP uc_mcontext.gregs[REG_EIP]
#define CTX_EAX uc_mcontext.gregs[REG_EAX]
@@ -50,9 +63,18 @@
namespace art {
+#if defined(__APPLE__) && defined(__x86_64__)
+// Mac symbols have a leading underscore prefix on x86_64.
+extern "C" void _art_quick_throw_null_pointer_exception();
+extern "C" void _art_quick_throw_stack_overflow_from_signal();
+extern "C" void _art_quick_test_suspend();
+#define EXT_SYM(sym) _ ## sym
+#else
extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_test_suspend();
+#define EXT_SYM(sym) sym
+#endif
// Get the size of an instruction in bytes.
// Return 0 if the instruction is not handled.
@@ -253,7 +275,7 @@
*next_sp = retaddr;
uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_null_pointer_exception));
VLOG(signals) << "Generating null pointer exception";
return true;
}
@@ -327,7 +349,7 @@
*next_sp = retaddr;
uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_test_suspend));
// Now remove the suspend trigger that caused this fault.
Thread::Current()->RemoveSuspendTrigger();
@@ -360,30 +382,20 @@
uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86);
#endif
- Thread* self = Thread::Current();
- uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
- Thread::kStackOverflowProtectedSize;
-
// Check that the fault address is the value expected for a stack overflow.
if (fault_addr != overflow_addr) {
VLOG(signals) << "Not a stack overflow";
return false;
}
- // We know this is a stack overflow. We need to move the sp to the overflow region
- // that exists below the protected region. Determine the address of the next
- // available valid address below the protected region.
- VLOG(signals) << "setting sp to overflow region at " << std::hex << pregion;
+ VLOG(signals) << "Stack overflow found";
// Since the compiler puts the implicit overflow
// check before the callee save instructions, the SP is already pointing to
// the previous frame.
- // Tell the stack overflow code where the new stack pointer should be.
- uc->CTX_EAX = pregion;
-
- // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_stack_overflow));
return true;
}
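The fault handlers no longer rewrite SP into the protected region; they only point the saved PC at art_quick_throw_stack_overflow and return. A minimal Linux x86-64 sketch of that ucontext technique; recovery_entry is a hypothetical landing function, not an ART symbol:

#include <csignal>
#include <cstdint>
#include <ucontext.h>

extern "C" void recovery_entry();  // hypothetical: code to resume at

// On SIGSEGV, rewrite the saved program counter so that returning from the
// handler "calls" recovery_entry instead of re-executing the faulting access.
static void FaultHandler(int sig, siginfo_t* info, void* context) {
  ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
  uc->uc_mcontext.gregs[REG_RIP] = reinterpret_cast<greg_t>(recovery_entry);
}

static void Install() {
  struct sigaction sa = {};
  sa.sa_sigaction = FaultHandler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);
}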
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index dc4019d..75ec49d 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -174,21 +174,6 @@
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
-// On entry to this function, EAX contains the ESP value for the overflow region.
-DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
- // Here, the ESP is above the protected region. We need to create a
- // callee save frame and then move ESP down to the overflow region.
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov %esp, %ecx // get current stack pointer
- mov %eax, %esp // move ESP to the overflow region.
- PUSH ecx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
- int3 // unreached
-END_FUNCTION art_quick_throw_stack_overflow_from_signal
-
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 70c71c2..682ba43 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -112,6 +112,8 @@
.balign 16
END_MACRO
+// TODO: we might need to use SYMBOL() here to add the underscore prefix
+// for mac builds.
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(\c_name, 0)
.globl VAR(c_name, 0)
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index c3637ef..40958dc 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -28,11 +28,11 @@
#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
// Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 184
+#define THREAD_SELF_OFFSET 192
// Offset of field Thread::card_table_ verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 120
+#define THREAD_EXCEPTION_OFFSET 128
// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
#define THREAD_ID_OFFSET 12
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index f021ada..48bc240 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -284,18 +284,6 @@
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
-// On entry to this function, RAX contains the ESP value for the overflow region.
-DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
- // Here, the RSP is above the protected region. We need to create a
- // callee save frame and then move RSP down to the overflow region.
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov %rsp, %rsi // get current stack pointer, pass SP as second arg
- mov %rax, %rsp // move RSP to the overflow region.
- mov %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current() as first arg
- call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
- int3 // unreached
-END_FUNCTION art_quick_throw_stack_overflow_from_signal
-
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 5ddafb4..e57c0c0 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -17,14 +17,8 @@
#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_
-#ifdef __clang__
-#define ART_HAVE_STDATOMIC 1
-#endif
-
#include <stdint.h>
-#if ART_HAVE_STDATOMIC
#include <atomic>
-#endif
#include <limits>
#include <vector>
@@ -157,8 +151,6 @@
return kNeedSwapMutexes;
}
- #if ART_HAVE_STDATOMIC
-
static void ThreadFenceAcquire() {
std::atomic_thread_fence(std::memory_order_acquire);
}
@@ -179,66 +171,6 @@
std::atomic_thread_fence(std::memory_order_seq_cst);
}
- #else
-
- static void ThreadFenceAcquire() {
- #if defined(__arm__) || defined(__aarch64__)
- __asm__ __volatile__("dmb ish" : : : "memory");
- // Could possibly use dmb ishld on aarch64
- // But currently we also use this on volatile loads
- // to enforce store atomicity. Ishld is
- // insufficient for that purpose.
- #elif defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__("" : : : "memory");
- #elif defined(__mips__)
- __asm__ __volatile__("sync" : : : "memory");
- #else
- #error Unexpected architecture
- #endif
- }
-
- static void ThreadFenceRelease() {
- #if defined(__arm__) || defined(__aarch64__)
- __asm__ __volatile__("dmb ish" : : : "memory");
- // ishst doesn't order load followed by store.
- #elif defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__("" : : : "memory");
- #elif defined(__mips__)
- __asm__ __volatile__("sync" : : : "memory");
- #else
- #error Unexpected architecture
- #endif
- }
-
- // Fence at the end of a constructor with final fields
- // or allocation. We believe this
- // only has to order stores, and can thus be weaker than
- // release on aarch64.
- static void ThreadFenceForConstructor() {
- #if defined(__arm__) || defined(__aarch64__)
- __asm__ __volatile__("dmb ishst" : : : "memory");
- #elif defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__("" : : : "memory");
- #elif defined(__mips__)
- __asm__ __volatile__("sync" : : : "memory");
- #else
- #error Unexpected architecture
- #endif
- }
-
- static void ThreadFenceSequentiallyConsistent() {
- #if defined(__arm__) || defined(__aarch64__)
- __asm__ __volatile__("dmb ish" : : : "memory");
- #elif defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__("mfence" : : : "memory");
- #elif defined(__mips__)
- __asm__ __volatile__("sync" : : : "memory");
- #else
- #error Unexpected architecture
- #endif
- }
- #endif
-
private:
static Mutex* GetSwapMutex(const volatile int64_t* addr);
static int64_t SwapMutexRead64(volatile const int64_t* addr);
@@ -252,11 +184,10 @@
DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};
-#if ART_HAVE_STDATOMIC
template<typename T>
-class Atomic : public std::atomic<T> {
+class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
public:
- Atomic<T>() : std::atomic<T>() { }
+ Atomic<T>() : std::atomic<T>(0) { }
explicit Atomic<T>(T value) : std::atomic<T>(value) { }
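With the fallback removed, Atomic<T> is always the std::atomic<T> subclass, now packed to sizeof(T) and zero-initialized by default. A standalone sketch of the wrapper; the PACKED macro is reproduced here under the assumption it expands to the usual aligned+packed attributes:

#include <atomic>
#include <cstdint>

// Assumed expansion of ART's PACKED(x) macro.
#define PACKED(x) __attribute__((__aligned__(x), __packed__))

template <typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
 public:
  Atomic() : std::atomic<T>(0) {}  // zero-initialize by default
  explicit Atomic(T value) : std::atomic<T>(value) {}
};

// Packing guarantees the wrapper adds no padding over the underlying T.
static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "no padding");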
@@ -360,292 +291,20 @@
}
};
-#else
-
-template<typename T> class Atomic;
-
-// Helper class for Atomic to deal separately with size 8 and small
-// objects. Should not be used directly.
-
-template<int SZ, class T> struct AtomicHelper {
- friend class Atomic<T>;
-
- private:
- COMPILE_ASSERT(sizeof(T) <= 4, bad_atomic_helper_arg);
-
- static T LoadRelaxed(const volatile T* loc) {
- // sizeof(T) <= 4
- return *loc;
- }
-
- static void StoreRelaxed(volatile T* loc, T desired) {
- // sizeof(T) <= 4
- *loc = desired;
- }
-
- static bool CompareExchangeStrongSequentiallyConsistent(volatile T* loc,
- T expected_value, T desired_value) {
- // sizeof(T) <= 4
- return __sync_bool_compare_and_swap(loc, expected_value, desired_value);
- }
-};
-
-// Interpret the bit pattern of input (type U) as type V. Requires the size
-// of V >= size of U (compile-time checked).
-// Reproduced here from utils.h to keep dependencies small.
-template<typename U, typename V>
-static inline V bit_cast_atomic(U in) {
- COMPILE_ASSERT(sizeof(U) == sizeof(V), size_of_u_not_eq_size_of_v);
- union {
- U u;
- V v;
- } tmp;
- tmp.u = in;
- return tmp.v;
-}
-
-template<class T> struct AtomicHelper<8, T> {
- friend class Atomic<T>;
-
- private:
- COMPILE_ASSERT(sizeof(T) == 8, bad_large_atomic_helper_arg);
-
- static T LoadRelaxed(const volatile T* loc) {
- // sizeof(T) == 8
- volatile const int64_t* loc_ptr =
- reinterpret_cast<volatile const int64_t*>(loc);
- return bit_cast_atomic<int64_t, T>(QuasiAtomic::Read64(loc_ptr));
- }
-
- static void StoreRelaxed(volatile T* loc, T desired) {
- // sizeof(T) == 8
- volatile int64_t* loc_ptr =
- reinterpret_cast<volatile int64_t*>(loc);
- QuasiAtomic::Write64(loc_ptr, bit_cast_atomic<T, int64_t>(desired));
- }
-
-
- static bool CompareExchangeStrongSequentiallyConsistent(volatile T* loc,
- T expected_value, T desired_value) {
- // sizeof(T) == 8
- volatile int64_t* loc_ptr = reinterpret_cast<volatile int64_t*>(loc);
- return QuasiAtomic::Cas64(bit_cast_atomic<T, int64_t>(expected_value),
- bit_cast_atomic<T, int64_t>(desired_value),
- loc_ptr);
- }
-};
-
-template<typename T>
-class PACKED(sizeof(T)) Atomic {
- private:
- COMPILE_ASSERT(sizeof(T) <= 4 || sizeof(T) == 8, bad_atomic_arg);
-
- public:
- Atomic<T>() : value_(0) { }
-
- explicit Atomic<T>(T value) : value_(value) { }
-
- // Load from memory without ordering or synchronization constraints.
- T LoadRelaxed() const {
- return AtomicHelper<sizeof(T), T>::LoadRelaxed(&value_);
- }
-
- // Word tearing allowed, but may race.
- T LoadJavaData() const {
- return value_;
- }
-
- // Load from memory with a total ordering.
- T LoadSequentiallyConsistent() const;
-
- // Store to memory without ordering or synchronization constraints.
- void StoreRelaxed(T desired) {
- AtomicHelper<sizeof(T), T>::StoreRelaxed(&value_, desired);
- }
-
- // Word tearing allowed, but may race.
- void StoreJavaData(T desired) {
- value_ = desired;
- }
-
- // Store to memory with release ordering.
- void StoreRelease(T desired);
-
- // Store to memory with a total ordering.
- void StoreSequentiallyConsistent(T desired);
-
- // Atomically replace the value with desired value if it matches the expected value.
- // Participates in total ordering of atomic operations.
- bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
- return AtomicHelper<sizeof(T), T>::
- CompareExchangeStrongSequentiallyConsistent(&value_, expected_value, desired_value);
- }
-
- // The same, but may fail spuriously.
- bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) {
- // TODO: Take advantage of the fact that it may fail spuriously.
- return AtomicHelper<sizeof(T), T>::
- CompareExchangeStrongSequentiallyConsistent(&value_, expected_value, desired_value);
- }
-
- // Atomically replace the value with desired value if it matches the expected value. Doesn't
- // imply ordering or synchronization constraints.
- bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) {
- // TODO: make this relaxed.
- return CompareExchangeStrongSequentiallyConsistent(expected_value, desired_value);
- }
-
- // The same, but may fail spuriously.
- bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
- // TODO: Take advantage of the fact that it may fail spuriously.
- // TODO: make this relaxed.
- return CompareExchangeStrongSequentiallyConsistent(expected_value, desired_value);
- }
-
- // Atomically replace the value with desired value if it matches the expected value. Prior accesses
- // made to other memory locations by the thread that did the release become visible in this
- // thread.
- bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
- // TODO: make this acquire.
- return CompareExchangeWeakSequentiallyConsistent(expected_value, desired_value);
- }
-
- // Atomically replace the value with desired value if it matches the expected value. Prior accesses
- // to other memory locations become visible to the threads that do a consume or an acquire on the
- // same location.
- bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
- // TODO: make this release.
- return CompareExchangeWeakSequentiallyConsistent(expected_value, desired_value);
- }
-
- volatile T* Address() {
- return &value_;
- }
-
- T FetchAndAddSequentiallyConsistent(const T value) {
- if (sizeof(T) <= 4) {
- return __sync_fetch_and_add(&value_, value); // Return old value.
- } else {
- T expected;
- do {
- expected = LoadRelaxed();
- } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected + value));
- return expected;
- }
- }
-
- T FetchAndSubSequentiallyConsistent(const T value) {
- if (sizeof(T) <= 4) {
- return __sync_fetch_and_sub(&value_, value); // Return old value.
- } else {
- return FetchAndAddSequentiallyConsistent(-value);
- }
- }
-
- T FetchAndOrSequentiallyConsistent(const T value) {
- if (sizeof(T) <= 4) {
- return __sync_fetch_and_or(&value_, value); // Return old value.
- } else {
- T expected;
- do {
- expected = LoadRelaxed();
- } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected | value));
- return expected;
- }
- }
-
- T FetchAndAndSequentiallyConsistent(const T value) {
- if (sizeof(T) <= 4) {
- return __sync_fetch_and_and(&value_, value); // Return old value.
- } else {
- T expected;
- do {
- expected = LoadRelaxed();
- } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected & value));
- return expected;
- }
- }
-
- T operator++() { // Prefix operator.
- if (sizeof(T) <= 4) {
- return __sync_add_and_fetch(&value_, 1); // Return new value.
- } else {
- return FetchAndAddSequentiallyConsistent(1) + 1;
- }
- }
-
- T operator++(int) { // Postfix operator.
- return FetchAndAddSequentiallyConsistent(1);
- }
-
- T operator--() { // Prefix operator.
- if (sizeof(T) <= 4) {
- return __sync_sub_and_fetch(&value_, 1); // Return new value.
- } else {
- return FetchAndSubSequentiallyConsistent(1) - 1;
- }
- }
-
- T operator--(int) { // Postfix operator.
- return FetchAndSubSequentiallyConsistent(1);
- }
-
- static T MaxValue() {
- return std::numeric_limits<T>::max();
- }
-
-
- private:
- volatile T value_;
-};
-#endif
-
typedef Atomic<int32_t> AtomicInteger;
COMPILE_ASSERT(sizeof(AtomicInteger) == sizeof(int32_t), weird_atomic_int_size);
COMPILE_ASSERT(alignof(AtomicInteger) == alignof(int32_t),
atomic_int_alignment_differs_from_that_of_underlying_type);
COMPILE_ASSERT(sizeof(Atomic<int64_t>) == sizeof(int64_t), weird_atomic_int64_size);
+
+// Assert that 64-bit integers are 8-byte aligned. This isn't true on certain 32-bit
+// architectures (e.g. x86-32), but we know that 64-bit integers here are arranged to be
+// 8-byte aligned.
#if defined(__LP64__)
COMPILE_ASSERT(alignof(Atomic<int64_t>) == alignof(int64_t),
atomic_int64_alignment_differs_from_that_of_underlying_type);
#endif
-// The above fails on x86-32.
-// This is OK, since we explicitly arrange for alignment of 8-byte fields.
-
-
-#if !ART_HAVE_STDATOMIC
-template<typename T>
-inline T Atomic<T>::LoadSequentiallyConsistent() const {
- T result = value_;
- if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
- QuasiAtomic::ThreadFenceAcquire();
- // We optimistically assume this suffices for store atomicity.
- // On ARMv8 we strengthen ThreadFenceAcquire to make that true.
- }
- return result;
-}
-
-template<typename T>
-inline void Atomic<T>::StoreRelease(T desired) {
- if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
- QuasiAtomic::ThreadFenceRelease();
- }
- StoreRelaxed(desired);
-}
-
-template<typename T>
-inline void Atomic<T>::StoreSequentiallyConsistent(T desired) {
- if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
- QuasiAtomic::ThreadFenceRelease();
- }
- StoreRelaxed(desired);
- if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
- }
-}
-
-#endif
} // namespace art
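
For reference, a minimal stand-alone sketch (not ART code) of what the now-unconditional
std::atomic path provides, covering the two patterns the removed fallback hand-rolled:
the fences and the weak-CAS retry loop behind FetchAndAddSequentiallyConsistent for
8-byte types.

#include <atomic>
#include <cstdint>

void FenceForConstructor() {
  // At least as strong as the removed "dmb ishst": orders prior stores
  // before later stores; a compiler-only barrier on x86 (TSO).
  std::atomic_thread_fence(std::memory_order_release);
}

void FenceSequentiallyConsistent() {
  // Full barrier: "dmb ish" on ARM, "mfence" on x86.
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

template <typename T>
T FetchAndAddViaCas(std::atomic<T>* a, T value) {
  T expected = a->load(std::memory_order_relaxed);
  // compare_exchange_weak may fail spuriously; on failure it reloads
  // 'expected' with the current value, so the loop simply retries.
  while (!a->compare_exchange_weak(expected, expected + value,
                                   std::memory_order_seq_cst)) {
  }
  return expected;  // Old value, matching fetch_add semantics.
}
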
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 3e5cdba..f70db35 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -51,9 +51,11 @@
blocked_tid_(kLogLockContentions ? blocked_tid : 0),
owner_tid_(kLogLockContentions ? owner_tid : 0),
start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
- std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
- mutex->GetName(), owner_tid);
- ATRACE_BEGIN(msg.c_str());
+ if (ATRACE_ENABLED()) {
+ std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
+ mutex->GetName(), owner_tid);
+ ATRACE_BEGIN(msg.c_str());
+ }
}
~ScopedContentionRecorder() {
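
The contention-recorder change above follows a general pattern: pay for message
formatting only when tracing is actually on. A sketch of the idiom, assuming the
StringPrintf and ATRACE_BEGIN helpers used in the surrounding code:

void BeginContentionTrace(bool tracing_enabled, const char* lock_name, uint64_t owner_tid) {
  if (tracing_enabled) {  // Cheap flag test; the common (untraced) path allocates nothing.
    std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                   lock_name, owner_tid);
    ATRACE_BEGIN(msg.c_str());  // Formatting cost is paid only when tracing.
  }
}
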
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 9921bdd..3af90b2 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -47,7 +47,9 @@
}
}
DCHECK(!(*element_class)->IsPrimitiveVoid());
- std::string descriptor = "[" + (*element_class)->GetDescriptor();
+ std::string descriptor = "[";
+ std::string temp;
+ descriptor += (*element_class)->GetDescriptor(&temp);
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::ClassLoader> class_loader(hs.NewHandle((*element_class)->GetClassLoader()));
HandleWrapper<mirror::Class> h_element_class(hs.NewHandleWrapper(element_class));
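
The new GetDescriptor(&temp) shape threads caller-owned backing storage through the
call: the callee returns a const char* that is either a stable literal or points into
the caller's std::string. A minimal sketch of the idiom (not the ART signature):

#include <string>

const char* DescriptorOf(bool is_primitive_int, std::string* storage) {
  if (is_primitive_int) {
    return "I";  // String literal: static storage duration, no allocation.
  }
  *storage = "Ljava/lang/Object;";  // Materialize into the caller's string.
  return storage->c_str();          // Valid for the lifetime of *storage.
}
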
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3b4976f..db42146 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -40,7 +40,7 @@
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "leb128.h"
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "oat.h"
#include "oat_file.h"
#include "object_lock.h"
@@ -97,7 +97,8 @@
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
if (c->GetVerifyErrorClass() != NULL) {
// TODO: change the verifier to store an _instance_, with a useful detail message?
- self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor().c_str(),
+ std::string temp;
+ self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor(&temp),
PrettyDescriptor(c).c_str());
} else {
self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;",
@@ -886,12 +887,18 @@
}
}
- // Create the oat file.
- open_oat_file.reset(CreateOatFileForDexLocation(dex_location, scoped_flock.GetFile()->Fd(),
- oat_location, error_msgs));
+ if (Runtime::Current()->IsDex2OatEnabled()) {
+ // Create the oat file.
+ open_oat_file.reset(CreateOatFileForDexLocation(dex_location, scoped_flock.GetFile()->Fd(),
+ oat_location, error_msgs));
+ }
// Failed, bail.
if (open_oat_file.get() == nullptr) {
+ std::string error_msg;
+ // dex2oat was disabled or crashed. Add the dex file to the list of dex_files to make progress.
+ DexFile::Open(dex_location, dex_location, &error_msg, dex_files);
+ error_msgs->push_back(error_msg);
return false;
}
@@ -2015,15 +2022,21 @@
return mirror::Class::ComputeClassSize(false, 0, num_32, num_64, num_ref);
}
-OatFile::OatClass ClassLinker::GetOatClass(const DexFile& dex_file, uint16_t class_def_idx) {
+bool ClassLinker::FindOatClass(const DexFile& dex_file,
+ uint16_t class_def_idx,
+ OatFile::OatClass* oat_class) {
+ DCHECK(oat_class != nullptr);
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
- CHECK(oat_file != NULL) << dex_file.GetLocation();
+ if (oat_file == nullptr) {
+ return false;
+ }
uint dex_location_checksum = dex_file.GetLocationChecksum();
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
&dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation();
- return oat_dex_file->GetOatClass(class_def_idx);
+ *oat_class = oat_dex_file->GetOatClass(class_def_idx);
+ return true;
}
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
@@ -2060,7 +2073,8 @@
return 0;
}
-const OatFile::OatMethod ClassLinker::GetOatMethodFor(mirror::ArtMethod* method) {
+bool ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method) {
+ DCHECK(oat_method != nullptr);
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -2087,10 +2101,15 @@
GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
method->GetDeclaringClass()->GetDexClassDefIndex(),
method->GetDexMethodIndex()));
- const OatFile::OatClass oat_class = GetOatClass(*declaring_class->GetDexCache()->GetDexFile(),
- declaring_class->GetDexClassDefIndex());
+ OatFile::OatClass oat_class;
+ if (!FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
+ declaring_class->GetDexClassDefIndex(),
+ &oat_class)) {
+ return false;
+ }
- return oat_class.GetOatMethod(oat_method_index);
+ *oat_method = oat_class.GetOatMethod(oat_method_index);
+ return true;
}
// Special case to get oat code without overwriting a trampoline.
@@ -2099,7 +2118,12 @@
if (method->IsProxyMethod()) {
return GetQuickProxyInvokeHandler();
}
- const void* result = GetOatMethodFor(method).GetQuickCode();
+ OatFile::OatMethod oat_method;
+ const void* result = nullptr;
+ if (FindOatMethodFor(method, &oat_method)) {
+ result = oat_method.GetQuickCode();
+ }
+
if (result == nullptr) {
if (method->IsNative()) {
// No code and native? Use generic trampoline.
@@ -2122,10 +2146,16 @@
if (method->IsProxyMethod()) {
return GetPortableProxyInvokeHandler();
}
- const OatFile::OatMethod oat_method = GetOatMethodFor(method);
- const void* result = oat_method.GetPortableCode();
+ OatFile::OatMethod oat_method;
+ const void* result = nullptr;
+ const void* quick_code = nullptr;
+ if (FindOatMethodFor(method, &oat_method)) {
+ result = oat_method.GetPortableCode();
+ quick_code = oat_method.GetQuickCode();
+ }
+
if (result == nullptr) {
- if (oat_method.GetQuickCode() == nullptr) {
+ if (quick_code == nullptr) {
// No code? You must mean to go into the interpreter.
result = GetPortableToInterpreterBridge();
} else {
@@ -2140,14 +2170,20 @@
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- const OatFile::OatClass oat_class = GetOatClass(dex_file, class_def_idx);
+ OatFile::OatClass oat_class;
+ if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+ return nullptr;
+ }
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
return oat_class.GetOatMethod(oat_method_idx).GetQuickCode();
}
const void* ClassLinker::GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- const OatFile::OatClass oat_class = GetOatClass(dex_file, class_def_idx);
+ OatFile::OatClass oat_class;
+ if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+ return nullptr;
+ }
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
return oat_class.GetOatMethod(oat_method_idx).GetPortableCode();
}
@@ -2190,7 +2226,6 @@
const byte* class_data = dex_file.GetClassData(*dex_class_def);
// There should always be class data if there were direct methods.
CHECK(class_data != nullptr) << PrettyDescriptor(klass);
- const OatFile::OatClass oat_class = GetOatClass(dex_file, klass->GetDexClassDefIndex());
ClassDataItemIterator it(dex_file, class_data);
// Skip fields
while (it.HasNextStaticField()) {
@@ -2199,6 +2234,8 @@
while (it.HasNextInstanceField()) {
it.Next();
}
+ OatFile::OatClass oat_class;
+ bool has_oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class);
// Link the code of methods skipped by LinkCode.
for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) {
mirror::ArtMethod* method = klass->GetDirectMethod(method_index);
@@ -2206,8 +2243,13 @@
// Only update static methods.
continue;
}
- const void* portable_code = oat_class.GetOatMethod(method_index).GetPortableCode();
- const void* quick_code = oat_class.GetOatMethod(method_index).GetQuickCode();
+ const void* portable_code = nullptr;
+ const void* quick_code = nullptr;
+ if (has_oat_class) {
+ OatFile::OatMethod oat_method = oat_class.GetOatMethod(method_index);
+ portable_code = oat_method.GetPortableCode();
+ quick_code = oat_method.GetQuickCode();
+ }
const bool enter_interpreter = NeedsInterpreter(method, quick_code, portable_code);
bool have_portable_code = false;
if (enter_interpreter) {
@@ -2239,16 +2281,21 @@
void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
const DexFile& dex_file, uint32_t dex_method_index,
uint32_t method_index) {
+ if (Runtime::Current()->IsCompiler()) {
+ // The following code only applies to a non-compiler runtime.
+ return;
+ }
// Method shouldn't have already been linked.
DCHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
DCHECK(method->GetEntryPointFromPortableCompiledCode() == nullptr);
- // Every kind of method should at least get an invoke stub from the oat_method.
- // non-abstract methods also get their code pointers.
- const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
- oat_method.LinkMethod(method.Get());
+ if (oat_class != nullptr) {
+ // Every kind of method should at least get an invoke stub from the oat_method.
+ // Non-abstract methods also get their code pointers.
+ const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
+ oat_method.LinkMethod(method.Get());
+ }
// Install entry point from interpreter.
- Runtime* runtime = Runtime::Current();
bool enter_interpreter = NeedsInterpreter(method.Get(),
method->GetEntryPointFromQuickCompiledCode(),
method->GetEntryPointFromPortableCompiledCode());
@@ -2302,6 +2349,7 @@
}
// Allow instrumentation its chance to hijack code.
+ Runtime* runtime = Runtime::Current();
runtime->GetInstrumentation()->UpdateMethodsCode(method.Get(),
method->GetEntryPointFromQuickCompiledCode(),
method->GetEntryPointFromPortableCompiledCode(),
@@ -2338,8 +2386,10 @@
return; // no fields or methods - for example a marker interface
}
- if (Runtime::Current()->IsStarted() && !Runtime::Current()->UseCompileTimeClassPath()) {
- const OatFile::OatClass oat_class = GetOatClass(dex_file, klass->GetDexClassDefIndex());
+ OatFile::OatClass oat_class;
+ if (Runtime::Current()->IsStarted()
+ && !Runtime::Current()->UseCompileTimeClassPath()
+ && FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class)) {
LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
} else {
LoadClassMembers(dex_file, class_data, klass, class_loader, nullptr);
@@ -2422,9 +2472,7 @@
return;
}
klass->SetDirectMethod(i, method.Get());
- if (oat_class != nullptr) {
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
- }
+ LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
method->SetMethodIndex(class_def_method_index);
class_def_method_index++;
}
@@ -2437,9 +2485,7 @@
}
klass->SetVirtualMethod(i, method.Get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
- if (oat_class != nullptr) {
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
- }
+ LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
class_def_method_index++;
}
DCHECK(!it.HasNext());
@@ -2482,17 +2528,18 @@
// Set finalizable flag on declaring class.
if (strcmp("V", dex_file.GetShorty(method_id.proto_idx_)) == 0) {
// Void return type.
- if (klass->GetClassLoader() != NULL) { // All non-boot finalizer methods are flagged
+ if (klass->GetClassLoader() != NULL) { // All non-boot finalizer methods are flagged.
klass->SetFinalizable();
} else {
- std::string klass_descriptor = klass->GetDescriptor();
+ std::string temp;
+ const char* klass_descriptor = klass->GetDescriptor(&temp);
// The Enum class declares a "final" finalize() method to prevent subclasses from
// introducing a finalizer. We don't want to set the finalizable flag for Enum or its
// subclasses, so we exclude it here.
// We also want to avoid setting the flag on Object, where we know that finalize() is
// empty.
- if (klass_descriptor.compare("Ljava/lang/Object;") != 0 &&
- klass_descriptor.compare("Ljava/lang/Enum;") != 0) {
+ if (strcmp(klass_descriptor, "Ljava/lang/Object;") != 0 &&
+ strcmp(klass_descriptor, "Ljava/lang/Enum;") != 0) {
klass->SetFinalizable();
}
}
@@ -2991,6 +3038,7 @@
const char* old_no_suspend_cause =
self->StartAssertNoThreadSuspension("Moving image classes to class table");
mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
+ std::string temp;
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
mirror::ObjectArray<mirror::Class>* types = dex_cache->GetResolvedTypes();
@@ -2998,9 +3046,9 @@
mirror::Class* klass = types->Get(j);
if (klass != NULL) {
DCHECK(klass->GetClassLoader() == NULL);
- std::string descriptor = klass->GetDescriptor();
- size_t hash = Hash(descriptor.c_str());
- mirror::Class* existing = LookupClassFromTableLocked(descriptor.c_str(), NULL, hash);
+ const char* descriptor = klass->GetDescriptor(&temp);
+ size_t hash = Hash(descriptor);
+ mirror::Class* existing = LookupClassFromTableLocked(descriptor, NULL, hash);
if (existing != NULL) {
CHECK(existing == klass) << PrettyClassAndClassLoader(existing) << " != "
<< PrettyClassAndClassLoader(klass);
@@ -3218,7 +3266,7 @@
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
// Make this work with gtests, which do not set up the image properly.
// TODO: we should clean up gtests to set up the image path properly.
- if (Runtime::Current()->IsCompiler() && (oat_file == NULL)) {
+ if (Runtime::Current()->IsCompiler() || (oat_file == NULL)) {
return false;
}
@@ -3265,9 +3313,10 @@
// isn't a problem and this case shouldn't occur
return false;
}
+ std::string temp;
LOG(FATAL) << "Unexpected class status: " << oat_file_class_status
<< " " << dex_file.GetLocation() << " " << PrettyClass(klass) << " "
- << klass->GetDescriptor();
+ << klass->GetDescriptor(&temp);
return false;
}
@@ -3786,7 +3835,8 @@
// Set the class as initialized except if failed to initialize static fields.
klass->SetStatus(mirror::Class::kStatusInitialized, self);
if (VLOG_IS_ON(class_linker)) {
- LOG(INFO) << "Initialized class " << klass->GetDescriptor() << " from " <<
+ std::string temp;
+ LOG(INFO) << "Initialized class " << klass->GetDescriptor(&temp) << " from " <<
klass->GetLocation();
}
// Opportunistically set static method trampolines to their destination.
@@ -4141,9 +4191,9 @@
klass->GetSuperClass()->GetVTableLength();
size_t actual_count = klass->GetSuperClass()->GetVTableLength();
CHECK_LE(actual_count, max_count);
- StackHandleScope<3> hs(self);
+ StackHandleScope<4> hs(self);
+ Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
- mirror::Class* super_class = klass->GetSuperClass();
if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
vtable = hs.NewHandle(AllocArtMethodArray(self, max_count));
if (UNLIKELY(vtable.Get() == nullptr)) {
@@ -4155,7 +4205,7 @@
vtable->Set<false>(i, super_class->GetVTableEntry(i));
}
} else {
- CHECK(super_class->GetVTable() != nullptr) << PrettyClass(super_class);
+ CHECK(super_class->GetVTable() != nullptr) << PrettyClass(super_class.Get());
vtable = hs.NewHandle(super_class->GetVTable()->CopyOf(self, max_count));
if (UNLIKELY(vtable.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -4301,9 +4351,10 @@
interfaces->Get(i);
DCHECK(interface != NULL);
if (!interface->IsInterface()) {
+ std::string temp;
ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
PrettyDescriptor(klass.Get()).c_str(),
- PrettyDescriptor(interface->GetDescriptor()).c_str());
+ PrettyDescriptor(interface->GetDescriptor(&temp)).c_str());
return false;
}
// Check if interface is already in iftable
@@ -4677,11 +4728,12 @@
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor();
+ std::string temp;
+ DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
size_t previous_size = klass->GetObjectSize();
if (previous_size != 0) {
// Make sure that we didn't originally have an incorrect size.
- CHECK_EQ(previous_size, size) << klass->GetDescriptor();
+ CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
}
klass->SetObjectSize(size);
}
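
The GetOatClass -> FindOatClass rework recurs throughout this file: lookups that used
to CHECK-fail when oat data was missing now report absence through a bool plus an
out-parameter, and callers pick a fallback (typically the interpreter). A generic
sketch of the pattern:

#include <map>
#include <string>

bool FindEntry(const std::map<std::string, int>& table,
               const std::string& key, int* out) {
  auto it = table.find(key);
  if (it == table.end()) {
    return false;  // Absent: the caller chooses the fallback path.
  }
  *out = it->second;
  return true;
}
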
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 8c09042..6fc0f0e 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -392,7 +392,7 @@
}
private:
- const OatFile::OatMethod GetOatMethodFor(mirror::ArtMethod* method)
+ bool FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
@@ -461,8 +461,9 @@
void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Finds the associated oat class for a dex_file and descriptor
- OatFile::OatClass GetOatClass(const DexFile& dex_file, uint16_t class_def_idx)
+ // Finds the associated oat class for a dex_file and descriptor. Returns whether the class
+ // was found, and sets the data in oat_class.
+ bool FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
@@ -742,6 +743,7 @@
friend class ImageWriter; // for GetClassRoots
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
+ friend class NoDex2OatTest; // for FindOpenedOatFileForDexFile
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
FRIEND_TEST(mirror::DexCacheTest, Open);
FRIEND_TEST(ExceptionTest, FindExceptionHandler);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 8d93265..69c281e 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -68,7 +68,8 @@
ASSERT_TRUE(primitive->GetClass() != NULL);
ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass());
EXPECT_TRUE(primitive->GetClass()->GetSuperClass() != NULL);
- ASSERT_STREQ(descriptor.c_str(), primitive->GetDescriptor().c_str());
+ std::string temp;
+ ASSERT_STREQ(descriptor.c_str(), primitive->GetDescriptor(&temp));
EXPECT_TRUE(primitive->GetSuperClass() == NULL);
EXPECT_FALSE(primitive->HasSuperClass());
EXPECT_TRUE(primitive->GetClassLoader() == NULL);
@@ -106,7 +107,8 @@
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
Handle<mirror::Class> array(
hs.NewHandle(class_linker_->FindClass(self, array_descriptor.c_str(), loader)));
- EXPECT_STREQ(component_type.c_str(), array->GetComponentType()->GetDescriptor().c_str());
+ std::string temp;
+ EXPECT_STREQ(component_type.c_str(), array->GetComponentType()->GetDescriptor(&temp));
EXPECT_EQ(class_loader, array->GetClassLoader());
EXPECT_EQ(kAccFinal | kAccAbstract, (array->GetAccessFlags() & (kAccFinal | kAccAbstract)));
AssertArrayClass(array_descriptor, array);
@@ -118,13 +120,14 @@
ASSERT_TRUE(array->GetClass() != NULL);
ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
EXPECT_TRUE(array->GetClass()->GetSuperClass() != NULL);
- ASSERT_STREQ(array_descriptor.c_str(), array->GetDescriptor().c_str());
+ std::string temp;
+ ASSERT_STREQ(array_descriptor.c_str(), array->GetDescriptor(&temp));
EXPECT_TRUE(array->GetSuperClass() != NULL);
Thread* self = Thread::Current();
EXPECT_EQ(class_linker_->FindSystemClass(self, "Ljava/lang/Object;"), array->GetSuperClass());
EXPECT_TRUE(array->HasSuperClass());
ASSERT_TRUE(array->GetComponentType() != NULL);
- ASSERT_TRUE(!array->GetComponentType()->GetDescriptor().empty());
+ ASSERT_GT(strlen(array->GetComponentType()->GetDescriptor(&temp)), 0U);
EXPECT_EQ(mirror::Class::kStatusInitialized, array->GetStatus());
EXPECT_FALSE(array->IsErroneous());
EXPECT_TRUE(array->IsLoaded());
@@ -148,9 +151,9 @@
ASSERT_TRUE(array->GetIfTable() != NULL);
mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
EXPECT_TRUE(direct_interface0 != nullptr);
- EXPECT_STREQ(direct_interface0->GetDescriptor().c_str(), "Ljava/lang/Cloneable;");
+ EXPECT_STREQ(direct_interface0->GetDescriptor(&temp), "Ljava/lang/Cloneable;");
mirror::Class* direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
- EXPECT_STREQ(direct_interface1->GetDescriptor().c_str(), "Ljava/io/Serializable;");
+ EXPECT_STREQ(direct_interface1->GetDescriptor(&temp), "Ljava/io/Serializable;");
mirror::Class* array_ptr = array->GetComponentType();
EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
}
@@ -185,7 +188,8 @@
void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor().c_str());
+ std::string temp;
+ EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor(&temp));
if (descriptor == "Ljava/lang/Object;") {
EXPECT_FALSE(klass->HasSuperClass());
} else {
@@ -201,8 +205,9 @@
EXPECT_FALSE(klass->IsArrayClass());
EXPECT_TRUE(klass->GetComponentType() == NULL);
EXPECT_TRUE(klass->IsInSamePackage(klass.Get()));
- EXPECT_TRUE(mirror::Class::IsInSamePackage(klass->GetDescriptor().c_str(),
- klass->GetDescriptor().c_str()));
+ std::string temp2;
+ EXPECT_TRUE(mirror::Class::IsInSamePackage(klass->GetDescriptor(&temp),
+ klass->GetDescriptor(&temp2)));
if (klass->IsInterface()) {
EXPECT_TRUE(klass->IsAbstract());
if (klass->NumDirectMethods() == 1) {
@@ -311,7 +316,8 @@
Handle<mirror::Class> klass(
hs.NewHandle(class_linker_->FindSystemClass(self, descriptor.c_str())));
ASSERT_TRUE(klass.Get() != nullptr);
- EXPECT_STREQ(descriptor.c_str(), klass.Get()->GetDescriptor().c_str());
+ std::string temp;
+ EXPECT_STREQ(descriptor.c_str(), klass.Get()->GetDescriptor(&temp));
EXPECT_EQ(class_loader, klass->GetClassLoader());
if (klass->IsPrimitive()) {
AssertPrimitiveClass(descriptor, klass.Get());
@@ -671,7 +677,8 @@
ASSERT_TRUE(JavaLangObject->GetClass() != NULL);
ASSERT_EQ(JavaLangObject->GetClass(), JavaLangObject->GetClass()->GetClass());
EXPECT_EQ(JavaLangObject, JavaLangObject->GetClass()->GetSuperClass());
- ASSERT_STREQ(JavaLangObject->GetDescriptor().c_str(), "Ljava/lang/Object;");
+ std::string temp;
+ ASSERT_STREQ(JavaLangObject->GetDescriptor(&temp), "Ljava/lang/Object;");
EXPECT_TRUE(JavaLangObject->GetSuperClass() == NULL);
EXPECT_FALSE(JavaLangObject->HasSuperClass());
EXPECT_TRUE(JavaLangObject->GetClassLoader() == NULL);
@@ -715,7 +722,7 @@
ASSERT_TRUE(MyClass->GetClass() != NULL);
ASSERT_EQ(MyClass->GetClass(), MyClass->GetClass()->GetClass());
EXPECT_EQ(JavaLangObject, MyClass->GetClass()->GetSuperClass());
- ASSERT_STREQ(MyClass->GetDescriptor().c_str(), "LMyClass;");
+ ASSERT_STREQ(MyClass->GetDescriptor(&temp), "LMyClass;");
EXPECT_TRUE(MyClass->GetSuperClass() == JavaLangObject);
EXPECT_TRUE(MyClass->HasSuperClass());
EXPECT_EQ(class_loader.Get(), MyClass->GetClassLoader());
@@ -860,7 +867,8 @@
EXPECT_EQ(9U, statics->NumStaticFields());
mirror::ArtField* s0 = mirror::Class::FindStaticField(soa.Self(), statics, "s0", "Z");
- EXPECT_STREQ(s0->GetClass()->GetDescriptor().c_str(), "Ljava/lang/reflect/ArtField;");
+ std::string temp;
+ EXPECT_STREQ(s0->GetClass()->GetDescriptor(&temp), "Ljava/lang/reflect/ArtField;");
EXPECT_EQ(s0->GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
EXPECT_EQ(true, s0->GetBoolean(statics.Get()));
s0->SetBoolean<false>(statics.Get(), false);
@@ -1051,10 +1059,11 @@
TEST_F(ClassLinkerTest, ClassRootDescriptors) {
ScopedObjectAccess soa(Thread::Current());
+ std::string temp;
for (int i = 0; i < ClassLinker::kClassRootsMax; i++) {
mirror::Class* klass = class_linker_->GetClassRoot(ClassLinker::ClassRoot(i));
- EXPECT_TRUE(!klass->GetDescriptor().empty());
- EXPECT_STREQ(klass->GetDescriptor().c_str(),
+ EXPECT_GT(strlen(klass->GetDescriptor(&temp)), 0U);
+ EXPECT_STREQ(klass->GetDescriptor(&temp),
class_linker_->GetClassRootDescriptor(ClassLinker::ClassRoot(i))) << " i = " << i;
}
}
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 970593d..bb48be3 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -296,8 +296,9 @@
const StringPiece& type, const StringPiece& name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
+ std::string temp;
msg << "No " << scope << "field " << name << " of type " << type
- << " in class " << c->GetDescriptor() << " or its superclasses";
+ << " in class " << c->GetDescriptor(&temp) << " or its superclasses";
ThrowException(NULL, "Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
}
@@ -306,8 +307,9 @@
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
const Signature& signature) {
std::ostringstream msg;
+ std::string temp;
msg << "No " << type << " method " << name << signature
- << " in class " << c->GetDescriptor() << " or its super classes";
+ << " in class " << c->GetDescriptor(&temp) << " or its super classes";
ThrowException(NULL, "Ljava/lang/NoSuchMethodError;", c, msg.str().c_str());
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index bc13379..1cddb8b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -451,6 +451,13 @@
return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
+static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string temp;
+ const char* descriptor = klass->GetDescriptor(&temp);
+ return BasicTagFromDescriptor(descriptor);
+}
+
static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(c != NULL);
@@ -824,7 +831,8 @@
if (!o->IsClass()) {
return StringPrintf("non-class %p", o); // This is only used for debugging output anyway.
}
- return DescriptorToName(o->AsClass()->GetDescriptor().c_str());
+ std::string temp;
+ return DescriptorToName(o->AsClass()->GetDescriptor(&temp));
}
JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
@@ -1140,7 +1148,8 @@
Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc);
}
-JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) {
+JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
+ uint32_t* pStatus, std::string* pDescriptor) {
JDWP::JdwpError status;
mirror::Class* c = DecodeClass(class_id, status);
if (c == NULL) {
@@ -1160,7 +1169,8 @@
}
if (pDescriptor != NULL) {
- *pDescriptor = c->GetDescriptor();
+ std::string temp;
+ *pDescriptor = c->GetDescriptor(&temp);
}
return JDWP::ERR_NONE;
}
@@ -1196,7 +1206,8 @@
if (c == NULL) {
return status;
}
- *signature = c->GetDescriptor();
+ std::string temp;
+ *signature = c->GetDescriptor(&temp);
return JDWP::ERR_NONE;
}
@@ -1275,14 +1286,12 @@
LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
return JDWP::ERR_INVALID_LENGTH;
}
- std::string descriptor(a->GetClass()->GetDescriptor());
- JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);
-
- expandBufAdd1(pReply, tag);
+ JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
+ expandBufAdd1(pReply, element_tag);
expandBufAdd4BE(pReply, count);
- if (IsPrimitiveTag(tag)) {
- size_t width = GetTagWidth(tag);
+ if (IsPrimitiveTag(element_tag)) {
+ size_t width = GetTagWidth(element_tag);
uint8_t* dst = expandBufAddSpace(pReply, count * width);
if (width == 8) {
const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
@@ -1303,7 +1312,7 @@
for (int i = 0; i < count; ++i) {
mirror::Object* element = oa->Get(offset + i);
JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
- : tag;
+ : element_tag;
expandBufAdd1(pReply, specific_tag);
expandBufAddObjectId(pReply, gRegistry->Add(element));
}
@@ -1337,11 +1346,10 @@
LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
return JDWP::ERR_INVALID_LENGTH;
}
- std::string descriptor = dst->GetClass()->GetDescriptor();
- JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);
+ JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());
- if (IsPrimitiveTag(tag)) {
- size_t width = GetTagWidth(tag);
+ if (IsPrimitiveTag(element_tag)) {
+ size_t width = GetTagWidth(element_tag);
if (width == 8) {
CopyArrayData<uint64_t>(dst, request, offset, count);
} else if (width == 4) {
@@ -2087,6 +2095,11 @@
// query all threads, so it's easier if we just don't tell them about this thread.
return;
}
+ if (t->IsStillStarting()) {
+ // This thread is being started (and has been registered in the thread list). However, it is
+ // not completely started yet so we must ignore it.
+ return;
+ }
mirror::Object* peer = t->GetPeer();
if (IsInDesiredThreadGroup(peer)) {
thread_ids_.push_back(gRegistry->Add(peer));
@@ -2729,7 +2742,8 @@
// since the class may not yet be verified.
int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
JDWP::JdwpTypeTag tag = GetTypeTag(c);
- gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(), state);
+ std::string temp;
+ gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(&temp), state);
}
void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
@@ -4518,7 +4532,8 @@
int idx = HeadIndex();
while (count--) {
AllocRecord* record = &recent_allocation_records_[idx];
- class_names.Add(record->Type()->GetDescriptor());
+ std::string temp;
+ class_names.Add(record->Type()->GetDescriptor(&temp));
for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
mirror::ArtMethod* m = record->StackElement(i)->Method();
if (m != NULL) {
@@ -4559,9 +4574,9 @@
JDWP::Append2BE(bytes, method_names.Size());
JDWP::Append2BE(bytes, filenames.Size());
- count = alloc_record_count_;
idx = HeadIndex();
- while (count--) {
+ std::string temp;
+ for (count = alloc_record_count_; count != 0; --count) {
// For each entry:
// (4b) total allocation size
// (2b) thread id
@@ -4570,7 +4585,7 @@
AllocRecord* record = &recent_allocation_records_[idx];
size_t stack_depth = record->GetDepth();
size_t allocated_object_class_name_index =
- class_names.IndexOf(record->Type()->GetDescriptor().c_str());
+ class_names.IndexOf(record->Type()->GetDescriptor(&temp));
JDWP::Append4BE(bytes, record->ByteCount());
JDWP::Append2BE(bytes, record->ThinLockId());
JDWP::Append2BE(bytes, allocated_object_class_name_index);
@@ -4591,7 +4606,6 @@
JDWP::Append2BE(bytes, file_name_index);
JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
}
-
idx = (idx + 1) & (alloc_record_max_ - 1);
}
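
BasicTagFromClass above replaces string arithmetic on the array's own descriptor
(descriptor.c_str() + 1) with a query on the component type, which handles reference
and multi-dimensional arrays uniformly. For intuition, the descriptor encoding the old
code relied on:

// "[I"  -> element tag 'I' (primitive int array)
// "[[I" -> element tag '[' (array of arrays)
// "[Ljava/lang/String;" -> element tag 'L' (reference array)
char ElementTagFromArrayDescriptor(const char* array_descriptor) {
  return array_descriptor[1];  // Character after the leading '['.
}
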
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 4755b9e..a0e35f8 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -212,6 +212,11 @@
bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
self->ResetDefaultStackEnd(!explicit_overflow_check); // Return to default stack size.
+
+ // And restore protection if implicit checks are on.
+ if (!explicit_overflow_check) {
+ self->ProtectStack();
+ }
}
void CheckReferenceResult(mirror::Object* o, Thread* self) {
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 879010e..13decc8 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -58,8 +58,10 @@
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ self->NoteSignalBeingHandled();
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionFromDexPC(throw_location);
+ self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
@@ -83,7 +85,9 @@
extern "C" void artThrowStackOverflowFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ self->NoteSignalBeingHandled();
ThrowStackOverflowError(self);
+ self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index ae1b94f..f572d27 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -70,6 +70,10 @@
EXPECT_OFFSET_DIFFP(Thread, tls32_, daemon, throwing_OutOfMemoryError, 4);
EXPECT_OFFSET_DIFFP(Thread, tls32_, throwing_OutOfMemoryError, no_thread_suspension, 4);
EXPECT_OFFSET_DIFFP(Thread, tls32_, no_thread_suspension, thread_exit_check_count, 4);
+ EXPECT_OFFSET_DIFFP(Thread, tls32_, thread_exit_check_count,
+ is_exception_reported_to_instrumentation_, 4);
+ EXPECT_OFFSET_DIFFP(Thread, tls32_, is_exception_reported_to_instrumentation_,
+ handling_signal_, 4);
// TODO: Better connection. Take alignment into account.
EXPECT_OFFSET_DIFF_GT3(Thread, tls32_.thread_exit_check_count, tls64_.trace_clock_base, 4,
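
The EXPECT_OFFSET_DIFFP checks pin the byte distance between adjacent Thread fields so
that assembly stubs hard-coding offsets stay in sync. A condensed stand-alone
equivalent (field names taken from the diff, struct heavily simplified):

#include <cstddef>

struct Tls32 {
  int thread_exit_check_count;
  int is_exception_reported_to_instrumentation_;
  int handling_signal_;
};

static_assert(offsetof(Tls32, is_exception_reported_to_instrumentation_) -
                  offsetof(Tls32, thread_exit_check_count) == 4,
              "unexpected field spacing");
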
diff --git a/runtime/field_helper.cc b/runtime/field_helper.cc
index 40daa6d..5c85c46 100644
--- a/runtime/field_helper.cc
+++ b/runtime/field_helper.cc
@@ -41,17 +41,7 @@
}
const char* FieldHelper::GetDeclaringClassDescriptor() {
- uint32_t field_index = field_->GetDexFieldIndex();
- if (UNLIKELY(field_->GetDeclaringClass()->IsProxyClass())) {
- DCHECK(field_->IsStatic());
- DCHECK_LT(field_index, 2U);
- // 0 == Class[] interfaces; 1 == Class[][] throws;
- declaring_class_descriptor_ = field_->GetDeclaringClass()->GetDescriptor();
- return declaring_class_descriptor_.c_str();
- }
- const DexFile* dex_file = field_->GetDexFile();
- const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
- return dex_file->GetFieldDeclaringClassDescriptor(field_id);
+ return field_->GetDeclaringClass()->GetDescriptor(&declaring_class_descriptor_);
}
} // namespace art
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6d8190e..5d138d2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -787,10 +787,15 @@
os << "Mean GC object throughput: "
<< (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
}
- size_t total_objects_allocated = GetObjectsAllocatedEver();
- os << "Total number of allocations: " << total_objects_allocated << "\n";
- size_t total_bytes_allocated = GetBytesAllocatedEver();
+ uint64_t total_objects_allocated = GetObjectsAllocatedEver();
+ os << "Total number of allocations " << total_objects_allocated << "\n";
+ uint64_t total_bytes_allocated = GetBytesAllocatedEver();
os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
+ os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
+ os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
+ os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
+ os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
+ os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
if (kMeasureAllocationTime) {
os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
@@ -864,7 +869,7 @@
std::ostringstream oss;
size_t total_bytes_free = GetFreeMemory();
oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
- << " free bytes";
+ << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM";
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (total_bytes_free >= byte_count) {
space::AllocSpace* space = nullptr;
@@ -1313,11 +1318,11 @@
return total;
}
-size_t Heap::GetObjectsAllocatedEver() const {
+uint64_t Heap::GetObjectsAllocatedEver() const {
return GetObjectsFreedEver() + GetObjectsAllocated();
}
-size_t Heap::GetBytesAllocatedEver() const {
+uint64_t Heap::GetBytesAllocatedEver() const {
return GetBytesFreedEver() + GetBytesAllocated();
}
@@ -2847,7 +2852,7 @@
remaining_bytes = kMinConcurrentRemainingBytes;
}
DCHECK_LE(remaining_bytes, max_allowed_footprint_);
- DCHECK_LE(max_allowed_footprint_, growth_limit_);
+ DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
// Start a concurrent GC when we get close to the estimated remaining bytes. When the
// allocation rate is very high, remaining_bytes could tell us that we should start a GC
// right away.
@@ -3077,19 +3082,7 @@
}
size_t Heap::GetTotalMemory() const {
- size_t ret = 0;
- for (const auto& space : continuous_spaces_) {
- // Currently don't include the image space.
- if (!space->IsImageSpace()) {
- ret += space->Size();
- }
- }
- for (const auto& space : discontinuous_spaces_) {
- if (space->IsLargeObjectSpace()) {
- ret += space->AsLargeObjectSpace()->GetBytesAllocated();
- }
- }
- return ret;
+ return std::max(max_allowed_footprint_, GetBytesAllocated());
}
void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
@@ -3099,8 +3092,7 @@
void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
CHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
- (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
- c->GetDescriptor().empty());
+ (c->IsVariableSize() || c->GetObjectSize() == byte_count));
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 1851662..d5b49d8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -401,18 +401,18 @@
size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
// Returns the total number of objects allocated since the heap was created.
- size_t GetObjectsAllocatedEver() const;
+ uint64_t GetObjectsAllocatedEver() const;
// Returns the total number of bytes allocated since the heap was created.
- size_t GetBytesAllocatedEver() const;
+ uint64_t GetBytesAllocatedEver() const;
// Returns the total number of objects freed since the heap was created.
- size_t GetObjectsFreedEver() const {
+ uint64_t GetObjectsFreedEver() const {
return total_objects_freed_ever_;
}
// Returns the total number of bytes freed since the heap was created.
- size_t GetBytesFreedEver() const {
+ uint64_t GetBytesFreedEver() const {
return total_bytes_freed_ever_;
}
@@ -421,19 +421,32 @@
// were specified. Android apps start with a growth limit (small heap size) which is
// cleared/extended for large apps.
size_t GetMaxMemory() const {
- return growth_limit_;
+ // There are race conditions in the allocation code that can cause the number of bytes
+ // allocated to become larger than growth_limit_ in rare cases.
+ return std::max(GetBytesAllocated(), growth_limit_);
}
- // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
- // application.
+ // Implements java.lang.Runtime.totalMemory, returning the approximate amount of memory
+ // currently consumed by an application.
size_t GetTotalMemory() const;
- // Implements java.lang.Runtime.freeMemory.
+ // Returns approximately how much free memory we have until the next GC happens.
+ size_t GetFreeMemoryUntilGC() const {
+ return max_allowed_footprint_ - GetBytesAllocated();
+ }
+
+ // Returns approximately how much free memory we have until the next OOME happens.
+ size_t GetFreeMemoryUntilOOME() const {
+ return growth_limit_ - GetBytesAllocated();
+ }
+
+ // Returns how much free memory we have until we need to grow the heap to perform an allocation.
+ // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
- // Make sure we don't get a negative number since the max allowed footprint is only updated
- // after the GC. But we can still allocate even if bytes_allocated > max_allowed_footprint_.
- return std::max(max_allowed_footprint_, byte_allocated) - byte_allocated;
+ size_t total_memory = GetTotalMemory();
+ // Make sure we don't get a negative number.
+ return total_memory - std::min(total_memory, byte_allocated);
}
// get the space that corresponds to an object's address. Current implementation searches all
@@ -885,10 +898,10 @@
size_t concurrent_start_bytes_;
// Since the heap was created, how many bytes have been freed.
- size_t total_bytes_freed_ever_;
+ uint64_t total_bytes_freed_ever_;
// Since the heap was created, how many objects have been freed.
- size_t total_objects_freed_ever_;
+ uint64_t total_objects_freed_ever_;
// Number of bytes allocated. Adjusted after each allocation and free.
Atomic<size_t> num_bytes_allocated_;
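
Taken together, the heap.cc/heap.h changes settle on one consistent accounting model.
A toy version (field names mirror the diff; the sketch clamps so it never underflows,
which the real code guards differently):

#include <algorithm>
#include <cstddef>

struct HeapModel {
  size_t growth_limit;           // Hard cap; exceeding it means OOME.
  size_t max_allowed_footprint;  // Next-GC threshold; grows after GC.
  size_t bytes_allocated;

  size_t TotalMemory() const {   // java.lang.Runtime.totalMemory
    return std::max(max_allowed_footprint, bytes_allocated);
  }
  size_t FreeMemory() const {    // java.lang.Runtime.freeMemory
    size_t total = TotalMemory();
    return total - std::min(total, bytes_allocated);
  }
  size_t FreeMemoryUntilOOME() const {
    return growth_limit - std::min(growth_limit, bytes_allocated);
  }
};
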
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index d7e358c..d8a38f4 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -87,11 +87,10 @@
static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
-// TODO: Lower once implicit stack-overflow checks can work with less than 16K.
-static constexpr size_t kArmStackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
-static constexpr size_t kArm64StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
-static constexpr size_t kX86StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
-static constexpr size_t kX86_64StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+static constexpr size_t kArmStackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kArm64StackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kX86StackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kX86_64StackOverflowReservedBytes = 8 * KB;
size_t GetStackOverflowReservedBytes(InstructionSet isa) {
switch (isa) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 16be077..ae42284 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -50,7 +50,7 @@
// Do we want to deoptimize for method entry and exit listeners or just try to intercept
// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
// application's performance.
-static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = false;
+static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;
static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index b35da0c..6705695 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -270,12 +270,13 @@
}
if (!reg->VerifierInstanceOf(field_class)) {
// This should never happen.
+ std::string temp1, temp2, temp3;
self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
"Ljava/lang/VirtualMachineError;",
"Put '%s' that is not instance of field '%s' in '%s'",
- reg->GetClass()->GetDescriptor().c_str(),
- field_class->GetDescriptor().c_str(),
- f->GetDeclaringClass()->GetDescriptor().c_str());
+ reg->GetClass()->GetDescriptor(&temp1),
+ field_class->GetDescriptor(&temp2),
+ f->GetDeclaringClass()->GetDescriptor(&temp3));
return false;
}
}
@@ -588,12 +589,13 @@
}
if (!o->VerifierInstanceOf(arg_type)) {
// This should never happen.
+ std::string temp1, temp2;
self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
"Ljava/lang/VirtualMachineError;",
"Invoking %s with bad arg %d, type '%s' not instance of '%s'",
method->GetName(), shorty_pos,
- o->GetClass()->GetDescriptor().c_str(),
- arg_type->GetDescriptor().c_str());
+ o->GetClass()->GetDescriptor(&temp1),
+ arg_type->GetDescriptor(&temp2));
return false;
}
}
@@ -775,7 +777,7 @@
if (found == nullptr && abort_if_not_found) {
if (!self->IsExceptionPending()) {
AbortTransaction(self, "%s failed in un-started runtime for class: %s",
- method_name.c_str(), PrettyDescriptor(descriptor).c_str());
+ method_name.c_str(), PrettyDescriptor(descriptor.c_str()).c_str());
}
return;
}
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index abd4b44..e098ac8 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -341,11 +341,12 @@
}
if (!obj_result->VerifierInstanceOf(return_type)) {
// This should never happen.
+ std::string temp1, temp2;
self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
"Ljava/lang/VirtualMachineError;",
"Returning '%s' that is not instance of return type '%s'",
- obj_result->GetClass()->GetDescriptor().c_str(),
- return_type->GetDescriptor().c_str());
+ obj_result->GetClass()->GetDescriptor(&temp1),
+ return_type->GetDescriptor(&temp2));
HANDLE_PENDING_EXCEPTION();
}
}
@@ -615,10 +616,11 @@
ThrowNullPointerException(NULL, "throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
+ std::string temp;
self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
"Ljava/lang/VirtualMachineError;",
"Throwing '%s' that is not instance of Throwable",
- exception->GetClass()->GetDescriptor().c_str());
+ exception->GetClass()->GetDescriptor(&temp));
} else {
self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
}
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index c635648..5401495 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -256,11 +256,12 @@
}
if (!obj_result->VerifierInstanceOf(return_type)) {
// This should never happen.
+ std::string temp1, temp2;
self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
"Ljava/lang/VirtualMachineError;",
"Returning '%s' that is not instance of return type '%s'",
- obj_result->GetClass()->GetDescriptor().c_str(),
- return_type->GetDescriptor().c_str());
+ obj_result->GetClass()->GetDescriptor(&temp1),
+ return_type->GetDescriptor(&temp2));
HANDLE_PENDING_EXCEPTION();
}
}
@@ -529,10 +530,11 @@
ThrowNullPointerException(NULL, "throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
+ std::string temp;
self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
"Ljava/lang/VirtualMachineError;",
"Throwing '%s' that is not instance of Throwable",
- exception->GetClass()->GetDescriptor().c_str());
+ exception->GetClass()->GetDescriptor(&temp));
} else {
self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
}
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d5e92a4..b7d485e 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -84,9 +84,10 @@
const char* name, const char* sig, const char* kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
+ std::string temp;
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
- kind, c->GetDescriptor().c_str(), name, sig);
+ kind, c->GetDescriptor(&temp), name, sig);
}
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c,
@@ -193,24 +194,26 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Throwable> cause(hs.NewHandle(soa.Self()->GetException(&throw_location)));
soa.Self()->ClearException();
+ std::string temp;
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
"no type \"%s\" found and so no field \"%s\" "
"could be found in class \"%s\" or its superclasses", sig, name,
- c->GetDescriptor().c_str());
+ c->GetDescriptor(&temp));
soa.Self()->GetException(nullptr)->SetCause(cause.Get());
return nullptr;
}
+ std::string temp;
if (is_static) {
field = mirror::Class::FindStaticField(soa.Self(), c, name,
- field_type->GetDescriptor().c_str());
+ field_type->GetDescriptor(&temp));
} else {
- field = c->FindInstanceField(name, field_type->GetDescriptor().c_str());
+ field = c->FindInstanceField(name, field_type->GetDescriptor(&temp));
}
if (field == nullptr) {
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
"no \"%s\" field \"%s\" in class \"%s\" or its superclasses",
- sig, name, c->GetDescriptor().c_str());
+ sig, name, c->GetDescriptor(&temp));
return nullptr;
}
return soa.EncodeField(field);
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 0e80fe2..89de16e 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -28,7 +28,7 @@
static inline uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
const uint8_t* ptr = *data;
int result = *(ptr++);
- if (result > 0x7f) {
+ if (UNLIKELY(result > 0x7f)) {
int cur = *(ptr++);
result = (result & 0x7f) | ((cur & 0x7f) << 7);
if (cur > 0x7f) {
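
The UNLIKELY hint marks the multi-byte continuation path of the LEB128 decoder as cold,
since most encoded values fit in one byte. A self-contained loop form of the same
decoding scheme (7 payload bits per byte; a set high bit means "more bytes follow"):

#include <cstdint>

uint32_t DecodeUleb128(const uint8_t** data) {
  const uint8_t* ptr = *data;
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *ptr++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  *data = ptr;
  return result;  // e.g. bytes {0xE5, 0x8E, 0x26} decode to 624485.
}
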
diff --git a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h
index 3a5056a..9af835f 100644
--- a/runtime/method_helper-inl.h
+++ b/runtime/method_helper-inl.h
@@ -26,6 +26,23 @@
namespace art {
+inline bool MethodHelper::HasSameNameAndSignature(MethodHelper* other) {
+ const DexFile* dex_file = method_->GetDexFile();
+ const DexFile::MethodId& mid = dex_file->GetMethodId(GetMethod()->GetDexMethodIndex());
+ if (method_->GetDexCache() == other->method_->GetDexCache()) {
+ const DexFile::MethodId& other_mid =
+ dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex());
+ return mid.name_idx_ == other_mid.name_idx_ && mid.proto_idx_ == other_mid.proto_idx_;
+ }
+ const DexFile* other_dex_file = other->method_->GetDexFile();
+ const DexFile::MethodId& other_mid =
+ other_dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex());
+ if (!DexFileStringEquals(dex_file, mid.name_idx_, other_dex_file, other_mid.name_idx_)) {
+ return false; // Name mismatch.
+ }
+ return dex_file->GetMethodSignature(mid) == other_dex_file->GetMethodSignature(other_mid);
+}
+
inline mirror::Class* MethodHelper::GetClassFromTypeIdx(uint16_t type_idx, bool resolve) {
mirror::ArtMethod* method = GetMethod();
mirror::Class* type = method->GetDexCacheResolvedType(type_idx);
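The fast path of the relocated HasSameNameAndSignature depends on an invariant worth stating: string and proto indices are canonical within a single dex file, so when two methods share a dex cache, comparing the integer name_idx_/proto_idx_ pairs is equivalent to comparing the strings. A hedged standalone analogue with illustrative types (in ART the strings are dex-file-backed and the indices come from DexFile::MethodId):

#include <cstdint>
#include <string>

struct MethodIdSketch {
  uint32_t name_idx;
  uint32_t proto_idx;
  std::string name;       // Stand-in for the dex-file-backed name.
  std::string signature;  // Stand-in for the method signature.
};

bool SameNameAndSignature(const void* dex_cache1, const MethodIdSketch& m1,
                          const void* dex_cache2, const MethodIdSketch& m2) {
  if (dex_cache1 == dex_cache2) {
    // Same dex file: indices are canonical, so integer comparison suffices.
    return m1.name_idx == m2.name_idx && m1.proto_idx == m2.proto_idx;
  }
  // Different dex files: fall back to comparing the underlying strings.
  return m1.name == m2.name && m1.signature == m2.signature;
}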
diff --git a/runtime/method_helper.cc b/runtime/method_helper.cc
index 1bd2f90..d6f83a8 100644
--- a/runtime/method_helper.cc
+++ b/runtime/method_helper.cc
@@ -36,23 +36,6 @@
dex_cache);
}
-bool MethodHelper::HasSameNameAndSignature(MethodHelper* other) {
- const DexFile* dex_file = method_->GetDexFile();
- const DexFile::MethodId& mid = dex_file->GetMethodId(GetMethod()->GetDexMethodIndex());
- if (method_->GetDexCache() == other->method_->GetDexCache()) {
- const DexFile::MethodId& other_mid =
- dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex());
- return mid.name_idx_ == other_mid.name_idx_ && mid.proto_idx_ == other_mid.proto_idx_;
- }
- const DexFile* other_dex_file = other->method_->GetDexFile();
- const DexFile::MethodId& other_mid =
- other_dex_file->GetMethodId(other->GetMethod()->GetDexMethodIndex());
- if (!DexFileStringEquals(dex_file, mid.name_idx_, other_dex_file, other_mid.name_idx_)) {
- return false; // Name mismatch.
- }
- return dex_file->GetMethodSignature(mid) == other_dex_file->GetMethodSignature(other_mid);
-}
-
bool MethodHelper::HasSameSignatureWithDifferentClassLoaders(MethodHelper* other) {
if (UNLIKELY(GetReturnType() != other->GetReturnType())) {
return false;
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index 62465be..f71d273 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -105,7 +105,8 @@
return GetParamPrimitiveType(param) == Primitive::kPrimNot;
}
- bool HasSameNameAndSignature(MethodHelper* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool HasSameNameAndSignature(MethodHelper* other)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasSameSignatureWithDifferentClassLoaders(MethodHelper* other)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 0dd1588..06700e6 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -417,7 +417,7 @@
return Signature::NoSignature();
}
-inline const char* ArtMethod::GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline const char* ArtMethod::GetName() {
mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
uint32_t dex_method_idx = method->GetDexMethodIndex();
if (LIKELY(dex_method_idx != DexFile::kDexNoIndex)) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 8eacb1c..370bfb9 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -27,7 +27,7 @@
#include "interpreter/interpreter.h"
#include "jni_internal.h"
#include "mapping_table.h"
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "object_array-inl.h"
#include "object_array.h"
#include "object-inl.h"
@@ -283,6 +283,11 @@
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ ThrowStackOverflowError(self);
+ return;
+ }
+
if (kIsDebugBuild) {
self->AssertThreadSuspensionIsAllowable();
CHECK_EQ(kRunnable, self->GetState());
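The guard added at the top of Invoke compares the current native frame address against the thread's stack end: stacks grow down, so a frame below stack_end means the caller has already consumed the reserved headroom and a further call could fault in the protected page. A minimal sketch of the comparison with a placeholder thread type (art::Thread and ThrowStackOverflowError are the real APIs used in the diff):

// __builtin_frame_address is a GCC/Clang builtin yielding the current
// frame's address.
struct ThreadSketch {
  char* stack_end;  // Lowest usable stack address.
  char* GetStackEnd() const { return stack_end; }
};

inline bool HasStackHeadroom(ThreadSketch* self) {
  return static_cast<char*>(__builtin_frame_address(0)) >= self->GetStackEnd();
}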
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 4ebceff..fa592c2 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -446,7 +446,7 @@
const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const DexFile::CodeItem* GetCodeItem() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -473,7 +473,7 @@
mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index c3754d7..b0ff7ea 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -649,11 +649,11 @@
}
inline bool Class::DescriptorEquals(const char* match) {
- if (UNLIKELY(IsArrayClass())) {
+ if (IsArrayClass()) {
return match[0] == '[' && GetComponentType()->DescriptorEquals(match + 1);
- } else if (UNLIKELY(IsPrimitive())) {
+ } else if (IsPrimitive()) {
return strcmp(Primitive::Descriptor(GetPrimitiveType()), match) == 0;
- } else if (UNLIKELY(IsProxyClass())) {
+ } else if (IsProxyClass()) {
return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match;
} else {
const DexFile& dex_file = GetDexFile();
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f29ba73..5b8eb82 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -90,15 +90,21 @@
Class* eiie_class;
// Don't attempt to use FindClass if we have an OOM error since this can try to do more
// allocations and may cause infinite loops.
- if (old_exception.Get() == nullptr ||
- old_exception->GetClass()->GetDescriptor() != "Ljava/lang/OutOfMemoryError;") {
+ bool throw_eiie = (old_exception.Get() == nullptr);
+ if (!throw_eiie) {
+ std::string temp;
+ const char* old_exception_descriptor = old_exception->GetClass()->GetDescriptor(&temp);
+ throw_eiie = (strcmp(old_exception_descriptor, "Ljava/lang/OutOfMemoryError;") != 0);
+ }
+ if (throw_eiie) {
// Clear exception to call FindSystemClass.
self->ClearException();
eiie_class = Runtime::Current()->GetClassLinker()->FindSystemClass(
self, "Ljava/lang/ExceptionInInitializerError;");
CHECK(!self->IsExceptionPending());
// Only verification errors, not initialization problems, should set a verify error.
- // This is to ensure that ThrowEarlierClassFailure will throw NoClassDefFoundError in that case.
+ // This is to ensure that ThrowEarlierClassFailure will throw NoClassDefFoundError in that
+ // case.
Class* exception_class = old_exception->GetClass();
if (!eiie_class->IsAssignableFrom(exception_class)) {
SetVerifyErrorClass(exception_class);
@@ -163,7 +169,8 @@
if (name != nullptr) {
return name;
}
- std::string descriptor(h_this->GetDescriptor());
+ std::string temp;
+ const char* descriptor = h_this->GetDescriptor(&temp);
Thread* self = Thread::Current();
if ((descriptor[0] != 'L') && (descriptor[0] != '[')) {
// The descriptor indicates that this is the class for
@@ -186,12 +193,7 @@
} else {
// Convert the UTF-8 name to a java.lang.String. The name must use '.' to separate package
// components.
- if (descriptor.size() > 2 && descriptor[0] == 'L' && descriptor[descriptor.size() - 1] == ';') {
- descriptor.erase(0, 1);
- descriptor.erase(descriptor.size() - 1);
- }
- std::replace(descriptor.begin(), descriptor.end(), '/', '.');
- name = String::AllocFromModifiedUtf8(self, descriptor.c_str());
+ name = String::AllocFromModifiedUtf8(self, DescriptorToDot(descriptor).c_str());
}
h_this->SetName(name);
return name;
@@ -215,8 +217,9 @@
Handle<mirror::Class> h_this(hs.NewHandle(this));
Handle<mirror::Class> h_super(hs.NewHandle(GetSuperClass()));
+ std::string temp;
os << "----- " << (IsInterface() ? "interface" : "class") << " "
- << "'" << GetDescriptor() << "' cl=" << GetClassLoader() << " -----\n",
+ << "'" << GetDescriptor(&temp) << "' cl=" << GetClassLoader() << " -----\n",
os << " objectSize=" << SizeOf() << " "
<< "(" << (h_super.Get() != nullptr ? h_super->SizeOf() : -1) << " from super)\n",
os << StringPrintf(" access=0x%04x.%04x\n",
@@ -336,7 +339,8 @@
return true;
}
// Compare the package part of the descriptor string.
- return IsInSamePackage(klass1->GetDescriptor().c_str(), klass2->GetDescriptor().c_str());
+ std::string temp1, temp2;
+ return IsInSamePackage(klass1->GetDescriptor(&temp1), klass2->GetDescriptor(&temp2));
}
bool Class::IsStringClass() const {
@@ -713,13 +717,14 @@
SetPreverifiedFlagOnMethods(GetVirtualMethods());
}
-std::string Class::GetDescriptor() {
- if (UNLIKELY(IsArrayClass())) {
- return GetArrayDescriptor();
- } else if (UNLIKELY(IsPrimitive())) {
+const char* Class::GetDescriptor(std::string* storage) {
+ if (IsPrimitive()) {
return Primitive::Descriptor(GetPrimitiveType());
- } else if (UNLIKELY(IsProxyClass())) {
- return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this);
+ } else if (IsArrayClass()) {
+ return GetArrayDescriptor(storage);
+ } else if (IsProxyClass()) {
+ *storage = Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this);
+ return storage->c_str();
} else {
const DexFile& dex_file = GetDexFile();
const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
@@ -727,8 +732,12 @@
}
}
-std::string Class::GetArrayDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return "[" + GetComponentType()->GetDescriptor();
+const char* Class::GetArrayDescriptor(std::string* storage) {
+ std::string temp;
+ const char* elem_desc = GetComponentType()->GetDescriptor(&temp);
+ *storage = "[";
+ *storage += elem_desc;
+ return storage->c_str();
}
const DexFile::ClassDef* Class::GetClassDef() {
@@ -791,7 +800,6 @@
}
const char* Class::GetSourceFile() {
- std::string descriptor(GetDescriptor());
const DexFile& dex_file = GetDexFile();
const DexFile::ClassDef* dex_class_def = GetClassDef();
if (dex_class_def == nullptr) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 2a3f104..4b37bef 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -965,11 +965,15 @@
template<typename Visitor>
void VisitEmbeddedImtAndVTable(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
- std::string GetDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Get the descriptor of the class. A std::string is required in only a few cases; rather than
+ // always creating one, the storage argument is populated and its internal c_str() is returned.
+ // We do this to avoid memory allocation in the common case.
+ const char* GetDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const char* GetArrayDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- std::string GetArrayDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
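A typical call site under the new GetDescriptor(std::string* storage) signature looks like the following hedged sketch (the helper is hypothetical; the temp/storage pattern matches the call sites converted throughout this change):

#include <cstring>
#include <string>

bool IsObjectClass(mirror::Class* klass) {
  std::string temp;  // Backing storage, only written for proxy/array classes.
  // For dex-backed classes the returned pointer aliases dex file data and
  // 'temp' stays empty, so the common path performs no allocation.
  return strcmp(klass->GetDescriptor(&temp), "Ljava/lang/Object;") == 0;
}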
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index da3c36c..aa181ee 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -22,6 +22,7 @@
#include "array-inl.h"
#include "art_field-inl.h"
+#include "art_method-inl.h"
#include "asm_support.h"
#include "class-inl.h"
#include "class_linker.h"
@@ -31,11 +32,11 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
+#include "handle_scope-inl.h"
#include "iftable-inl.h"
-#include "art_method-inl.h"
+#include "method_helper-inl.h"
#include "object-inl.h"
#include "object_array-inl.h"
-#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
#include "string-inl.h"
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index c3304e6..f199c99 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -114,7 +114,9 @@
bool success = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs,
dex_files.get());
- if (success) {
+ if (success || !dex_files->empty()) {
+ // In the case of failure, we either did not find or could not generate the oat file.
+ // But we may still have found a dex file that we can use.
return static_cast<jlong>(reinterpret_cast<uintptr_t>(dex_files.release()));
} else {
// The vector should be empty after a failed loading attempt.
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index f94e42b..058458f 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -34,7 +34,8 @@
DCHECK(javaDimArray != NULL);
mirror::Object* dimensions_obj = soa.Decode<mirror::Object*>(javaDimArray);
DCHECK(dimensions_obj->IsArrayInstance());
- DCHECK_STREQ(dimensions_obj->GetClass()->GetDescriptor().c_str(), "[I");
+ DCHECK_EQ(dimensions_obj->GetClass()->GetComponentType()->GetPrimitiveType(),
+ Primitive::kPrimInt);
Handle<mirror::IntArray> dimensions_array(
hs.NewHandle(down_cast<mirror::IntArray*>(dimensions_obj)));
mirror::Array* new_array = mirror::Array::CreateMultiArray(soa.Self(), element_class,
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 0a8c35b..ede108c 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -23,7 +23,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '3', '8', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '3', '9', '\0' };
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 9710a2a..810eccb 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -133,6 +133,8 @@
const uint32_t code_offset,
const uint32_t gc_map_offset);
+ OatMethod() {}
+
private:
template<class T>
T GetOatPointer(uint32_t offset) const {
@@ -166,6 +168,8 @@
// methods are not included.
const OatMethod GetOatMethod(uint32_t method_index) const;
+ OatClass() {}
+
private:
OatClass(const OatFile* oat_file,
mirror::Class::Status status,
@@ -174,13 +178,13 @@
const uint32_t* bitmap_pointer,
const OatMethodOffsets* methods_pointer);
- const OatFile* const oat_file_;
+ const OatFile* oat_file_;
- const mirror::Class::Status status_;
+ mirror::Class::Status status_;
- const OatClassType type_;
+ OatClassType type_;
- const uint32_t* const bitmap_;
+ const uint32_t* bitmap_;
const OatMethodOffsets* methods_pointer_;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 12f9f33..3ef7a17 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -223,6 +223,7 @@
compiler_callbacks_ = nullptr;
is_zygote_ = false;
must_relocate_ = kDefaultMustRelocate;
+ dex2oat_enabled_ = true;
if (kPoisonHeapReferences) {
// kPoisonHeapReferences currently works only with the interpreter only.
// TODO: make it work with the compiler.
@@ -421,6 +422,10 @@
must_relocate_ = true;
} else if (option == "-Xnorelocate") {
must_relocate_ = false;
+ } else if (option == "-Xnodex2oat") {
+ dex2oat_enabled_ = false;
+ } else if (option == "-Xdex2oat") {
+ dex2oat_enabled_ = true;
} else if (option == "-Xint") {
interpreter_only_ = true;
} else if (StartsWith(option, "-Xgc:")) {
@@ -710,7 +715,7 @@
UsageMessage(stream, "The following standard options are supported:\n");
UsageMessage(stream, " -classpath classpath (-cp classpath)\n");
UsageMessage(stream, " -Dproperty=value\n");
- UsageMessage(stream, " -verbose:tag ('gc', 'jni', or 'class')\n");
+ UsageMessage(stream, " -verbose:tag ('gc', 'jni', or 'class')\n");
UsageMessage(stream, " -showversion\n");
UsageMessage(stream, " -help\n");
UsageMessage(stream, " -agentlib:jdwp=options\n");
@@ -720,9 +725,9 @@
UsageMessage(stream, " -Xrunjdwp:<options>\n");
UsageMessage(stream, " -Xbootclasspath:bootclasspath\n");
UsageMessage(stream, " -Xcheck:tag (e.g. 'jni')\n");
- UsageMessage(stream, " -XmsN (min heap, must be multiple of 1K, >= 1MB)\n");
- UsageMessage(stream, " -XmxN (max heap, must be multiple of 1K, >= 2MB)\n");
- UsageMessage(stream, " -XssN (stack size)\n");
+ UsageMessage(stream, " -XmsN (min heap, must be multiple of 1K, >= 1MB)\n");
+ UsageMessage(stream, " -XmxN (max heap, must be multiple of 1K, >= 2MB)\n");
+ UsageMessage(stream, " -XssN (stack size)\n");
UsageMessage(stream, " -Xint\n");
UsageMessage(stream, "\n");
@@ -776,6 +781,7 @@
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
UsageMessage(stream, " -Xpatchoat:filename\n");
UsageMessage(stream, " -X[no]relocate\n");
+ UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index c328ca7..aa2c557 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -50,6 +50,7 @@
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
bool must_relocate_;
+ bool dex2oat_enabled_;
std::string patchoat_executable_;
bool interpreter_only_;
bool is_explicit_gc_disabled_;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 3081421..5af26b0 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -139,8 +139,9 @@
EXPECT_EQ(2U, proxy_class->NumDirectInterfaces()); // Interfaces$I and Interfaces$J.
EXPECT_EQ(I.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 0));
EXPECT_EQ(J.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 1));
- std::string proxy_class_descriptor(proxy_class->GetDescriptor());
- EXPECT_STREQ("L$Proxy1234;", proxy_class_descriptor.c_str());
+ std::string temp;
+ const char* proxy_class_descriptor = proxy_class->GetDescriptor(&temp);
+ EXPECT_STREQ("L$Proxy1234;", proxy_class_descriptor);
EXPECT_EQ(nullptr, proxy_class->GetSourceFile());
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 0169ccc..7da450c 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -211,11 +211,11 @@
}
static void ThrowIllegalPrimitiveArgumentException(const char* expected,
- const StringPiece& found_descriptor)
+ const char* found_descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ThrowIllegalArgumentException(nullptr,
StringPrintf("Invalid primitive conversion from %s to %s", expected,
- PrettyDescriptor(found_descriptor.as_string()).c_str()).c_str());
+ PrettyDescriptor(found_descriptor).c_str()).c_str());
}
bool BuildArgArrayFromObjectArray(const ScopedObjectAccessAlreadyRunnable& soa,
@@ -257,8 +257,9 @@
#define DO_FAIL(expected) \
} else { \
if (arg->GetClass<>()->IsPrimitive()) { \
+ std::string temp; \
ThrowIllegalPrimitiveArgumentException(expected, \
- arg->GetClass<>()->GetDescriptor().c_str()); \
+ arg->GetClass<>()->GetDescriptor(&temp)); \
} else { \
ThrowIllegalArgumentException(nullptr, \
StringPrintf("method %s argument %zd has type %s, got %s", \
@@ -446,6 +447,14 @@
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
va_list args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // We want to make sure that the stack is not within a small distance from the
+ // protected region in case we are calling into a leaf function whose stack
+ // check has been elided.
+ if (UNLIKELY(__builtin_frame_address(0) < soa.Self()->GetStackEnd())) {
+ ThrowStackOverflowError(soa.Self());
+ return JValue();
+ }
+
mirror::ArtMethod* method = soa.DecodeMethod(mid);
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
@@ -459,6 +468,14 @@
JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* receiver,
jmethodID mid, jvalue* args) {
+ // We want to make sure that the stack is not within a small distance from the
+ // protected region in case we are calling into a leaf function whose stack
+ // check has been elided.
+ if (UNLIKELY(__builtin_frame_address(0) < soa.Self()->GetStackEnd())) {
+ ThrowStackOverflowError(soa.Self());
+ return JValue();
+ }
+
mirror::ArtMethod* method = soa.DecodeMethod(mid);
uint32_t shorty_len = 0;
const char* shorty = method->GetShorty(&shorty_len);
@@ -471,6 +488,14 @@
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* receiver, jmethodID mid, jvalue* args) {
+ // We want to make sure that the stack is not within a small distance from the
+ // protected region in case we are calling into a leaf function whose stack
+ // check has been elided.
+ if (UNLIKELY(__builtin_frame_address(0) < soa.Self()->GetStackEnd())) {
+ ThrowStackOverflowError(soa.Self());
+ return JValue();
+ }
+
mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
uint32_t shorty_len = 0;
const char* shorty = method->GetShorty(&shorty_len);
@@ -483,6 +508,14 @@
JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, va_list args) {
+ // We want to make sure that the stack is not within a small distance from the
+ // protected region in case we are calling into a leaf function whose stack
+ // check has been elided.
+ if (UNLIKELY(__builtin_frame_address(0) < soa.Self()->GetStackEnd())) {
+ ThrowStackOverflowError(soa.Self());
+ return JValue();
+ }
+
mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
uint32_t shorty_len = 0;
@@ -496,6 +529,14 @@
void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
MethodHelper& mh, JValue* result) {
+ // We want to make sure that the stack is not within a small distance from the
+ // protected region in case we are calling into a leaf function whose stack
+ // check has been elided.
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ ThrowStackOverflowError(self);
+ return;
+ }
+
ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
shadow_frame->GetMethod()->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result,
@@ -504,6 +545,15 @@
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaMethod,
jobject javaReceiver, jobject javaArgs, bool accessible) {
+ // We want to make sure that the stack is not within a small distance from the
+ // protected region in case we are calling into a leaf function whose stack
+ // check has been elided.
+ if (UNLIKELY(__builtin_frame_address(0) <
+ soa.Self()->GetStackEndForInterpreter(true))) {
+ ThrowStackOverflowError(soa.Self());
+ return nullptr;
+ }
+
mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
mirror::Class* declaring_class = m->GetDeclaringClass();
@@ -815,11 +865,11 @@
src_class = class_linker->FindPrimitiveClass('S');
boxed_value.SetS(primitive_field->GetShort(o));
} else {
+ std::string temp;
ThrowIllegalArgumentException(throw_location,
- StringPrintf("%s has type %s, got %s",
- UnboxingFailureKind(f).c_str(),
- PrettyDescriptor(dst_class).c_str(),
- PrettyDescriptor(o->GetClass()->GetDescriptor()).c_str()).c_str());
+ StringPrintf("%s has type %s, got %s", UnboxingFailureKind(f).c_str(),
+ PrettyDescriptor(dst_class).c_str(),
+ PrettyDescriptor(o->GetClass()->GetDescriptor(&temp)).c_str()).c_str());
return false;
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d677729..d3957c1 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -101,6 +101,7 @@
must_relocate_(false),
is_concurrent_gc_enabled_(true),
is_explicit_gc_disabled_(false),
+ dex2oat_enabled_(true),
default_stack_size_(0),
heap_(nullptr),
max_spins_before_thin_lock_inflation_(Monitor::kDefaultMaxSpinsBeforeThinLockInflation),
@@ -556,6 +557,7 @@
must_relocate_ = options->must_relocate_;
is_zygote_ = options->is_zygote_;
is_explicit_gc_disabled_ = options->is_explicit_gc_disabled_;
+ dex2oat_enabled_ = options->dex2oat_enabled_;
vfprintf_ = options->hook_vfprintf_;
exit_ = options->hook_exit_;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a85c2e4..e76280a 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -105,6 +105,10 @@
return must_relocate_;
}
+ bool IsDex2OatEnabled() const {
+ return dex2oat_enabled_;
+ }
+
CompilerCallbacks* GetCompilerCallbacks() {
return compiler_callbacks_;
}
@@ -503,6 +507,7 @@
bool must_relocate_;
bool is_concurrent_gc_enabled_;
bool is_explicit_gc_disabled_;
+ bool dex2oat_enabled_;
std::string compiler_executable_;
std::string patchoat_executable_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8e6da74..7aabfce 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -76,8 +76,7 @@
bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
-const size_t Thread::kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
- GetStackOverflowReservedBytes(kRuntimeISA);
+const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
@@ -238,92 +237,48 @@
byte dont_optimize_this;
// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
-// overflow is detected. It is located right below the stack_end_. Just below that
-// is the StackOverflow reserved region used when creating the StackOverflow
-// exception.
+// overflow is detected. It is located right below the stack_begin_.
//
-// There is a little complexity here that deserves a special mention. When running on the
-// host (glibc), the process's main thread's stack is allocated with a special flag
+// There is a little complexity here that deserves a special mention. On some
+// architectures, the stack is created using a VM_GROWSDOWN flag
// to prevent memory being allocated when it's not needed. This flag makes the
// kernel only allocate memory for the stack by growing down in memory. Because we
// want to put an mprotected region far away from that at the stack top, we need
// to make sure the pages for the stack are mapped in before we call mprotect. We do
// this by reading every page from the stack bottom (highest address) to the stack top.
// We then madvise this away.
-void Thread::InstallImplicitProtection(bool is_main_stack) {
- byte* pregion = tlsPtr_.stack_end;
- byte* stack_lowmem = tlsPtr_.stack_begin;
- byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&pregion) &
+void Thread::InstallImplicitProtection() {
+ byte* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+ byte* stack_himem = tlsPtr_.stack_end;
+ byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&stack_himem) &
~(kPageSize - 1)); // Page containing current top of stack.
- const bool running_on_intel = (kRuntimeISA == kX86) || (kRuntimeISA == kX86_64);
+ // First remove the protection on the protected region as we will want to read and
+ // write it. This may fail (on the first attempt when the stack is not mapped)
+ // but we ignore that.
+ UnprotectStack();
- if (running_on_intel) {
- // On Intel, we need to map in the main stack. This must be done by reading from the
- // current stack pointer downwards as the stack is mapped using VM_GROWSDOWN
- // in the kernel. Any access more than a page below the current SP will cause
- // a segv.
- if (is_main_stack) {
- // First we need to unprotect the protected region because this may
- // be called more than once for a particular stack and we will crash
- // if we try to read the protected page.
- mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_READ);
+ // Map in the stack. This must be done by reading from the
+ // current stack pointer downwards as the stack may be mapped using VM_GROWSDOWN
+ // in the kernel. Any access more than a page below the current SP might cause
+ // a segv.
- // Read every page from the high address to the low.
- for (byte* p = stack_top; p > stack_lowmem; p -= kPageSize) {
- dont_optimize_this = *p;
- }
- }
+ // Read every page from the high address to the low.
+ for (byte* p = stack_top; p > pregion; p -= kPageSize) {
+ dont_optimize_this = *p;
}
- // Check and place a marker word at the lowest usable address in the stack. This
- // is used to prevent a double protection.
- constexpr uint32_t kMarker = 0xdadadada;
- uintptr_t *marker = reinterpret_cast<uintptr_t*>(pregion);
- if (*marker == kMarker) {
- // The region has already been set up. But on the main stack on the host we have
- // removed the protected region in order to read the stack memory. We need to put
- // this back again.
- if (is_main_stack && running_on_intel) {
- mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_NONE);
- madvise(stack_lowmem, stack_top - stack_lowmem, MADV_DONTNEED);
- }
- return;
- }
- // Add marker so that we can detect a second attempt to do this.
- *marker = kMarker;
-
- if (!running_on_intel) {
- // Running on !Intel, stacks are mapped cleanly. The protected region for the
- // main stack just needs to be mapped in. We do this by writing one byte per page.
- for (byte* p = pregion - kStackOverflowProtectedSize; p < pregion; p += kPageSize) {
- *p = 0;
- }
- }
-
- pregion -= kStackOverflowProtectedSize;
-
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
-
- if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
- LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. Reason:"
- << strerror(errno) << kStackOverflowProtectedSize;
- }
+ // Protect the bottom of the stack to prevent read/write to it.
+ ProtectStack();
// Tell the kernel that we won't be needing these pages any more.
// NB. madvise will probably write zeroes into the memory (on linux it does).
- if (is_main_stack) {
- if (running_on_intel) {
- // On the host, it's the whole stack (minus a page to prevent overwrite of stack top).
- madvise(stack_lowmem, stack_top - stack_lowmem - kPageSize, MADV_DONTNEED);
- } else {
- // On Android, just the protected region.
- madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
- }
- }
+ uint32_t unwanted_size = stack_top - pregion - kPageSize;
+ madvise(pregion, unwanted_size, MADV_DONTNEED);
}
void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
@@ -538,7 +493,13 @@
tlsPtr_.stack_begin = reinterpret_cast<byte*>(read_stack_base);
tlsPtr_.stack_size = read_stack_size;
- if (read_stack_size <= GetStackOverflowReservedBytes(kRuntimeISA)) {
+ // The minimum stack size we can cope with is the overflow reserved bytes (typically
+ // 8K) + the protected region size (4K) + another page (4K). Typically this will
+ // be 8+4+4 = 16K. The thread won't be able to do much with this stack: even the
+ // GC takes between 8K and 12K.
+ uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
+ + 4 * KB;
+ if (read_stack_size <= min_stack) {
LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
<< " bytes)";
}
@@ -582,20 +543,19 @@
// Install the protected region if we are doing implicit overflow checks.
if (implicit_stack_check) {
- if (is_main_thread) {
- size_t guardsize;
- pthread_attr_t attributes;
- CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), "guard size query");
- CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, &guardsize), "guard size query");
- CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), "guard size query");
- // The main thread might have protected region at the bottom. We need
- // to install our own region so we need to move the limits
- // of the stack to make room for it.
- tlsPtr_.stack_begin += guardsize;
- tlsPtr_.stack_end += guardsize;
- tlsPtr_.stack_size -= guardsize;
- }
- InstallImplicitProtection(is_main_thread);
+ size_t guardsize;
+ pthread_attr_t attributes;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, &guardsize), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), "guard size query");
+ // The thread might have a protected region at the bottom. We need
+ // to install our own region, so we move the limits
+ // of the stack to make room for it.
+ tlsPtr_.stack_begin += guardsize;
+ tlsPtr_.stack_end += guardsize;
+ tlsPtr_.stack_size -= guardsize;
+
+ InstallImplicitProtection();
}
// Sanity check.
@@ -2266,6 +2226,14 @@
}
tlsPtr_.stack_end = tlsPtr_.stack_begin;
+
+ // Remove the stack overflow protection if it is set up.
+ bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ if (implicit_stack_check) {
+ if (!UnprotectStack()) {
+ LOG(ERROR) << "Unable to remove stack protection for stack overflow";
+ }
+ }
}
void Thread::SetTlab(byte* start, byte* end) {
@@ -2291,4 +2259,21 @@
return os;
}
+void Thread::ProtectStack() {
+ void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+ VLOG(threads) << "Protecting stack at " << pregion;
+ if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
+ LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
+ "Reason: "
+ << strerror(errno) << " size: " << kStackOverflowProtectedSize;
+ }
+}
+
+bool Thread::UnprotectStack() {
+ void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
+ VLOG(threads) << "Unprotecting stack at " << pregion;
+ return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
+}
+
+
} // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index c2b200b..120ff6f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -94,16 +94,41 @@
static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;
+// Thread's stack layout for implicit stack overflow checks:
+//
+// +---------------------+ <- highest address of stack memory
+// | |
+// . . <- SP
+// | |
+// | |
+// +---------------------+ <- stack_end
+// | |
+// | Gap |
+// | |
+// +---------------------+ <- stack_begin
+// | |
+// | Protected region |
+// | |
+// +---------------------+ <- lowest address of stack memory
+//
+// The stack always grows down in memory. At the lowest address is a region of memory
+// that is set mprotect(PROT_NONE). Any attempt to read/write to this region will
+// result in a segmentation fault signal. At any point, the thread's SP will be somewhere
+// between the stack_end and the highest address in stack memory. An implicit stack
+// overflow check is a read of memory at a certain offset below the current SP (4K typically).
+// If the thread's SP is below the stack_end address this will be a read into the protected
+// region. If the SP is above the stack_end address, the thread is guaranteed to have
+// at least 4K of space. Because stack overflow checks are only performed in generated code,
+// if the thread makes a call out to a native function (through JNI), that native function
+// might only have 4K of memory (if the SP is adjacent to stack_end).
+
class Thread {
public:
- // How much of the reserved bytes is reserved for incoming signals.
- static constexpr size_t kStackOverflowSignalReservedBytes = 2 * KB;
-
// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
- static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
+ static constexpr size_t kStackOverflowProtectedSize = 4 * KB;
static const size_t kStackOverflowImplicitCheckSize;
// Creates a new native thread corresponding to the given managed peer.
@@ -582,7 +607,7 @@
}
// Install the protected region for implicit stack checks.
- void InstallImplicitProtection(bool is_main_stack);
+ void InstallImplicitProtection();
bool IsHandlingStackOverflow() const {
return tlsPtr_.stack_end == tlsPtr_.stack_begin;
@@ -814,6 +839,20 @@
tls32_.is_exception_reported_to_instrumentation_ = reported;
}
+ void ProtectStack();
+ bool UnprotectStack();
+
+ void NoteSignalBeingHandled() {
+ if (tls32_.handling_signal_) {
+ LOG(FATAL) << "Detected signal while processing a signal";
+ }
+ tls32_.handling_signal_ = true;
+ }
+
+ void NoteSignalHandlerDone() {
+ tls32_.handling_signal_ = false;
+ }
+
private:
explicit Thread(bool daemon);
~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
@@ -918,7 +957,8 @@
explicit tls_32bit_sized_values(bool is_daemon) :
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
- thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false) {
+ thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false),
+ handling_signal_(false), padding_(0) {
}
union StateAndFlags state_and_flags;
@@ -958,6 +998,12 @@
// When true this field indicates that the exception associated with this thread has already
// been reported to instrumentation.
bool32_t is_exception_reported_to_instrumentation_;
+
+ // True if signal is being handled by this thread.
+ bool32_t handling_signal_;
+
+ // Padding to make the size aligned to 8. Remove this if we add another 32 bit field.
+ int32_t padding_;
} tls32_;
struct PACKED(8) tls_64bit_sized_values {
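The mechanics behind the layout diagram above can be reproduced in a few lines of Linux-flavoured code: map a stack-sized region, mprotect() its lowest page to PROT_NONE, and any access landing there faults. This standalone sketch mirrors only the bookkeeping; in ART the faulting read is emitted by generated code at a fixed offset below SP, and the SIGSEGV handler converts it into a StackOverflowError:

#include <cassert>
#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t stack_size = 16 * page;
  void* mem = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  char* stack = static_cast<char*>(mem);
  // Protect the lowest page: the analogue of kStackOverflowProtectedSize.
  assert(mprotect(stack, page, PROT_NONE) == 0);
  // Reading stack[0] here would raise SIGSEGV; a fault handler can translate
  // that into a StackOverflowError, which is what ART's implicit checks do.
  assert(mprotect(stack, page, PROT_READ | PROT_WRITE) == 0);  // UnprotectStack analogue.
  munmap(stack, stack_size);
  return 0;
}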
diff --git a/runtime/utf-inl.h b/runtime/utf-inl.h
index d8c258b..1373d17 100644
--- a/runtime/utf-inl.h
+++ b/runtime/utf-inl.h
@@ -40,20 +40,60 @@
inline int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* utf8_1,
const char* utf8_2) {
- for (;;) {
- if (*utf8_1 == '\0') {
- return (*utf8_2 == '\0') ? 0 : -1;
- } else if (*utf8_2 == '\0') {
+ uint16_t c1, c2;
+ do {
+ c1 = *utf8_1;
+ c2 = *utf8_2;
+ // Did we reach a terminating character?
+ if (c1 == 0) {
+ return (c2 == 0) ? 0 : -1;
+ } else if (c2 == 0) {
return 1;
}
-
- int c1 = GetUtf16FromUtf8(&utf8_1);
- int c2 = GetUtf16FromUtf8(&utf8_2);
-
- if (c1 != c2) {
- return c1 > c2 ? 1 : -1;
+ // Assume 1-byte value and handle all cases first.
+ utf8_1++;
+ utf8_2++;
+ if ((c1 & 0x80) == 0) {
+ if (c1 == c2) {
+ // Matching 1-byte values.
+ continue;
+ } else {
+ // Non-matching values.
+ if ((c2 & 0x80) == 0) {
+ // 1-byte value, do nothing.
+ } else if ((c2 & 0x20) == 0) {
+ // 2-byte value.
+ c2 = ((c2 & 0x1f) << 6) | (*utf8_2 & 0x3f);
+ } else {
+ // 3-byte value.
+ c2 = ((c2 & 0x0f) << 12) | ((utf8_2[0] & 0x3f) << 6) | (utf8_2[1] & 0x3f);
+ }
+ return static_cast<int>(c1) - static_cast<int>(c2);
+ }
}
- }
+ // Non-matching or multi-byte values.
+ if ((c1 & 0x20) == 0) {
+ // 2-byte value.
+ c1 = ((c1 & 0x1f) << 6) | (*utf8_1 & 0x3f);
+ utf8_1++;
+ } else {
+ // 3-byte value.
+ c1 = ((c1 & 0x0f) << 12) | ((utf8_1[0] & 0x3f) << 6) | (utf8_1[1] & 0x3f);
+ utf8_1 += 2;
+ }
+ if ((c2 & 0x80) == 0) {
+ // 1-byte value, do nothing.
+ } else if ((c2 & 0x20) == 0) {
+ // 2-byte value.
+ c2 = ((c2 & 0x1f) << 6) | (*utf8_2 & 0x3f);
+ utf8_2++;
+ } else {
+ // 3-byte value.
+ c2 = ((c2 & 0x0f) << 12) | ((utf8_2[0] & 0x3f) << 6) | (utf8_2[1] & 0x3f);
+ utf8_2 += 2;
+ }
+ } while (c1 == c2);
+ return static_cast<int>(c1) - static_cast<int>(c2);
}
} // namespace art
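The rewritten comparison interleaves decoding with comparing; the decoding itself follows the usual Modified UTF-8 scheme of 1-, 2-, and 3-byte sequences (no 4-byte forms). A hedged standalone decoder for a single UTF-16 unit, equivalent in spirit to ART's GetUtf16FromUtf8:

#include <cstdint>

static inline uint16_t DecodeOneUtf16(const char** in) {
  const uint8_t* p = reinterpret_cast<const uint8_t*>(*in);
  uint16_t c = *p++;
  if ((c & 0x80) != 0) {
    if ((c & 0x20) == 0) {
      // 2-byte sequence: 110xxxxx 10xxxxxx.
      c = ((c & 0x1f) << 6) | (*p++ & 0x3f);
    } else {
      // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.
      c = ((c & 0x0f) << 12) | ((p[0] & 0x3f) << 6) | (p[1] & 0x3f);
      p += 2;
    }
  }
  *in = reinterpret_cast<const char*>(p);
  return c;
}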
diff --git a/runtime/utf.h b/runtime/utf.h
index 29f8499..63cdbdc 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -55,7 +55,8 @@
/*
* Compare two modified UTF-8 strings as UTF-16 code point values in a non-locale sensitive manner
*/
-int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* utf8_1, const char* utf8_2);
+ALWAYS_INLINE int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* utf8_1,
+ const char* utf8_2);
/*
* Compare a modified UTF-8 string with a UTF-16 string as code point values in a non-locale
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 48d6cdf..b845f50 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -222,19 +222,20 @@
if (java_descriptor == NULL) {
return "null";
}
- return PrettyDescriptor(java_descriptor->ToModifiedUtf8());
+ return PrettyDescriptor(java_descriptor->ToModifiedUtf8().c_str());
}
std::string PrettyDescriptor(mirror::Class* klass) {
if (klass == NULL) {
return "null";
}
- return PrettyDescriptor(klass->GetDescriptor());
+ std::string temp;
+ return PrettyDescriptor(klass->GetDescriptor(&temp));
}
-std::string PrettyDescriptor(const std::string& descriptor) {
+std::string PrettyDescriptor(const char* descriptor) {
// Count the number of '['s to get the dimensionality.
- const char* c = descriptor.c_str();
+ const char* c = descriptor;
size_t dim = 0;
while (*c == '[') {
dim++;
@@ -275,15 +276,14 @@
result.push_back(ch);
}
// ...and replace the semicolon with 'dim' "[]" pairs:
- while (dim--) {
+ for (size_t i = 0; i < dim; ++i) {
result += "[]";
}
return result;
}
std::string PrettyDescriptor(Primitive::Type type) {
- std::string descriptor_string(Primitive::Descriptor(type));
- return PrettyDescriptor(descriptor_string);
+ return PrettyDescriptor(Primitive::Descriptor(type));
}
std::string PrettyField(mirror::ArtField* f, bool with_type) {
@@ -341,8 +341,10 @@
} else {
++argument_length;
}
- std::string argument_descriptor(signature, argument_length);
- result += PrettyDescriptor(argument_descriptor);
+ {
+ std::string argument_descriptor(signature, argument_length);
+ result += PrettyDescriptor(argument_descriptor.c_str());
+ }
if (signature[argument_length] != ')') {
result += ", ";
}
@@ -410,9 +412,10 @@
if (obj->GetClass() == NULL) {
return "(raw)";
}
- std::string result(PrettyDescriptor(obj->GetClass()->GetDescriptor()));
+ std::string temp;
+ std::string result(PrettyDescriptor(obj->GetClass()->GetDescriptor(&temp)));
if (obj->IsClass()) {
- result += "<" + PrettyDescriptor(obj->AsClass()->GetDescriptor()) + ">";
+ result += "<" + PrettyDescriptor(obj->AsClass()->GetDescriptor(&temp)) + ">";
}
return result;
}
@@ -622,11 +625,20 @@
std::string DescriptorToDot(const char* descriptor) {
size_t length = strlen(descriptor);
- if (descriptor[0] == 'L' && descriptor[length - 1] == ';') {
- std::string result(descriptor + 1, length - 2);
- std::replace(result.begin(), result.end(), '/', '.');
- return result;
+ if (length > 1) {
+ if (descriptor[0] == 'L' && descriptor[length - 1] == ';') {
+ // Descriptors have the leading 'L' and trailing ';' stripped.
+ std::string result(descriptor + 1, length - 2);
+ std::replace(result.begin(), result.end(), '/', '.');
+ return result;
+ } else {
+ // For arrays the 'L' and ';' remain intact.
+ std::string result(descriptor);
+ std::replace(result.begin(), result.end(), '/', '.');
+ return result;
+ }
}
+ // Do nothing for non-class/array descriptors.
return descriptor;
}
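Under the revised DescriptorToDot, the three classes of input behave as follows; a hedged illustration in which only the wrapper function is hypothetical:

#include <cassert>

void DescriptorToDotExamples() {
  assert(DescriptorToDot("Ljava/lang/String;") == "java.lang.String");      // Class: 'L'/';' stripped.
  assert(DescriptorToDot("[Ljava/lang/String;") == "[Ljava.lang.String;");  // Array: kept intact.
  assert(DescriptorToDot("I") == "I");  // Primitive: passes through unchanged.
}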
diff --git a/runtime/utils.h b/runtime/utils.h
index f6773be..c89c41f 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -273,7 +273,7 @@
// "java.lang.String[]", and so forth.
std::string PrettyDescriptor(mirror::String* descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-std::string PrettyDescriptor(const std::string& descriptor);
+std::string PrettyDescriptor(const char* descriptor);
std::string PrettyDescriptor(mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::string PrettyDescriptor(Primitive::Type type);
@@ -335,10 +335,12 @@
// Turn "java.lang.String" into "Ljava/lang/String;".
std::string DotToDescriptor(const char* class_name);
-// Turn "Ljava/lang/String;" into "java.lang.String".
+// Turn "Ljava/lang/String;" into "java.lang.String" using the conventions of
+// java.lang.Class.getName().
std::string DescriptorToDot(const char* descriptor);
-// Turn "Ljava/lang/String;" into "java/lang/String".
+// Turn "Ljava/lang/String;" into "java/lang/String" using the opposite conventions of
+// java.lang.Class.getName().
std::string DescriptorToName(const char* descriptor);
// Tests for whether 's' is a valid class name in the three common forms:
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 329b4dc..fb57fc7 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -97,7 +97,8 @@
const DexFile& dex_file = klass->GetDexFile();
const DexFile::ClassDef* class_def = klass->GetClassDef();
mirror::Class* super = klass->GetSuperClass();
- if (super == NULL && "Ljava/lang/Object;" != klass->GetDescriptor()) {
+ std::string temp;
+ if (super == NULL && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
early_failure = true;
failure_message = " that has no super class";
} else if (super != NULL && super->IsFinal()) {
@@ -1457,10 +1458,8 @@
*/
if ((opcode_flags & Instruction::kThrow) != 0 && CurrentInsnFlags()->IsInTry()) {
saved_line_->CopyFromLine(work_line_.get());
- } else {
-#ifndef NDEBUG
+ } else if (kIsDebugBuild) {
saved_line_->FillWithGarbage();
-#endif
}
@@ -2221,6 +2220,7 @@
is_range, false);
const char* return_type_descriptor;
bool is_constructor;
+ RegType* return_type = nullptr;
if (called_method == NULL) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
@@ -2230,6 +2230,19 @@
} else {
is_constructor = called_method->IsConstructor();
return_type_descriptor = called_method->GetReturnTypeDescriptor();
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
+ MethodHelper mh(h_called_method);
+ mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
+ if (return_type_class != nullptr) {
+ return_type = &reg_types_.FromClass(return_type_descriptor,
+ return_type_class,
+ return_type_class->CannotBeAssignedFromOtherTypes());
+ } else {
+ DCHECK(!can_load_classes_ || self->IsExceptionPending());
+ self->ClearException();
+ }
}
if (is_constructor) {
/*
@@ -2271,12 +2284,14 @@
*/
work_line_->MarkRefsAsInitialized(this_type);
}
- RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
- return_type_descriptor, false);
- if (!return_type.IsLowHalf()) {
- work_line_->SetResultRegisterType(return_type);
+ if (return_type == nullptr) {
+ return_type = &reg_types_.FromDescriptor(class_loader_->Get(),
+ return_type_descriptor, false);
+ }
+ if (!return_type->IsLowHalf()) {
+ work_line_->SetResultRegisterType(*return_type);
} else {
- work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ work_line_->SetResultRegisterTypeWide(*return_type, return_type->HighHalf(&reg_types_));
}
just_set_result = true;
break;
@@ -3121,7 +3136,8 @@
RegType* res_method_class;
if (res_method != nullptr) {
mirror::Class* klass = res_method->GetDeclaringClass();
- res_method_class = &reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
+ std::string temp;
+ res_method_class = &reg_types_.FromClass(klass->GetDescriptor(&temp), klass,
klass->CannotBeAssignedFromOtherTypes());
} else {
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
@@ -3337,8 +3353,9 @@
}
if (!actual_arg_type.IsZero()) {
mirror::Class* klass = res_method->GetDeclaringClass();
+ std::string temp;
RegType& res_method_class =
- reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
+ reg_types_.FromClass(klass->GetDescriptor(&temp), klass,
klass->CannotBeAssignedFromOtherTypes());
if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS :
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 6422cdf..30be82f 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -414,20 +414,22 @@
std::string UnresolvedReferenceType::Dump() {
std::stringstream result;
- result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor());
+ result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
return result.str();
}
std::string UnresolvedUninitializedRefType::Dump() {
std::stringstream result;
- result << "Unresolved And Uninitialized Reference" << ": " << PrettyDescriptor(GetDescriptor());
- result << " Allocation PC: " << GetAllocationPc();
+ result << "Unresolved And Uninitialized Reference" << ": "
+ << PrettyDescriptor(GetDescriptor().c_str())
+ << " Allocation PC: " << GetAllocationPc();
return result.str();
}
std::string UnresolvedUninitializedThisRefType::Dump() {
std::stringstream result;
- result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
+ result << "Unresolved And Uninitialized This Reference"
+ << PrettyDescriptor(GetDescriptor().c_str());
return result.str();
}
@@ -618,7 +620,8 @@
if (super_klass != NULL) {
// A super class of a precise type isn't precise as a precise type indicates the register
// holds exactly that type.
- return cache->FromClass(super_klass->GetDescriptor().c_str(), super_klass, false);
+ std::string temp;
+ return cache->FromClass(super_klass->GetDescriptor(&temp), super_klass, false);
} else {
return cache->Zero();
}
@@ -896,7 +899,8 @@
} else if (c2 == join_class && !incoming_type.IsPreciseReference()) {
return incoming_type;
} else {
- return reg_types->FromClass(join_class->GetDescriptor().c_str(), join_class, false);
+ std::string temp;
+ return reg_types->FromClass(join_class->GetDescriptor(&temp), join_class, false);
}
}
} else {
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index c0e4351..482bb4d 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -122,9 +122,9 @@
}
}
-bool RegTypeCache::MatchDescriptor(size_t idx, const char* descriptor, bool precise) {
+bool RegTypeCache::MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise) {
RegType* entry = entries_[idx];
- if (entry->descriptor_ != descriptor) {
+ if (descriptor != entry->descriptor_) {
return false;
}
if (entry->HasClass()) {
@@ -158,9 +158,11 @@
RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
bool precise) {
- // Try looking up the class in the cache first.
+ // Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
+ // operations on the descriptor.
+ StringPiece descriptor_sp(descriptor);
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- if (MatchDescriptor(i, descriptor, precise)) {
+ if (MatchDescriptor(i, descriptor_sp, precise)) {
return *(entries_[i]);
}
}
@@ -181,9 +183,9 @@
if (klass->CannotBeAssignedFromOtherTypes() || precise) {
DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
DCHECK(!klass->IsInterface());
- entry = new PreciseReferenceType(klass, descriptor, entries_.size());
+ entry = new PreciseReferenceType(klass, descriptor_sp.as_string(), entries_.size());
} else {
- entry = new ReferenceType(klass, descriptor, entries_.size());
+ entry = new ReferenceType(klass, descriptor_sp.as_string(), entries_.size());
}
AddEntry(entry);
return *entry;
@@ -197,7 +199,7 @@
DCHECK(!Thread::Current()->IsExceptionPending());
}
if (IsValidDescriptor(descriptor)) {
- RegType* entry = new UnresolvedReferenceType(descriptor, entries_.size());
+ RegType* entry = new UnresolvedReferenceType(descriptor_sp.as_string(), entries_.size());
AddEntry(entry);
return *entry;
} else {
@@ -407,7 +409,7 @@
return *cur_entry;
}
}
- entry = new UnresolvedReferenceType(descriptor.c_str(), entries_.size());
+ entry = new UnresolvedReferenceType(descriptor, entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -564,13 +566,14 @@
return FromDescriptor(loader, component.c_str(), false);
} else {
mirror::Class* klass = array.GetClass()->GetComponentType();
+ std::string temp;
if (klass->IsErroneous()) {
// Arrays may have erroneous component types, use unresolved in that case.
// We assume that the primitive classes are not erroneous, so we know it is a
// reference type.
- return FromDescriptor(loader, klass->GetDescriptor().c_str(), false);
+ return FromDescriptor(loader, klass->GetDescriptor(&temp), false);
} else {
- return FromClass(klass->GetDescriptor().c_str(), klass,
+ return FromClass(klass->GetDescriptor(&temp), klass,
klass->CannotBeAssignedFromOtherTypes());
}
}
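The StringPiece change is a length-caching optimization: comparing each cached std::string entry against a raw const char* via operator!= implies a strlen per probe, while a pointer-plus-length view computes the length once per lookup. A minimal stand-in for art::StringPiece showing the idea:

#include <cstring>
#include <string>

struct DescriptorView {
  const char* data;
  size_t len;
  explicit DescriptorView(const char* s) : data(s), len(strlen(s)) {}  // One strlen.
  bool Equals(const std::string& entry) const {
    return entry.size() == len && memcmp(entry.data(), data, len) == 0;
  }
};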
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index d46cf2c..c0427eb 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -29,9 +29,11 @@
namespace art {
namespace mirror {
-class Class;
-class ClassLoader;
+ class Class;
+ class ClassLoader;
} // namespace mirror
+class StringPiece;
+
namespace verifier {
class RegType;
@@ -149,7 +151,7 @@
void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
+ bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/test/004-InterfaceTest/src/Main.java b/test/004-InterfaceTest/src/Main.java
index 9ebac59..297cbb0 100644
--- a/test/004-InterfaceTest/src/Main.java
+++ b/test/004-InterfaceTest/src/Main.java
@@ -23,7 +23,7 @@
Integer intobj = new Integer(0);
String s = "asdf";
long start = System.currentTimeMillis();
- for (int i = 0; i < 1000000; i++) {
+ for (int i = 0; i < 10000; i++) {
map.put(intobj, s);
}
long end = System.currentTimeMillis();
@@ -34,7 +34,7 @@
Integer intobj = new Integer(0);
String s = "asdf";
long start = System.currentTimeMillis();
- for (int i = 0; i < 1000000; i++) {
+ for (int i = 0; i < 10000; i++) {
map.put(intobj, s);
}
long end = System.currentTimeMillis();
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index a2dd664..c05dc22 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -16,6 +16,7 @@
#include <signal.h>
#include <stdio.h>
+#include <stdlib.h>
#include <unistd.h>
#include "jni.h"
@@ -24,8 +25,15 @@
#include <sys/ucontext.h>
#endif
+static int signal_count;
+static const int kMaxSignal = 2;
+
static void signalhandler(int sig, siginfo_t* info, void* context) {
printf("signal caught\n");
+ ++signal_count;
+ if (signal_count > kMaxSignal) {
+ abort();
+ }
#ifdef __arm__
// On ARM we do a more exhaustive test to make sure the signal
// context is OK.
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 11eb773..3fe3881 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -693,10 +693,35 @@
}
}
+ private static void checkGetDeclaredConstructor() {
+ try {
+ Method.class.getDeclaredConstructor().setAccessible(true);
+ System.out.print("Didn't get an exception from method getDeclaredConstructor");
+ } catch (NoSuchMethodException e) {
+ } catch (Exception e) {
+ System.out.print(e);
+ }
+ try {
+ Field.class.getDeclaredConstructor().setAccessible(true);
+ System.out.print("Didn't get an exception from field getDeclaredConstructor");
+ } catch (NoSuchMethodException e) {
+ } catch (Exception e) {
+ System.out.print(e);
+ }
+ try {
+ Class.class.getDeclaredConstructor().setAccessible(true);
+ System.out.print("Didn't get an exception from class getDeclaredConstructor()");
+ } catch (SecurityException e) {
+ } catch (Exception e) {
+ System.out.print(e);
+ }
+ }
+
public static void main(String[] args) throws Exception {
Main test = new Main();
test.run();
+ checkGetDeclaredConstructor();
checkAccess();
checkType();
checkClinitForFields();
diff --git a/test/116-nodex2oat/expected.txt b/test/116-nodex2oat/expected.txt
new file mode 100644
index 0000000..05b1c2f
--- /dev/null
+++ b/test/116-nodex2oat/expected.txt
@@ -0,0 +1,6 @@
+Run -Xnodex2oat
+Has oat is false, is dex2oat enabled is false.
+Run -Xdex2oat
+Has oat is true, is dex2oat enabled is true.
+Run default
+Has oat is true, is dex2oat enabled is true.
diff --git a/test/116-nodex2oat/info.txt b/test/116-nodex2oat/info.txt
new file mode 100644
index 0000000..f063d9f
--- /dev/null
+++ b/test/116-nodex2oat/info.txt
@@ -0,0 +1 @@
+Test that disables dex2oat'ing the application.
diff --git a/test/116-nodex2oat/nodex2oat.cc b/test/116-nodex2oat/nodex2oat.cc
new file mode 100644
index 0000000..4326db0
--- /dev/null
+++ b/test/116-nodex2oat/nodex2oat.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_linker.h"
+#include "dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+class NoDex2OatTest {
+ public:
+ static bool hasOat(jclass cls) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
+ const DexFile& dex_file = klass->GetDexFile();
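+ // Ask the class linker for the oat file it opened for this dex file, if any;
+ // under -Xnodex2oat there should be none.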
+ const OatFile* oat_file =
+ Runtime::Current()->GetClassLinker()->FindOpenedOatFileForDexFile(dex_file);
+ return oat_file != nullptr;
+ }
+};
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasOat(JNIEnv*, jclass cls) {
+ return NoDex2OatTest::hasOat(cls);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass cls) {
+ return Runtime::Current()->IsDex2OatEnabled();
+}
+
+} // namespace art
diff --git a/test/116-nodex2oat/run b/test/116-nodex2oat/run
new file mode 100755
index 0000000..5ffeecd
--- /dev/null
+++ b/test/116-nodex2oat/run
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Remove --prebuild from the flags; this test exercises running without oat files.
+flags="${@/--prebuild/}"
+
+# Make sure we can run without an oat file.
+echo "Run -Xnodex2oat"
+${RUN} ${flags} --runtime-option -Xnodex2oat
+
+# Make sure we can run with the oat file.
+echo "Run -Xdex2oat"
+${RUN} ${flags} --runtime-option -Xdex2oat
+
+# Make sure we can run with the default settings.
+echo "Run default"
+${RUN} ${flags}
diff --git a/test/116-nodex2oat/src/Main.java b/test/116-nodex2oat/src/Main.java
new file mode 100644
index 0000000..37ac9d5
--- /dev/null
+++ b/test/116-nodex2oat/src/Main.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println(
+ "Has oat is " + hasOat() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
+
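+ // The two must agree: dex2oat enabled should imply an oat file, and vice versa.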
+ if (hasOat() && !isDex2OatEnabled()) {
+ throw new Error("Application with dex2oat disabled runs with an oat file");
+ } else if (!hasOat() && isDex2OatEnabled()) {
+ throw new Error("Application with dex2oat enabled runs without an oat file");
+ }
+ }
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ private native static boolean hasOat();
+
+ private native static boolean isDex2OatEnabled();
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index f3563a4..3871b28 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -23,7 +23,8 @@
004-SignalTest/signaltest.cc \
004-ReferenceMap/stack_walk_refmap_jni.cc \
004-StackWalk/stack_walk_jni.cc \
- 004-UnsafeTest/unsafe_test.cc
+ 004-UnsafeTest/unsafe_test.cc \
+ 116-nodex2oat/nodex2oat.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ifdef TARGET_2ND_ARCH
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index d7ee383..47111c5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -168,6 +168,7 @@
########################################################################
ART_TEST_TARGET_RUN_TEST_ALL_RULES :=
+ART_TEST_TARGET_RUN_TEST_GCSTRESS_RULES :=
ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RULES :=
@@ -188,6 +189,7 @@
ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
ART_TEST_TARGET_RUN_TEST_ALL$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_GCSTRESS$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
@@ -226,6 +228,7 @@
ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_ALL_RULES :=
+ART_TEST_HOST_RUN_TEST_GCSTRESS_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES :=
@@ -246,6 +249,7 @@
ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_GCSTRESS$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
@@ -330,6 +334,7 @@
prereq_rule :=
skip_test := false
uc_reloc_type :=
+ uc_run_type :=
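+ # Upper-case run type (TRACE, GCVERIFY or GCSTRESS), used to group rules by run type.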
ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true)
run_test_options += --always-clean
endif
@@ -401,6 +406,7 @@
endif
endif
ifeq ($(5),trace)
+ uc_run_type := TRACE
run_test_options += --trace
run_test_rule_name := test-art-$(2)-run-test-trace-$(3)-$(6)-$(1)$(4)
ifneq ($$(ART_TEST_TRACE),true)
@@ -408,6 +414,7 @@
endif
else
ifeq ($(5),gcverify)
+ uc_run_type := GCVERIFY
run_test_options += --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify \
--runtime-option -Xgc:preverify_rosalloc --runtime-option -Xgc:postverify_rosalloc
run_test_rule_name := test-art-$(2)-run-test-gcverify-$(3)-$(6)-$(1)$(4)
@@ -416,6 +423,7 @@
endif
else
ifeq ($(5),gcstress)
+ uc_run_type := GCSTRESS
run_test_options += --runtime-option -Xgc:SS --runtime-option -Xms2m \
--runtime-option -Xmx2m --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify
run_test_rule_name := test-art-$(2)-run-test-gcstress-$(3)-$(6)-$(1)$(4)
@@ -457,6 +465,7 @@
ART_TEST_$$(uc_host_or_target)_RUN_TEST_ALL_RULES += $$(run_test_rule_name)
ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_reloc_type)_RULES += $$(run_test_rule_name)
ART_TEST_$$(uc_host_or_target)_RUN_TEST_ALL$(4)_RULES += $$(run_test_rule_name)
+ ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_run_type)_RULES += $$(run_test_rule_name)
# Clear locally defined variables.
skip_test :=
@@ -609,6 +618,8 @@
$(ART_TEST_TARGET_RUN_TEST_RELOCATE_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test, \
$(ART_TEST_TARGET_RUN_TEST_ALL_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-gcstress, \
+ $(ART_TEST_TARGET_RUN_TEST_GCSTRESS_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default, \
$(ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter, \
@@ -682,6 +693,8 @@
ifdef TARGET_2ND_ARCH
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
$(ART_TEST_TARGET_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-gcstress$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_GCSTRESS$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
$(ART_TEST_TARGET_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
@@ -732,6 +745,8 @@
$(ART_TEST_HOST_RUN_TEST_RELOCATE_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test, \
$(ART_TEST_HOST_RUN_TEST_ALL_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-gcstress, \
+ $(ART_TEST_HOST_RUN_TEST_GCSTRESS_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default, \
$(ART_TEST_HOST_RUN_TEST_DEFAULT_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter, \
@@ -805,6 +820,8 @@
ifneq ($(HOST_PREFER_32_BIT),true)
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
$(ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-gcstress$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_GCSTRESS$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
$(ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \