Merge "ART: Fix missing Erase() in error case"
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 15edcc5..8275162 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -449,14 +449,14 @@
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
} else if (Arm64UseRelativeCall(cu, target_method)) {
// Defer to linker patch.
} else {
cg->LoadCodeAddress(target_method, type, kInvokeTgt);
}
if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
} else {
cg->LoadMethodAddress(target_method, type, kArg0);
}
@@ -479,7 +479,7 @@
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
} else if (Arm64UseRelativeCall(cu, target_method)) {
// Defer to linker patch.
} else {
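
Note on the change above: on arm64 (and mips64 below), `direct_code` and `direct_method` are 64-bit uintptr_t values, but LoadConstant materializes only a 32-bit immediate and would silently truncate the upper half of the address; LoadConstantWide loads all 64 bits. A minimal standalone sketch of the hazard (illustrative address, not ART code):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t direct_code = UINT64_C(0x0000007f00401000);   // hypothetical 64-bit code address
    uint32_t narrow = static_cast<uint32_t>(direct_code);  // what a 32-bit constant load keeps
    assert(static_cast<uint64_t>(narrow) != direct_code);  // the upper 32 bits are lost
    return 0;
  }
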
diff --git a/compiler/dex/quick/mips64/call_mips64.cc b/compiler/dex/quick/mips64/call_mips64.cc
index 63cef7e..31be1c2 100644
--- a/compiler/dex/quick/mips64/call_mips64.cc
+++ b/compiler/dex/quick/mips64/call_mips64.cc
@@ -356,12 +356,12 @@
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
} else {
cg->LoadCodeAddress(target_method, type, kInvokeTgt);
}
if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
} else {
cg->LoadMethodAddress(target_method, type, kArg0);
}
@@ -382,7 +382,7 @@
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
} else {
CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
cg->LoadCodeAddress(target_method, type, kInvokeTgt);
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index c3db3a6..11c1465 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -332,7 +332,12 @@
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ auto target_reg = cg->TargetReg(kArg0, kRef);
+ if (target_reg.Is64Bit()) {
+ cg->LoadConstantWide(target_reg, direct_method);
+ } else {
+ cg->LoadConstant(target_reg, direct_method);
+ }
} else {
cg->LoadMethodAddress(target_method, type, kArg0);
}
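
Unlike arm64 and mips64 above, the Quick x86 backend serves both x86 and x86-64, so the width of kArg0 has to be checked at the call site. A standalone sketch of the dispatch pattern, with mock types standing in for ART's RegStorage and code generator (assumptions, not the real API):

  #include <cstdint>
  #include <cstdio>

  struct Reg { bool wide; bool Is64Bit() const { return wide; } };

  void LoadConstant(Reg, int32_t v)     { std::printf("32-bit load of %d\n", v); }
  void LoadConstantWide(Reg, int64_t v) { std::printf("64-bit load of %lld\n", static_cast<long long>(v)); }

  void LoadDirectMethod(Reg r, uintptr_t direct_method) {
    if (r.Is64Bit()) {
      LoadConstantWide(r, static_cast<int64_t>(direct_method));  // keep all 64 bits
    } else {
      LoadConstant(r, static_cast<int32_t>(direct_method));      // 32-bit process, no truncation
    }
  }

  int main() { LoadDirectMethod(Reg{true}, 0x12345678u); return 0; }
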
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index be6c41a..3d3d5cb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -932,7 +932,7 @@
Runtime* current = Runtime::Current();
// Suspend all threads.
- current->GetThreadList()->SuspendAll();
+ current->GetThreadList()->SuspendAll(__FUNCTION__);
std::string error_msg;
std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(image_classes_.get(),
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 742d83e..c4cef09 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -577,142 +577,42 @@
pc_info.native_pc = GetAssembler()->CodeSize();
pc_infos_.Add(pc_info);
- // Populate stack map information.
-
+ uint32_t inlining_depth = 0;
if (instruction == nullptr) {
// For stack overflow checks.
- stack_map_stream_.AddStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, 0);
- return;
- }
+ stack_map_stream_.RecordEnvironment(
+ /* environment */ nullptr,
+ /* environment_size */ 0,
+ /* locations */ nullptr,
+ dex_pc,
+ pc_info.native_pc,
+ /* register_mask */ 0,
+ inlining_depth);
+ } else {
+ LocationSummary* locations = instruction->GetLocations();
+ HEnvironment* environment = instruction->GetEnvironment();
+ size_t environment_size = instruction->EnvironmentSize();
- LocationSummary* locations = instruction->GetLocations();
- HEnvironment* environment = instruction->GetEnvironment();
-
- size_t environment_size = instruction->EnvironmentSize();
-
- size_t inlining_depth = 0;
- uint32_t register_mask = locations->GetRegisterMask();
- if (locations->OnlyCallsOnSlowPath()) {
- // In case of slow path, we currently set the location of caller-save registers
- // to register (instead of their stack location when pushed before the slow-path
- // call). Therefore register_mask contains both callee-save and caller-save
- // registers that hold objects. We must remove the caller-save from the mask, since
- // they will be overwritten by the callee.
- register_mask &= core_callee_save_mask_;
- }
- // The register mask must be a subset of callee-save registers.
- DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
- stack_map_stream_.AddStackMapEntry(
- dex_pc, pc_info.native_pc, register_mask,
- locations->GetStackMask(), environment_size, inlining_depth);
-
- // Walk over the environment, and record the location of dex registers.
- for (size_t i = 0; i < environment_size; ++i) {
- HInstruction* current = environment->GetInstructionAt(i);
- if (current == nullptr) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
- continue;
+ uint32_t register_mask = locations->GetRegisterMask();
+ if (locations->OnlyCallsOnSlowPath()) {
+ // In case of slow path, we currently set the location of caller-save registers
+ // to register (instead of their stack location when pushed before the slow-path
+ // call). Therefore register_mask contains both callee-save and caller-save
+ // registers that hold objects. We must remove the caller-save from the mask, since
+ // they will be overwritten by the callee.
+ register_mask &= core_callee_save_mask_;
}
+ // The register mask must be a subset of callee-save registers.
+ DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
- Location location = locations->GetEnvironmentAt(i);
- switch (location.GetKind()) {
- case Location::kConstant: {
- DCHECK_EQ(current, location.GetConstant());
- if (current->IsLongConstant()) {
- int64_t value = current->AsLongConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
- Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
- High32Bits(value));
- ++i;
- DCHECK_LT(i, environment_size);
- } else if (current->IsDoubleConstant()) {
- int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
- Low32Bits(value));
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant,
- High32Bits(value));
- ++i;
- DCHECK_LT(i, environment_size);
- } else if (current->IsIntConstant()) {
- int32_t value = current->AsIntConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
- } else if (current->IsNullConstant()) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
- } else {
- DCHECK(current->IsFloatConstant());
- int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
- }
- break;
- }
-
- case Location::kStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
- location.GetStackIndex());
- break;
- }
-
- case Location::kDoubleStackSlot: {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
- location.GetStackIndex());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
- location.GetHighStackIndex(kVRegSize));
- ++i;
- DCHECK_LT(i, environment_size);
- break;
- }
-
- case Location::kRegister : {
- int id = location.reg();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
- if (current->GetType() == Primitive::kPrimLong) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
- ++i;
- DCHECK_LT(i, environment_size);
- }
- break;
- }
-
- case Location::kFpuRegister : {
- int id = location.reg();
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
- if (current->GetType() == Primitive::kPrimDouble) {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
- ++i;
- DCHECK_LT(i, environment_size);
- }
- break;
- }
-
- case Location::kFpuRegisterPair : {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister,
- location.low());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister,
- location.high());
- ++i;
- DCHECK_LT(i, environment_size);
- break;
- }
-
- case Location::kRegisterPair : {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister,
- location.low());
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister,
- location.high());
- ++i;
- DCHECK_LT(i, environment_size);
- break;
- }
-
- case Location::kInvalid: {
- stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
- break;
- }
-
- default:
- LOG(FATAL) << "Unexpected kind " << location.GetKind();
- }
+ // Populate stack map information.
+ stack_map_stream_.RecordEnvironment(environment,
+ environment_size,
+ locations,
+ dex_pc,
+ pc_info.native_pc,
+ register_mask,
+ inlining_depth);
}
}
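
The environment walk that used to live here moves wholesale into StackMapStream::RecordEnvironment (see stack_map_stream.h below). The key invariant it preserves: wide values (long/double) occupy two consecutive Dex registers, so the walk marks two liveness bits and advances the index an extra step. A standalone sketch of that invariant with mock types (not ART's):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  enum class Kind { kConstant, kInStack };
  struct Loc { Kind kind; int32_t value; };

  int main() {
    // Environment: vregs 0/1 hold one long constant, vreg 2 holds a stack slot.
    std::vector<bool> live(3, false);
    std::vector<Loc> locations;  // only live registers get entries
    int64_t wide = INT64_C(0x1122334455667788);

    live[0] = live[1] = true;    // a long spans two Dex registers
    locations.push_back({Kind::kConstant, static_cast<int32_t>(wide)});        // low half
    locations.push_back({Kind::kConstant, static_cast<int32_t>(wide >> 32)});  // high half

    live[2] = true;
    locations.push_back({Kind::kInStack, 16});

    assert(locations.size() == 3);  // one entry per live register, none for dead ones
    return 0;
  }
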
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 81fc684..81efc03 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -29,8 +29,6 @@
namespace art {
-static size_t constexpr kVRegSize = 4;
-
// Binary encoding of 2^32 for type double.
static int64_t constexpr k2Pow32EncodingForDouble = INT64_C(0x41F0000000000000);
// Binary encoding of 2^31 for type double.
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 863bab2..3168801 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -20,6 +20,7 @@
#include "base/bit_vector.h"
#include "base/value_object.h"
#include "memory_region.h"
+#include "nodes.h"
#include "stack_map.h"
#include "utils/growable_array.h"
@@ -32,8 +33,9 @@
class StackMapStream : public ValueObject {
public:
explicit StackMapStream(ArenaAllocator* allocator)
- : stack_maps_(allocator, 10),
- dex_register_maps_(allocator, 10 * 4),
+ : allocator_(allocator),
+ stack_maps_(allocator, 10),
+ dex_register_locations_(allocator, 10 * 4),
inline_infos_(allocator, 2),
stack_mask_max_(-1),
number_of_stack_maps_with_inline_info_(0) {}
@@ -52,8 +54,9 @@
BitVector* sp_mask;
uint32_t num_dex_registers;
uint8_t inlining_depth;
- size_t dex_register_maps_start_index;
+ size_t dex_register_locations_start_index;
size_t inline_infos_start_index;
+ BitVector* live_dex_registers_mask;
};
struct InlineInfoEntry {
@@ -65,7 +68,8 @@
uint32_t register_mask,
BitVector* sp_mask,
uint32_t num_dex_registers,
- uint8_t inlining_depth) {
+ uint8_t inlining_depth,
+ BitVector* live_dex_registers_mask) {
StackMapEntry entry;
entry.dex_pc = dex_pc;
entry.native_pc_offset = native_pc_offset;
@@ -73,8 +77,9 @@
entry.sp_mask = sp_mask;
entry.num_dex_registers = num_dex_registers;
entry.inlining_depth = inlining_depth;
- entry.dex_register_maps_start_index = dex_register_maps_.Size();
+ entry.dex_register_locations_start_index = dex_register_locations_.Size();
entry.inline_infos_start_index = inline_infos_.Size();
+ entry.live_dex_registers_mask = live_dex_registers_mask;
stack_maps_.Add(entry);
if (sp_mask != nullptr) {
@@ -85,11 +90,146 @@
}
}
- void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
- // Ensure we only use non-compressed location kind at this stage.
- DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
- << DexRegisterLocation::PrettyDescriptor(kind);
- dex_register_maps_.Add(DexRegisterLocation(kind, value));
+ void RecordEnvironment(HEnvironment* environment,
+ size_t environment_size,
+ LocationSummary* locations,
+ uint32_t dex_pc,
+ uint32_t native_pc,
+ uint32_t register_mask,
+ uint32_t inlining_depth) {
+ if (environment == nullptr) {
+ // For stack overflow checks.
+ AddStackMapEntry(dex_pc, native_pc, 0, 0, 0, inlining_depth, nullptr);
+ return;
+ }
+
+ BitVector* live_dex_registers_mask = new (allocator_) ArenaBitVector(allocator_, 0, true);
+
+ AddStackMapEntry(
+ dex_pc, native_pc, register_mask,
+ locations->GetStackMask(), environment_size, inlining_depth, live_dex_registers_mask);
+
+ // Walk over the environment, and record the location of dex registers.
+ for (size_t i = 0; i < environment_size; ++i) {
+ HInstruction* current = environment->GetInstructionAt(i);
+ if (current == nullptr) {
+ // No need to store anything; the `live_dex_registers_mask` records that
+ // this register is not live.
+ continue;
+ }
+
+ Location location = locations->GetEnvironmentAt(i);
+ switch (location.GetKind()) {
+ case Location::kConstant: {
+ DCHECK_EQ(current, location.GetConstant());
+ if (current->IsLongConstant()) {
+ // TODO: Consider setting this bit inside AddDexRegisterEntry to avoid
+ // doing it manually here.
+ live_dex_registers_mask->SetBit(i);
+ live_dex_registers_mask->SetBit(i + 1);
+ int64_t value = current->AsLongConstant()->GetValue();
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, Low32Bits(value));
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ ++i;
+ DCHECK_LT(i, environment_size);
+ } else if (current->IsDoubleConstant()) {
+ live_dex_registers_mask->SetBit(i);
+ live_dex_registers_mask->SetBit(i + 1);
+ int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, Low32Bits(value));
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ ++i;
+ DCHECK_LT(i, environment_size);
+ } else if (current->IsIntConstant()) {
+ live_dex_registers_mask->SetBit(i);
+ int32_t value = current->AsIntConstant()->GetValue();
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ } else if (current->IsNullConstant()) {
+ live_dex_registers_mask->SetBit(i);
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
+ } else {
+ DCHECK(current->IsFloatConstant()) << current->DebugName();
+ live_dex_registers_mask->SetBit(i);
+ int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ }
+ break;
+ }
+
+ case Location::kStackSlot: {
+ live_dex_registers_mask->SetBit(i);
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
+ location.GetStackIndex());
+ break;
+ }
+
+ case Location::kDoubleStackSlot: {
+ live_dex_registers_mask->SetBit(i);
+ live_dex_registers_mask->SetBit(i + 1);
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
+ location.GetHighStackIndex(kVRegSize));
+ ++i;
+ DCHECK_LT(i, environment_size);
+ break;
+ }
+
+ case Location::kRegister : {
+ live_dex_registers_mask->SetBit(i);
+ int id = location.reg();
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+ if (current->GetType() == Primitive::kPrimLong) {
+ live_dex_registers_mask->SetBit(i + 1);
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+ ++i;
+ DCHECK_LT(i, environment_size);
+ }
+ break;
+ }
+
+ case Location::kFpuRegister : {
+ live_dex_registers_mask->SetBit(i);
+ int id = location.reg();
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+ if (current->GetType() == Primitive::kPrimDouble) {
+ live_dex_registers_mask->SetBit(i + 1);
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+ ++i;
+ DCHECK_LT(i, environment_size);
+ }
+ break;
+ }
+
+ case Location::kFpuRegisterPair : {
+ live_dex_registers_mask->SetBit(i);
+ live_dex_registers_mask->SetBit(i + 1);
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, location.low());
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, location.high());
+ ++i;
+ DCHECK_LT(i, environment_size);
+ break;
+ }
+
+ case Location::kRegisterPair : {
+ live_dex_registers_mask->SetBit(i);
+ live_dex_registers_mask->SetBit(i + 1);
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, location.low());
+ AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, location.high());
+ ++i;
+ DCHECK_LT(i, environment_size);
+ break;
+ }
+
+ case Location::kInvalid: {
+ // No need to store anything; the `live_dex_registers_mask` records that
+ // this register is not live.
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected kind " << location.GetKind();
+ }
+ }
}
void AddInlineInfoEntry(uint32_t method_index) {
@@ -118,22 +258,26 @@
// Compute the size of the Dex register map of `entry`.
size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
size_t size = DexRegisterMap::kFixedSize;
- for (size_t j = 0; j < entry.num_dex_registers; ++j) {
- DexRegisterLocation dex_register_location =
- dex_register_maps_.Get(entry.dex_register_maps_start_index + j);
- size += DexRegisterMap::EntrySize(dex_register_location);
+ // Add the bit mask for the dex register liveness.
+ size += DexRegisterMap::LiveBitMaskSize(entry.num_dex_registers);
+ for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
+ dex_register_number < entry.num_dex_registers;
+ ++dex_register_number) {
+ if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
+ DexRegisterLocation dex_register_location = dex_register_locations_.Get(
+ entry.dex_register_locations_start_index + index_in_dex_register_locations);
+ size += DexRegisterMap::EntrySize(dex_register_location);
+ index_in_dex_register_locations++;
+ }
}
return size;
}
// Compute the size of all the Dex register maps.
size_t ComputeDexRegisterMapsSize() const {
- size_t size = stack_maps_.Size() * DexRegisterMap::kFixedSize;
- // The size of each register location depends on the type of
- // the entry.
- for (size_t i = 0, e = dex_register_maps_.Size(); i < e; ++i) {
- DexRegisterLocation entry = dex_register_maps_.Get(i);
- size += DexRegisterMap::EntrySize(entry);
+ size_t size = 0;
+ for (size_t i = 0; i < stack_maps_.Size(); ++i) {
+ size += ComputeDexRegisterMapSize(stack_maps_.Get(i));
}
return size;
}
@@ -161,7 +305,7 @@
size_t stack_mask_size = ComputeStackMaskSize();
uint8_t* memory_start = region.start();
- MemoryRegion dex_register_maps_region = region.Subregion(
+ MemoryRegion dex_register_locations_region = region.Subregion(
ComputeDexRegisterMapsStart(),
ComputeDexRegisterMapsSize());
@@ -189,7 +333,7 @@
if (entry.num_dex_registers != 0) {
// Set the Dex register map.
MemoryRegion register_region =
- dex_register_maps_region.Subregion(
+ dex_register_locations_region.Subregion(
next_dex_register_map_offset,
ComputeDexRegisterMapSize(entry));
next_dex_register_map_offset += register_region.size();
@@ -198,11 +342,20 @@
// Offset in `dex_register_map` where to store the next register entry.
size_t offset = DexRegisterMap::kFixedSize;
- for (size_t j = 0; j < entry.num_dex_registers; ++j) {
- DexRegisterLocation dex_register_location =
- dex_register_maps_.Get(entry.dex_register_maps_start_index + j);
- dex_register_map.SetRegisterInfo(offset, dex_register_location);
- offset += DexRegisterMap::EntrySize(dex_register_location);
+ dex_register_map.SetLiveBitMask(offset,
+ entry.num_dex_registers,
+ *entry.live_dex_registers_mask);
+ offset += DexRegisterMap::LiveBitMaskSize(entry.num_dex_registers);
+ for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
+ dex_register_number < entry.num_dex_registers;
+ ++dex_register_number) {
+ if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
+ DexRegisterLocation dex_register_location = dex_register_locations_.Get(
+ entry.dex_register_locations_start_index + index_in_dex_register_locations);
+ dex_register_map.SetRegisterInfo(offset, dex_register_location);
+ offset += DexRegisterMap::EntrySize(dex_register_location);
+ ++index_in_dex_register_locations;
+ }
}
// Ensure we reached the end of the Dex registers region.
DCHECK_EQ(offset, register_region.size());
@@ -232,12 +385,24 @@
}
private:
+ void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
+ // Ensure we only use non-compressed location kind at this stage.
+ DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
+ << DexRegisterLocation::PrettyDescriptor(kind);
+ dex_register_locations_.Add(DexRegisterLocation(kind, value));
+ }
+
+ ArenaAllocator* allocator_;
GrowableArray<StackMapEntry> stack_maps_;
- GrowableArray<DexRegisterLocation> dex_register_maps_;
+ GrowableArray<DexRegisterLocation> dex_register_locations_;
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
+ ART_FRIEND_TEST(StackMapTest, Test1);
+ ART_FRIEND_TEST(StackMapTest, Test2);
+ ART_FRIEND_TEST(StackMapTest, TestNonLiveDexRegisters);
+
DISALLOW_COPY_AND_ASSIGN(StackMapStream);
};
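
Size accounting after this change: each map is a live-bit mask of RoundUp(num_dex_registers, 8) / 8 bytes, followed by one entry per *live* register only, so dead registers cost one bit instead of a full entry. A standalone sketch of the arithmetic (short entries taken as 1 byte and large ones as 5, per stack_map.h below):

  #include <cassert>
  #include <cstddef>

  constexpr size_t kBitsPerByte = 8;

  constexpr size_t LiveBitMaskSize(size_t num_dex_registers) {
    return (num_dex_registers + kBitsPerByte - 1) / kBitsPerByte;  // RoundUp(n, 8) / 8
  }

  int main() {
    // 2 registers, one live with a 1-byte short entry: 1 mask byte + 1 entry byte.
    assert(LiveBitMaskSize(2) + 1 == 2);
    // 9 registers need a 2-byte mask.
    assert(LiveBitMaskSize(9) == 2);
    return 0;
  }
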
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 3a5f806..4606bd6 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -31,19 +31,17 @@
return true;
}
-static size_t ComputeDexRegisterMapSize(const DexRegisterMap& dex_registers,
- size_t number_of_dex_registers) {
- return dex_registers.FindLocationOffset(number_of_dex_registers);
-}
-
TEST(StackMapTest, Test1) {
ArenaPool pool;
ArenaAllocator arena(&pool);
StackMapStream stream(&arena);
ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector live_registers_mask(&arena, 0, true);
+ live_registers_mask.SetBit(0);
+ live_registers_mask.SetBit(1);
size_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0, &live_registers_mask);
stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, 0);
stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
@@ -68,10 +66,9 @@
ASSERT_TRUE(stack_map.HasDexRegisterMap());
DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(6u, dex_registers.Size());
- ASSERT_EQ(6u, ComputeDexRegisterMapSize(dex_registers, number_of_dex_registers));
- DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0);
- DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1);
+ ASSERT_EQ(7u, dex_registers.Size());
+ DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
+ DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
@@ -91,7 +88,10 @@
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
+ ArenaBitVector live_registers_mask1(&arena, 0, true);
+ live_registers_mask1.SetBit(0);
+ live_registers_mask1.SetBit(1);
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2, &live_registers_mask1);
stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, 0);
stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
stream.AddInlineInfoEntry(42);
@@ -100,7 +100,10 @@
ArenaBitVector sp_mask2(&arena, 0, true);
sp_mask2.SetBit(3);
sp_mask1.SetBit(8);
- stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
+ ArenaBitVector live_registers_mask2(&arena, 0, true);
+ live_registers_mask2.SetBit(0);
+ live_registers_mask2.SetBit(1);
+ stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0, &live_registers_mask2);
stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, 18);
stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, 3);
@@ -128,10 +131,11 @@
ASSERT_TRUE(stack_map.HasDexRegisterMap());
DexRegisterMap dex_registers =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(6u, dex_registers.Size());
- ASSERT_EQ(6u, ComputeDexRegisterMapSize(dex_registers, number_of_dex_registers));
- DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0);
- DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1);
+ ASSERT_EQ(7u, dex_registers.Size());
+ DexRegisterLocation location0 =
+ dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
+ DexRegisterLocation location1 =
+ dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
@@ -161,10 +165,11 @@
ASSERT_TRUE(stack_map.HasDexRegisterMap());
DexRegisterMap dex_registers =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(2u, dex_registers.Size());
- ASSERT_EQ(2u, ComputeDexRegisterMapSize(dex_registers, number_of_dex_registers));
- DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0);
- DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1);
+ ASSERT_EQ(3u, dex_registers.Size());
+ DexRegisterLocation location0 =
+ dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
+ DexRegisterLocation location1 =
+ dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetKind());
ASSERT_EQ(DexRegisterLocation::Kind::kInFpuRegister, location1.GetKind());
ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetInternalKind());
@@ -176,4 +181,33 @@
}
}
+TEST(StackMapTest, TestNonLiveDexRegisters) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena);
+
+ ArenaBitVector sp_mask(&arena, 0, false);
+ ArenaBitVector live_registers_mask(&arena, 0, true);
+ live_registers_mask.SetBit(1);
+ uint32_t number_of_dex_registers = 2;
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0, &live_registers_mask);
+ stream.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, -2);
+
+ size_t size = stream.ComputeNeededSize();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ StackMap stack_map = code_info.GetStackMapAt(0);
+ ASSERT_TRUE(stack_map.HasDexRegisterMap());
+ DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_EQ(DexRegisterLocation::Kind::kNone,
+ dex_registers.GetLocationKind(0, number_of_dex_registers));
+ ASSERT_EQ(DexRegisterLocation::Kind::kConstant,
+ dex_registers.GetLocationKind(1, number_of_dex_registers));
+ ASSERT_EQ(-2, dex_registers.GetConstant(1, number_of_dex_registers));
+ ASSERT_FALSE(stack_map.HasInlineInfo());
+}
+
} // namespace art
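
The expected sizes in Test1 and Test2 grow from 6 to 7 bytes because the map now starts with a live-bit mask: RoundUp(2, 8) / 8 = 1 mask byte, followed by the same 1-byte short kInStack entry and 5-byte large kConstant entry. A one-line check of the arithmetic:

  static_assert(1 + 1 + 5 == 7, "mask byte + short kInStack entry + large kConstant entry");
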
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 9512376..c27b3d4 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1084,9 +1084,13 @@
if (stack_map.HasDexRegisterMap()) {
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ // TODO: Display the bit mask of live Dex registers.
for (size_t j = 0; j < number_of_dex_registers; ++j) {
- DexRegisterLocation location = dex_register_map.GetLocationKindAndValue(j);
- DumpRegisterMapping(os, j, location.GetInternalKind(), location.GetValue());
+ if (dex_register_map.IsDexRegisterLive(j)) {
+ DexRegisterLocation location =
+ dex_register_map.GetLocationKindAndValue(j, number_of_dex_registers);
+ DumpRegisterMapping(os, j, location.GetInternalKind(), location.GetValue());
+ }
}
}
}
@@ -1535,7 +1539,7 @@
{
self->TransitionFromRunnableToSuspended(kNative);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
- thread_list->SuspendAll();
+ thread_list->SuspendAll(__FUNCTION__);
heap->RevokeAllThreadLocalAllocationStacks(self);
thread_list->ResumeAll();
self->TransitionFromSuspendedToRunnable();
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 2ec2b0c..13dcb8c 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -46,7 +46,6 @@
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
-Mutex* Locks::method_verifiers_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
@@ -1002,10 +1001,6 @@
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kMethodVerifiersLock);
- DCHECK(method_verifiers_lock_ == nullptr);
- method_verifiers_lock_ = new Mutex("Method verifiers lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
DCHECK(allocated_monitor_ids_lock_ == nullptr);
allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index f9e1e62..6e7b04f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -588,11 +588,9 @@
// Guards lists of classes within the class linker.
static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
- static Mutex* method_verifiers_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
-
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
- #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::method_verifiers_lock_)
+ #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
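
With method_verifiers_lock_ gone from the hierarchy, DEFAULT_MUTEX_ACQUIRED_AFTER now ranks newly declared mutexes directly below classlinker_classes_lock_. The usage pattern at declaration sites is unchanged, e.g. (as in runtime.h later in this diff):

  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
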
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 893ab11..0ec0295 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -66,14 +66,16 @@
mirror::ArtMethod* m = GetMethod();
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
MemoryRegion stack_mask = stack_map.GetStackMask();
uint32_t register_mask = stack_map.GetRegisterMask();
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK(reg < m->GetCodeItem()->registers_size_);
- DexRegisterLocation location = dex_register_map.GetLocationKindAndValue(reg);
+ DexRegisterLocation location =
+ dex_register_map.GetLocationKindAndValue(reg, number_of_dex_registers);
switch (location.GetKind()) {
case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 9f2a09b..6296cf5 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -644,7 +644,7 @@
}
Runtime* runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
Thread* self = Thread::Current();
ThreadState old_state = self->SetStateUnsafe(kRunnable);
CHECK_NE(old_state, kRunnable);
@@ -668,7 +668,7 @@
// to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
// and clear the object registry.
Runtime* runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
Thread* self = Thread::Current();
ThreadState old_state = self->SetStateUnsafe(kRunnable);
@@ -819,7 +819,7 @@
Thread* self = Thread::Current();
CHECK_EQ(self->GetState(), kRunnable);
self->TransitionFromRunnableToSuspended(kSuspended);
- Runtime::Current()->GetThreadList()->SuspendAll();
+ Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
MonitorInfo monitor_info(o);
@@ -3135,7 +3135,7 @@
self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
// We need to suspend mutator threads first.
Runtime* const runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
const ThreadState old_state = self->SetStateUnsafe(kRunnable);
{
MutexLock mu(self, *Locks::deoptimization_lock_);
@@ -4436,7 +4436,7 @@
// RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
self->TransitionFromRunnableToSuspended(kSuspended);
ThreadList* tl = Runtime::Current()->GetThreadList();
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
@@ -4452,7 +4452,7 @@
heap->IncrementDisableMovingGC(self);
self->TransitionFromRunnableToSuspended(kSuspended);
ThreadList* tl = Runtime::Current()->GetThreadList();
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 0fdfcb3..88209a3 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -129,7 +129,8 @@
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, held_mutexes, nested_signal_state,
sizeof(void*) * kLockLevelCount);
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, nested_signal_state, flip_function, sizeof(void*));
- EXPECT_OFFSET_DIFF(Thread, tlsPtr_.flip_function, Thread, wait_mutex_, sizeof(void*),
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
+ EXPECT_OFFSET_DIFF(Thread, tlsPtr_.method_verifier, Thread, wait_mutex_, sizeof(void*),
thread_tlsptr_end);
}
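
The updated offset checks pin method_verifier directly after flip_function in tlsPtr_, one pointer apart. A standalone sketch of what EXPECT_OFFSET_DIFFP asserts, using a mock struct (not the real Thread layout):

  #include <cstddef>

  struct TlsPtrSketch {
    void* nested_signal_state;
    void* flip_function;
    void* method_verifier;  // new field; must stay adjacent for the test to pass
  };

  static_assert(offsetof(TlsPtrSketch, method_verifier) -
                    offsetof(TlsPtrSketch, flip_function) == sizeof(void*),
                "method_verifier follows flip_function");

  int main() { return 0; }
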
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index eafcc45..47d6ada 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -157,7 +157,7 @@
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector)
: start_time_(NanoTime()), collector_(collector) {
- Runtime::Current()->GetThreadList()->SuspendAll();
+ Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
}
GarbageCollector::ScopedPause::~ScopedPause() {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9421db5..0cad11f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -576,7 +576,7 @@
ThreadList* tl = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
ScopedThreadStateChange tsc(self, kSuspended);
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
// Something may have caused the transition to fail.
if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
CHECK(main_space_ != nullptr);
@@ -758,7 +758,7 @@
IncrementDisableMovingGC(self);
self->TransitionFromRunnableToSuspended(kWaitingForVisitObjects);
ThreadList* tl = Runtime::Current()->GetThreadList();
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
VisitObjectsInternalRegionSpace(callback, arg);
VisitObjectsInternal(callback, arg);
tl->ResumeAll();
@@ -1058,7 +1058,7 @@
// Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
// about pauses.
Runtime* runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
uint64_t start_time = NanoTime();
size_t count = runtime->GetMonitorList()->DeflateMonitors();
VLOG(heap) << "Deflating " << count << " monitors took "
@@ -1697,7 +1697,7 @@
return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
}
// Suspend all threads.
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
uint64_t start_time = NanoTime();
// Launch compaction.
space::MallocSpace* to_space = main_space_backup_.release();
@@ -1779,7 +1779,7 @@
return;
}
collector::GarbageCollector* collector = nullptr;
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
switch (collector_type) {
case kCollectorTypeSS: {
if (!IsMovingGc(collector_type_)) {
@@ -1993,15 +1993,16 @@
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
+ size_t obj_size = obj->SizeOf();
+ size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
mirror::Object* forward_address;
// Find the smallest bin which we can move obj in.
- auto it = bins_.lower_bound(object_size);
+ auto it = bins_.lower_bound(alloc_size);
if (it == bins_.end()) {
// No available space in the bins, place it in the target space instead (grows the zygote
// space).
size_t bytes_allocated, dummy;
- forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr, &dummy);
+ forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
if (to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
} else {
@@ -2016,11 +2017,12 @@
// Set the live and mark bits so that sweeping system weaks works properly.
bin_live_bitmap_->Set(forward_address);
bin_mark_bitmap_->Set(forward_address);
- DCHECK_GE(size, object_size);
- AddBin(size - object_size, pos + object_size); // Add a new bin with the remaining space.
+ DCHECK_GE(size, alloc_size);
+ // Add a new bin with the remaining space.
+ AddBin(size - alloc_size, pos + alloc_size);
}
- // Copy the object over to its new location.
- memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
+ // Copy the object over to its new location. Use obj_size rather than alloc_size to avoid Valgrind invalid reads past the end of the object.
+ memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
if (kUseBakerOrBrooksReadBarrier) {
obj->AssertReadBarrierPointer();
if (kUseBrooksReadBarrier) {
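
The obj_size/alloc_size split above matters for the memcpy: alloc_size is rounded up to kObjectAlignment for bin bookkeeping, but copying that many bytes can read past the end of the source object, which Valgrind reports. A standalone sketch of the distinction:

  #include <cstdint>
  #include <cstring>

  constexpr size_t kObjectAlignment = 8;

  size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

  void CopyForwarded(void* dst, const void* src, size_t obj_size) {
    size_t alloc_size = RoundUp(obj_size, kObjectAlignment);  // governs bin placement only
    (void)alloc_size;
    std::memcpy(dst, src, obj_size);  // never read past the end of `src`
  }

  int main() {
    char src[5] = {1, 2, 3, 4, 5};
    char dst[8] = {};
    CopyForwarded(dst, src, sizeof(src));  // copies 5 bytes although the "bin" holds 8
    return 0;
  }
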
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index f140021..eb1d5f4 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -303,7 +303,7 @@
// TODO: NO_THREAD_SAFETY_ANALYSIS.
Thread* self = Thread::Current();
ThreadList* tl = Runtime::Current()->GetThreadList();
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
{
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 5a7b7e1..b822613 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1180,7 +1180,7 @@
// comment in Heap::VisitObjects().
heap->IncrementDisableMovingGC(self);
}
- Runtime::Current()->GetThreadList()->SuspendAll();
+ Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
Hprof hprof(filename, fd, direct_to_ddms);
hprof.Dump();
Runtime::Current()->GetThreadList()->ResumeAll();
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 085062c..b53b8cd 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -607,7 +607,7 @@
Locks::mutator_lock_->AssertNotHeld(self);
Locks::instrument_entrypoints_lock_->AssertHeld(self);
if (runtime->IsStarted()) {
- tl->SuspendAll();
+ tl->SuspendAll(__FUNCTION__);
}
{
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 9b89459..729791f 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -164,7 +164,7 @@
void Jit::CreateInstrumentationCache(size_t compile_threshold) {
CHECK_GT(compile_threshold, 0U);
Runtime* const runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
// Add Jit interpreter instrumentation, which tells the interpreter when to notify
// the jit to compile something.
instrumentation_cache_.reset(new jit::JitInstrumentationCache(compile_threshold));
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a2f1481..189559e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1331,12 +1331,6 @@
callee_save_methods_[i].VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
}
verifier::MethodVerifier::VisitStaticRoots(callback, arg);
- {
- MutexLock mu(Thread::Current(), *Locks::method_verifiers_lock_);
- for (verifier::MethodVerifier* verifier : method_verifiers_) {
- verifier->VisitRoots(callback, arg);
- }
- }
VisitTransactionRoots(callback, arg);
instrumentation_.VisitRoots(callback, arg);
}
@@ -1508,26 +1502,6 @@
compile_time_class_paths_.Put(class_loader, class_path);
}
-void Runtime::AddMethodVerifier(verifier::MethodVerifier* verifier) {
- DCHECK(verifier != nullptr);
- if (gAborting) {
- return;
- }
- MutexLock mu(Thread::Current(), *Locks::method_verifiers_lock_);
- method_verifiers_.insert(verifier);
-}
-
-void Runtime::RemoveMethodVerifier(verifier::MethodVerifier* verifier) {
- DCHECK(verifier != nullptr);
- if (gAborting) {
- return;
- }
- MutexLock mu(Thread::Current(), *Locks::method_verifiers_lock_);
- auto it = method_verifiers_.find(verifier);
- CHECK(it != method_verifiers_.end());
- method_verifiers_.erase(it);
-}
-
void Runtime::StartProfiler(const char* profile_output_filename) {
profile_output_filename_ = profile_output_filename;
profiler_started_ =
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d54972c..3cf22bf 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -456,11 +456,6 @@
return use_compile_time_class_path_;
}
- void AddMethodVerifier(verifier::MethodVerifier* verifier)
- LOCKS_EXCLUDED(Locks::method_verifiers_lock_);
- void RemoveMethodVerifier(verifier::MethodVerifier* verifier)
- LOCKS_EXCLUDED(Locks::method_verifiers_lock_);
-
const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
// The caller is responsible for ensuring the class_path DexFiles remain
@@ -642,9 +637,6 @@
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
- // Method verifier set, used so that we can update their GC roots.
- std::set<verifier::MethodVerifier*> method_verifiers_ GUARDED_BY(Locks::method_verifiers_lock_);
-
// A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
// the shutdown lock so that threads aren't born while we're shutting down.
size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 47b85ad..2d688ee 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -202,30 +202,33 @@
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
// its instructions?
DCHECK_LT(vreg, code_item->registers_size_);
+ uint16_t number_of_dex_registers = code_item->registers_size_;
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, code_item->registers_size_);
- DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg);
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ DexRegisterLocation::Kind location_kind =
+ dex_register_map.GetLocationKind(vreg, number_of_dex_registers);
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
+ const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
}
case DexRegisterLocation::Kind::kInRegister:
case DexRegisterLocation::Kind::kInFpuRegister: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg);
+ uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers);
return GetRegisterIfAccessible(reg, kind, val);
}
case DexRegisterLocation::Kind::kConstant:
- *val = dex_register_map.GetConstant(vreg);
+ *val = dex_register_map.GetConstant(vreg, number_of_dex_registers);
return true;
case DexRegisterLocation::Kind::kNone:
return false;
default:
LOG(FATAL)
<< "Unexpected location kind"
- << DexRegisterLocation::PrettyDescriptor(dex_register_map.GetLocationInternalKind(vreg));
+ << DexRegisterLocation::PrettyDescriptor(
+ dex_register_map.GetLocationInternalKind(vreg, number_of_dex_registers));
UNREACHABLE();
}
}
@@ -388,21 +391,23 @@
const DexFile::CodeItem* code_item = m->GetCodeItem();
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
// its instructions?
- DCHECK_LT(vreg, code_item->registers_size_);
+ uint16_t number_of_dex_registers = code_item->registers_size_;
+ DCHECK_LT(vreg, number_of_dex_registers);
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, code_item->registers_size_);
- DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg);
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ DexRegisterLocation::Kind location_kind =
+ dex_register_map.GetLocationKind(vreg, number_of_dex_registers);
uint32_t dex_pc = m->ToDexPc(cur_quick_frame_pc_, false);
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
+ const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers);
uint8_t* addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + offset;
*reinterpret_cast<uint32_t*>(addr) = new_value;
return true;
}
case DexRegisterLocation::Kind::kInRegister:
case DexRegisterLocation::Kind::kInFpuRegister: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg);
+ uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers);
return SetRegisterIfAccessible(reg, new_value, kind);
}
case DexRegisterLocation::Kind::kConstant:
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 8ebafc5..0db589f 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -31,6 +31,9 @@
// Word alignment required on ARM, in bytes.
static constexpr size_t kWordAlignment = 4;
+// Size of a Dex virtual register, in bytes.
+static size_t constexpr kVRegSize = 4;
+
/**
* Classes in the following file are wrapper on stack map information backed
* by a MemoryRegion. As such they read and write to the region, they don't have
@@ -191,6 +194,10 @@
DexRegisterLocation(Kind kind, int32_t value)
: kind_(kind), value_(value) {}
+ static DexRegisterLocation None() {
+ return DexRegisterLocation(Kind::kNone, 0);
+ }
+
// Get the "surface" kind of the location, i.e., the one that doesn't
// include any value with a "large" qualifier.
Kind GetKind() const {
@@ -211,8 +218,8 @@
/**
* Information on dex register values for a specific PC. The information is
* of the form:
- * [location_kind, register_value]+.
- * either on 1 or 5 bytes (see art::DexRegisterLocation::Kind).
+ * [live_bit_mask, DexRegisterLocation+].
+ * DexRegisterLocations are either 1 or 5 bytes wide (see art::DexRegisterLocation::Kind).
*/
class DexRegisterMap {
public:
@@ -221,6 +228,18 @@
// Short (compressed) location, fitting on one byte.
typedef uint8_t ShortLocation;
+ static size_t LiveBitMaskSize(uint16_t number_of_dex_registers) {
+ return RoundUp(number_of_dex_registers, kBitsPerByte) / kBitsPerByte;
+ }
+
+ void SetLiveBitMask(size_t offset,
+ uint16_t number_of_dex_registers,
+ const BitVector& live_dex_registers_mask) {
+ for (uint16_t i = 0; i < number_of_dex_registers; i++) {
+ region_.StoreBit(offset + i, live_dex_registers_mask.IsBitSet(i));
+ }
+ }
+
void SetRegisterInfo(size_t offset, const DexRegisterLocation& dex_register_location) {
DexRegisterLocation::Kind kind = ComputeCompressedKind(dex_register_location);
int32_t value = dex_register_location.GetValue();
@@ -256,39 +275,63 @@
}
}
- // Find the offset of the Dex register location number `dex_register_index`.
- size_t FindLocationOffset(uint16_t dex_register_index) const {
+ bool IsDexRegisterLive(uint16_t dex_register_index) const {
size_t offset = kFixedSize;
+ return region_.LoadBit(offset + dex_register_index);
+ }
+
+ static constexpr size_t kNoDexRegisterLocationOffset = -1;
+
+ static size_t GetDexRegisterMapLocationsOffset(uint16_t number_of_dex_registers) {
+ return kLiveBitMaskOffset + LiveBitMaskSize(number_of_dex_registers);
+ }
+
+ // Find the offset of the Dex register location number `dex_register_index`.
+ size_t FindLocationOffset(uint16_t dex_register_index, uint16_t number_of_dex_registers) const {
+ if (!IsDexRegisterLive(dex_register_index)) return kNoDexRegisterLocationOffset;
+ size_t offset = GetDexRegisterMapLocationsOffset(number_of_dex_registers);
// Skip the first `dex_register_index` entries.
for (uint16_t i = 0; i < dex_register_index; ++i) {
- // Read the first next byte and inspect its first 3 bits to decide
- // whether it is a short or a large location.
- DexRegisterLocation::Kind kind = ExtractKindAtOffset(offset);
- if (DexRegisterLocation::IsShortLocationKind(kind)) {
- // Short location. Skip the current byte.
- offset += SingleShortEntrySize();
- } else {
- // Large location. Skip the 5 next bytes.
- offset += SingleLargeEntrySize();
+ if (IsDexRegisterLive(i)) {
+ // Read the first next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterLocation::Kind kind = ExtractKindAtOffset(offset);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += SingleShortEntrySize();
+ } else {
+ // Large location. Skip the 5 next bytes.
+ offset += SingleLargeEntrySize();
+ }
}
}
return offset;
}
// Get the surface kind.
- DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_index) const {
- return DexRegisterLocation::ConvertToSurfaceKind(GetLocationInternalKind(dex_register_index));
+ DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_index,
+ uint16_t number_of_dex_registers) const {
+ return IsDexRegisterLive(dex_register_index)
+ ? DexRegisterLocation::ConvertToSurfaceKind(
+ GetLocationInternalKind(dex_register_index, number_of_dex_registers))
+ : DexRegisterLocation::Kind::kNone;
}
// Get the internal kind.
- DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_index) const {
- size_t offset = FindLocationOffset(dex_register_index);
- return ExtractKindAtOffset(offset);
+ DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_index,
+ uint16_t number_of_dex_registers) const {
+ return IsDexRegisterLive(dex_register_index)
+ ? ExtractKindAtOffset(FindLocationOffset(dex_register_index, number_of_dex_registers))
+ : DexRegisterLocation::Kind::kNone;
}
// TODO: Rename as GetDexRegisterLocation?
- DexRegisterLocation GetLocationKindAndValue(uint16_t dex_register_index) const {
- size_t offset = FindLocationOffset(dex_register_index);
+ DexRegisterLocation GetLocationKindAndValue(uint16_t dex_register_index,
+ uint16_t number_of_dex_registers) const {
+ if (!IsDexRegisterLive(dex_register_index)) {
+ return DexRegisterLocation::None();
+ }
+ size_t offset = FindLocationOffset(dex_register_index, number_of_dex_registers);
// Read the first byte and inspect its first 3 bits to get the location.
ShortLocation first_byte = region_.LoadUnaligned<ShortLocation>(offset);
DexRegisterLocation::Kind kind = ExtractKindFromShortLocation(first_byte);
@@ -311,21 +354,25 @@
}
}
- int32_t GetStackOffsetInBytes(uint16_t dex_register_index) const {
- DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ int32_t GetStackOffsetInBytes(uint16_t dex_register_index,
+ uint16_t number_of_dex_registers) const {
+ DexRegisterLocation location =
+ GetLocationKindAndValue(dex_register_index, number_of_dex_registers);
DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
// GetLocationKindAndValue returns the offset in bytes.
return location.GetValue();
}
- int32_t GetConstant(uint16_t dex_register_index) const {
- DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ int32_t GetConstant(uint16_t dex_register_index, uint16_t number_of_dex_registers) const {
+ DexRegisterLocation location =
+ GetLocationKindAndValue(dex_register_index, number_of_dex_registers);
DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant);
return location.GetValue();
}
- int32_t GetMachineRegister(uint16_t dex_register_index) const {
- DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ int32_t GetMachineRegister(uint16_t dex_register_index, uint16_t number_of_dex_registers) const {
+ DexRegisterLocation location =
+ GetLocationKindAndValue(dex_register_index, number_of_dex_registers);
DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
|| location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
<< DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
@@ -405,7 +452,8 @@
return region_.size();
}
- static constexpr int kFixedSize = 0;
+ static constexpr int kLiveBitMaskOffset = 0;
+ static constexpr int kFixedSize = kLiveBitMaskOffset;
private:
// Width of the kind "field" in a short location, in bits.
@@ -428,6 +476,8 @@
static DexRegisterLocation::Kind ExtractKindFromShortLocation(ShortLocation location) {
uint8_t kind = (location >> kKindOffset) & kKindMask;
DCHECK_LE(kind, static_cast<uint8_t>(DexRegisterLocation::Kind::kLastLocationKind));
+ // We do not encode kNone locations in the stack map.
+ DCHECK_NE(kind, static_cast<uint8_t>(DexRegisterLocation::Kind::kNone));
return static_cast<DexRegisterLocation::Kind>(kind);
}
@@ -678,21 +728,28 @@
// TODO: Ideally, we would like to use art::DexRegisterMap::Size or
// art::DexRegisterMap::FindLocationOffset, but the DexRegisterMap is not
// yet built. Try to factor common code.
- size_t offset = origin + DexRegisterMap::kFixedSize;
+ size_t offset =
+ origin + DexRegisterMap::GetDexRegisterMapLocationsOffset(number_of_dex_registers);
+
+ // Create a temporary DexRegisterMap so we can call DexRegisterMap::IsDexRegisterLive.
+ DexRegisterMap only_live_mask(MemoryRegion(region_.Subregion(origin, offset - origin)));
+
// Walk all `number_of_dex_registers` entries to compute the map's size.
for (uint16_t i = 0; i < number_of_dex_registers; ++i) {
- // Read the first next byte and inspect its first 3 bits to decide
- // whether it is a short or a large location.
- DexRegisterMap::ShortLocation first_byte =
- region_.LoadUnaligned<DexRegisterMap::ShortLocation>(offset);
- DexRegisterLocation::Kind kind =
- DexRegisterMap::ExtractKindFromShortLocation(first_byte);
- if (DexRegisterLocation::IsShortLocationKind(kind)) {
- // Short location. Skip the current byte.
- offset += DexRegisterMap::SingleShortEntrySize();
- } else {
- // Large location. Skip the 5 next bytes.
- offset += DexRegisterMap::SingleLargeEntrySize();
+ if (only_live_mask.IsDexRegisterLive(i)) {
+ // Read the first next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterMap::ShortLocation first_byte =
+ region_.LoadUnaligned<DexRegisterMap::ShortLocation>(offset);
+ DexRegisterLocation::Kind kind =
+ DexRegisterMap::ExtractKindFromShortLocation(first_byte);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += DexRegisterMap::SingleShortEntrySize();
+ } else {
+ // Large location. Skip the 5 next bytes.
+ offset += DexRegisterMap::SingleLargeEntrySize();
+ }
}
}
size_t size = offset - origin;
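
Decoding after this change: a reader first consults the live-bit mask, then walks only the live registers' entries, sizing each by its leading kind byte. A standalone mock of that walk (the bit and byte layout here is simplified; ART packs kinds differently):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  bool IsDexRegisterLive(const std::vector<uint8_t>& map, uint16_t reg) {
    return (map[reg / 8] >> (reg % 8)) & 1;  // the mask sits at the start of the map
  }

  // Mock sizing: pretend a leading byte >= 0xf8 marks a 5-byte "large" entry.
  size_t EntrySize(uint8_t first_byte) { return first_byte >= 0xf8 ? 5 : 1; }

  size_t FindLocationOffset(const std::vector<uint8_t>& map,
                            uint16_t reg, uint16_t num_regs) {
    size_t offset = (num_regs + 7) / 8;  // locations start right after the mask
    for (uint16_t i = 0; i < reg; ++i) {
      if (IsDexRegisterLive(map, i)) {
        offset += EntrySize(map[offset]);  // dead registers have no entry to skip
      }
    }
    return offset;
  }

  int main() { return 0; }
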
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8e98d53..affb6cd 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -70,6 +70,7 @@
#include "thread-inl.h"
#include "utils.h"
#include "verifier/dex_gc_map.h"
+#include "verifier/method_verifier.h"
#include "verify_object-inl.h"
#include "vmap_table.h"
#include "well_known_classes.h"
@@ -2296,6 +2297,9 @@
mapper.VisitShadowFrame(shadow_frame);
}
}
+ if (tlsPtr_.method_verifier != nullptr) {
+ tlsPtr_.method_verifier->VisitRoots(visitor, arg, RootInfo(kRootNativeStack, thread_id));
+ }
// Visit roots on this thread's stack
Context* context = GetLongJumpContext();
RootCallbackVisitor visitor_to_callback(visitor, arg, thread_id);
@@ -2417,4 +2421,14 @@
tlsPtr_.debug_invoke_req = nullptr;
}
+void Thread::SetVerifier(verifier::MethodVerifier* verifier) {
+ CHECK(tlsPtr_.method_verifier == nullptr);
+ tlsPtr_.method_verifier = verifier;
+}
+
+void Thread::ClearVerifier(verifier::MethodVerifier* verifier) {
+ CHECK_EQ(tlsPtr_.method_verifier, verifier);
+ tlsPtr_.method_verifier = nullptr;
+}
+
} // namespace art
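
(The Set/ClearVerifier pair above is effectively a scoped registration, so the GC can reach the verifier live on this thread. A minimal sketch with stand-in types; ScopedVerifierRegistration is hypothetical, not an ART class.)

    #include <cassert>

    // Stand-ins for the ART types involved.
    struct MethodVerifier {};
    struct Thread {
      MethodVerifier* method_verifier = nullptr;
      void SetVerifier(MethodVerifier* v) {
        assert(method_verifier == nullptr);  // no nested verifier, as the CHECK enforces
        method_verifier = v;
      }
      void ClearVerifier(MethodVerifier* v) {
        assert(method_verifier == v);  // only the registered verifier may unregister
        method_verifier = nullptr;
      }
    };

    // The constructor/destructor pairing in method_verifier.cc behaves like this guard.
    class ScopedVerifierRegistration {
     public:
      ScopedVerifierRegistration(Thread* self, MethodVerifier* v) : self_(self), v_(v) {
        self_->SetVerifier(v_);
      }
      ~ScopedVerifierRegistration() { self_->ClearVerifier(v_); }
     private:
      Thread* const self_;
      MethodVerifier* const v_;
    };
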
diff --git a/runtime/thread.h b/runtime/thread.h
index 2e9ae3c..da7af83 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -62,6 +62,11 @@
class StackTraceElement;
class Throwable;
} // namespace mirror
+
+namespace verifier {
+class MethodVerifier;
+} // namespace verifier
+
class BaseMutex;
class ClassLinker;
class Closure;
@@ -875,6 +880,9 @@
return tls32_.suspended_at_suspend_check;
}
+ void SetVerifier(verifier::MethodVerifier* verifier);
+ void ClearVerifier(verifier::MethodVerifier* verifier);
+
private:
explicit Thread(bool daemon);
~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
@@ -1055,10 +1063,8 @@
pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
- nested_signal_state(nullptr), flip_function(nullptr) {
- for (size_t i = 0; i < kLockLevelCount; ++i) {
- held_mutexes[i] = nullptr;
- }
+ nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr) {
+ std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
}
// The biased card table, see CardTable for details.
@@ -1172,6 +1178,9 @@
// The function used for thread flip.
Closure* flip_function;
+
+ // Current method verifier, used for root marking.
+ verifier::MethodVerifier* method_verifier;
} tlsPtr_;
// Guards the 'interrupted_' and 'wait_monitor_' members.
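
(A small sketch of the header technique used above: forward-declaring the class inside its namespace lets thread.h hold a raw pointer without including method_verifier.h.)

    namespace verifier {
    class MethodVerifier;  // declaration only; no #include needed
    }  // namespace verifier

    struct ExampleTls {
      verifier::MethodVerifier* method_verifier = nullptr;  // pointer to an incomplete type is fine
    };
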
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ddfbebd..1ab0093 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -448,13 +448,13 @@
return runnable_threads.size() + other_threads.size() + 1; // +1 for self.
}
-void ThreadList::SuspendAll() {
+void ThreadList::SuspendAll(const char* cause) {
Thread* self = Thread::Current();
if (self != nullptr) {
- VLOG(threads) << *self << " SuspendAll starting...";
+ VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
} else {
- VLOG(threads) << "Thread[null] SuspendAll starting...";
+ VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
}
ATRACE_BEGIN("Suspending mutator threads");
const uint64_t start_time = NanoTime();
@@ -503,7 +503,7 @@
}
ATRACE_END();
- ATRACE_BEGIN("Mutator threads suspended");
+ ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());
if (self != nullptr) {
VLOG(threads) << *self << " SuspendAll complete";
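
(A minimal sketch of the pattern these hunks introduce: threading a human-readable cause through the suspend-all entry point so both the log line and the trace section name the requester. TraceBegin/TraceEnd are stand-ins for ATRACE_BEGIN/ATRACE_END.)

    #include <cstdio>
    #include <string>

    void TraceBegin(const std::string& label) { std::printf("BEGIN %s\n", label.c_str()); }
    void TraceEnd() { std::printf("END\n"); }

    void SuspendAll(const char* cause) {
      std::printf("SuspendAll for %s starting...\n", cause);
      TraceBegin("Suspending mutator threads");
      // ... suspension logic elided ...
      TraceEnd();
      TraceBegin(std::string("Mutator threads suspended for ") + cause);
      // The matching TraceEnd() runs when the threads are resumed.
    }

Call sites simply pass __FUNCTION__, as the trace.cc hunks below do, so every suspension is attributed without hand-written labels.
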
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index de0dd79..c18e285 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -61,7 +61,7 @@
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
// Suspends all threads and gets exclusive access to the mutator_lock_.
- void SuspendAll()
+ void SuspendAll(const char* cause)
EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 8833a85..88be23f 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -313,7 +313,7 @@
}
}
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
{
MutexLock mu(self, *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(GetSample, the_trace);
@@ -367,7 +367,7 @@
// Enable count of allocs if specified in the flags.
bool enable_stats = false;
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
// Create Trace object.
{
@@ -421,7 +421,7 @@
CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
sampling_pthread_ = 0U;
}
- runtime->GetThreadList()->SuspendAll();
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
if (the_trace != nullptr) {
stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0;
the_trace->FinishTracing();
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b3f686d..9ceb6f4 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -395,12 +395,12 @@
has_virtual_or_interface_invokes_(false),
verify_to_dump_(verify_to_dump),
allow_thread_suspension_(allow_thread_suspension) {
- Runtime::Current()->AddMethodVerifier(this);
+ self->SetVerifier(this);
DCHECK(class_def != nullptr);
}
MethodVerifier::~MethodVerifier() {
- Runtime::Current()->RemoveMethodVerifier(this);
+ Thread::Current()->ClearVerifier(this);
STLDeleteElements(&failure_messages_);
}
@@ -4334,8 +4334,8 @@
RegTypeCache::VisitStaticRoots(callback, arg);
}
-void MethodVerifier::VisitRoots(RootCallback* callback, void* arg) {
- reg_types_.VisitRoots(callback, arg);
+void MethodVerifier::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
+ reg_types_.VisitRoots(callback, arg, root_info);
}
} // namespace verifier
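
(The signature change threads a RootInfo down the whole chain, MethodVerifier to RegTypeCache to RegType, instead of each level hard-coding kRootUnknown. A simplified sketch of that propagation; the types below are stand-ins.)

    #include <vector>

    enum RootType { kRootUnknown, kRootNativeStack };
    struct RootInfo {
      RootType type;
      unsigned thread_id;
    };

    struct RegTypeSketch {
      void VisitRoots(const RootInfo& info) const {
        // Report the root together with the caller-supplied info.
        (void)info;
      }
    };

    struct RegTypeCacheSketch {
      std::vector<RegTypeSketch> entries;
      void VisitRoots(const RootInfo& info) const {
        for (const RegTypeSketch& e : entries) {
          e.VisitRoots(info);  // the same info flows to every level
        }
      }
    };

With this, thread.cc can tag verifier roots as kRootNativeStack with the owning thread's id, which is exactly what the VisitRoots call added there does.
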
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index d7c2071..6b813ef 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -227,7 +227,8 @@
static void VisitStaticRoots(RootCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Accessors used by the compiler via CompilerCallback
const DexFile::CodeItem* CodeItem() const;
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 3510665..201169f 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -778,8 +778,8 @@
}
}
-void RegType::VisitRoots(RootCallback* callback, void* arg) const {
- klass_.VisitRootIfNonNull(callback, arg, RootInfo(kRootUnknown));
+void RegType::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) const {
+ klass_.VisitRootIfNonNull(callback, arg, root_info);
}
void UninitializedThisReferenceType::CheckInvariants() const {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 05958b5..73e131e 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -250,7 +250,7 @@
virtual ~RegType() {}
- void VisitRoots(RootCallback* callback, void* arg) const
+ void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 22696c7..6e57857 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -238,9 +238,7 @@
}
}
-RegTypeCache::RegTypeCache(bool can_load_classes)
- : entries_lock_("entries lock"),
- can_load_classes_(can_load_classes) {
+RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
if (kIsDebugBuild) {
Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
}
@@ -563,35 +561,33 @@
// Visit the primitive types; this is required because, if there are no active verifiers, they
// won't be in the entries array and therefore won't be visited as roots.
if (primitive_initialized_) {
- UndefinedType::GetInstance()->VisitRoots(callback, arg);
- ConflictType::GetInstance()->VisitRoots(callback, arg);
- BooleanType::GetInstance()->VisitRoots(callback, arg);
- ByteType::GetInstance()->VisitRoots(callback, arg);
- ShortType::GetInstance()->VisitRoots(callback, arg);
- CharType::GetInstance()->VisitRoots(callback, arg);
- IntegerType::GetInstance()->VisitRoots(callback, arg);
- LongLoType::GetInstance()->VisitRoots(callback, arg);
- LongHiType::GetInstance()->VisitRoots(callback, arg);
- FloatType::GetInstance()->VisitRoots(callback, arg);
- DoubleLoType::GetInstance()->VisitRoots(callback, arg);
- DoubleHiType::GetInstance()->VisitRoots(callback, arg);
+ RootInfo ri(kRootUnknown);
+ UndefinedType::GetInstance()->VisitRoots(callback, arg, ri);
+ ConflictType::GetInstance()->VisitRoots(callback, arg, ri);
+ BooleanType::GetInstance()->VisitRoots(callback, arg, ri);
+ ByteType::GetInstance()->VisitRoots(callback, arg, ri);
+ ShortType::GetInstance()->VisitRoots(callback, arg, ri);
+ CharType::GetInstance()->VisitRoots(callback, arg, ri);
+ IntegerType::GetInstance()->VisitRoots(callback, arg, ri);
+ LongLoType::GetInstance()->VisitRoots(callback, arg, ri);
+ LongHiType::GetInstance()->VisitRoots(callback, arg, ri);
+ FloatType::GetInstance()->VisitRoots(callback, arg, ri);
+ DoubleLoType::GetInstance()->VisitRoots(callback, arg, ri);
+ DoubleHiType::GetInstance()->VisitRoots(callback, arg, ri);
for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
- small_precise_constants_[value - kMinSmallConstant]->VisitRoots(callback, arg);
+ small_precise_constants_[value - kMinSmallConstant]->VisitRoots(callback, arg, ri);
}
}
}
-void RegTypeCache::VisitRoots(RootCallback* callback, void* arg) {
- MutexLock mu(Thread::Current(), entries_lock_);
+void RegTypeCache::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
// Exclude the static roots that are visited by VisitStaticRoots().
for (size_t i = primitive_count_; i < entries_.size(); ++i) {
- entries_[i]->VisitRoots(callback, arg);
+ entries_[i]->VisitRoots(callback, arg, root_info);
}
}
void RegTypeCache::AddEntry(RegType* new_entry) {
- // TODO: There is probably a faster way to do this by using thread local roots.
- MutexLock mu(Thread::Current(), entries_lock_);
entries_.push_back(new_entry);
}
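
(The lock could go away because each RegTypeCache now belongs to a single verifier, which is registered on a single thread. A sketch of the resulting pattern, replacing the mutex with a debug-time owner check; this is an assumed illustration, not ART's code.)

    #include <cassert>
    #include <thread>
    #include <vector>

    class SingleOwnerCache {
     public:
      SingleOwnerCache() : owner_(std::this_thread::get_id()) {}
      void Add(int entry) {
        assert(std::this_thread::get_id() == owner_);  // catches cross-thread misuse
        entries_.push_back(entry);                     // no mutex needed
      }
     private:
      const std::thread::id owner_;  // the constructing thread owns the cache
      std::vector<int> entries_;
    };
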
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 4b56fd6..01032a0 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -137,7 +137,8 @@
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
- void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void VisitStaticRoots(RootCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -171,9 +172,6 @@
// Number of well known primitives that will be copied into a RegTypeCache upon construction.
static uint16_t primitive_count_;
- // Guards adding and visitng roots to prevent race conditions.
- Mutex entries_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-
// The actual storage for the RegTypes.
std::vector<const RegType*> entries_;
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 41d814a..4e229d5 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -106,5 +106,11 @@
modes_variants: [[device,X64]],
names: ["libcore.java.lang.SystemTest#testArrayCopyConcurrentModification"],
bug: 19165288
+},
+{
+ description: "Bug in libcore",
+ result: EXEC_FAILED,
+ names: ["libcore.javax.crypto.ECDHKeyAgreementTest#testInit_withUnsupportedPrivateKeyType"],
+ bug: 19730263
}
]