Merge "Fix bug in register allocator."
diff --git a/Android.mk b/Android.mk
index 2fb812a..3cd6b5f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -232,6 +232,11 @@
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
+# Valgrind. Currently only 32b gtests.
+.PHONY: valgrind-test-art-host
+valgrind-test-art-host: valgrind-test-art-host-gtest32
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
########################################################################
# target test rules
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 20c5a21..3897401 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -41,6 +41,18 @@
test-art-host-run-test-gcstress-interpreter-prebuild-114-ParallelGC64 \
test-art-host-run-test-gcstress-optimizing-prebuild-114-ParallelGC64
+# Failing valgrind tests.
+# Note: none of the 64b tests involving the runtime work currently. b/15170219.
+
+# Optimizing compiler codegen is not destructed and can leak non-arena-ed structures.
+ART_TEST_KNOWN_BROKEN += \
+ valgrind-test-art-host-gtest-codegen_test32 \
+ valgrind-test-art-host-gtest-find_loops_test32 \
+ valgrind-test-art-host-gtest-linearize_test32 \
+ valgrind-test-art-host-gtest-live_ranges_test32 \
+ valgrind-test-art-host-gtest-liveness_test32 \
+ valgrind-test-art-host-gtest-register_allocator_test32
+
# List of known failing tests that when executed won't cause test execution to not finish.
# The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64.
ART_TEST_KNOWN_FAILING :=
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 352938e..700bcf0 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -442,7 +442,14 @@
endif
rule_name := $(3)test-art-$(1)-gtest$(4)
- dependencies := $$(ART_TEST_$(2)_GTEST$(4)_RULES)
+ ifeq ($(3),valgrind-)
+ ifneq ($(1),host)
+ $$(error valgrind tests only wired up for the host)
+ endif
+ dependencies := $$(ART_TEST_$(2)_VALGRIND_GTEST$(4)_RULES)
+ else
+ dependencies := $$(ART_TEST_$(2)_GTEST$(4)_RULES)
+ endif
.PHONY: $$(rule_name)
$$(rule_name): $$(dependencies)
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 2920190..b2c348b 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -34,16 +34,16 @@
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->DoCacheFieldLoweringInfo();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->DoCacheFieldLoweringInfo();
}
bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- return cUnit->mir_graph->HasFieldAccess();
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->HasFieldAccess();
}
};
@@ -58,16 +58,16 @@
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->DoCacheMethodLoweringInfo();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->DoCacheMethodLoweringInfo();
}
bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- return cUnit->mir_graph->HasInvokes();
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->HasInvokes();
}
};
@@ -84,35 +84,35 @@
bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- return cUnit->mir_graph->InlineSpecialMethodsGate();
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->InlineSpecialMethodsGate();
}
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->InlineSpecialMethodsStart();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InlineSpecialMethodsStart();
}
bool Worker(PassDataHolder* data) const {
DCHECK(data != nullptr);
PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- cUnit->mir_graph->InlineSpecialMethods(bb);
+ c_unit->mir_graph->InlineSpecialMethods(bb);
// No need of repeating, so just return false.
return false;
}
void End(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->InlineSpecialMethodsEnd();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InlineSpecialMethodsEnd();
}
};
@@ -127,9 +127,10 @@
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->VerifyDataflow();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->VerifyDataflow();
+ c_unit->mir_graph->ClearAllVisitedFlags();
}
bool Worker(PassDataHolder* data) const;
@@ -147,26 +148,26 @@
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->EliminateNullChecksAndInferTypesStart();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->EliminateNullChecksAndInferTypesStart();
}
bool Worker(PassDataHolder* data) const {
DCHECK(data != nullptr);
PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- return cUnit->mir_graph->EliminateNullChecksAndInferTypes(bb);
+ return c_unit->mir_graph->EliminateNullChecksAndInferTypes(bb);
}
void End(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->EliminateNullChecksAndInferTypesEnd();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->EliminateNullChecksAndInferTypesEnd();
}
};
@@ -178,26 +179,26 @@
bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- return cUnit->mir_graph->EliminateClassInitChecksGate();
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->EliminateClassInitChecksGate();
}
bool Worker(PassDataHolder* data) const {
DCHECK(data != nullptr);
PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- return cUnit->mir_graph->EliminateClassInitChecks(bb);
+ return c_unit->mir_graph->EliminateClassInitChecks(bb);
}
void End(PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->EliminateClassInitChecksEnd();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->EliminateClassInitChecksEnd();
}
};
@@ -213,26 +214,26 @@
bool Gate(const PassDataHolder* data) const OVERRIDE {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- return cUnit->mir_graph->ApplyGlobalValueNumberingGate();
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->ApplyGlobalValueNumberingGate();
}
bool Worker(PassDataHolder* data) const {
DCHECK(data != nullptr);
PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
- CompilationUnit* cUnit = pass_me_data_holder->c_unit;
- DCHECK(cUnit != nullptr);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- return cUnit->mir_graph->ApplyGlobalValueNumbering(bb);
+ return c_unit->mir_graph->ApplyGlobalValueNumbering(bb);
}
void End(PassDataHolder* data) const OVERRIDE {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- cUnit->mir_graph->ApplyGlobalValueNumberingEnd();
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->ApplyGlobalValueNumberingEnd();
}
};
@@ -247,9 +248,9 @@
bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- return ((cUnit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return ((c_unit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
}
bool Worker(PassDataHolder* data) const;
@@ -266,9 +267,9 @@
bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
- DCHECK(cUnit != nullptr);
- return ((cUnit->disable_opt & (1 << kBBOpt)) == 0);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return ((c_unit->disable_opt & (1 << kBBOpt)) == 0);
}
void Start(PassDataHolder* data) const;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 5d7cbed..7ac878f 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -678,26 +678,40 @@
}
}
-/* Try to make common case the fallthrough path */
+/* Try to make common case the fallthrough path. */
bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
- // TODO: For now, just looking for direct throws. Consider generalizing for profile feedback
+ // TODO: For now, just looking for direct throws. Consider generalizing for profile feedback.
if (!bb->explicit_throw) {
return false;
}
+
+ // If we visited it, we are done.
+ if (bb->visited) {
+ return false;
+ }
+ bb->visited = true;
+
BasicBlock* walker = bb;
while (true) {
- // Check termination conditions
+ // Check termination conditions.
if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
break;
}
BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));
+
+ // If we visited the predecessor, we are done.
+ if (prev->visited) {
+ return false;
+ }
+ prev->visited = true;
+
if (prev->conditional_branch) {
if (GetBasicBlock(prev->fall_through) == walker) {
- // Already done - return
+ // Already done - return.
break;
}
DCHECK_EQ(walker, GetBasicBlock(prev->taken));
- // Got one. Flip it and exit
+ // Got one. Flip it and exit.
Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
switch (opcode) {
case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
@@ -721,6 +735,10 @@
break;
}
walker = prev;
+
+ if (walker->visited) {
+ break;
+ }
}
return false;
}
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 06d9dd5..dcec861 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -427,7 +427,7 @@
REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4, kFixupVLoad),
ENCODING_MAP(kThumb2Vldrd, 0xed900b00,
kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF |
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4 |
REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4, kFixupVLoad),
ENCODING_MAP(kThumb2Vmuls, 0xee200a00,
kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 1b3f2a1..01c8f80 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -216,7 +216,7 @@
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) ||
(inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
- verifier::RegType&
+ const verifier::RegType&
reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
if (!reg_type.HasClass()) {
@@ -284,18 +284,18 @@
const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
bool is_safe_cast = false;
if (code == Instruction::CHECK_CAST) {
- verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
- verifier::RegType& cast_type =
+ const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+ const verifier::RegType& cast_type =
method_verifier->ResolveCheckedClass(inst->VRegB_21c());
is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
} else {
- verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
+ const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
// We only know its safe to assign to an array if the array type is precise. For example,
// an Object[] can have any type of object stored in it, but it may also be assigned a
// String[] in which case the stores need to be of Strings.
if (array_type.IsPreciseReference()) {
- verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
- verifier::RegType& component_type = method_verifier->GetRegTypeCache()
+ const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
+ const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
->GetComponentType(array_type, method_verifier->GetClassLoader());
is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 32a7676..db6a01e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -981,7 +981,7 @@
ScopedObjectAccess soa(Thread::Current());
mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
- bool is_initialized;
+ bool is_initialized = false;
bool unused_finalizable;
// Make sure we have a finished Reference class object before attempting to use it.
if (!CanEmbedTypeInCode(*reference_class->GetDexCache()->GetDexFile(),
diff --git a/compiler/elf_patcher.cc b/compiler/elf_patcher.cc
index 72bf7d3..f192227 100644
--- a/compiler/elf_patcher.cc
+++ b/compiler/elf_patcher.cc
@@ -200,7 +200,8 @@
mirror::ArtMethod* target = GetTargetMethod(patch);
uintptr_t quick_code = reinterpret_cast<uintptr_t>(class_linker->GetQuickOatCodeFor(target));
DCHECK_NE(quick_code, 0U) << PrettyMethod(target);
- const OatFile* target_oat = class_linker->FindOpenedOatFileForDexFile(*patch->GetTargetDexFile());
+ const OatFile* target_oat =
+ class_linker->FindOpenedOatDexFileForDexFile(*patch->GetTargetDexFile())->GetOatFile();
// Get where the data actually starts. if target is this oat_file_ it is oat_data_start_,
// otherwise it is wherever target_oat is loaded.
uintptr_t oat_data_addr = GetBaseAddressFor(target_oat);
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index e858a7b..80d7b98 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -197,13 +197,13 @@
std::vector<const DexFile*> dex_files;
uint32_t image_file_location_oat_checksum = 0;
uint32_t image_file_location_oat_begin = 0;
- OatHeader* oat_header = OatHeader::Create(instruction_set,
- instruction_set_features,
- &dex_files,
- image_file_location_oat_checksum,
- image_file_location_oat_begin,
- nullptr);
- ASSERT_NE(oat_header, nullptr);
+ std::unique_ptr<OatHeader> oat_header(OatHeader::Create(instruction_set,
+ instruction_set_features,
+ &dex_files,
+ image_file_location_oat_checksum,
+ image_file_location_oat_begin,
+ nullptr));
+ ASSERT_NE(oat_header.get(), nullptr);
ASSERT_TRUE(oat_header->IsValid());
char* magic = const_cast<char*>(oat_header->GetMagic());
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index fab9f7a..c36b143 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -27,9 +27,8 @@
namespace art {
-static HGraph* TestCode(const uint16_t* data, ArenaPool* pool) {
- ArenaAllocator allocator(pool);
- HGraphBuilder builder(&allocator);
+static HGraph* TestCode(const uint16_t* data, ArenaAllocator* allocator) {
+ HGraphBuilder builder(allocator);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
graph->BuildDominatorTree();
@@ -44,7 +43,8 @@
Instruction::RETURN_VOID);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
}
@@ -56,7 +56,8 @@
Instruction::RETURN);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
}
@@ -71,7 +72,8 @@
Instruction::RETURN);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
}
@@ -87,7 +89,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
}
@@ -101,7 +104,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
}
@@ -146,7 +150,8 @@
Instruction::RETURN_VOID);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header
@@ -173,7 +178,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // goto block
@@ -197,7 +203,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // goto block
@@ -222,7 +229,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header
@@ -248,7 +256,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header
@@ -271,9 +280,9 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
-
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header of outer loop
@@ -302,9 +311,9 @@
Instruction::GOTO | 0xFE00, // second loop
Instruction::RETURN | 0 << 8);
-
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header of first loop
@@ -333,7 +342,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
ASSERT_TRUE(graph->GetBlocks().Get(3)->IsLoopHeader());
HLoopInformation* info = graph->GetBlocks().Get(3)->GetLoopInformation();
ASSERT_FALSE(info->GetHeader()->Dominates(info->GetBackEdges().Get(0)));
@@ -347,7 +357,8 @@
Instruction::RETURN | 0 << 8);
ArenaPool arena;
- HGraph* graph = TestCode(data, &arena);
+ ArenaAllocator allocator(&arena);
+ HGraph* graph = TestCode(data, &allocator);
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header of first loop
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 68b784a..e3045e1 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -92,6 +92,7 @@
// Compute the relocation delta and switch to the new contents area.
ptrdiff_t delta = new_contents - contents_;
+ delete[] contents_;
contents_ = new_contents;
// Update the cursor and recompute the limit.
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 3f9f007..763dafe 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -253,7 +253,7 @@
class X86_64Assembler FINAL : public Assembler {
public:
- X86_64Assembler() {}
+ X86_64Assembler() : cfi_cfa_offset_(0), cfi_pc_(0) {}
virtual ~X86_64Assembler() {}
/*
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 4ed7b20..7a48b63 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -16,6 +16,7 @@
#include "assembler_x86_64.h"
+#include "base/stl_util.h"
#include "utils/assembler_test.h"
namespace art {
@@ -62,6 +63,11 @@
}
}
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(®isters_);
+ }
+
std::vector<x86_64::CpuRegister*> GetRegisters() OVERRIDE {
return registers_;
}
@@ -219,6 +225,7 @@
}
}
+ STLDeleteElements(&registers);
return str.str();
}
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index 1be34ba..0e00f34 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -22,11 +22,13 @@
.extern artFindNativeMethod
ENTRY art_jni_dlsym_lookup_stub
push {r0, r1, r2, r3, lr} @ spill regs
- .save {r0, r1, r2, r3, lr}
- .pad #20
.cfi_adjust_cfa_offset 20
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ .cfi_rel_offset lr, 16
sub sp, #12 @ pad stack pointer to align frame
- .pad #12
.cfi_adjust_cfa_offset 12
blx artFindNativeMethod
mov r12, r0 @ save result in r12
@@ -35,9 +37,12 @@
cbz r0, 1f @ is method code null?
pop {r0, r1, r2, r3, lr} @ restore regs
.cfi_adjust_cfa_offset -20
+ .cfi_restore r0
+ .cfi_restore r1
+ .cfi_restore r2
+ .cfi_restore r3
+ .cfi_restore lr
bx r12 @ if non-null, tail call to method's code
1:
- .cfi_adjust_cfa_offset 20
pop {r0, r1, r2, r3, pc} @ restore regs and return to caller to handle exception
- .cfi_adjust_cfa_offset -20
END art_jni_dlsym_lookup_stub
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 1306546..875efbb 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -156,7 +156,7 @@
}
inline mirror::Object* ClassLinker::AllocObject(Thread* self) {
- return GetClassRoot(kJavaLangObject)->Alloc<false, false>(self,
+ return GetClassRoot(kJavaLangObject)->Alloc<true, false>(self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 563ee93..4342234 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -756,18 +756,18 @@
return *oat_file;
}
-const OatFile* ClassLinker::FindOpenedOatFileForDexFile(const DexFile& dex_file) {
+const OatFile::OatDexFile* ClassLinker::FindOpenedOatDexFileForDexFile(const DexFile& dex_file) {
const char* dex_location = dex_file.GetLocation().c_str();
uint32_t dex_location_checksum = dex_file.GetLocationChecksum();
- return FindOpenedOatFile(nullptr, dex_location, &dex_location_checksum);
+ return FindOpenedOatDexFile(nullptr, dex_location, &dex_location_checksum);
}
-const OatFile* ClassLinker::FindOpenedOatFile(const char* oat_location, const char* dex_location,
- const uint32_t* const dex_location_checksum) {
+const OatFile::OatDexFile* ClassLinker::FindOpenedOatDexFile(const char* oat_location,
+ const char* dex_location,
+ const uint32_t* dex_location_checksum) {
ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (size_t i = 0; i < oat_files_.size(); i++) {
- const OatFile* oat_file = oat_files_[i];
- DCHECK(oat_file != NULL);
+ for (const OatFile* oat_file : oat_files_) {
+ DCHECK(oat_file != nullptr);
if (oat_location != nullptr) {
if (oat_file->GetLocation() != oat_location) {
@@ -778,11 +778,11 @@
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
dex_location_checksum,
false);
- if (oat_dex_file != NULL) {
- return oat_file;
+ if (oat_dex_file != nullptr) {
+ return oat_dex_file;
}
}
- return NULL;
+ return nullptr;
}
@@ -904,8 +904,10 @@
bool needs_registering = false;
- std::unique_ptr<const OatFile> open_oat_file(FindOpenedOatFile(oat_location, dex_location,
- dex_location_checksum_pointer));
+ const OatFile::OatDexFile* oat_dex_file = FindOpenedOatDexFile(oat_location, dex_location,
+ dex_location_checksum_pointer);
+ std::unique_ptr<const OatFile> open_oat_file(
+ oat_dex_file != nullptr ? oat_dex_file->GetOatFile() : nullptr);
// 2) If we do not have an open one, maybe there's one on disk already.
@@ -1026,7 +1028,8 @@
}
// Try to load again, but stronger checks.
- success = LoadMultiDexFilesFromOatFile(open_oat_file.get(), dex_location, dex_location_checksum_pointer,
+ success = LoadMultiDexFilesFromOatFile(open_oat_file.get(), dex_location,
+ dex_location_checksum_pointer,
true, error_msgs, dex_files);
if (success) {
RegisterOatFile(open_oat_file.release());
@@ -1201,12 +1204,11 @@
if (oat_dex_file == NULL) {
*error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x",
oat_file->GetLocation().c_str(), dex_location, dex_location_checksum);
- std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file->GetOatDexFiles();
- for (size_t i = 0; i < oat_dex_files.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
- *error_msg += StringPrintf("\noat file '%s' contains contents for '%s'",
+ for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ *error_msg += StringPrintf("\noat file '%s' contains contents for '%s' with checksum 0x%x",
oat_file->GetLocation().c_str(),
- oat_dex_file->GetDexFileLocation().c_str());
+ oat_dex_file->GetDexFileLocation().c_str(),
+ oat_dex_file->GetDexFileLocationChecksum());
}
return false;
}
@@ -1248,7 +1250,7 @@
const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation(
const char* dex_location,
- const uint32_t* const dex_location_checksum,
+ const uint32_t* dex_location_checksum,
InstructionSet isa,
std::vector<std::string>* error_msgs,
bool* obsolete_file_cleanup_failed) {
@@ -2138,7 +2140,7 @@
if (pair.second != nullptr) {
mirror::Class* klass = LookupClass(descriptor, nullptr);
if (klass != nullptr) {
- return klass;
+ return EnsureResolved(self, descriptor, klass);
}
klass = DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
*pair.second);
@@ -2400,15 +2402,11 @@
OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx,
bool* found) {
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
- const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
- if (oat_file == nullptr) {
+ const OatFile::OatDexFile* oat_dex_file = FindOpenedOatDexFileForDexFile(dex_file);
+ if (oat_dex_file == nullptr) {
*found = false;
return OatFile::OatClass::Invalid();
}
- uint dex_location_checksum = dex_file.GetLocationChecksum();
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
- &dex_location_checksum);
- CHECK(oat_dex_file != NULL) << dex_file.GetLocation();
*found = true;
return oat_dex_file->GetOatClass(class_def_idx);
}
@@ -3681,17 +3679,13 @@
}
}
- const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
+ const OatFile::OatDexFile* oat_dex_file = FindOpenedOatDexFileForDexFile(dex_file);
// Make this work with gtests, which do not set up the image properly.
// TODO: we should clean up gtests to set up the image path properly.
- if (Runtime::Current()->IsCompiler() || (oat_file == NULL)) {
+ if (Runtime::Current()->IsCompiler() || (oat_dex_file == nullptr)) {
return false;
}
- CHECK(oat_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
- uint dex_location_checksum = dex_file.GetLocationChecksum();
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
- &dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
uint16_t class_def_index = klass->GetDexClassDefIndex();
oat_file_class_status = oat_dex_file->GetOatClass(class_def_index).GetStatus();
@@ -4433,7 +4427,6 @@
return false;
}
CreateReferenceInstanceOffsets(klass);
- CreateReferenceStaticOffsets(klass);
CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
if (!klass->IsTemp() || (!init_done_ && klass->GetClassSize() == class_size)) {
@@ -5166,20 +5159,13 @@
return;
}
}
- CreateReferenceOffsets(klass, false, reference_offsets);
+ CreateReferenceOffsets(klass, reference_offsets);
}
-void ClassLinker::CreateReferenceStaticOffsets(ConstHandle<mirror::Class> klass) {
- CreateReferenceOffsets(klass, true, 0);
-}
-
-void ClassLinker::CreateReferenceOffsets(ConstHandle<mirror::Class> klass, bool is_static,
+void ClassLinker::CreateReferenceOffsets(ConstHandle<mirror::Class> klass,
uint32_t reference_offsets) {
- size_t num_reference_fields =
- is_static ? klass->NumReferenceStaticFieldsDuringLinking()
- : klass->NumReferenceInstanceFieldsDuringLinking();
- mirror::ObjectArray<mirror::ArtField>* fields =
- is_static ? klass->GetSFields() : klass->GetIFields();
+ size_t num_reference_fields = klass->NumReferenceInstanceFieldsDuringLinking();
+ mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
// All of the fields that contain object references are guaranteed
// to be at the beginning of the fields list.
for (size_t i = 0; i < num_reference_fields; ++i) {
@@ -5197,12 +5183,7 @@
break;
}
}
- // Update fields in klass
- if (is_static) {
- klass->SetReferenceStaticOffsets(reference_offsets);
- } else {
- klass->SetReferenceInstanceOffsets(reference_offsets);
- }
+ klass->SetReferenceInstanceOffsets(reference_offsets);
}
mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d1f5aa0..158816d 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -533,10 +533,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(ConstHandle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceStaticOffsets(ConstHandle<mirror::Class> klass)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceOffsets(ConstHandle<mirror::Class> klass, bool is_static,
- uint32_t reference_offsets)
+ void CreateReferenceOffsets(ConstHandle<mirror::Class> klass, uint32_t reference_offsets)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// For use by ImageWriter to find DexCaches for its roots
@@ -549,14 +546,15 @@
}
mirror::DexCache* GetDexCache(size_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_);
- const OatFile* FindOpenedOatFileForDexFile(const DexFile& dex_file)
+ const OatFile::OatDexFile* FindOpenedOatDexFileForDexFile(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Find an opened oat file that contains dex_location. If oat_location is not nullptr, the file
- // must have that location, else any oat location is accepted.
- const OatFile* FindOpenedOatFile(const char* oat_location, const char* dex_location,
- const uint32_t* const dex_location_checksum)
+ // Find an opened oat dex file that contains dex_location. If oat_location is not nullptr,
+ // the file must have that location, else any oat location is accepted.
+ const OatFile::OatDexFile* FindOpenedOatDexFile(const char* oat_location,
+ const char* dex_location,
+ const uint32_t* dex_location_checksum)
LOCKS_EXCLUDED(dex_lock_);
// Will open the oat file directly without relocating, even if we could/should do relocation.
@@ -606,8 +604,8 @@
// Note 1: this will not check open oat files, which are assumed to be stale when this is run.
// Note 2: Does not register the oat file. It is the caller's job to register if the file is to
// be kept.
- const OatFile* FindOatFileContainingDexFileFromDexLocation(const char* location,
- const uint32_t* const location_checksum,
+ const OatFile* FindOatFileContainingDexFileFromDexLocation(const char* dex_location,
+ const uint32_t* dex_location_checksum,
InstructionSet isa,
std::vector<std::string>* error_msgs,
bool* obsolete_file_cleanup_failed)
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 69c281e..b250918 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -528,7 +528,6 @@
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), "referenceInstanceOffsets"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, reference_static_offsets_), "referenceStaticOffsets"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status"));
};
};
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index ed3592c..8656bd3 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -77,14 +77,13 @@
// Strip ":...", which is the location
const char* zip_entry_name = kClassesDex;
const char* file_part = filename;
- std::unique_ptr<const char> file_part_ptr;
+ std::string file_part_storage;
-
- if (IsMultiDexLocation(filename)) {
- std::pair<const char*, const char*> pair = SplitMultiDexLocation(filename);
- file_part_ptr.reset(pair.first);
- file_part = pair.first;
- zip_entry_name = pair.second;
+ if (DexFile::IsMultiDexLocation(filename)) {
+ file_part_storage = GetBaseLocation(filename);
+ file_part = file_part_storage.c_str();
+ zip_entry_name = filename + file_part_storage.size() + 1;
+ DCHECK_EQ(zip_entry_name[-1], kMultiDexSeparator);
}
ScopedFd fd(OpenAndReadMagic(file_part, &magic, error_msg));
@@ -206,19 +205,20 @@
const Header* dex_header = reinterpret_cast<const Header*>(map->Begin());
- const DexFile* dex_file = OpenMemory(location, dex_header->checksum_, map.release(), error_msg);
- if (dex_file == nullptr) {
+ std::unique_ptr<const DexFile> dex_file(OpenMemory(location, dex_header->checksum_, map.release(),
+ error_msg));
+ if (dex_file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location,
error_msg->c_str());
return nullptr;
}
- if (verify && !DexFileVerifier::Verify(dex_file, dex_file->Begin(), dex_file->Size(), location,
- error_msg)) {
+ if (verify && !DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
+ location, error_msg)) {
return nullptr;
}
- return dex_file;
+ return dex_file.release();
}
const char* DexFile::kClassesDex = "classes.dex";
@@ -303,7 +303,7 @@
while (i < 100) {
std::string name = StringPrintf("classes%zu.dex", i);
- std::string fake_location = location + ":" + name;
+ std::string fake_location = location + kMultiDexSeparator + name;
std::unique_ptr<const DexFile> next_dex_file(Open(zip_archive, name.c_str(), fake_location,
error_msg, &error_code));
if (next_dex_file.get() == nullptr) {
@@ -951,21 +951,6 @@
return strrchr(location, kMultiDexSeparator) != nullptr;
}
-std::pair<const char*, const char*> DexFile::SplitMultiDexLocation(
- const char* location) {
- const char* colon_ptr = strrchr(location, kMultiDexSeparator);
-
- // Check it's synthetic.
- CHECK_NE(colon_ptr, static_cast<const char*>(nullptr));
-
- size_t colon_index = colon_ptr - location;
- char* tmp = new char[colon_index + 1];
- strncpy(tmp, location, colon_index);
- tmp[colon_index] = 0;
-
- return std::make_pair(tmp, colon_ptr + 1);
-}
-
std::string DexFile::GetMultiDexClassesDexName(size_t number, const char* dex_location) {
if (number == 0) {
return dex_location;
@@ -976,26 +961,17 @@
std::string DexFile::GetDexCanonicalLocation(const char* dex_location) {
CHECK_NE(dex_location, static_cast<const char*>(nullptr));
- char* path = nullptr;
- if (!IsMultiDexLocation(dex_location)) {
- path = realpath(dex_location, nullptr);
+ std::string base_location = GetBaseLocation(dex_location);
+ const char* suffix = dex_location + base_location.size();
+ DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator);
+ UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr));
+ if (path != nullptr && path.get() != base_location) {
+ return std::string(path.get()) + suffix;
+ } else if (suffix[0] == 0) {
+ return base_location;
} else {
- std::pair<const char*, const char*> pair = DexFile::SplitMultiDexLocation(dex_location);
- const char* dex_real_location(realpath(pair.first, nullptr));
- delete pair.first;
- if (dex_real_location != nullptr) {
- int length = strlen(dex_real_location) + strlen(pair.second) + strlen(kMultiDexSeparatorString) + 1;
- char* multidex_canonical_location = reinterpret_cast<char*>(malloc(sizeof(char) * length));
- snprintf(multidex_canonical_location, length, "%s" kMultiDexSeparatorString "%s", dex_real_location, pair.second);
- free(const_cast<char*>(dex_real_location));
- path = multidex_canonical_location;
- }
+ return dex_location;
}
-
- // If realpath fails then we just copy the argument.
- std::string result(path == nullptr ? dex_location : path);
- free(path);
- return result;
}
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) {
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 118bd80..1b46a12 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -389,14 +389,21 @@
// For normal dex files, location and base location coincide. If a dex file is part of a multidex
// archive, the base location is the name of the originating jar/apk, stripped of any internal
// classes*.dex path.
- const std::string GetBaseLocation() const {
- if (IsMultiDexLocation(location_.c_str())) {
- std::pair<const char*, const char*> pair = SplitMultiDexLocation(location_.c_str());
- std::string res(pair.first);
- delete[] pair.first;
- return res;
+ static std::string GetBaseLocation(const char* location) {
+ const char* pos = strrchr(location, kMultiDexSeparator);
+ if (pos == nullptr) {
+ return location;
} else {
+ return std::string(location, pos - location);
+ }
+ }
+
+ std::string GetBaseLocation() const {
+ size_t pos = location_.rfind(kMultiDexSeparator);
+ if (pos == std::string::npos) {
return location_;
+ } else {
+ return location_.substr(0, pos);
}
}
@@ -918,13 +925,6 @@
// whether the string contains the separator character.
static bool IsMultiDexLocation(const char* location);
- // Splits a multidex location at the last separator character. The second component is a pointer
- // to the character after the separator. The first is a copy of the substring up to the separator.
- //
- // Note: It's the caller's job to free the first component of the returned pair.
- // Bug 15313523: gcc/libc++ don't allow a unique_ptr for the first component
- static std::pair<const char*, const char*> SplitMultiDexLocation(const char* location);
-
// The base address of the memory mapping.
const byte* const begin_;
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 330d045..d0c5603 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -355,8 +355,8 @@
TEST_F(DexFileTest, GetDexCanonicalLocation) {
ScratchFile file;
- char* dex_location_real = realpath(file.GetFilename().c_str(), nullptr);
- std::string dex_location(dex_location_real);
+ UniqueCPtr<const char[]> dex_location_real(realpath(file.GetFilename().c_str(), nullptr));
+ std::string dex_location(dex_location_real.get());
ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str()));
std::string multidex_location = DexFile::GetMultiDexClassesDexName(1, dex_location.c_str());
@@ -371,8 +371,6 @@
ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));
ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
-
- free(dex_location_real);
}
} // namespace art
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 47696f9..fede2f8 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -19,6 +19,7 @@
#include <setjmp.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
+#include "base/stl_util.h"
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "sigchain.h"
@@ -115,13 +116,23 @@
initialized_ = true;
}
-void FaultManager::Shutdown() {
+void FaultManager::Release() {
if (initialized_) {
UnclaimSignalChain(SIGSEGV);
initialized_ = false;
}
}
+void FaultManager::Shutdown() {
+ if (initialized_) {
+ Release();
+
+ // Free all handlers.
+ STLDeleteElements(&generated_code_handlers_);
+ STLDeleteElements(&other_handlers_);
+ }
+}
+
void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
// BE CAREFUL ALLOCATING HERE INCLUDING USING LOG(...)
//
@@ -156,9 +167,9 @@
// Now set up the nested signal handler.
- // Shutdown the fault manager so that it will remove the signal chain for
+ // Release the fault manager so that it will remove the signal chain for
// SIGSEGV and we call the real sigaction.
- fault_manager.Shutdown();
+ fault_manager.Release();
// The action for SIGSEGV should be the default handler now.
@@ -226,6 +237,7 @@
}
void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
+ DCHECK(initialized_);
if (generated_code) {
generated_code_handlers_.push_back(handler);
} else {
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index bb26780..8b66a6f 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -39,10 +39,17 @@
~FaultManager();
void Init();
+
+ // Unclaim signals.
+ void Release();
+
+ // Unclaim signals and delete registered handlers.
void Shutdown();
void HandleFault(int sig, siginfo_t* info, void* context);
void HandleNestedSignal(int sig, siginfo_t* info, void* context);
+
+ // Added handlers are owned by the fault handler and will be freed on Shutdown().
void AddHandler(FaultHandler* handler, bool generated_code);
void RemoveHandler(FaultHandler* handler);
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 9bf3ea2..9ee6d89 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -27,15 +27,6 @@
class Object;
} // namespace mirror
-inline void IrtIterator::SkipNullsAndTombstones() {
- // We skip NULLs and tombstones. Clients don't want to see implementation details.
- while (i_ < capacity_ &&
- (table_[i_].IsNull() ||
- Runtime::Current()->IsClearedJniWeakGlobal(table_[i_].Read<kWithoutReadBarrier>()))) {
- ++i_;
- }
-}
-
// Verifies that the indirect table lookup is valid.
// Returns "false" if something looks bad.
inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 49bffa4..2278408 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -161,10 +161,12 @@
}
void IndirectReferenceTable::AssertEmpty() {
- if (UNLIKELY(begin() != end())) {
- ScopedObjectAccess soa(Thread::Current());
- LOG(FATAL) << "Internal Error: non-empty local reference table\n"
- << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+ for (size_t i = 0; i < Capacity(); ++i) {
+ if (!table_[i].IsNull()) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Internal Error: non-empty local reference table\n"
+ << MutatorLockedDumpable<IndirectReferenceTable>(*this);
+ }
}
}
@@ -264,6 +266,11 @@
void IndirectReferenceTable::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
RootType root_type) {
for (auto ref : *this) {
+ if (*ref == nullptr) {
+ // Need to skip null entries to make it possible to do the
+ // non-null check after the call back.
+ continue;
+ }
callback(ref, arg, tid, root_type);
DCHECK(*ref != nullptr);
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 562ba1e..5291e50 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -206,12 +206,10 @@
explicit IrtIterator(GcRoot<mirror::Object>* table, size_t i, size_t capacity)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: table_(table), i_(i), capacity_(capacity) {
- SkipNullsAndTombstones();
}
IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
++i_;
- SkipNullsAndTombstones();
return *this;
}
@@ -225,8 +223,6 @@
}
private:
- void SkipNullsAndTombstones() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
GcRoot<mirror::Object>* const table_;
size_t i_;
const size_t capacity_;
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 424addb..0ac5b88 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -760,6 +760,11 @@
for (mirror::Object** entry : weak_globals_) {
// Since this is called by the GC, we don't need a read barrier.
mirror::Object* obj = *entry;
+ if (obj == nullptr) {
+ // Need to skip null here to distinguish between null entries
+ // and cleared weak ref entries.
+ continue;
+ }
mirror::Object* new_obj = callback(obj, arg);
if (new_obj == nullptr) {
new_obj = Runtime::Current()->GetClearedJniWeakGlobal();
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 69f618c..e54d0e0 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -18,6 +18,8 @@
#include <memory>
+#include <valgrind.h>
+
#include "gtest/gtest.h"
namespace art {
@@ -198,17 +200,20 @@
#endif
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
- uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
- std::string error_msg;
- std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
- reinterpret_cast<byte*>(start_addr),
- 0x21000000,
- PROT_READ | PROT_WRITE,
- true,
- &error_msg));
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
- ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), start_addr);
+ // This test may not work under valgrind.
+ if (RUNNING_ON_VALGRIND == 0) {
+ uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
+ std::string error_msg;
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+ reinterpret_cast<byte*>(start_addr),
+ 0x21000000,
+ PROT_READ | PROT_WRITE,
+ true,
+ &error_msg));
+ ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), start_addr);
+ }
}
TEST_F(MemMapTest, MapAnonymousOverflow) {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 760d54c..e7d8163 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -292,18 +292,6 @@
new_reference_offsets);
}
-void Class::SetReferenceStaticOffsets(uint32_t new_reference_offsets) {
- if (new_reference_offsets != CLASS_WALK_SUPER) {
- // Sanity check that the number of bits set in the reference offset bitmap
- // agrees with the number of references
- CHECK_EQ((size_t)POPCOUNT(new_reference_offsets),
- NumReferenceStaticFieldsDuringLinking());
- }
- // Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_),
- new_reference_offsets);
-}
-
bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) {
size_t i = 0;
while (descriptor1[i] != '\0' && descriptor1[i] == descriptor2[i]) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 0d30bc6..cf9501a 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -871,14 +871,6 @@
// TODO: uint16_t
void SetStaticField(uint32_t i, ArtField* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetReferenceStaticOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_));
- }
-
- void SetReferenceStaticOffsets(uint32_t new_reference_offsets)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Find a static or instance field using the JLS resolution order
static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
@@ -1150,9 +1142,6 @@
// Bitmap of offsets of ifields.
uint32_t reference_instance_offsets_;
- // Bitmap of offsets of sfields.
- uint32_t reference_static_offsets_;
-
// State of class initialization.
Status status_;
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 8c1dc7d..166ea9c 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -888,9 +888,9 @@
template<bool kVisitClass, bool kIsStatic, typename Visitor>
inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) {
- if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
+ if (!kIsStatic && LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
if (!kVisitClass) {
- // Mask out the class from the reference offsets.
+ // Mask out the class from the reference offsets.
ref_offsets ^= kWordHighBitMask;
}
DCHECK_EQ(ClassOffset().Uint32Value(), 0U);
@@ -902,7 +902,7 @@
ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
}
} else {
- // There is no reference offset bitmap. In the non-static case, walk up the class
+ // There is no reference offset bitmap. In the non-static case, walk up the class
// inheritance hierarchy and find reference offsets the hard way. In the static case, just
// consider this class.
for (mirror::Class* klass = kIsStatic ? AsClass() : GetClass(); klass != nullptr;
@@ -930,8 +930,7 @@
template<bool kVisitClass, typename Visitor>
inline void Object::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
DCHECK(!klass->IsTemp());
- klass->VisitFieldsReferences<kVisitClass, true>(
- klass->GetReferenceStaticOffsets<kVerifyNone>(), visitor);
+ klass->VisitFieldsReferences<kVisitClass, true>(0, visitor);
}
template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags, typename Visitor,
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index d179a96..cf1c6e1 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -140,7 +140,7 @@
}
OatFile::~OatFile() {
- STLDeleteValues(&oat_dex_files_);
+ STLDeleteElements(&oat_dex_files_storage_);
if (dlopen_handle_ != NULL) {
dlclose(dlopen_handle_);
}
@@ -238,7 +238,9 @@
return false;
}
- for (size_t i = 0; i < GetOatHeader().GetDexFileCount(); i++) {
+ uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
+ oat_dex_files_storage_.reserve(dex_file_count);
+ for (size_t i = 0; i < dex_file_count; i++) {
uint32_t dex_file_location_size = *reinterpret_cast<const uint32_t*>(oat);
if (UNLIKELY(dex_file_location_size == 0U)) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd with empty location name",
@@ -315,14 +317,24 @@
return false;
}
- // Create the OatDexFile and add it to the owning map indexed by the dex file location.
+ std::string canonical_location = DexFile::GetDexCanonicalLocation(dex_file_location.c_str());
+
+ // Create the OatDexFile and add it to the owning container.
OatDexFile* oat_dex_file = new OatDexFile(this,
dex_file_location,
+ canonical_location,
dex_file_checksum,
dex_file_pointer,
methods_offsets_pointer);
+ oat_dex_files_storage_.push_back(oat_dex_file);
+
+ // Add the location and canonical location (if different) to the oat_dex_files_ table.
StringPiece key(oat_dex_file->GetDexFileLocation());
oat_dex_files_.Put(key, oat_dex_file);
+ if (canonical_location != dex_file_location) {
+ StringPiece canonical_key(oat_dex_file->GetCanonicalDexFileLocation());
+ oat_dex_files_.Put(canonical_key, oat_dex_file);
+ }
}
return true;
}
@@ -370,20 +382,13 @@
oat_dex_file = secondary_lb->second; // May be nullptr.
} else {
// We haven't seen this dex_location before, we must check the canonical location.
- if (UNLIKELY(oat_dex_files_by_canonical_location_.empty())) {
- // Lazily fill in the oat_dex_files_by_canonical_location_.
- for (const auto& entry : oat_dex_files_) {
- const std::string& dex_location = entry.second->GetDexFileLocation();
- string_cache_.emplace_back(DexFile::GetDexCanonicalLocation(dex_location.c_str()));
- StringPiece canonical_location_key(string_cache_.back());
- oat_dex_files_by_canonical_location_.Put(canonical_location_key, entry.second);
- }
- }
std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
- StringPiece canonical_key(dex_canonical_location);
- auto canonical_it = oat_dex_files_by_canonical_location_.find(canonical_key);
- if (canonical_it != oat_dex_files_by_canonical_location_.end()) {
- oat_dex_file = canonical_it->second;
+ if (dex_canonical_location != dex_location) {
+ StringPiece canonical_key(dex_canonical_location);
+ auto canonical_it = oat_dex_files_.find(canonical_key);
+ if (canonical_it != oat_dex_files_.end()) {
+ oat_dex_file = canonical_it->second;
+ } // else keep nullptr.
} // else keep nullptr.
// Copy the key to the string_cache_ and store the result in secondary map.
@@ -408,11 +413,11 @@
<< " ( canonical path " << dex_canonical_location << ")"
<< " with checksum " << checksum << " in OatFile " << GetLocation();
if (kIsDebugBuild) {
- for (Table::const_iterator it = oat_dex_files_.begin(); it != oat_dex_files_.end(); ++it) {
+ for (const OatDexFile* odf : oat_dex_files_storage_) {
LOG(WARNING) << "OatFile " << GetLocation()
- << " contains OatDexFile " << it->second->GetDexFileLocation()
- << " (canonical path " << it->first << ")"
- << " with checksum 0x" << std::hex << it->second->GetDexFileLocationChecksum();
+ << " contains OatDexFile " << odf->GetDexFileLocation()
+ << " (canonical path " << odf->GetCanonicalDexFileLocation() << ")"
+ << " with checksum 0x" << std::hex << odf->GetDexFileLocationChecksum();
}
}
}
@@ -420,21 +425,15 @@
return NULL;
}
-std::vector<const OatFile::OatDexFile*> OatFile::GetOatDexFiles() const {
- std::vector<const OatFile::OatDexFile*> result;
- for (Table::const_iterator it = oat_dex_files_.begin(); it != oat_dex_files_.end(); ++it) {
- result.push_back(it->second);
- }
- return result;
-}
-
OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
const std::string& dex_file_location,
+ const std::string& canonical_dex_file_location,
uint32_t dex_file_location_checksum,
const byte* dex_file_pointer,
const uint32_t* oat_class_offsets_pointer)
: oat_file_(oat_file),
dex_file_location_(dex_file_location),
+ canonical_dex_file_location_(canonical_dex_file_location),
dex_file_location_checksum_(dex_file_location_checksum),
dex_file_pointer_(dex_file_pointer),
oat_class_offsets_pointer_(oat_class_offsets_pointer) {}
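For context on the oat_file.cc hunks above: Setup() now keeps each OatDexFile in an owning vector and registers it in the lookup table under both its plain location and, when different, its canonical location. A minimal sketch of that dual-key pattern, using std::map, std::string and a placeholder entry type as stand-ins for the real SafeMap/StringPiece/OatDexFile:

#include <map>
#include <memory>
#include <string>
#include <vector>

struct DexEntry { std::string location; };  // Placeholder for OatDexFile.

class DexRegistry {
 public:
  void Add(const std::string& location, const std::string& canonical) {
    storage_.push_back(std::unique_ptr<DexEntry>(new DexEntry{location}));
    DexEntry* entry = storage_.back().get();
    table_[location] = entry;
    if (canonical != location) {
      table_[canonical] = entry;  // Same entry under a second key.
    }
  }

  const DexEntry* Find(const std::string& location,
                       const std::string& canonical) const {
    auto it = table_.find(location);
    if (it == table_.end() && canonical != location) {
      it = table_.find(canonical);  // Secondary lookup by canonical location.
    }
    return it != table_.end() ? it->second : nullptr;
  }

 private:
  std::vector<std::unique_ptr<DexEntry>> storage_;  // Owns the entries.
  std::map<std::string, DexEntry*> table_;          // Non-owning, dual-keyed.
};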
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 8cb47e2..2fd4f4c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -214,6 +214,10 @@
// Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
const DexFile* OpenDexFile(std::string* error_msg) const;
+ const OatFile* GetOatFile() const {
+ return oat_file_;
+ }
+
// Returns the size of the DexFile referred to by this OatDexFile.
size_t FileSize() const;
@@ -222,6 +226,11 @@
return dex_file_location_;
}
+ // Returns the canonical location of DexFile that was the source of this OatDexFile.
+ const std::string& GetCanonicalDexFileLocation() const {
+ return canonical_dex_file_location_;
+ }
+
// Returns the checksum of the original DexFile that was the source of this OatDexFile.
uint32_t GetDexFileLocationChecksum() const {
return dex_file_location_checksum_;
@@ -235,12 +244,14 @@
private:
OatDexFile(const OatFile* oat_file,
const std::string& dex_file_location,
+ const std::string& canonical_dex_file_location,
uint32_t dex_file_checksum,
const byte* dex_file_pointer,
const uint32_t* oat_class_offsets_pointer);
const OatFile* const oat_file_;
const std::string dex_file_location_;
+ const std::string canonical_dex_file_location_;
const uint32_t dex_file_location_checksum_;
const byte* const dex_file_pointer_;
const uint32_t* const oat_class_offsets_pointer_;
@@ -254,7 +265,9 @@
bool exception_if_not_found = true) const
LOCKS_EXCLUDED(secondary_lookup_lock_);
- std::vector<const OatDexFile*> GetOatDexFiles() const;
+ const std::vector<const OatDexFile*>& GetOatDexFiles() const {
+ return oat_dex_files_storage_;
+ }
size_t Size() const {
return End() - Begin();
@@ -307,6 +320,9 @@
// dlopen handle during runtime.
void* dlopen_handle_;
+ // Owning storage for the OatDexFile objects.
+ std::vector<const OatDexFile*> oat_dex_files_storage_;
+
// NOTE: We use a StringPiece as the key type to avoid a memory allocation on every
// lookup with a const char* key. The StringPiece doesn't own its backing storage,
// therefore we're using the OatDexFile::dex_file_location_ as the backing storage
@@ -314,11 +330,11 @@
// of keys in secondary_oat_dex_files_ and oat_dex_files_by_canonical_location_.
typedef AllocationTrackingSafeMap<StringPiece, const OatDexFile*, kAllocatorTagOatFile> Table;
- // Map each plain dex file location retrieved from the oat file to its OatDexFile.
- // This map doesn't change after it's constructed in Setup() and therefore doesn't
- // need any locking and provides the cheapest dex file lookup for GetOatDexFile()
- // for a very frequent use case. Never contains a nullptr value.
- Table oat_dex_files_; // Owns the OatDexFile* values.
+ // Map each location and canonical location (if different) retrieved from the
+ // oat file to its OatDexFile. This map doesn't change after it's constructed in Setup()
+ // and therefore doesn't need any locking and provides the cheapest dex file lookup
+ // for GetOatDexFile() for a very frequent use case. Never contains a nullptr value.
+ Table oat_dex_files_;
// Lock guarding all members needed for secondary lookup in GetOatDexFile().
mutable Mutex secondary_lookup_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -329,10 +345,6 @@
// location and use oat_dex_files_by_canonical_location_.
mutable Table secondary_oat_dex_files_ GUARDED_BY(secondary_lookup_lock_);
- // Map the canonical location to an OatDexFile. This lazily constructed map is used
- // when we're doing the secondary lookup for a given location for the first time.
- mutable Table oat_dex_files_by_canonical_location_ GUARDED_BY(secondary_lookup_lock_);
-
// Cache of strings. Contains the backing storage for keys in the secondary_oat_dex_files_
// and the lazily initialized oat_dex_files_by_canonical_location_.
// NOTE: We're keeping references to contained strings in form of StringPiece and adding
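The NOTE above hinges on the keys being non-owning views: the tables only work because the backing strings (the OatDexFile locations or the string_cache_ entries) outlive the maps and never move. A rough illustration of that convention, assuming std::string_view as a stand-in for StringPiece and std::list for address-stable backing storage:

#include <list>
#include <map>
#include <string>
#include <string_view>

class ViewKeyedTable {
 public:
  void Put(const std::string& key, int value) {
    backing_.push_back(key);  // Copy the key into owned storage first.
    table_.emplace(std::string_view(backing_.back()), value);
  }
  const int* Get(const char* key) const {  // No allocation on lookup.
    auto it = table_.find(std::string_view(key));
    return it != table_.end() ? &it->second : nullptr;
  }
 private:
  std::list<std::string> backing_;         // std::list: elements never relocate.
  std::map<std::string_view, int> table_;  // Keys are views into backing_.
};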
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 8b632b2..fe05073 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -29,9 +29,7 @@
inline mirror::Object* Runtime::GetClearedJniWeakGlobal() {
mirror::Object* obj = sentinel_.Read();
- if (obj == nullptr) {
- LOG(ERROR) << "Failed to return cleared JNI weak global sentinel";
- }
+ DCHECK(obj != nullptr);
return obj;
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 89ad505..474b72d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -139,9 +139,6 @@
system_class_loader_(nullptr),
dump_gc_performance_on_shutdown_(false),
preinitialization_transaction_(nullptr),
- null_pointer_handler_(nullptr),
- suspend_handler_(nullptr),
- stack_overflow_handler_(nullptr),
verify_(false),
target_sdk_version_(0),
implicit_null_checks_(false),
@@ -199,10 +196,6 @@
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
-
- delete null_pointer_handler_;
- delete suspend_handler_;
- delete stack_overflow_handler_;
}
struct AbortState {
@@ -599,9 +592,7 @@
return false;
}
- std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file->GetOatDexFiles();
- for (size_t i = 0; i < oat_dex_files.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
+ for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
if (oat_dex_file == nullptr) {
*failures += 1;
continue;
@@ -733,7 +724,8 @@
case kArm64:
case kX86_64:
implicit_null_checks_ = true;
- implicit_so_checks_ = true;
+ // Installing stack protection does not play well with valgrind.
+ implicit_so_checks_ = (RUNNING_ON_VALGRIND == 0);
break;
default:
// Keep the defaults.
@@ -745,16 +737,19 @@
// These need to be in a specific order. The null pointer check handler must be
// after the suspend check and stack overflow check handlers.
+ //
+ // Note: the instances attach themselves to the fault manager and are handled by it. The manager
+ // will delete the instance on Shutdown().
if (implicit_suspend_checks_) {
- suspend_handler_ = new SuspensionHandler(&fault_manager);
+ new SuspensionHandler(&fault_manager);
}
if (implicit_so_checks_) {
- stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
+ new StackOverflowHandler(&fault_manager);
}
if (implicit_null_checks_) {
- null_pointer_handler_ = new NullPointerHandler(&fault_manager);
+ new NullPointerHandler(&fault_manager);
}
if (kEnableJavaStackTraceHandler) {
@@ -809,6 +804,7 @@
// Initialize the special sentinel_ value early.
sentinel_ = GcRoot<mirror::Object>(class_linker_->AllocObject(self));
+ CHECK(sentinel_.Read() != nullptr);
verifier::MethodVerifier::Init();
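A note on the handler hunk above: the bare new expressions are deliberate. Each handler constructor registers the instance with fault_manager, which then owns it and deletes it on Shutdown(), so Runtime no longer needs to hold or delete the pointers. Roughly, with illustrative stand-in names:

#include <vector>

class FaultManagerSketch;

class FaultHandlerSketch {
 public:
  explicit FaultHandlerSketch(FaultManagerSketch* manager);  // Self-registers.
  virtual ~FaultHandlerSketch() {}
};

class FaultManagerSketch {
 public:
  void AddHandler(FaultHandlerSketch* handler) { handlers_.push_back(handler); }
  void Shutdown() {
    for (FaultHandlerSketch* handler : handlers_) {
      delete handler;  // The manager owns every registered handler.
    }
    handlers_.clear();
  }
 private:
  std::vector<FaultHandlerSketch*> handlers_;
};

inline FaultHandlerSketch::FaultHandlerSketch(FaultManagerSketch* manager) {
  manager->AddHandler(this);
}

// Usage mirrors the runtime.cc change: the creator never stores the pointer.
//   FaultManagerSketch fault_manager;
//   new FaultHandlerSketch(&fault_manager);
//   ...
//   fault_manager.Shutdown();  // Deletes every registered handler.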
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a0993ca..9df1453 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -463,16 +463,8 @@
void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
- bool ExplicitNullChecks() const {
- return null_pointer_handler_ == nullptr;
- }
-
- bool ExplicitSuspendChecks() const {
- return suspend_handler_ == nullptr;
- }
-
bool ExplicitStackOverflowChecks() const {
- return stack_overflow_handler_ == nullptr;
+ return !implicit_so_checks_;
}
bool IsVerificationEnabled() const {
@@ -636,9 +628,6 @@
// Transaction used for pre-initializing classes at compilation time.
Transaction* preinitialization_transaction_;
- NullPointerHandler* null_pointer_handler_;
- SuspensionHandler* suspend_handler_;
- StackOverflowHandler* stack_overflow_handler_;
// If false, verification is disabled. True by default.
bool verify_;
diff --git a/runtime/utils.h b/runtime/utils.h
index 7fb5bbd..50462b1 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -20,6 +20,7 @@
#include <pthread.h>
#include <limits>
+#include <memory>
#include <string>
#include <vector>
@@ -502,6 +503,18 @@
void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* buf);
void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* buf);
+// Deleter using free() for use with std::unique_ptr<>. See also UniqueCPtr<> below.
+struct FreeDelete {
+ // NOTE: Deleting a const object is valid but free() takes a non-const pointer.
+ void operator()(const void* ptr) const {
+ free(const_cast<void*>(ptr));
+ }
+};
+
+// Alias for std::unique_ptr<> that uses the C function free() to delete objects.
+template <typename T>
+using UniqueCPtr = std::unique_ptr<T, FreeDelete>;
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
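The FreeDelete/UniqueCPtr addition above is easiest to see with a short usage sketch for malloc()-backed C APIs such as strdup(3) and realpath(3); the definitions are repeated locally so the snippet stands alone:

#include <cstdlib>
#include <cstring>
#include <memory>

// Local copies of the definitions added above, to keep the sketch standalone.
struct FreeDelete {
  void operator()(const void* ptr) const { free(const_cast<void*>(ptr)); }
};
template <typename T>
using UniqueCPtr = std::unique_ptr<T, FreeDelete>;

int main() {
  // strdup() and realpath(path, nullptr) both return malloc()ed buffers, so
  // they must be released with free(), which is exactly what FreeDelete does.
  UniqueCPtr<char> copy(strdup("/system/framework/core.jar"));
  UniqueCPtr<char> resolved(realpath(".", nullptr));
  return (copy != nullptr && resolved != nullptr) ? 0 : 1;
}  // Both buffers are free()d automatically here.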
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 0f9aeb5..2d9fd53 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -66,9 +66,9 @@
return !failure_messages_.empty();
}
-inline RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
+inline const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
DCHECK(!HasFailures());
- RegType& result = ResolveClassAndCheckAccess(class_idx);
+ const RegType& result = ResolveClassAndCheckAccess(class_idx);
DCHECK(!HasFailures());
return result;
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index c21a7a4..9cde8da 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1187,7 +1187,7 @@
// If this is a constructor for a class other than java.lang.Object, mark the first ("this")
// argument as uninitialized. This restricts field access until the superclass constructor is
// called.
- RegType& declaring_class = GetDeclaringClass();
+ const RegType& declaring_class = GetDeclaringClass();
if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
reg_line->SetRegisterType(arg_start + cur_arg,
reg_types_.UninitializedThisArgument(declaring_class));
@@ -1219,7 +1219,7 @@
// it's effectively considered initialized the instant we reach here (in the sense that we
// can return without doing anything or call virtual methods).
{
- RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
+ const RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
if (!reg_type.IsNonZeroReferenceTypes()) {
DCHECK(HasFailures());
return false;
@@ -1253,8 +1253,8 @@
return false;
}
- RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
- RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
+ const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
+ const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
cur_arg++;
break;
@@ -1546,7 +1546,7 @@
* This statement can only appear as the first instruction in an exception handler. We verify
* that as part of extracting the exception type from the catch block list.
*/
- RegType& res_type = GetCaughtExceptionType();
+ const RegType& res_type = GetCaughtExceptionType();
work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
break;
}
@@ -1560,7 +1560,7 @@
case Instruction::RETURN:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- RegType& return_type = GetMethodReturnType();
+ const RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory1Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
<< return_type;
@@ -1568,7 +1568,7 @@
// Compilers may generate synthetic functions that write byte values into boolean fields.
// Also, it may use integer values for boolean, byte, short, and character return types.
const uint32_t vregA = inst->VRegA_11x();
- RegType& src_type = work_line_->GetRegisterType(vregA);
+ const RegType& src_type = work_line_->GetRegisterType(vregA);
bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
((return_type.IsBoolean() || return_type.IsByte() ||
return_type.IsShort() || return_type.IsChar()) &&
@@ -1585,7 +1585,7 @@
case Instruction::RETURN_WIDE:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- RegType& return_type = GetMethodReturnType();
+ const RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory2Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
} else {
@@ -1600,7 +1600,7 @@
break;
case Instruction::RETURN_OBJECT:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
- RegType& return_type = GetMethodReturnType();
+ const RegType& return_type = GetMethodReturnType();
if (!return_type.IsReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
} else {
@@ -1608,7 +1608,7 @@
DCHECK(!return_type.IsZero());
DCHECK(!return_type.IsUninitializedReference());
const uint32_t vregA = inst->VRegA_11x();
- RegType& reg_type = work_line_->GetRegisterType(vregA);
+ const RegType& reg_type = work_line_->GetRegisterType(vregA);
// Disallow returning uninitialized values and verify that the reference in vAA is an
// instance of the "return_type"
if (reg_type.IsUninitializedTypes()) {
@@ -1655,29 +1655,29 @@
/* could be long or double; resolved upon use */
case Instruction::CONST_WIDE_16: {
int64_t val = static_cast<int16_t>(inst->VRegB_21s());
- RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
break;
}
case Instruction::CONST_WIDE_32: {
int64_t val = static_cast<int32_t>(inst->VRegB_31i());
- RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
break;
}
case Instruction::CONST_WIDE: {
int64_t val = inst->VRegB_51l();
- RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
break;
}
case Instruction::CONST_WIDE_HIGH16: {
int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
- RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
break;
}
@@ -1690,7 +1690,7 @@
case Instruction::CONST_CLASS: {
// Get type from instruction if unresolved then we need an access check
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
- RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
// Register holds class, ie its type is class, on error it will hold Conflict.
work_line_->SetRegisterType(inst->VRegA_21c(),
res_type.IsConflict() ? res_type
@@ -1736,7 +1736,7 @@
*/
const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
- RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
// If this is a primitive type, fail HARD.
mirror::Class* klass = dex_cache_->GetResolvedType(type_idx);
@@ -1755,7 +1755,7 @@
}
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
- RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+ const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
if (!res_type.IsNonZeroReferenceTypes()) {
if (is_checkcast) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1778,7 +1778,7 @@
break;
}
case Instruction::ARRAY_LENGTH: {
- RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+ const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
if (res_type.IsReferenceTypes()) {
if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // ie not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -1791,7 +1791,7 @@
break;
}
case Instruction::NEW_INSTANCE: {
- RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
if (res_type.IsConflict()) {
DCHECK_NE(failures_.size(), 0U);
break; // bad class
@@ -1803,7 +1803,7 @@
<< "new-instance on primitive, interface or abstract class" << res_type;
// Soft failure so carry on to set register type.
}
- RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
+ const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
// Any registers holding previous allocations from this address that have not yet been
// initialized must be marked invalid.
work_line_->MarkUninitRefsAsInvalid(uninit_type);
@@ -1856,7 +1856,7 @@
work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::THROW: {
- RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+ const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
<< "thrown class " << res_type << " not instanceof Throwable";
@@ -1877,14 +1877,14 @@
case Instruction::FILL_ARRAY_DATA: {
/* Similar to the verification done for APUT */
- RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
/* array_type can be null if the reg type is Zero */
if (!array_type.IsZero()) {
if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
<< array_type;
} else {
- RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
+ const RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with component type "
@@ -1911,8 +1911,8 @@
}
case Instruction::IF_EQ:
case Instruction::IF_NE: {
- RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
bool mismatch = false;
if (reg_type1.IsZero()) { // zero then integral or reference expected
mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1931,8 +1931,8 @@
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
<< reg_type2 << ") must be integral";
@@ -1941,7 +1941,7 @@
}
case Instruction::IF_EQZ:
case Instruction::IF_NEZ: {
- RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-eqz/if-nez";
@@ -1987,8 +1987,8 @@
// type is assignable to the original then allow optimization. This check is performed to
// ensure that subsequent merges don't lose type information - such as becoming an
// interface from a class that would lose information relevant to field checks.
- RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
- RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+ const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+ const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
if (!orig_type.Equals(cast_type) &&
!cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
@@ -2043,7 +2043,7 @@
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2192,7 +2192,7 @@
inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range,
is_super);
- RegType* return_type = nullptr;
+ const RegType* return_type = nullptr;
if (called_method != nullptr) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -2230,7 +2230,7 @@
is_range, false);
const char* return_type_descriptor;
bool is_constructor;
- RegType* return_type = nullptr;
+ const RegType* return_type = nullptr;
if (called_method == NULL) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
@@ -2262,7 +2262,7 @@
* allowing the latter only if the "this" argument is the same as the "this" argument to
* this method (which implies that we're in a constructor ourselves).
*/
- RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsConflict()) // failure.
break;
@@ -2273,7 +2273,7 @@
}
/* must be in same class or in superclass */
- // RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
+ // const RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
// TODO: re-enable constructor type verification
// if (this_super_klass.IsConflict()) {
// Unknown super class, fail so we re-check at runtime.
@@ -2322,7 +2322,7 @@
} else {
descriptor = called_method->GetReturnTypeDescriptor();
}
- RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
} else {
@@ -2349,7 +2349,7 @@
/* Get the type of the "this" arg, which should either be a sub-interface of called
* interface or Object (see comments in RegType::JoinClass).
*/
- RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsZero()) {
/* null pointer always passes (and always fails at runtime) */
} else {
@@ -2379,7 +2379,7 @@
} else {
descriptor = abs_method->GetReturnTypeDescriptor();
}
- RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
} else {
@@ -2656,7 +2656,7 @@
mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != NULL) {
const char* descriptor = called_method->GetReturnTypeDescriptor();
- RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
} else {
@@ -2935,11 +2935,11 @@
return true;
} // NOLINT(readability/fn_size)
-RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
+const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- RegType& referrer = GetDeclaringClass();
+ const RegType& referrer = GetDeclaringClass();
mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
- RegType& result = klass != NULL ?
+ const RegType& result = klass != NULL ?
reg_types_.FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
if (result.IsConflict()) {
@@ -2961,8 +2961,8 @@
return result;
}
-RegType& MethodVerifier::GetCaughtExceptionType() {
- RegType* common_super = NULL;
+const RegType& MethodVerifier::GetCaughtExceptionType() {
+ const RegType* common_super = NULL;
if (code_item_->tries_size_ != 0) {
const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2973,7 +2973,7 @@
if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
common_super = &reg_types_.JavaLangThrowable(false);
} else {
- RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
+ const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
if (exception.IsUnresolvedTypes()) {
// We don't know enough about the type. Fail here and let runtime handle it.
@@ -3008,7 +3008,7 @@
mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
MethodType method_type) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
- RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
+ const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
std::string append(" in attempt to access method ");
append += dex_file_->GetMethodName(method_id);
@@ -3019,7 +3019,7 @@
return NULL; // Can't resolve Class so no more to do here
}
mirror::Class* klass = klass_type.GetClass();
- RegType& referrer = GetDeclaringClass();
+ const RegType& referrer = GetDeclaringClass();
mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
if (res_method == NULL) {
const char* name = dex_file_->GetMethodName(method_id);
@@ -3126,7 +3126,7 @@
* rigorous check here (which is okay since we have to do it at runtime).
*/
if (method_type != METHOD_STATIC) {
- RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
CHECK(have_pending_hard_failure_);
return nullptr;
@@ -3147,7 +3147,7 @@
}
}
if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
- RegType* res_method_class;
+ const RegType* res_method_class;
if (res_method != nullptr) {
mirror::Class* klass = res_method->GetDeclaringClass();
std::string temp;
@@ -3189,11 +3189,11 @@
return nullptr;
}
- RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), param_descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), param_descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
arg[sig_registers];
if (reg_type.IsIntegralTypes()) {
- RegType& src_type = work_line_->GetRegisterType(get_reg);
+ const RegType& src_type = work_line_->GetRegisterType(get_reg);
if (!src_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
<< " but expected " << reg_type;
@@ -3276,7 +3276,7 @@
// has a vtable entry for the target method.
if (is_super) {
DCHECK(method_type == METHOD_VIRTUAL);
- RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
+ const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
if (super.IsUnresolvedTypes()) {
Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
<< PrettyMethod(dex_method_idx_, *dex_file_)
@@ -3304,7 +3304,7 @@
RegisterLine* reg_line, bool is_range) {
DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+ const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
if (!actual_arg_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
@@ -3342,7 +3342,7 @@
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
- RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
return NULL;
}
@@ -3367,7 +3367,7 @@
if (!actual_arg_type.IsZero()) {
mirror::Class* klass = res_method->GetDeclaringClass();
std::string temp;
- RegType& res_method_class =
+ const RegType& res_method_class =
reg_types_.FromClass(klass->GetDescriptor(&temp), klass,
klass->CannotBeAssignedFromOtherTypes());
if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
@@ -3403,7 +3403,7 @@
<< " missing signature component";
return NULL;
}
- RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
@@ -3431,7 +3431,7 @@
DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
type_idx = inst->VRegB_3rc();
}
- RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) { // bad class
DCHECK_NE(failures_.size(), 0U);
} else {
@@ -3442,12 +3442,12 @@
/* make sure "size" register is valid type */
work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
/* set register type to array class */
- RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ const RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
- RegType& expected_type = reg_types_.GetComponentType(res_type, GetClassLoader());
+ const RegType& expected_type = reg_types_.GetComponentType(res_type, GetClassLoader());
uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
uint32_t arg[5];
if (!is_range) {
@@ -3461,19 +3461,19 @@
}
}
// filled-array result goes into "result" register
- RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ const RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetResultRegisterType(precise_type);
}
}
}
void MethodVerifier::VerifyAGet(const Instruction* inst,
- RegType& insn_type, bool is_primitive) {
- RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ const RegType& insn_type, bool is_primitive) {
+ const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array class; this code path will fail at runtime. Infer a merge-able type from the
// instruction type. TODO: have a proper notion of bottom here.
@@ -3489,7 +3489,7 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
} else {
/* verify the class */
- RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
+ const RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
if (!component_type.IsReferenceTypes() && !is_primitive) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
<< " source for aget-object";
@@ -3516,12 +3516,12 @@
}
}
-void MethodVerifier::VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
+void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
const uint32_t vregA) {
// Primitive assignability rules are weaker than regular assignability rules.
bool instruction_compatible;
bool value_compatible;
- RegType& value_type = work_line_->GetRegisterType(vregA);
+ const RegType& value_type = work_line_->GetRegisterType(vregA);
if (target_type.IsIntegralTypes()) {
instruction_compatible = target_type.Equals(insn_type);
value_compatible = value_type.IsIntegralTypes();
@@ -3533,7 +3533,7 @@
// Additional register check: this is not checked statically (as part of VerifyInstructions),
// as target_type depends on the resolved type of the field.
if (instruction_compatible && work_line_->NumRegs() > vregA + 1) {
- RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
} else {
value_compatible = false;
@@ -3543,7 +3543,7 @@
// Additional register check: this is not checked statically (as part of VerifyInstructions),
// as target_type depends on the resolved type of the field.
if (instruction_compatible && work_line_->NumRegs() > vregA + 1) {
- RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
} else {
value_compatible = false;
@@ -3568,19 +3568,19 @@
}
void MethodVerifier::VerifyAPut(const Instruction* inst,
- RegType& insn_type, bool is_primitive) {
- RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ const RegType& insn_type, bool is_primitive) {
+ const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array type; this code path will fail at runtime. Infer a merge-able type from the
// instruction type.
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
- RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
+ const RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
const uint32_t vregA = inst->VRegA_23x();
if (is_primitive) {
VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3602,7 +3602,7 @@
mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) { // bad class
AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3634,10 +3634,10 @@
return field;
}
-mirror::ArtField* MethodVerifier::GetInstanceField(RegType& obj_type, int field_idx) {
+mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) {
AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3676,7 +3676,7 @@
return NULL;
} else {
mirror::Class* klass = field->GetDeclaringClass();
- RegType& field_klass =
+ const RegType& field_klass =
reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
klass, klass->CannotBeAssignedFromOtherTypes());
if (obj_type.IsUninitializedTypes() &&
@@ -3701,17 +3701,17 @@
}
}
-void MethodVerifier::VerifyISGet(const Instruction* inst, RegType& insn_type,
+void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- RegType* field_type = nullptr;
+ const RegType* field_type = nullptr;
if (field != NULL) {
Thread* self = Thread::Current();
mirror::Class* field_type_class;
@@ -3767,17 +3767,17 @@
}
}
-void MethodVerifier::VerifyISPut(const Instruction* inst, RegType& insn_type,
+void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- RegType* field_type = nullptr;
+ const RegType* field_type = nullptr;
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3829,7 +3829,7 @@
inst->Opcode() == Instruction::IPUT_QUICK ||
inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
- RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+ const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
if (!object_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
return nullptr;
@@ -3844,7 +3844,7 @@
return f;
}
-void MethodVerifier::VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
+void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3859,7 +3859,7 @@
FieldHelper fh(h_field);
field_type_class = fh.GetType(can_load_classes_);
}
- RegType* field_type;
+ const RegType* field_type;
if (field_type_class != nullptr) {
field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
@@ -3904,7 +3904,7 @@
}
}
-void MethodVerifier::VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
+void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3914,7 +3914,7 @@
}
const char* descriptor = field->GetTypeDescriptor();
mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
- RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3927,7 +3927,7 @@
// Primitive field assignability rules are weaker than regular assignability rules
bool instruction_compatible;
bool value_compatible;
- RegType& value_type = work_line_->GetRegisterType(vregA);
+ const RegType& value_type = work_line_->GetRegisterType(vregA);
if (field_type.IsIntegralTypes()) {
instruction_compatible = insn_type.IsIntegralTypes();
value_compatible = value_type.IsIntegralTypes();
@@ -4045,7 +4045,7 @@
return &insn_flags_[work_insn_idx_];
}
-RegType& MethodVerifier::GetMethodReturnType() {
+const RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
if (mirror_method_.Get() != nullptr) {
Thread* self = Thread::Current();
@@ -4072,7 +4072,7 @@
return *return_type_;
}
-RegType& MethodVerifier::GetDeclaringClass() {
+const RegType& MethodVerifier::GetDeclaringClass() {
if (declaring_class_ == NULL) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* descriptor
@@ -4093,7 +4093,7 @@
DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
std::vector<int32_t> result;
for (size_t i = 0; i < line->NumRegs(); ++i) {
- RegType& type = line->GetRegisterType(i);
+ const RegType& type = line->GetRegisterType(i);
if (type.IsConstant()) {
result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
result.push_back(type.ConstantValue());
@@ -4133,7 +4133,7 @@
return result;
}
-RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
+const RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
if (precise) {
// Precise constant type.
return reg_types_.FromCat1Const(value, true);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index af33414..45c0a03 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -232,7 +232,7 @@
bool HasCheckCasts() const;
bool HasVirtualOrInterfaceInvokes() const;
bool HasFailures() const;
- RegType& ResolveCheckedClass(uint32_t class_idx)
+ const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -473,34 +473,34 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Helper to perform verification on puts of primitive type.
- void VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
+ void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of instruction is used. is_primitive is false for an
// aget-object.
- void VerifyAGet(const Instruction* inst, RegType& insn_type,
+ void VerifyAGet(const Instruction* inst, const RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aput instruction.
- void VerifyAPut(const Instruction* inst, RegType& insn_type,
+ void VerifyAPut(const Instruction* inst, const RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
- mirror::ArtField* GetInstanceField(RegType& obj_type, int field_idx)
+ mirror::ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget or sget instruction.
- void VerifyISGet(const Instruction* inst, RegType& insn_type,
+ void VerifyISGet(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput or sput instruction.
- void VerifyISPut(const Instruction* inst, RegType& insn_type,
+ void VerifyISPut(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -510,18 +510,18 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget-quick instruction.
- void VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
+ void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput-quick instruction.
- void VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
+ void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
// access the resolved class.
- RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
+ const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -529,7 +529,7 @@
* address, determine the Join of all exceptions that can land here. Fails if no matching
* exception handler can be found or if the Join of exception types fails.
*/
- RegType& GetCaughtExceptionType()
+ const RegType& GetCaughtExceptionType()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -615,14 +615,14 @@
}
// Return the register type for the method.
- RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get a type representing the declaring class of the method.
- RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
InstructionFlags* CurrentInsnFlags();
- RegType& DetermineCat1Constant(int32_t value, bool precise)
+ const RegType& DetermineCat1Constant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
RegTypeCache reg_types_;
@@ -643,7 +643,7 @@
// Its object representation if known.
ConstHandle<mirror::ArtMethod> mirror_method_ GUARDED_BY(Locks::mutator_lock_);
const uint32_t method_access_flags_; // Method's access flags.
- RegType* return_type_; // Lazily computed return type of the method.
+ const RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
ConstHandle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
@@ -651,7 +651,7 @@
ConstHandle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
- RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
+ const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
std::unique_ptr<InstructionFlags[]> insn_flags_;
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 30be82f..68c7849 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -81,7 +81,7 @@
: PrimitiveType(klass, descriptor, cache_id) {
}
-std::string PreciseConstType::Dump() {
+std::string PreciseConstType::Dump() const {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -98,47 +98,47 @@
return result.str();
}
-std::string BooleanType::Dump() {
+std::string BooleanType::Dump() const {
return "Boolean";
}
-std::string ConflictType::Dump() {
+std::string ConflictType::Dump() const {
return "Conflict";
}
-std::string ByteType::Dump() {
+std::string ByteType::Dump() const {
return "Byte";
}
-std::string ShortType::Dump() {
+std::string ShortType::Dump() const {
return "Short";
}
-std::string CharType::Dump() {
+std::string CharType::Dump() const {
return "Char";
}
-std::string FloatType::Dump() {
+std::string FloatType::Dump() const {
return "Float";
}
-std::string LongLoType::Dump() {
+std::string LongLoType::Dump() const {
return "Long (Low Half)";
}
-std::string LongHiType::Dump() {
+std::string LongHiType::Dump() const {
return "Long (High Half)";
}
-std::string DoubleLoType::Dump() {
+std::string DoubleLoType::Dump() const {
return "Double (Low Half)";
}
-std::string DoubleHiType::Dump() {
+std::string DoubleHiType::Dump() const {
return "Double (High Half)";
}
-std::string IntegerType::Dump() {
+std::string IntegerType::Dump() const {
return "Integer";
}
@@ -361,7 +361,7 @@
}
}
-std::string UndefinedType::Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return "Undefined";
}
@@ -391,7 +391,7 @@
DCHECK(klass->IsInstantiable());
}
-std::string UnresolvedMergedType::Dump() {
+std::string UnresolvedMergedType::Dump() const {
std::stringstream result;
std::set<uint16_t> types = GetMergedTypes();
result << "UnresolvedMergedReferences(";
@@ -405,20 +405,20 @@
return result.str();
}
-std::string UnresolvedSuperClass::Dump() {
+std::string UnresolvedSuperClass::Dump() const {
std::stringstream result;
uint16_t super_type_id = GetUnresolvedSuperClassChildId();
result << "UnresolvedSuperClass(" << reg_type_cache_->GetFromId(super_type_id).Dump() << ")";
return result.str();
}
-std::string UnresolvedReferenceType::Dump() {
+std::string UnresolvedReferenceType::Dump() const {
std::stringstream result;
result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
return result.str();
}
-std::string UnresolvedUninitializedRefType::Dump() {
+std::string UnresolvedUninitializedRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized Reference" << ": "
<< PrettyDescriptor(GetDescriptor().c_str())
@@ -426,40 +426,40 @@
return result.str();
}
-std::string UnresolvedUninitializedThisRefType::Dump() {
+std::string UnresolvedUninitializedThisRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference"
<< PrettyDescriptor(GetDescriptor().c_str());
return result.str();
}
-std::string ReferenceType::Dump() {
+std::string ReferenceType::Dump() const {
std::stringstream result;
result << "Reference" << ": " << PrettyDescriptor(GetClass());
return result.str();
}
-std::string PreciseReferenceType::Dump() {
+std::string PreciseReferenceType::Dump() const {
std::stringstream result;
result << "Precise Reference" << ": "<< PrettyDescriptor(GetClass());
return result.str();
}
-std::string UninitializedReferenceType::Dump() {
+std::string UninitializedReferenceType::Dump() const {
std::stringstream result;
result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UninitializedThisReferenceType::Dump() {
+std::string UninitializedThisReferenceType::Dump() const {
std::stringstream result;
result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
result << "Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string ImpreciseConstType::Dump() {
+std::string ImpreciseConstType::Dump() const {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -474,7 +474,7 @@
}
return result.str();
}
-std::string PreciseConstLoType::Dump() {
+std::string PreciseConstLoType::Dump() const {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -488,7 +488,7 @@
return result.str();
}
-std::string ImpreciseConstLoType::Dump() {
+std::string ImpreciseConstLoType::Dump() const {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -502,7 +502,7 @@
return result.str();
}
-std::string PreciseConstHiType::Dump() {
+std::string PreciseConstHiType::Dump() const {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Precise ";
@@ -515,7 +515,7 @@
return result.str();
}
-std::string ImpreciseConstHiType::Dump() {
+std::string ImpreciseConstHiType::Dump() const {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Imprecise ";
@@ -532,7 +532,7 @@
: RegType(NULL, "", cache_id), constant_(constant) {
}
-RegType& UndefinedType::Merge(RegType& incoming_type, RegTypeCache* reg_types)
+const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (incoming_type.IsUndefined()) {
return *this; // Undefined MERGE Undefined => Undefined
@@ -540,7 +540,7 @@
return reg_types->Conflict();
}
-RegType& RegType::HighHalf(RegTypeCache* cache) const {
+const RegType& RegType::HighHalf(RegTypeCache* cache) const {
DCHECK(IsLowHalf());
if (IsLongLo()) {
return cache->LongHi();
@@ -588,10 +588,12 @@
}
std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
- RegType& _left(reg_type_cache_->GetFromId(refs.first));
- UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&_left);
+ const RegType& _left(reg_type_cache_->GetFromId(refs.first));
+ RegType& __left(const_cast<RegType&>(_left));
+ UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
- RegType& _right(reg_type_cache_->GetFromId(refs.second));
+ RegType& _right(
+ const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
std::set<uint16_t> types;
@@ -614,7 +616,7 @@
return types;
}
-RegType& RegType::GetSuperClass(RegTypeCache* cache) {
+const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
if (!IsUnresolvedTypes()) {
mirror::Class* super_klass = GetClass()->GetSuperClass();
if (super_klass != NULL) {
@@ -636,7 +638,7 @@
}
}
-bool RegType::CanAccess(RegType& other) {
+bool RegType::CanAccess(const RegType& other) const {
if (Equals(other)) {
return true; // Trivial accessibility.
} else {
@@ -652,7 +654,7 @@
}
}
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) {
+bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
if ((access_flags & kAccPublic) != 0) {
return true;
}
@@ -663,7 +665,7 @@
}
}
-bool RegType::IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
// Primitive arrays will always resolve
DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -676,11 +678,11 @@
}
}
-bool RegType::IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return IsReference() && GetClass()->IsObjectClass();
}
-bool RegType::IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
return descriptor_[0] == '[';
} else if (HasClass()) {
@@ -690,7 +692,7 @@
}
}
-bool RegType::IsJavaLangObjectArray() {
+bool RegType::IsJavaLangObjectArray() const {
if (HasClass()) {
mirror::Class* type = GetClass();
return type->IsArrayClass() && type->GetComponentType()->IsObjectClass();
@@ -698,7 +700,7 @@
return false;
}
-bool RegType::IsInstantiableTypes() {
+bool RegType::IsInstantiableTypes() const {
return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
}
@@ -706,7 +708,7 @@
: ConstantType(constat, cache_id) {
}
-static bool AssignableFrom(RegType& lhs, RegType& rhs, bool strict)
+static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (lhs.Equals(rhs)) {
return true;
@@ -754,11 +756,11 @@
}
}
-bool RegType::IsAssignableFrom(RegType& src) {
+bool RegType::IsAssignableFrom(const RegType& src) const {
return AssignableFrom(*this, src, false);
}
-bool RegType::IsStrictlyAssignableFrom(RegType& src) {
+bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
return AssignableFrom(*this, src, true);
}
@@ -776,11 +778,11 @@
}
}
-static RegType& SelectNonConstant(RegType& a, RegType& b) {
+static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
return a.IsConstantTypes() ? b : a;
}
-RegType& RegType::Merge(RegType& incoming_type, RegTypeCache* reg_types) {
+const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller
if (IsConflict()) {
return *this; // Conflict MERGE * => Conflict
@@ -969,7 +971,7 @@
void RegType::VisitRoots(RootCallback* callback, void* arg) {
if (!klass_.IsNull()) {
- klass_.VisitRoot(callback, arg, 0, kRootUnknown);
+ callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
}
}
@@ -1009,8 +1011,7 @@
}
std::ostream& operator<<(std::ostream& os, const RegType& rhs) {
- RegType& rhs_non_const = const_cast<RegType&>(rhs);
- os << rhs_non_const.Dump();
+ os << rhs.Dump();
return os;
}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 1682d4e..378b4c9 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -109,7 +109,7 @@
return IsLowHalf();
}
// Check this is the low half, and that type_h is its matching high-half.
- inline bool CheckWidePair(RegType& type_h) const {
+ inline bool CheckWidePair(const RegType& type_h) const {
if (IsLowHalf()) {
return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
(IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
@@ -121,7 +121,7 @@
return false;
}
// The high half that corresponds to this low half
- RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsConstantBoolean() const {
return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
@@ -200,18 +200,18 @@
virtual bool HasClass() const {
return false;
}
- bool IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Primitive::Type GetPrimitiveType() const;
- bool IsJavaLangObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsInstantiableTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::string& GetDescriptor() const {
DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
return descriptor_;
}
- mirror::Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!IsUnresolvedReference());
DCHECK(!klass_.IsNull()) << Dump();
DCHECK(HasClass());
@@ -220,34 +220,35 @@
uint16_t GetId() const {
return cache_id_;
}
- RegType& GetSuperClass(RegTypeCache* cache)
+ const RegType& GetSuperClass(RegTypeCache* cache) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Can this type access other?
- bool CanAccess(RegType& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type access a member with the given properties?
- bool CanAccessMember(mirror::Class* klass, uint32_t access_flags)
+ bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src?
// Note: Object and interface types may always be assigned to one another, see comment on
// ClassJoin.
- bool IsAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
// an interface from an Object.
- bool IsStrictlyAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsStrictlyAssignableFrom(const RegType& src) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Are these RegTypes the same?
- bool Equals(RegType& other) const {
+ bool Equals(const RegType& other) const {
return GetId() == other.GetId();
}
// Compute the merge of this register from one edge (path) with incoming_type from another.
- virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
+ virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -276,7 +277,7 @@
protected:
RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : descriptor_(descriptor), klass_(GcRoot<mirror::Class>(klass)), cache_id_(cache_id) {
+ : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -286,7 +287,7 @@
const std::string descriptor_;
- GcRoot<mirror::Class> klass_;
+ mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
const uint16_t cache_id_;
friend class RegTypeCache;
@@ -302,7 +303,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static ConflictType* GetInstance();
@@ -332,7 +333,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static UndefinedType* GetInstance();
@@ -351,7 +352,7 @@
: RegType(klass, descriptor, cache_id) {
}
- virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
+ virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static UndefinedType* instance_;
@@ -374,7 +375,7 @@
bool IsInteger() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -393,7 +394,7 @@
bool IsBoolean() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -413,7 +414,7 @@
bool IsByte() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -432,7 +433,7 @@
bool IsShort() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -451,7 +452,7 @@
bool IsChar() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -470,7 +471,7 @@
bool IsFloat() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -492,7 +493,7 @@
class LongLoType : public Cat2Type {
public:
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongLo() const {
return true;
}
@@ -514,7 +515,7 @@
class LongHiType : public Cat2Type {
public:
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongHi() const {
return true;
}
@@ -533,7 +534,7 @@
class DoubleLoType : public Cat2Type {
public:
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDoubleLo() const {
return true;
}
@@ -555,7 +556,7 @@
class DoubleHiType : public Cat2Type {
public:
- std::string Dump();
+ std::string Dump() const;
virtual bool IsDoubleHi() const {
return true;
}
@@ -622,7 +623,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstLoType : public ConstantType {
@@ -634,7 +635,7 @@
bool IsPreciseConstantLo() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstHiType : public ConstantType {
@@ -646,7 +647,7 @@
bool IsPreciseConstantHi() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstType : public ConstantType {
@@ -656,7 +657,7 @@
bool IsImpreciseConstant() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstLoType : public ConstantType {
@@ -667,7 +668,7 @@
bool IsImpreciseConstantLo() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstHiType : public ConstantType {
@@ -678,7 +679,7 @@
bool IsImpreciseConstantHi() const {
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
@@ -719,7 +720,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
@@ -738,7 +739,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -763,7 +764,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -783,7 +784,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -808,7 +809,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass and only an object of that
@@ -830,7 +831,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of unresolved types.
@@ -858,7 +859,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -884,7 +885,7 @@
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -919,7 +920,7 @@
return true;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fdf96a8..fc9e5c9 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -24,14 +24,14 @@
namespace art {
namespace verifier {
-inline RegType& RegTypeCache::GetFromId(uint16_t id) const {
+inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
RegType* result = entries_[id];
DCHECK(result != NULL);
return *result;
}
-inline ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
// We only expect 0 to be a precise constant.
DCHECK(value != 0 || precise);
if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 482bb4d..92a005b 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -65,8 +65,8 @@
DCHECK_EQ(entries_.size(), primitive_count_);
}
-RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
DCHECK(RegTypeCache::primitive_initialized_);
if (descriptor[1] == '\0') {
switch (descriptor[0]) {
@@ -97,7 +97,7 @@
}
};
-RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
+const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
CHECK(RegTypeCache::primitive_initialized_);
switch (prim_type) {
case Primitive::kPrimBoolean:
@@ -156,8 +156,8 @@
return klass;
}
-RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
// Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
// operations on the descriptor.
StringPiece descriptor_sp(descriptor);
@@ -210,7 +210,7 @@
}
}
-RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
DCHECK(klass != nullptr);
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
@@ -313,15 +313,17 @@
}
}
-RegType& RegTypeCache::FromUnresolvedMerge(RegType& left, RegType& right) {
+const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
std::set<uint16_t> types;
if (left.IsUnresolvedMergedReference()) {
- types = (down_cast<UnresolvedMergedType*>(&left))->GetMergedTypes();
+ RegType& non_const(const_cast<RegType&>(left));
+ types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
} else {
types.insert(left.GetId());
}
if (right.IsUnresolvedMergedReference()) {
- std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&right))->GetMergedTypes();
+ RegType& non_const(const_cast<RegType&>(right));
+ std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
types.insert(right_types.begin(), right_types.end());
} else {
types.insert(right.GetId());
@@ -348,7 +350,7 @@
return *entry;
}
-RegType& RegTypeCache::FromUnresolvedSuperClass(RegType& child) {
+const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
// Check if entry already exists.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
@@ -367,7 +369,7 @@
return *entry;
}
-UninitializedType& RegTypeCache::Uninitialized(RegType& type, uint32_t allocation_pc) {
+const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
UninitializedType* entry = NULL;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -397,7 +399,7 @@
return *entry;
}
-RegType& RegTypeCache::FromUninitialized(RegType& uninit_type) {
+const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
@@ -439,44 +441,44 @@
return *entry;
}
-ImpreciseConstType& RegTypeCache::ByteConstant() {
- ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+const ImpreciseConstType& RegTypeCache::ByteConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<ImpreciseConstType*>(&result);
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-ImpreciseConstType& RegTypeCache::CharConstant() {
+const ImpreciseConstType& RegTypeCache::CharConstant() {
int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
- ConstantType& result = FromCat1Const(jchar_max, false);
+ const ConstantType& result = FromCat1Const(jchar_max, false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<ImpreciseConstType*>(&result);
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-ImpreciseConstType& RegTypeCache::ShortConstant() {
- ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+const ImpreciseConstType& RegTypeCache::ShortConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<ImpreciseConstType*>(&result);
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-ImpreciseConstType& RegTypeCache::IntConstant() {
- ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+const ImpreciseConstType& RegTypeCache::IntConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<ImpreciseConstType*>(&result);
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-ImpreciseConstType& RegTypeCache::PosByteConstant() {
- ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+const ImpreciseConstType& RegTypeCache::PosByteConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<ImpreciseConstType*>(&result);
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-ImpreciseConstType& RegTypeCache::PosShortConstant() {
- ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
+const ImpreciseConstType& RegTypeCache::PosShortConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<ImpreciseConstType*>(&result);
+ return *down_cast<const ImpreciseConstType*>(&result);
}
-UninitializedType& RegTypeCache::UninitializedThisArgument(RegType& type) {
+const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
UninitializedType* entry;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -502,7 +504,7 @@
return *entry;
}
-ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
@@ -521,7 +523,7 @@
return *entry;
}
-ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
@@ -539,7 +541,7 @@
return *entry;
}
-ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
@@ -557,7 +559,7 @@
return *entry;
}
-RegType& RegTypeCache::GetComponentType(RegType& array, mirror::ClassLoader* loader) {
+const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
if (!array.IsArrayTypes()) {
return Conflict();
} else if (array.IsUnresolvedTypes()) {
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index c0427eb..8baf3ff 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -51,99 +51,99 @@
}
}
static void ShutDown();
- RegType& GetFromId(uint16_t id) const;
- RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ const art::verifier::RegType& GetFromId(uint16_t id) const;
+ const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
+ const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ConstantType& FromCat1Const(int32_t value, bool precise)
+ const ConstantType& FromCat1Const(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ConstantType& FromCat2ConstLo(int32_t value, bool precise)
+ const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ConstantType& FromCat2ConstHi(int32_t value, bool precise)
+ const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& FromUnresolvedMerge(RegType& left, RegType& right)
+ const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& FromUnresolvedSuperClass(RegType& child)
+ const RegType& FromUnresolvedSuperClass(const RegType& child)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// String is final and therefore always precise.
return From(NULL, "Ljava/lang/String;", true);
}
- RegType& JavaLangThrowable(bool precise)
+ const RegType& JavaLangThrowable(bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Throwable;", precise);
}
- ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
- ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(1, true);
}
size_t GetCacheSize() {
return entries_.size();
}
- RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return *BooleanType::GetInstance();
}
- RegType& Byte() {
+ const RegType& Byte() {
return *ByteType::GetInstance();
}
- RegType& Char() {
+ const RegType& Char() {
return *CharType::GetInstance();
}
- RegType& Short() {
+ const RegType& Short() {
return *ShortType::GetInstance();
}
- RegType& Integer() {
+ const RegType& Integer() {
return *IntegerType::GetInstance();
}
- RegType& Float() {
+ const RegType& Float() {
return *FloatType::GetInstance();
}
- RegType& LongLo() {
+ const RegType& LongLo() {
return *LongLoType::GetInstance();
}
- RegType& LongHi() {
+ const RegType& LongHi() {
return *LongHiType::GetInstance();
}
- RegType& DoubleLo() {
+ const RegType& DoubleLo() {
return *DoubleLoType::GetInstance();
}
- RegType& DoubleHi() {
+ const RegType& DoubleHi() {
return *DoubleHiType::GetInstance();
}
- RegType& Undefined() {
+ const RegType& Undefined() {
return *UndefinedType::GetInstance();
}
- RegType& Conflict() {
+ const RegType& Conflict() {
return *ConflictType::GetInstance();
}
- RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Class;", precise);
}
- RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Object;", precise);
}
- UninitializedType& Uninitialized(RegType& type, uint32_t allocation_pc)
+ const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
- UninitializedType& UninitializedThisArgument(RegType& type)
+ const UninitializedType& UninitializedThisArgument(const RegType& type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& FromUninitialized(RegType& uninit_type)
+ const RegType& FromUninitialized(const RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& GetComponentType(RegType& array, mirror::ClassLoader* loader)
+ const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
+ const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -153,7 +153,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+ const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AddEntry(RegType* new_entry);
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index e27558a..9dc0df1 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -33,21 +33,21 @@
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
- RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
- RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
- RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
+ const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
+ const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
+ const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
+ const RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
EXPECT_TRUE(ref_type_const_0.Equals(ref_type_const_1));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_2));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_3));
- RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
- RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
+ const RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
+ const RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
EXPECT_TRUE(ref_type_const_wide_0.Equals(ref_type_const_wide_1));
- RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
- RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
- RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
+ const RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
+ const RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
+ const RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
EXPECT_TRUE(ref_type_const_wide_2.Equals(ref_type_const_wide_3));
EXPECT_FALSE(ref_type_const_wide_2.Equals(ref_type_const_wide_4));
}
@@ -56,11 +56,11 @@
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
int64_t val = static_cast<int32_t>(1234);
- RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
- RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
- RegType& long_lo = cache.LongLo();
- RegType& long_hi = cache.LongHi();
+ const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ const RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
+ const RegType& long_lo = cache.LongLo();
+ const RegType& long_hi = cache.LongHi();
// Check sanity of types.
EXPECT_TRUE(precise_lo.IsLowHalf());
EXPECT_FALSE(precise_hi.IsLowHalf());
@@ -80,7 +80,7 @@
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& bool_reg_type = cache.Boolean();
+ const RegType& bool_reg_type = cache.Boolean();
EXPECT_FALSE(bool_reg_type.IsUndefined());
EXPECT_FALSE(bool_reg_type.IsConflict());
EXPECT_FALSE(bool_reg_type.IsZero());
@@ -112,7 +112,7 @@
EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
- RegType& byte_reg_type = cache.Byte();
+ const RegType& byte_reg_type = cache.Byte();
EXPECT_FALSE(byte_reg_type.IsUndefined());
EXPECT_FALSE(byte_reg_type.IsConflict());
EXPECT_FALSE(byte_reg_type.IsZero());
@@ -144,7 +144,7 @@
EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
- RegType& char_reg_type = cache.Char();
+ const RegType& char_reg_type = cache.Char();
EXPECT_FALSE(char_reg_type.IsUndefined());
EXPECT_FALSE(char_reg_type.IsConflict());
EXPECT_FALSE(char_reg_type.IsZero());
@@ -176,7 +176,7 @@
EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
- RegType& short_reg_type = cache.Short();
+ const RegType& short_reg_type = cache.Short();
EXPECT_FALSE(short_reg_type.IsUndefined());
EXPECT_FALSE(short_reg_type.IsConflict());
EXPECT_FALSE(short_reg_type.IsZero());
@@ -208,7 +208,7 @@
EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
- RegType& int_reg_type = cache.Integer();
+ const RegType& int_reg_type = cache.Integer();
EXPECT_FALSE(int_reg_type.IsUndefined());
EXPECT_FALSE(int_reg_type.IsConflict());
EXPECT_FALSE(int_reg_type.IsZero());
@@ -240,7 +240,7 @@
EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
- RegType& long_reg_type = cache.LongLo();
+ const RegType& long_reg_type = cache.LongLo();
EXPECT_FALSE(long_reg_type.IsUndefined());
EXPECT_FALSE(long_reg_type.IsConflict());
EXPECT_FALSE(long_reg_type.IsZero());
@@ -272,7 +272,7 @@
EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
- RegType& float_reg_type = cache.Float();
+ const RegType& float_reg_type = cache.Float();
EXPECT_FALSE(float_reg_type.IsUndefined());
EXPECT_FALSE(float_reg_type.IsConflict());
EXPECT_FALSE(float_reg_type.IsZero());
@@ -304,7 +304,7 @@
EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
- RegType& double_reg_type = cache.DoubleLo();
+ const RegType& double_reg_type = cache.DoubleLo();
EXPECT_FALSE(double_reg_type.IsUndefined());
EXPECT_FALSE(double_reg_type.IsConflict());
EXPECT_FALSE(double_reg_type.IsZero());
@@ -344,9 +344,9 @@
// match the one that is imprecise.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& imprecise_obj = cache.JavaLangObject(false);
- RegType& precise_obj = cache.JavaLangObject(true);
- RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ const RegType& imprecise_obj = cache.JavaLangObject(false);
+ const RegType& precise_obj = cache.JavaLangObject(true);
+ const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,14 +359,14 @@
// a hit second time.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
- RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
- RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
+ const RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
}
@@ -375,21 +375,21 @@
// Tests creating types uninitialized types from unresolved types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type));
// Create an uninitialized type of this unresolved type
- RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
+ const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_TRUE(unresolved_unintialised.IsUninitializedTypes());
EXPECT_TRUE(unresolved_unintialised.IsNonZeroReferenceTypes());
// Create an uninitialized type of this unresolved type with different PC
- RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
+ const RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_FALSE(unresolved_unintialised.Equals(ref_type_unresolved_unintialised_1));
// Create an uninitialized type of this unresolved type with the same PC
- RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
+ const RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.Equals(unresolved_unintialised_2));
}
@@ -397,12 +397,12 @@
// Tests types for proper Dump messages.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
- RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
- RegType& resolved_ref = cache.JavaLangString();
- RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
- RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
- RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
+ const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+ const RegType& resolved_ref = cache.JavaLangString();
+ const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
+ const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
+ const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -422,16 +422,16 @@
// The JavaLangObject method instead of FromDescriptor. String class is final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& ref_type = cache.JavaLangString();
- RegType& ref_type_2 = cache.JavaLangString();
- RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+ const RegType& ref_type = cache.JavaLangString();
+ const RegType& ref_type_2 = cache.JavaLangString();
+ const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
EXPECT_TRUE(ref_type.IsPreciseReference());
// Create an uninitialized type out of this:
- RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
+ const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
}
@@ -442,9 +442,9 @@
// The JavaLangObject method instead of FromDescriptor. Object Class in not final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- RegType& ref_type = cache.JavaLangObject(true);
- RegType& ref_type_2 = cache.JavaLangObject(true);
- RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ const RegType& ref_type = cache.JavaLangObject(true);
+ const RegType& ref_type_2 = cache.JavaLangObject(true);
+ const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -455,19 +455,20 @@
// String and object, LUB is object.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- RegType& string = cache_new.JavaLangString();
- RegType& Object = cache_new.JavaLangObject(true);
+ const RegType& string = cache_new.JavaLangString();
+ const RegType& Object = cache_new.JavaLangObject(true);
EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
// Merge two unresolved types.
- RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+ const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
- RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
+ const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
EXPECT_TRUE(merged.IsUnresolvedMergedReference());
+ RegType& merged_nonconst = const_cast<RegType&>(merged);
- std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged))->GetMergedTypes();
+ std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
}
@@ -478,27 +479,27 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- RegType& float_type = cache_new.Float();
- RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
- RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
+ const RegType& float_type = cache_new.Float();
+ const RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
+ const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
{
// float MERGE precise cst => float.
- RegType& merged = float_type.Merge(precise_cst, &cache_new);
+ const RegType& merged = float_type.Merge(precise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// precise cst MERGE float => float.
- RegType& merged = precise_cst.Merge(float_type, &cache_new);
+ const RegType& merged = precise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// float MERGE imprecise cst => float.
- RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
+ const RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// imprecise cst MERGE float => float.
- RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
+ const RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
}
@@ -509,50 +510,50 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- RegType& long_lo_type = cache_new.LongLo();
- RegType& long_hi_type = cache_new.LongHi();
- RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ const RegType& long_lo_type = cache_new.LongLo();
+ const RegType& long_hi_type = cache_new.LongHi();
+ const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
+ const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// precise cst lo MERGE lo => lo.
- RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
+ const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// lo MERGE imprecise cst lo => lo.
- RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ const RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// imprecise cst lo MERGE lo => lo.
- RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
+ const RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// hi MERGE precise cst hi => hi.
- RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
+ const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// precise cst hi MERGE hi => hi.
- RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
+ const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// hi MERGE imprecise cst hi => hi.
- RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ const RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// imprecise cst hi MERGE hi => hi.
- RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
+ const RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
}
@@ -563,50 +564,50 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- RegType& double_lo_type = cache_new.DoubleLo();
- RegType& double_hi_type = cache_new.DoubleHi();
- RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ const RegType& double_lo_type = cache_new.DoubleLo();
+ const RegType& double_hi_type = cache_new.DoubleHi();
+ const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
+ const RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// precise cst lo MERGE lo => lo.
- RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
+ const RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// lo MERGE imprecise cst lo => lo.
- RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ const RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// imprecise cst lo MERGE lo => lo.
- RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
+ const RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// hi MERGE precise cst hi => hi.
- RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
+ const RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// precise cst hi MERGE hi => hi.
- RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
+ const RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// hi MERGE imprecise cst hi => hi.
- RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ const RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// imprecise cst hi MERGE hi => hi.
- RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
+ const RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
}
@@ -615,8 +616,8 @@
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- RegType& imprecise_const = cache_new.FromCat1Const(10, false);
- RegType& precise_const = cache_new.FromCat1Const(10, true);
+ const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
+ const RegType& precise_const = cache_new.FromCat1Const(10, true);
EXPECT_TRUE(imprecise_const.IsImpreciseConstant());
EXPECT_TRUE(precise_const.IsPreciseConstant());
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 378c6d3..0989cd0 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,7 +25,7 @@
namespace art {
namespace verifier {
-inline RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
// The register index was validated during the static pass, so we don't need to check it here.
DCHECK_LT(vsrc, num_regs_);
return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 4d67cfb..556056c 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -36,7 +36,7 @@
return true;
}
-bool RegisterLine::SetRegisterType(uint32_t vdst, RegType& new_type) {
+bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
DCHECK_LT(vdst, num_regs_);
if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
@@ -53,8 +53,8 @@
return true;
}
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, RegType& new_type1,
- RegType& new_type2) {
+bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
+ const RegType& new_type2) {
DCHECK_LT(vdst + 1, num_regs_);
if (!new_type1.CheckWidePair(new_type2)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
@@ -75,21 +75,21 @@
result_[1] = result_[0];
}
-void RegisterLine::SetResultRegisterType(RegType& new_type) {
+void RegisterLine::SetResultRegisterType(const RegType& new_type) {
DCHECK(!new_type.IsLowHalf());
DCHECK(!new_type.IsHighHalf());
result_[0] = new_type.GetId();
result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
}
-void RegisterLine::SetResultRegisterTypeWide(RegType& new_type1,
- RegType& new_type2) {
+void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
+ const RegType& new_type2) {
DCHECK(new_type1.CheckWidePair(new_type2));
result_[0] = new_type1.GetId();
result_[1] = new_type2.GetId();
}
-RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
if (args_count < 1) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
@@ -97,7 +97,7 @@
}
/* Get the element type of the array held in vsrc */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- RegType& this_type = GetRegisterType(this_reg);
+ const RegType& this_type = GetRegisterType(this_reg);
if (!this_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
<< this_reg << " (type=" << this_type << ")";
@@ -107,9 +107,9 @@
}
bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
- RegType& check_type) {
+ const RegType& check_type) {
// Verify the src register type against the check type refining the type of the register
- RegType& src_type = GetRegisterType(vsrc);
+ const RegType& src_type = GetRegisterType(vsrc);
if (!(check_type.IsAssignableFrom(src_type))) {
enum VerifyError fail_type;
if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
@@ -125,7 +125,7 @@
return false;
}
if (check_type.IsLowHalf()) {
- RegType& src_type_h = GetRegisterType(vsrc + 1);
+ const RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -139,17 +139,17 @@
return true;
}
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1,
- RegType& check_type2) {
+bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
+ const RegType& check_type2) {
DCHECK(check_type1.CheckWidePair(check_type2));
// Verify the src register type against the check type refining the type of the register
- RegType& src_type = GetRegisterType(vsrc);
+ const RegType& src_type = GetRegisterType(vsrc);
if (!check_type1.IsAssignableFrom(src_type)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
<< " but expected " << check_type1;
return false;
}
- RegType& src_type_h = GetRegisterType(vsrc + 1);
+ const RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -162,9 +162,9 @@
return true;
}
-void RegisterLine::MarkRefsAsInitialized(RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
DCHECK(uninit_type.IsUninitializedTypes());
- RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+ const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
@@ -200,7 +200,7 @@
}
}
-std::string RegisterLine::Dump() {
+std::string RegisterLine::Dump() const {
std::string result;
for (size_t i = 0; i < num_regs_; i++) {
result += StringPrintf("%zd:[", i);
@@ -213,7 +213,7 @@
return result;
}
-void RegisterLine::MarkUninitRefsAsInvalid(RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
for (size_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
@@ -224,7 +224,7 @@
void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
- RegType& type = GetRegisterType(vsrc);
+ const RegType& type = GetRegisterType(vsrc);
if (!SetRegisterType(vdst, type)) {
return;
}
@@ -238,8 +238,8 @@
}
void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
- RegType& type_l = GetRegisterType(vsrc);
- RegType& type_h = GetRegisterType(vsrc + 1);
+ const RegType& type_l = GetRegisterType(vsrc);
+ const RegType& type_h = GetRegisterType(vsrc + 1);
if (!type_l.CheckWidePair(type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
@@ -250,7 +250,7 @@
}
void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
- RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
if ((!is_reference && !type.IsCategory1Types()) ||
(is_reference && !type.IsReferenceTypes())) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
@@ -267,8 +267,8 @@
* register to another register, and reset the result register.
*/
void RegisterLine::CopyResultRegister2(uint32_t vdst) {
- RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
- RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+ const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
if (!type_l.IsCategory2Types()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "copyRes2 v" << vdst << "<- result0" << " type=" << type_l;
@@ -281,40 +281,40 @@
}
void RegisterLine::CheckUnaryOp(const Instruction* inst,
- RegType& dst_type,
- RegType& src_type) {
+ const RegType& dst_type,
+ const RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type1, RegType& src_type2) {
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1, const RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type) {
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
- RegType& dst_type,
- RegType& src_type1, RegType& src_type2) {
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckBinaryOp(const Instruction* inst,
- RegType& dst_type,
- RegType& src_type1, RegType& src_type2,
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op) {
const uint32_t vregB = inst->VRegB_23x();
const uint32_t vregC = inst->VRegC_23x();
@@ -333,9 +333,9 @@
}
void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type1_1, RegType& src_type1_2,
- RegType& src_type2_1, RegType& src_type2_2) {
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
@@ -343,8 +343,8 @@
}
void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
- RegType& long_lo_type, RegType& long_hi_type,
- RegType& int_type) {
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
VerifyRegisterType(inst->VRegC_23x(), int_type)) {
SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
@@ -352,8 +352,8 @@
}
void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
- RegType& dst_type, RegType& src_type1,
- RegType& src_type2, bool check_boolean_op) {
+ const RegType& dst_type, const RegType& src_type1,
+ const RegType& src_type2, bool check_boolean_op) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterType(vregA, src_type1) &&
@@ -371,9 +371,9 @@
}
void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type1_1, RegType& src_type1_2,
- RegType& src_type2_1, RegType& src_type2_2) {
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
@@ -383,8 +383,8 @@
}
void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
- RegType& long_lo_type, RegType& long_hi_type,
- RegType& int_type) {
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
@@ -394,7 +394,7 @@
}
void RegisterLine::CheckLiteralOp(const Instruction* inst,
- RegType& dst_type, RegType& src_type,
+ const RegType& dst_type, const RegType& src_type,
bool check_boolean_op, bool is_lit16) {
const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
@@ -413,7 +413,7 @@
}
void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
- RegType& reg_type = GetRegisterType(reg_idx);
+ const RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
} else if (monitors_.size() >= 32) {
@@ -425,7 +425,7 @@
}
void RegisterLine::PopMonitor(uint32_t reg_idx) {
- RegType& reg_type = GetRegisterType(reg_idx);
+ const RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
} else if (monitors_.empty()) {
@@ -460,9 +460,9 @@
DCHECK(incoming_line != nullptr);
for (size_t idx = 0; idx < num_regs_; idx++) {
if (line_[idx] != incoming_line->line_[idx]) {
- RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
- RegType& cur_type = GetRegisterType(idx);
- RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+ const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
+ const RegType& cur_type = GetRegisterType(idx);
+ const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
changed = changed || !cur_type.Equals(new_type);
line_[idx] = new_type.GetId();
}
@@ -508,8 +508,7 @@
std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
- os << rhs_non_const.Dump();
+ os << rhs.Dump();
return os;
}
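The register_line.cc hunks above are a const-correctness cleanup: every RegType that is only read is now taken by const reference, and Dump() becomes a const member function, which is what lets operator<< drop its const_cast. A minimal, self-contained sketch of that pattern (a simplified Line class, not the ART sources):

// Sketch only: shows why const-qualifying Dump() removes the const_cast in
// operator<<. Once the member is const, it can be called directly through the
// const reference the stream operator receives.
#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

class Line {
 public:
  explicit Line(std::vector<int> regs) : regs_(std::move(regs)) {}

  // const-qualified: dumping only reads the registers, so it is callable on a
  // const Line&.
  std::string Dump() const {
    std::ostringstream os;
    for (size_t i = 0; i < regs_.size(); ++i) {
      os << i << ":[" << regs_[i] << "] ";
    }
    return os.str();
  }

 private:
  std::vector<int> regs_;
};

// Before the change the equivalent operator had to const_cast rhs because
// Dump() was non-const; now the call is direct.
std::ostream& operator<<(std::ostream& os, const Line& rhs) {
  return os << rhs.Dump();
}

int main() {
  std::cout << Line({1, 2, 3}) << std::endl;  // prints "0:[1] 1:[2] 2:[3] "
  return 0;
}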
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 06b7cca..a9d0dbb 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -81,26 +81,26 @@
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
// The register index was validated during the static pass, so we don't need to check it here.
- bool SetRegisterType(uint32_t vdst, RegType& new_type)
+ bool SetRegisterType(uint32_t vdst, const RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetRegisterTypeWide(uint32_t vdst, RegType& new_type1, RegType& new_type2)
+ bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/* Set the type of the "result" register. */
- void SetResultRegisterType(RegType& new_type)
+ void SetResultRegisterType(const RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetResultRegisterTypeWide(RegType& new_type1, RegType& new_type2)
+ void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the type of register vsrc.
- RegType& GetRegisterType(uint32_t vsrc) const;
+ const RegType& GetRegisterType(uint32_t vsrc) const;
- bool VerifyRegisterType(uint32_t vsrc, RegType& check_type)
+ bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1, RegType& check_type2)
+ bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +110,7 @@
reg_to_lock_depths_ = src->reg_to_lock_depths_;
}
- std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +126,7 @@
* to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
* the new ones at the same time).
*/
- void MarkUninitRefsAsInvalid(RegType& uninit_type)
+ void MarkUninitRefsAsInvalid(const RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -134,7 +134,7 @@
* reference type. This is called when an appropriate constructor is invoked -- all copies of
* the reference must be marked as initialized.
*/
- void MarkRefsAsInitialized(RegType& uninit_type)
+ void MarkRefsAsInitialized(const RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -173,30 +173,30 @@
* The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
* versions. We just need to make sure vA is >= 1 and then return vC.
*/
- RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+ const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
* "dst_type" is stored into vA, and "src_type" is verified against vB.
*/
- void CheckUnaryOp(const Instruction* inst, RegType& dst_type,
- RegType& src_type)
+ void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
+ const RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type1, RegType& src_type2)
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1, const RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpToWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type)
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpFromWide(const Instruction* inst,
- RegType& dst_type,
- RegType& src_type1, RegType& src_type2)
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -205,19 +205,19 @@
* against vB/vC.
*/
void CheckBinaryOp(const Instruction* inst,
- RegType& dst_type, RegType& src_type1, RegType& src_type2,
+ const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type1_1, RegType& src_type1_2,
- RegType& src_type2_1, RegType& src_type2_2)
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWideShift(const Instruction* inst,
- RegType& long_lo_type, RegType& long_hi_type,
- RegType& int_type)
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -225,20 +225,20 @@
* are verified against vA/vB, then "dst_type" is stored into vA.
*/
void CheckBinaryOp2addr(const Instruction* inst,
- RegType& dst_type,
- RegType& src_type1, RegType& src_type2,
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWide(const Instruction* inst,
- RegType& dst_type1, RegType& dst_type2,
- RegType& src_type1_1, RegType& src_type1_2,
- RegType& src_type2_1, RegType& src_type2_2)
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWideShift(const Instruction* inst,
- RegType& long_lo_type, RegType& long_hi_type,
- RegType& int_type)
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -248,7 +248,7 @@
* If "check_boolean_op" is set, we use the constant value in vC.
*/
void CheckLiteralOp(const Instruction* inst,
- RegType& dst_type, RegType& src_type,
+ const RegType& dst_type, const RegType& src_type,
bool check_boolean_op, bool is_lit16)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
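The header changes mirror the .cc file: RegType parameters that are only inspected become const RegType&, and Dump() is declared const. The sketch below uses hypothetical Type/TypeCache classes (not the ART API) to illustrate why const references are the natural signature when the type objects are shared, cache-owned instances:

// Illustrative sketch under those assumptions: the cache owns the type
// objects and hands out const references, so checks that merely inspect a
// type can accept const& without copying or mutating anything.
#include <cassert>
#include <iostream>
#include <string>
#include <utility>

class Type {
 public:
  explicit Type(std::string name) : name_(std::move(name)) {}
  const std::string& Name() const { return name_; }
 private:
  std::string name_;
};

class TypeCache {
 public:
  // Cache-owned singleton instance, returned by const reference.
  const Type& Integer() const { return integer_; }
 private:
  Type integer_{"int"};
};

// With const& parameters the check can consume cache-owned types directly.
bool VerifyRegister(const Type& actual, const Type& expected) {
  return actual.Name() == expected.Name();
}

int main() {
  TypeCache cache;
  assert(VerifyRegister(cache.Integer(), cache.Integer()));
  std::cout << "ok" << std::endl;
  return 0;
}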
diff --git a/test/116-nodex2oat/nodex2oat.cc b/test/116-nodex2oat/nodex2oat.cc
index 4326db0..04cac45 100644
--- a/test/116-nodex2oat/nodex2oat.cc
+++ b/test/116-nodex2oat/nodex2oat.cc
@@ -28,9 +28,9 @@
ScopedObjectAccess soa(Thread::Current());
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
const DexFile& dex_file = klass->GetDexFile();
- const OatFile* oat_file =
- Runtime::Current()->GetClassLinker()->FindOpenedOatFileForDexFile(dex_file);
- return oat_file != nullptr;
+ const OatFile::OatDexFile* oat_dex_file =
+ Runtime::Current()->GetClassLinker()->FindOpenedOatDexFileForDexFile(dex_file);
+ return oat_dex_file != nullptr;
}
};
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index ced7f6e..5994653 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -28,9 +28,9 @@
ScopedObjectAccess soa(Thread::Current());
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
const DexFile& dex_file = klass->GetDexFile();
- const OatFile* oat_file =
- Runtime::Current()->GetClassLinker()->FindOpenedOatFileForDexFile(dex_file);
- return oat_file != nullptr && oat_file->IsExecutable();
+ const OatFile::OatDexFile* oat_dex_file =
+ Runtime::Current()->GetClassLinker()->FindOpenedOatDexFileForDexFile(dex_file);
+ return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
}
};
diff --git a/test/118-noimage-dex2oat/noimage-dex2oat.cc b/test/118-noimage-dex2oat/noimage-dex2oat.cc
index 4a3d33c..7340d9e 100644
--- a/test/118-noimage-dex2oat/noimage-dex2oat.cc
+++ b/test/118-noimage-dex2oat/noimage-dex2oat.cc
@@ -28,9 +28,9 @@
ScopedObjectAccess soa(Thread::Current());
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
const DexFile& dex_file = klass->GetDexFile();
- const OatFile* oat_file =
- Runtime::Current()->GetClassLinker()->FindOpenedOatFileForDexFile(dex_file);
- return oat_file != nullptr;
+ const OatFile::OatDexFile* oat_dex_file =
+ Runtime::Current()->GetClassLinker()->FindOpenedOatDexFileForDexFile(dex_file);
+ return oat_dex_file != nullptr;
}
};
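The three test fixtures above switch from asking the class linker for a whole OatFile to asking for the per-dex OatDexFile entry, navigating back to the owning oat file only where the executable check needs it. A self-contained analogue of the new predicate shape (FakeOatFile/FakeOatDexFile are hypothetical stand-ins, not the ART classes):

// Analogue of FindOpenedOatDexFileForDexFile(dex_file)->GetOatFile()
//     ->IsExecutable(): the lookup yields the per-dex entry, and the
// executable query goes through its container.
#include <iostream>

struct FakeOatFile {                 // stands in for OatFile
  bool executable;
  bool IsExecutable() const { return executable; }
};

struct FakeOatDexFile {              // stands in for OatFile::OatDexFile
  const FakeOatFile* oat_file;
  const FakeOatFile* GetOatFile() const { return oat_file; }
};

// True only when a per-dex entry exists and its oat file is executable; the
// 116/118 tests use just the null check, 117 also checks executability.
bool HasExecutableOat(const FakeOatDexFile* oat_dex_file) {
  return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
}

int main() {
  FakeOatFile oat{true};
  FakeOatDexFile entry{&oat};
  std::cout << std::boolalpha
            << HasExecutableOat(&entry) << " "      // true
            << HasExecutableOat(nullptr) << std::endl;  // false
  return 0;
}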