Merge "MIPS64: Code cleanup."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index a443487..b507124 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -295,18 +295,22 @@
art_cflags += -DIMT_SIZE=64
endif
-ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
- art_cflags += -DART_USE_OPTIMIZING_COMPILER=1
-endif
-
ifeq ($(ART_HEAP_POISONING),true)
art_cflags += -DART_HEAP_POISONING=1
art_asflags += -DART_HEAP_POISONING=1
endif
+#
+# Used to change the read barrier type. Valid values are BAKER, BROOKS, TABLELOOKUP.
+# The default is BAKER.
+#
+ART_READ_BARRIER_TYPE ?= BAKER
+
ifeq ($(ART_USE_READ_BARRIER),true)
art_cflags += -DART_USE_READ_BARRIER=1
+ art_cflags += -DART_READ_BARRIER_TYPE_IS_$(ART_READ_BARRIER_TYPE)=1
art_asflags += -DART_USE_READ_BARRIER=1
+ art_asflags += -DART_READ_BARRIER_TYPE_IS_$(ART_READ_BARRIER_TYPE)=1
endif
ifeq ($(ART_USE_TLAB),true)
@@ -396,7 +400,6 @@
art_non_debug_cflags :=
art_host_non_debug_cflags :=
art_target_non_debug_cflags :=
-art_default_gc_type :=
art_default_gc_type_cflags :=
ART_HOST_LDLIBS :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 6e17ed3..3272c27 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -52,18 +52,12 @@
core_pic_infix :=
core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
- # With the optimizing compiler, we want to rerun dex2oat whenever there is
- # a dex2oat change to catch regressions early.
- ifeq ($(ART_USE_OPTIMIZING_COMPILER), true)
- core_dex2oat_dependency := $(DEX2OAT)
- endif
-
ifeq ($(1),default)
core_compile_options += --compiler-backend=Quick
endif
ifeq ($(1),optimizing)
core_compile_options += --compiler-backend=Optimizing
- core_dex2oat_dependency := $(DEX2OAT)
+ core_dex2oat_dependency += $(DEX2OAT)
core_infix := -optimizing
endif
ifeq ($(1),interpreter)
@@ -178,17 +172,13 @@
core_pic_infix :=
core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
- # With the optimizing compiler, we want to rerun dex2oat whenever there is
- # a dex2oat change to catch regressions early.
- ifeq ($(ART_USE_OPTIMIZING_COMPILER), true)
- core_dex2oat_dependency := $(DEX2OAT)
- endif
-
ifeq ($(1),default)
core_compile_options += --compiler-backend=Quick
endif
ifeq ($(1),optimizing)
core_compile_options += --compiler-backend=Optimizing
+ # With the optimizing compiler, we want to rerun dex2oat whenever there is
+ # a dex2oat change to catch regressions early.
core_dex2oat_dependency := $(DEX2OAT)
core_infix := -optimizing
endif
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index dc2bc5c..67b4428 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -92,7 +92,7 @@
void UnreserveImageSpace();
- Compiler::Kind compiler_kind_ = kUseOptimizingCompiler ? Compiler::kOptimizing : Compiler::kQuick;
+ Compiler::Kind compiler_kind_ = Compiler::kOptimizing;
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1f114cf..3c5c2fe 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -148,7 +148,7 @@
if (arg1.wide == 0) {
LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
} else {
- RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
+ RegStorage r_tmp = TargetReg(kArg2, kWide);
LoadValueDirectWideFixed(arg1, r_tmp);
}
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 939bf40..6ed666b 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -535,37 +535,76 @@
if (block->visited || block->hidden) {
return;
}
- block->visited = true;
- /* Process this block */
- DoSSAConversion(block);
+ typedef struct {
+ BasicBlock* bb;
+ int32_t* ssa_map;
+ } BasicBlockInfo;
+ BasicBlockInfo temp;
- /* Save SSA map snapshot */
ScopedArenaAllocator allocator(&cu_->arena_stack);
- uint32_t num_vregs = GetNumOfCodeAndTempVRs();
- int32_t* saved_ssa_map = allocator.AllocArray<int32_t>(num_vregs, kArenaAllocDalvikToSSAMap);
- size_t map_size = sizeof(saved_ssa_map[0]) * num_vregs;
- memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
+ ScopedArenaVector<BasicBlockInfo> bi_stack(allocator.Adapter());
+ ScopedArenaVector<BasicBlock*> succ_stack(allocator.Adapter());
- if (block->fall_through != NullBasicBlockId) {
- DoDFSPreOrderSSARename(GetBasicBlock(block->fall_through));
- /* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
- }
- if (block->taken != NullBasicBlockId) {
- DoDFSPreOrderSSARename(GetBasicBlock(block->taken));
- /* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
- }
- if (block->successor_block_list_type != kNotUsed) {
- for (SuccessorBlockInfo* successor_block_info : block->successor_blocks) {
- BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
- DoDFSPreOrderSSARename(succ_bb);
- /* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+ uint32_t num_vregs = GetNumOfCodeAndTempVRs();
+ size_t map_size = sizeof(int32_t) * num_vregs;
+ temp.bb = block;
+ temp.ssa_map = vreg_to_ssa_map_;
+ bi_stack.push_back(temp);
+
+ while (!bi_stack.empty()) {
+ temp = bi_stack.back();
+ bi_stack.pop_back();
+ BasicBlock* b = temp.bb;
+
+ if (b->visited || b->hidden) {
+ continue;
+ }
+ b->visited = true;
+
+ /* Restore SSA map snapshot, except for the first block */
+ if (b != block) {
+ memcpy(vreg_to_ssa_map_, temp.ssa_map, map_size);
+ }
+
+ /* Process this block */
+ DoSSAConversion(b);
+
+    /* If there are no successor, taken, or fall-through blocks, continue */
+ if (b->successor_block_list_type == kNotUsed &&
+ b->taken == NullBasicBlockId &&
+ b->fall_through == NullBasicBlockId) {
+ continue;
+ }
+
+ /* Save SSA map snapshot */
+ int32_t* saved_ssa_map =
+ allocator.AllocArray<int32_t>(num_vregs, kArenaAllocDalvikToSSAMap);
+ memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
+
+ if (b->successor_block_list_type != kNotUsed) {
+ for (SuccessorBlockInfo* successor_block_info : b->successor_blocks) {
+ BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
+ succ_stack.push_back(succ_bb);
+ }
+ while (!succ_stack.empty()) {
+ temp.bb = succ_stack.back();
+ succ_stack.pop_back();
+ temp.ssa_map = saved_ssa_map;
+ bi_stack.push_back(temp);
+ }
+ }
+ if (b->taken != NullBasicBlockId) {
+ temp.bb = GetBasicBlock(b->taken);
+ temp.ssa_map = saved_ssa_map;
+ bi_stack.push_back(temp);
+ }
+ if (b->fall_through != NullBasicBlockId) {
+ temp.bb = GetBasicBlock(b->fall_through);
+ temp.ssa_map = saved_ssa_map;
+ bi_stack.push_back(temp);
}
}
- return;
}
} // namespace art
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index cb36f62..ebbfb14 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1186,6 +1186,12 @@
}
}
+static Primitive::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+ return Primitive::GetType(type[0]);
+}
+
bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -1205,44 +1211,61 @@
ArtField* resolved_field =
compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
- if (resolved_field == nullptr) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
- return false;
- }
-
- Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot, dex_pc);
- current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
+ HInstruction* null_check = new (arena_) HNullCheck(object, dex_pc);
+ current_block_->AddInstruction(null_check);
+
+ Primitive::Type field_type = (resolved_field == nullptr)
+ ? GetFieldAccessType(*dex_file_, field_index)
+ : resolved_field->GetTypeAsPrimitiveType();
if (is_put) {
Temporaries temps(graph_);
- HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
- current_block_->AddInstruction(new (arena_) HInstanceFieldSet(
- null_check,
- value,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- *dex_file_,
- dex_compilation_unit_->GetDexCache(),
- dex_pc));
+ HInstruction* field_set = nullptr;
+ if (resolved_field == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ field_set = new (arena_) HUnresolvedInstanceFieldSet(null_check,
+ value,
+ field_type,
+ field_index,
+ dex_pc);
+ } else {
+ field_set = new (arena_) HInstanceFieldSet(null_check,
+ value,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ *dex_file_,
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc);
+ }
+ current_block_->AddInstruction(field_set);
} else {
- current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
- current_block_->GetLastInstruction(),
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- *dex_file_,
- dex_compilation_unit_->GetDexCache(),
- dex_pc));
-
- UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+ HInstruction* field_get = nullptr;
+ if (resolved_field == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ field_get = new (arena_) HUnresolvedInstanceFieldGet(null_check,
+ field_type,
+ field_index,
+ dex_pc);
+ } else {
+ field_get = new (arena_) HInstanceFieldGet(null_check,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ *dex_file_,
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc);
+ }
+ current_block_->AddInstruction(field_get);
+ UpdateLocal(source_or_dest_reg, field_get, dex_pc);
}
+
return true;
}
@@ -1282,6 +1305,23 @@
return outer_class.Get() == cls.Get();
}
+void HGraphBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
+ uint32_t dex_pc,
+ bool is_put,
+ Primitive::Type field_type) {
+ uint32_t source_or_dest_reg = instruction.VRegA_21c();
+ uint16_t field_index = instruction.VRegB_21c();
+
+ if (is_put) {
+ HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
+ current_block_->AddInstruction(
+ new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+ } else {
+ current_block_->AddInstruction(
+ new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+ }
+}
bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -1299,10 +1339,13 @@
soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
if (resolved_field == nullptr) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
- return false;
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ Primitive::Type field_type = GetFieldAccessType(*dex_file_, field_index);
+ BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+ return true;
}
+ Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file)));
@@ -1317,6 +1360,7 @@
// The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
return false;
} else {
+ // TODO: This is rather expensive. Perf it and cache the results if needed.
std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
outer_dex_cache.Get(),
GetCompilingClass(),
@@ -1325,7 +1369,9 @@
&storage_index);
bool can_easily_access = is_put ? pair.second : pair.first;
if (!can_easily_access) {
- return false;
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
+ BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+ return true;
}
}
@@ -1346,8 +1392,6 @@
cls = new (arena_) HClinitCheck(constant, dex_pc);
current_block_->AddInstruction(cls);
}
-
- Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
if (is_put) {
// We need to keep the class alive before loading the value.
Temporaries temps(graph_);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 4c8e3d0..b2dc241 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -187,6 +187,10 @@
// Builds an instance field access node and returns whether the instruction is supported.
bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
+ void BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
+ uint32_t dex_pc,
+ bool is_put,
+ Primitive::Type field_type);
// Builds a static field access node and returns whether the instruction is supported.
bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index be05691..8254277 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -413,6 +413,130 @@
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
+void CodeGenerator::CreateUnresolvedFieldLocationSummary(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ const FieldAccessCallingConvention& calling_convention) {
+ bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedInstanceFieldSet();
+ bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedStaticFieldGet();
+
+ ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(field_access, LocationSummary::kCall);
+
+ locations->AddTemp(calling_convention.GetFieldIndexLocation());
+
+ if (is_instance) {
+ // Add the `this` object for instance field accesses.
+ locations->SetInAt(0, calling_convention.GetObjectLocation());
+ }
+
+ // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
+  // regardless of the type. Because of that we are forced to special case
+ // the access to floating point values.
+ if (is_get) {
+ if (Primitive::IsFloatingPointType(field_type)) {
+ // The return value will be stored in regular registers while register
+ // allocator expects it in a floating point register.
+      // Note: We don't need to request additional temps because the return
+      // register(s) are already blocked due to the call and they may overlap with
+ // the input or field index.
+ // The transfer between the two will be done at codegen level.
+ locations->SetOut(calling_convention.GetFpuLocation(field_type));
+ } else {
+ locations->SetOut(calling_convention.GetReturnLocation(field_type));
+ }
+ } else {
+ size_t set_index = is_instance ? 1 : 0;
+ if (Primitive::IsFloatingPointType(field_type)) {
+ // The set value comes from a float location while the calling convention
+ // expects it in a regular register location. Allocate a temp for it and
+ // make the transfer at codegen.
+ AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
+ locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
+ } else {
+ locations->SetInAt(set_index,
+ calling_convention.GetSetValueLocation(field_type, is_instance));
+ }
+ }
+}
+
+void CodeGenerator::GenerateUnresolvedFieldAccess(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc,
+ const FieldAccessCallingConvention& calling_convention) {
+ LocationSummary* locations = field_access->GetLocations();
+
+ MoveConstant(locations->GetTemp(0), field_index);
+
+ bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedInstanceFieldSet();
+ bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedStaticFieldGet();
+
+ if (!is_get && Primitive::IsFloatingPointType(field_type)) {
+ // Copy the float value to be set into the calling convention register.
+ // Note that using directly the temp location is problematic as we don't
+ // support temp register pairs. To avoid boilerplate conversion code, use
+ // the location from the calling convention.
+ MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
+ locations->InAt(is_instance ? 1 : 0),
+ (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt));
+ }
+
+ QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings.
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
+ : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
+ break;
+ case Primitive::kPrimByte:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
+ : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
+ break;
+ case Primitive::kPrimShort:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
+ : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
+ break;
+ case Primitive::kPrimChar:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
+ : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
+ : (is_get ? kQuickGet32Static : kQuickSet32Static);
+ break;
+ case Primitive::kPrimNot:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
+ : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
+ : (is_get ? kQuickGet64Static : kQuickSet64Static);
+ break;
+ default:
+ LOG(FATAL) << "Invalid type " << field_type;
+ }
+ InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
+
+ if (is_get && Primitive::IsFloatingPointType(field_type)) {
+ MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
+ }
+}
+
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
// The DCHECKS below check that a register is not specified twice in
// the summary. The out location can overlap with an input, so we need
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5da0e59..a3ebc43 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -143,6 +143,22 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
+class FieldAccessCallingConvention {
+ public:
+ virtual Location GetObjectLocation() const = 0;
+ virtual Location GetFieldIndexLocation() const = 0;
+ virtual Location GetReturnLocation(Primitive::Type type) const = 0;
+ virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
+ virtual Location GetFpuLocation(Primitive::Type type) const = 0;
+ virtual ~FieldAccessCallingConvention() {}
+
+ protected:
+ FieldAccessCallingConvention() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
+};
+
class CodeGenerator {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
@@ -177,6 +193,9 @@
virtual void Bind(HBasicBlock* block) = 0;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
virtual void MoveConstant(Location destination, int32_t value) = 0;
+ virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
+ virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;
+
virtual Assembler* GetAssembler() = 0;
virtual const Assembler& GetAssembler() const = 0;
virtual size_t GetWordSize() const = 0;
@@ -385,6 +404,18 @@
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
+ void CreateUnresolvedFieldLocationSummary(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ const FieldAccessCallingConvention& calling_convention);
+
+ void GenerateUnresolvedFieldAccess(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc,
+ const FieldAccessCallingConvention& calling_convention);
+
void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c775e03..299350b 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -361,6 +361,51 @@
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
+class ArraySetSlowPathARM : public SlowPathCode {
+ public:
+ explicit ArraySetSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(
+ locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(2),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimNot,
+ nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ RestoreLiveRegisters(codegen, locations);
+ __ b(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM"; }
+
+ private:
+ HInstruction* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
+};
+
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
@@ -428,12 +473,8 @@
kNumberOfRegisterPairs,
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves)),
- graph->IsDebuggable()
- // If the graph is debuggable, we need to save the fpu registers ourselves,
- // as the stubs do not do it.
- ? 0
- : ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
- arraysize(kFpuCalleeSaves)),
+ ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+ arraysize(kFpuCalleeSaves)),
compiler_options,
stats),
block_labels_(nullptr),
@@ -566,7 +607,12 @@
}
blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
+ }
+ if (is_baseline || GetGraph()->IsDebuggable()) {
+ // Stubs do not save callee-save floating point registers. If the graph
+ // is debuggable, we need to deal with these registers differently. For
+ // now, just block them.
for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
}
@@ -861,6 +907,10 @@
Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
+ } else if (source.IsFpuRegisterPair()) {
+ __ vmovrrd(destination.AsRegisterPairLow<Register>(),
+ destination.AsRegisterPairHigh<Register>(),
+ FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
} else {
DCHECK(source.IsDoubleStackSlot());
DCHECK(ExpectedPairLayout(destination));
@@ -872,6 +922,10 @@
__ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
SP,
source.GetStackIndex());
+ } else if (source.IsRegisterPair()) {
+ __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
+ source.AsRegisterPairLow<Register>(),
+ source.AsRegisterPairHigh<Register>());
} else {
UNIMPLEMENTED(FATAL);
}
@@ -993,6 +1047,25 @@
__ LoadImmediate(location.AsRegister<Register>(), value);
}
+void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+}
+
+void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else if (location.IsRegisterPair()) {
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
@@ -3560,6 +3633,74 @@
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
@@ -3750,38 +3891,32 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- bool needs_runtime_call = instruction->NeedsTypeCheck();
+ bool may_need_runtime_call = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
- if (needs_runtime_call) {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ instruction,
+ may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(value_type)) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
} else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (Primitive::IsFloatingPointType(value_type)) {
- locations->SetInAt(2, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(2, Location::RequiresRegister());
- }
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
- if (needs_write_barrier) {
- // Temporary registers for the write barrier.
- locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
- locations->AddTemp(Location::RequiresRegister());
- }
+ if (needs_write_barrier) {
+ // Temporary registers for the write barrier.
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
+ locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Register array = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
Primitive::Type value_type = instruction->GetComponentType();
- bool needs_runtime_call = locations->WillCall();
+ bool may_need_runtime_call = locations->CanCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
@@ -3793,9 +3928,9 @@
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ StoreToOffset(kStoreByte, value, obj, offset);
+ __ StoreToOffset(kStoreByte, value, array, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>()));
__ StoreToOffset(kStoreByte, value, IP, data_offset);
}
break;
@@ -3808,55 +3943,133 @@
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ __ StoreToOffset(kStoreHalfword, value, array, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
__ StoreToOffset(kStoreHalfword, value, IP, data_offset);
}
break;
}
- case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- if (!needs_runtime_call) {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegister<Register>();
- Register source = value;
- if (kPoisonHeapReferences && needs_write_barrier) {
- // Note that in the case where `value` is a null reference,
- // we do not enter this block, as a null reference does not
- // need poisoning.
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- __ Mov(temp, value);
- __ PoisonHeapReference(temp);
- source = temp;
- }
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ Register source = value;
+
+ if (instruction->InputAt(2)->IsNullConstant()) {
+ // Just setting null.
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreToOffset(kStoreWord, source, obj, offset);
+ __ StoreToOffset(kStoreWord, source, array, offset);
} else {
DCHECK(index.IsRegister()) << index;
- __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
__ StoreToOffset(kStoreWord, source, IP, data_offset);
}
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- if (needs_write_barrier) {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
- }
- } else {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- // Note: if heap poisoning is enabled, pAputObject takes cares
- // of poisoning the reference.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ break;
}
+
+ DCHECK(needs_write_barrier);
+ Register temp1 = locations->GetTemp(0).AsRegister<Register>();
+ Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ Label done;
+ SlowPathCode* slow_path = nullptr;
+
+ if (may_need_runtime_call) {
+ slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (instruction->GetValueCanBeNull()) {
+ Label non_zero;
+ __ CompareAndBranchIfNonZero(value, &non_zero);
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, array, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+ __ StoreToOffset(kStoreWord, value, IP, data_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ b(&done);
+ __ Bind(&non_zero);
+ }
+
+ __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
+        // No need to poison/unpoison, we're comparing two poisoned references.
+ __ cmp(temp1, ShifterOperand(temp2));
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ Label do_put;
+ __ b(&do_put, EQ);
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // No need to poison/unpoison, we're comparing against null.
+ __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ b(slow_path->GetEntryLabel(), NE);
+ }
+ }
+
+ if (kPoisonHeapReferences) {
+ // Note that in the case where `value` is a null reference,
+ // we do not enter this block, as a null reference does not
+ // need poisoning.
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ __ Mov(temp1, value);
+ __ PoisonHeapReference(temp1);
+ source = temp1;
+ }
+
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, source, array, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+ __ StoreToOffset(kStoreWord, source, IP, data_offset);
+ }
+
+ if (!may_need_runtime_call) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());
+
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ break;
+ }
+
+ case Primitive::kPrimInt: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, array, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+ __ StoreToOffset(kStoreWord, value, IP, data_offset);
+ }
+
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -3866,9 +4079,9 @@
if (index.IsConstant()) {
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), array, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
__ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
@@ -3880,9 +4093,9 @@
DCHECK(value.IsFpuRegister());
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
+ __ StoreSToOffset(value.AsFpuRegister<SRegister>(), array, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
__ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
}
break;
@@ -3894,9 +4107,9 @@
DCHECK(value.IsFpuRegisterPair());
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
+ __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), array, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
+ __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
__ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
}
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 111112e..16d1d38 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -96,6 +96,38 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM);
};
+class FieldAccessCallingConventionARM : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionARM() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(R1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(R0);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(R0, R1)
+ : Location::RegisterLocation(R0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(R2, R3)
+ : (is_instance
+ ? Location::RegisterLocation(R2)
+ : Location::RegisterLocation(R1));
+ }
+ Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::FpuRegisterPairLocation(S0, S1)
+ : Location::FpuRegisterLocation(S0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM);
+};
+
class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
@@ -225,6 +257,9 @@
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 70327af..c7ade65 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -19,7 +19,6 @@
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "code_generator_utils.h"
-#include "common_arm64.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -119,11 +118,8 @@
CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
- CPURegList fp_list = CPURegList(
- CPURegister::kFPRegister,
- kDRegSize,
- register_set->GetFloatingPointRegisters()
- & (~(codegen->GetGraph()->IsDebuggable() ? 0 : callee_saved_fp_registers.list())));
+ CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
+ register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
@@ -480,7 +476,7 @@
class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
public:
explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
- : instruction_(instruction) {}
+ : instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
@@ -499,6 +495,52 @@
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};
+class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit ArraySetSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(
+ locations->InAt(0),
+ LocationFrom(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(1),
+ LocationFrom(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(2),
+ LocationFrom(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimNot,
+ nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
+
+ private:
+ HInstruction* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
+};
+
#undef __
Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
@@ -537,9 +579,7 @@
kNumberOfAllocatableFPRegisters,
kNumberOfAllocatableRegisterPairs,
callee_saved_core_registers.list(),
- // If the graph is debuggable, we need to save the fpu registers ourselves,
- // as the stubs do not do it.
- graph->IsDebuggable() ? 0 : callee_saved_fp_registers.list(),
+ callee_saved_fp_registers.list(),
compiler_options,
stats),
block_labels_(nullptr),
@@ -620,7 +660,7 @@
void ParallelMoveResolverARM64::EmitMove(size_t index) {
DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
- codegen_->MoveLocation(move->GetDestination(), move->GetSource());
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
}
void CodeGeneratorARM64::GenerateFrameEntry() {
@@ -704,7 +744,9 @@
}
if (instruction->IsCurrentMethod()) {
- MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
+ MoveLocation(location,
+ Location::DoubleStackSlot(kCurrentMethodStackOffset),
+ Primitive::kPrimVoid);
} else if (locations != nullptr && locations->Out().Equals(location)) {
return;
} else if (instruction->IsIntConstant()
@@ -747,6 +789,14 @@
__ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
}
+void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
@@ -814,7 +864,12 @@
while (!reserved_core_baseline_registers.IsEmpty()) {
blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
}
+ }
+ if (is_baseline || GetGraph()->IsDebuggable()) {
+ // Stubs do not save callee-save floating point registers. If the graph
+ // is debuggable, we need to deal with these registers differently. For
+ // now, just block them.
CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
while (!reserved_fp_baseline_registers.IsEmpty()) {
blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
@@ -897,7 +952,9 @@
(cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}
-void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
+void CodeGeneratorARM64::MoveLocation(Location destination,
+ Location source,
+ Primitive::Type dst_type) {
if (source.Equals(destination)) {
return;
}
@@ -906,7 +963,7 @@
// locations. When moving from and to a register, the argument type can be
// used to generate 32bit instead of 64bit moves. In debug mode we also
// check the coherency of the locations and the type.
- bool unspecified_type = (type == Primitive::kPrimVoid);
+ bool unspecified_type = (dst_type == Primitive::kPrimVoid);
if (destination.IsRegister() || destination.IsFpuRegister()) {
if (unspecified_type) {
@@ -916,30 +973,44 @@
|| src_cst->IsFloatConstant()
|| src_cst->IsNullConstant()))) {
// For stack slots and 32bit constants, a 64bit type is appropriate.
- type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
} else {
// If the source is a double stack slot or a 64bit constant, a 64bit
// type is appropriate. Else the source is a register, and since the
// type has not been specified, we chose a 64bit type to force a 64bit
// move.
- type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
- (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
- CPURegister dst = CPURegisterFrom(destination, type);
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
+ CPURegister dst = CPURegisterFrom(destination, dst_type);
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
__ Ldr(dst, StackOperandFrom(source));
} else if (source.IsConstant()) {
- DCHECK(CoherentConstantAndType(source, type));
+ DCHECK(CoherentConstantAndType(source, dst_type));
MoveConstant(dst, source.GetConstant());
- } else {
+ } else if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ Mov(Register(dst), RegisterFrom(source, type));
+ __ Mov(Register(dst), RegisterFrom(source, dst_type));
} else {
DCHECK(destination.IsFpuRegister());
- __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
+ Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+ ? Primitive::kPrimLong
+ : Primitive::kPrimInt;
+ __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type));
+ }
+ } else {
+ DCHECK(source.IsFpuRegister());
+ if (destination.IsRegister()) {
+ Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+ ? Primitive::kPrimDouble
+ : Primitive::kPrimFloat;
+ __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
+ } else {
+ DCHECK(destination.IsFpuRegister());
+ __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
}
}
} else { // The destination is not a register. It must be a stack slot.
@@ -947,16 +1018,17 @@
if (source.IsRegister() || source.IsFpuRegister()) {
if (unspecified_type) {
if (source.IsRegister()) {
- type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
} else {
- type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
- (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
- __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
+ __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination));
} else if (source.IsConstant()) {
- DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
+ DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type))
+ << source << " " << dst_type;
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
@@ -1560,76 +1632,136 @@
}
void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
- if (instruction->NeedsTypeCheck()) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ instruction->NeedsTypeCheck() ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
} else {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
- locations->SetInAt(2, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(2, Location::RequiresRegister());
- }
+ locations->SetInAt(2, Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
Primitive::Type value_type = instruction->GetComponentType();
LocationSummary* locations = instruction->GetLocations();
- bool needs_runtime_call = locations->WillCall();
+ bool may_need_runtime_call = locations->CanCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- if (needs_runtime_call) {
- // Note: if heap poisoning is enabled, pAputObject takes cares
- // of poisoning the reference.
- codegen_->InvokeRuntime(
- QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
- CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ Register array = InputRegisterAt(instruction, 0);
+ CPURegister value = InputCPURegisterAt(instruction, 2);
+ CPURegister source = value;
+ Location index = locations->InAt(1);
+ size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
+ MemOperand destination = HeapOperand(array);
+ MacroAssembler* masm = GetVIXLAssembler();
+ BlockPoolsScope block_pools(masm);
+
+ if (!needs_write_barrier) {
+ DCHECK(!may_need_runtime_call);
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
+ destination = HeapOperand(array, offset);
+ } else {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireSameSizeAs(array);
+ __ Add(temp, array, offset);
+ destination = HeapOperand(temp,
+ XRegisterFrom(index),
+ LSL,
+ Primitive::ComponentSizeShift(value_type));
+ }
+ codegen_->Store(value_type, value, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
- Register obj = InputRegisterAt(instruction, 0);
- CPURegister value = InputCPURegisterAt(instruction, 2);
- CPURegister source = value;
- Location index = locations->InAt(1);
- size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
- MemOperand destination = HeapOperand(obj);
- MacroAssembler* masm = GetVIXLAssembler();
- BlockPoolsScope block_pools(masm);
+ DCHECK(needs_write_barrier);
+ vixl::Label done;
+ SlowPathCodeARM64* slow_path = nullptr;
{
// We use a block to end the scratch scope before the write barrier, thus
// freeing the temporary registers so they can be used in `MarkGCCard`.
UseScratchRegisterScope temps(masm);
-
- if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
- DCHECK(value.IsW());
- Register temp = temps.AcquireW();
- __ Mov(temp, value.W());
- GetAssembler()->PoisonHeapReference(temp.W());
- source = temp;
- }
-
+ Register temp = temps.AcquireSameSizeAs(array);
if (index.IsConstant()) {
offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
- destination = HeapOperand(obj, offset);
+ destination = HeapOperand(array, offset);
} else {
- Register temp = temps.AcquireSameSizeAs(obj);
- __ Add(temp, obj, offset);
destination = HeapOperand(temp,
XRegisterFrom(index),
LSL,
Primitive::ComponentSizeShift(value_type));
}
- codegen_->Store(value_type, source, destination);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+
+ if (may_need_runtime_call) {
+ slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (instruction->GetValueCanBeNull()) {
+ vixl::Label non_zero;
+ __ Cbnz(Register(value), &non_zero);
+ if (!index.IsConstant()) {
+ __ Add(temp, array, offset);
+ }
+ __ Str(wzr, destination);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ B(&done);
+ __ Bind(&non_zero);
+ }
+
+ Register temp2 = temps.AcquireSameSizeAs(array);
+ __ Ldr(temp, HeapOperand(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ __ Ldr(temp, HeapOperand(temp, component_offset));
+ __ Ldr(temp2, HeapOperand(Register(value), class_offset));
+ // No need to poison/unpoison, we're comparing two poisoned references.
+ __ Cmp(temp, temp2);
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ vixl::Label do_put;
+ __ B(eq, &do_put);
+ GetAssembler()->MaybeUnpoisonHeapReference(temp);
+ __ Ldr(temp, HeapOperand(temp, super_offset));
+ // No need to unpoison, we're comparing against null.
+ __ Cbnz(temp, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ B(ne, slow_path->GetEntryLabel());
+ }
+ temps.Release(temp2);
+ }
+
+ if (kPoisonHeapReferences) {
+ Register temp2 = temps.AcquireSameSizeAs(array);
+ DCHECK(value.IsW());
+ __ Mov(temp2, value.W());
+ GetAssembler()->PoisonHeapReference(temp2);
+ source = temp2;
+ }
+
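+ // Compute the element base address in `temp`, which the type check above may
+ // have clobbered.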
+ if (!index.IsConstant()) {
+ __ Add(temp, array, offset);
+ }
+ __ Str(source, destination);
+
+ if (!may_need_runtime_call) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
}
- if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
- codegen_->MarkGCCard(obj, value.W(), instruction->GetValueCanBeNull());
+
+ codegen_->MarkGCCard(array, value.W(), instruction->GetValueCanBeNull());
+
+ if (done.IsLinked()) {
+ __ Bind(&done);
+ }
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
}
}
}
@@ -3402,6 +3534,74 @@
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7178081..a068b48 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
#include "code_generator.h"
+#include "common_arm64.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
@@ -141,6 +142,34 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
};
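+// Calling convention describing where the object, field index and set value are
+// placed when calling into the runtime for an unresolved field access.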
+class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionARM64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return helpers::LocationFrom(vixl::x1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return helpers::LocationFrom(vixl::x0);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return helpers::LocationFrom(vixl::x0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? helpers::LocationFrom(vixl::x2)
+ : (is_instance
+ ? helpers::LocationFrom(vixl::x2)
+ : helpers::LocationFrom(vixl::x1));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return helpers::LocationFrom(vixl::d0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
+};
+
class InstructionCodeGeneratorARM64 : public HGraphVisitor {
public:
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
@@ -334,10 +363,9 @@
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveConstant(Location destination, int32_t value) OVERRIDE;
- // The type is optional. When specified it must be coherent with the
- // locations, and is used for optimisation and debugging.
- void MoveLocation(Location destination, Location source,
- Primitive::Type type = Primitive::kPrimVoid);
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
void LoadAcquire(HInstruction* instruction, vixl::CPURegister dst, const vixl::MemOperand& src);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index c9f8493..e95d283 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -40,9 +40,6 @@
// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr FpuRegister FTMP = F8;
-// ART Thread Register.
-static constexpr GpuRegister TR = S1;
-
Location Mips64ReturnLocation(Primitive::Type return_type) {
switch (return_type) {
case Primitive::kPrimBoolean:
@@ -620,7 +617,7 @@
void CodeGeneratorMIPS64::MoveLocation(Location destination,
Location source,
- Primitive::Type type) {
+ Primitive::Type dst_type) {
if (source.Equals(destination)) {
return;
}
@@ -628,7 +625,7 @@
// A valid move can always be inferred from the destination and source
// locations. When moving from and to a register, the argument type can be
// used to generate 32bit instead of 64bit moves.
- bool unspecified_type = (type == Primitive::kPrimVoid);
+ bool unspecified_type = (dst_type == Primitive::kPrimVoid);
DCHECK_EQ(unspecified_type, false);
if (destination.IsRegister() || destination.IsFpuRegister()) {
@@ -639,21 +636,21 @@
|| src_cst->IsFloatConstant()
|| src_cst->IsNullConstant()))) {
// For stack slots and 32bit constants, a 64bit type is appropriate.
- type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
} else {
// If the source is a double stack slot or a 64bit constant, a 64bit
// type is appropriate. Else the source is a register, and since the
// type has not been specified, we chose a 64bit type to force a 64bit
// move.
- type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
- (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
// Move to GPR/FPR from stack
LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
- if (Primitive::IsFloatingPointType(type)) {
+ if (Primitive::IsFloatingPointType(dst_type)) {
__ LoadFpuFromOffset(load_type,
destination.AsFpuRegister<FpuRegister>(),
SP,
@@ -668,31 +665,47 @@
} else if (source.IsConstant()) {
// Move to GPR/FPR from constant
GpuRegister gpr = AT;
- if (!Primitive::IsFloatingPointType(type)) {
+ if (!Primitive::IsFloatingPointType(dst_type)) {
gpr = destination.AsRegister<GpuRegister>();
}
- if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
__ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
} else {
__ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
}
- if (type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimFloat) {
__ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
- } else if (type == Primitive::kPrimDouble) {
+ } else if (dst_type == Primitive::kPrimDouble) {
__ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
}
- } else {
+ } else if (source.IsRegister()) {
if (destination.IsRegister()) {
// Move to GPR from GPR
__ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
} else {
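+ // Move to FPR from GPR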
+ DCHECK(destination.IsFpuRegister());
+ if (Primitive::Is64BitType(dst_type)) {
+ __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+ } else {
+ __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+ }
+ }
+ } else if (source.IsFpuRegister()) {
+ if (destination.IsFpuRegister()) {
// Move to FPR from FPR
- if (type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimFloat) {
__ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
} else {
- DCHECK_EQ(type, Primitive::kPrimDouble);
+ DCHECK_EQ(dst_type, Primitive::kPrimDouble);
__ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
}
+ } else {
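+ // Move to GPR from FPR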
+ DCHECK(destination.IsRegister());
+ if (Primitive::Is64BitType(dst_type)) {
+ __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ } else {
+ __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ }
}
}
} else { // The destination is not a register. It must be a stack slot.
@@ -700,13 +713,13 @@
if (source.IsRegister() || source.IsFpuRegister()) {
if (unspecified_type) {
if (source.IsRegister()) {
- type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
} else {
- type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
- (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
// Move to stack from GPR/FPR
StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
if (source.IsRegister()) {
@@ -864,6 +877,14 @@
__ LoadConst32(location.AsRegister<GpuRegister>(), value);
}
+void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
@@ -3121,6 +3142,74 @@
HandleFieldSet(instruction, instruction->GetFieldInfo());
}
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 16461d6..5e8f9e7 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -106,6 +106,31 @@
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
+class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionMIPS64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(A1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetSetValueLocation(
+ Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
+ return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(F0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
+};
+
class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
@@ -280,11 +305,13 @@
void Finalize(CodeAllocator* allocator) OVERRIDE;
// Code generation helpers.
-
- void MoveLocation(Location destination, Location source, Primitive::Type type);
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
void SwapLocations(Location loc1, Location loc2, Primitive::Type type);
// Generate code to invoke a runtime entry point.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a47a95e..5078456 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -380,6 +380,51 @@
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
};
+class ArraySetSlowPathX86 : public SlowPathCode {
+ public:
+ explicit ArraySetSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
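+ // Marshal the array, index and value into the runtime calling convention
+ // registers; a parallel move is used since the locations may overlap.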
+ parallel_move.AddMove(
+ locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(2),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimNot,
+ nullptr);
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }
+
+ private:
+ HInstruction* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
+};
+
#undef __
#define __ down_cast<X86Assembler*>(GetAssembler())->
@@ -782,7 +827,10 @@
Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
- LOG(FATAL) << "Unimplemented";
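+ // Copy the low 32 bits with movd, then shift the high bits down and copy
+ // them the same way.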
+ XmmRegister src_reg = source.AsFpuRegister<XmmRegister>();
+ __ movd(destination.AsRegisterPairLow<Register>(), src_reg);
+ __ psrlq(src_reg, Immediate(32));
+ __ movd(destination.AsRegisterPairHigh<Register>(), src_reg);
} else {
// No conflict possible, so just do the moves.
DCHECK(source.IsDoubleStackSlot());
@@ -795,6 +843,15 @@
__ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
} else if (source.IsDoubleStackSlot()) {
__ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
+ } else if (source.IsRegisterPair()) {
+ size_t elem_size = Primitive::ComponentSize(Primitive::kPrimInt);
+ // Create stack space for 2 elements.
+ __ subl(ESP, Immediate(2 * elem_size));
+ __ movl(Address(ESP, 0), source.AsRegisterPairLow<Register>());
+ __ movl(Address(ESP, elem_size), source.AsRegisterPairHigh<Register>());
+ __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
+ // And remove the temporary stack space we allocated.
+ __ addl(ESP, Immediate(2 * elem_size));
} else {
LOG(FATAL) << "Unimplemented";
}
@@ -921,6 +978,25 @@
__ movl(location.AsRegister<Register>(), Immediate(value));
}
+void CodeGeneratorX86::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+}
+
+void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else if (location.IsRegisterPair()) {
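+ // A 64-bit location occupies two core registers; reserve both as temps.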
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
@@ -4040,6 +4116,74 @@
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
@@ -4245,72 +4389,59 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- bool needs_runtime_call = instruction->NeedsTypeCheck();
+ bool may_need_runtime_call = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
+ may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
- if (needs_runtime_call) {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ bool is_byte_type = (value_type == Primitive::kPrimBoolean)
+ || (value_type == Primitive::kPrimByte);
+ // We need the inputs to be different than the output in case of long operation.
+ // In case of a byte operation, the register allocator does not support multiple
+ // inputs that die at entry with one in a specific register.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (is_byte_type) {
+ // Ensure the value is in a byte register.
+ locations->SetInAt(2, Location::ByteRegisterOrConstant(EAX, instruction->InputAt(2)));
+ } else if (Primitive::IsFloatingPointType(value_type)) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
} else {
- bool is_byte_type = (value_type == Primitive::kPrimBoolean)
- || (value_type == Primitive::kPrimByte);
- // We need the inputs to be different than the output in case of long operation.
- // In case of a byte operation, the register allocator does not support multiple
- // inputs that die at entry with one in a specific register.
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- if (is_byte_type) {
- // Ensure the value is in a byte register.
- locations->SetInAt(2, Location::ByteRegisterOrConstant(EAX, instruction->InputAt(2)));
- } else if (Primitive::IsFloatingPointType(value_type)) {
- locations->SetInAt(2, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
- }
- if (needs_write_barrier) {
- // Temporary registers for the write barrier.
- locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
- // Ensure the card is in a byte register.
- locations->AddTemp(Location::RegisterLocation(ECX));
- }
+ locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
+ }
+ if (needs_write_barrier) {
+ // Temporary registers for the write barrier.
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
+ // Ensure the card is in a byte register.
+ locations->AddTemp(Location::RegisterLocation(ECX));
}
}
void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsRegister<Register>();
+ Register array = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
- bool needs_runtime_call = locations->WillCall();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ bool may_need_runtime_call = locations->CanCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
switch (value_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.AsRegister<ByteRegister>());
- } else {
- __ movb(Address(obj, offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
+ : Address(array, index.AsRegister<Register>(), TIMES_1, offset);
+ if (value.IsRegister()) {
+ __ movb(address, value.AsRegister<ByteRegister>());
} else {
- if (value.IsRegister()) {
- __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
- value.AsRegister<ByteRegister>());
- } else {
- __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ __ movb(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
@@ -4318,93 +4449,106 @@
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.AsRegister<Register>());
- } else {
- __ movw(Address(obj, offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
+ : Address(array, index.AsRegister<Register>(), TIMES_2, offset);
+ if (value.IsRegister()) {
+ __ movw(address, value.AsRegister<Register>());
} else {
- if (value.IsRegister()) {
- __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
- value.AsRegister<Register>());
- } else {
- __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ __ movw(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
- case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- if (!needs_runtime_call) {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- if (value.IsRegister()) {
- if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- __ movl(temp, value.AsRegister<Register>());
- __ PoisonHeapReference(temp);
- __ movl(Address(obj, offset), temp);
- } else {
- __ movl(Address(obj, offset), value.AsRegister<Register>());
- }
- } else {
- DCHECK(value.IsConstant()) << value;
- int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
- // `value_type == Primitive::kPrimNot` implies `v == 0`.
- DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
- // Note: if heap poisoning is enabled, no need to poison
- // (negate) `v` if it is a reference, as it would be null.
- __ movl(Address(obj, offset), Immediate(v));
- }
- } else {
- DCHECK(index.IsRegister()) << index;
- if (value.IsRegister()) {
- if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- __ movl(temp, value.AsRegister<Register>());
- __ PoisonHeapReference(temp);
- __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), temp);
- } else {
- __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
- value.AsRegister<Register>());
- }
- } else {
- DCHECK(value.IsConstant()) << value;
- int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
- // `value_type == Primitive::kPrimNot` implies `v == 0`.
- DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
- // Note: if heap poisoning is enabled, no need to poison
- // (negate) `v` if it is a reference, as it would be null.
- __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), Immediate(v));
- }
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+ : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ if (!value.IsRegister()) {
+ // Just setting null.
+ DCHECK(instruction->InputAt(2)->IsNullConstant());
+ DCHECK(value.IsConstant()) << value;
+ __ movl(address, Immediate(0));
codegen_->MaybeRecordImplicitNullCheck(instruction);
-
- if (needs_write_barrier) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(
- temp, card, obj, value.AsRegister<Register>(), instruction->GetValueCanBeNull());
- }
- } else {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- DCHECK(!codegen_->IsLeafMethod());
- // Note: if heap poisoning is enabled, pAputObject takes cares
- // of poisoning the reference.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ DCHECK(!needs_write_barrier);
+ DCHECK(!may_need_runtime_call);
+ break;
}
+
+ DCHECK(needs_write_barrier);
+ Register register_value = value.AsRegister<Register>();
+ NearLabel done, not_null, do_put;
+ SlowPathCode* slow_path = nullptr;
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
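+ // If the type check cannot be elided statically, null values are stored
+ // directly and other values have their class checked against the array's
+ // component type, branching to the slow path on mismatch.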
+ if (may_need_runtime_call) {
+ slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (instruction->GetValueCanBeNull()) {
+ __ testl(register_value, register_value);
+ __ j(kNotEqual, &not_null);
+ __ movl(address, Immediate(0));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ jmp(&done);
+ __ Bind(&not_null);
+ }
+
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
+ __ movl(temp, Address(temp, component_offset));
+ // No need to poison/unpoison, we're comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ __ MaybeUnpoisonHeapReference(temp);
+ __ movl(temp, Address(temp, super_offset));
+ // No need to unpoison, we're comparing against null.
+ __ testl(temp, temp);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+ }
+
+ if (kPoisonHeapReferences) {
+ __ movl(temp, register_value);
+ __ PoisonHeapReference(temp);
+ __ movl(address, temp);
+ } else {
+ __ movl(address, register_value);
+ }
+ if (!may_need_runtime_call) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ Register card = locations->GetTemp(1).AsRegister<Register>();
+ codegen_->MarkGCCard(
+ temp, card, array, value.AsRegister<Register>(), instruction->GetValueCanBeNull());
+ __ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ break;
+ }
+ case Primitive::kPrimInt: {
+ uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+ : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ if (value.IsRegister()) {
+ __ movl(address, value.AsRegister<Register>());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movl(address, Immediate(v));
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -4413,30 +4557,30 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
if (value.IsRegisterPair()) {
- __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
+ __ movl(Address(array, offset), value.AsRegisterPairLow<Register>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(obj, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
+ __ movl(Address(array, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
} else {
DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(obj, offset), Immediate(Low32Bits(val)));
+ __ movl(Address(array, offset), Immediate(Low32Bits(val)));
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(obj, offset + kX86WordSize), Immediate(High32Bits(val)));
+ __ movl(Address(array, offset + kX86WordSize), Immediate(High32Bits(val)));
}
} else {
if (value.IsRegisterPair()) {
- __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
+ __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
value.AsRegisterPairLow<Register>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
+ __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
value.AsRegisterPairHigh<Register>());
} else {
DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
+ __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
Immediate(Low32Bits(val)));
codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
+ __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
Immediate(High32Bits(val)));
}
}
@@ -4444,28 +4588,22 @@
}
case Primitive::kPrimFloat: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+ : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
DCHECK(value.IsFpuRegister());
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ movss(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
- } else {
- __ movss(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
- value.AsFpuRegister<XmmRegister>());
- }
+ __ movss(address, value.AsFpuRegister<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
+ : Address(array, index.AsRegister<Register>(), TIMES_8, offset);
DCHECK(value.IsFpuRegister());
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ movsd(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
- } else {
- __ movsd(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
- value.AsFpuRegister<XmmRegister>());
- }
+ __ movsd(address, value.AsFpuRegister<XmmRegister>());
break;
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2c2fc65..ae2d84f 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -91,6 +91,36 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
};
+class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionX86() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(ECX);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(EAX);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(EAX, EDX)
+ : Location::RegisterLocation(EAX);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(EDX, EBX)
+ : (is_instance
+ ? Location::RegisterLocation(EDX)
+ : Location::RegisterLocation(ECX));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(XMM0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86);
+};
+
class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
@@ -228,6 +258,9 @@
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b845a27..791bb9e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -396,6 +396,51 @@
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
};
+class ArraySetSlowPathX86_64 : public SlowPathCode {
+ public:
+ explicit ArraySetSlowPathX86_64(HInstruction* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
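+ // As in ArraySetSlowPathX86: move the array, index and value into the
+ // runtime calling convention registers.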
+ parallel_move.AddMove(
+ locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt,
+ nullptr);
+ parallel_move.AddMove(
+ locations->InAt(2),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+ Primitive::kPrimNot,
+ nullptr);
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ RestoreLiveRegisters(codegen, locations);
+ __ jmp(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; }
+
+ private:
+ HInstruction* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
+};
+
#undef __
#define __ down_cast<X86_64Assembler*>(GetAssembler())->
@@ -945,6 +990,19 @@
Load64BitValue(location.AsRegister<CpuRegister>(), static_cast<int64_t>(value));
}
+void CodeGeneratorX86_64::MoveLocation(
+ Location dst, Location src, Primitive::Type dst_type ATTRIBUTE_UNUSED) {
+ Move(dst, src);
+}
+
+void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
@@ -3804,6 +3862,74 @@
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
@@ -3992,66 +4118,55 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- bool needs_runtime_call = instruction->NeedsTypeCheck();
+ bool may_need_runtime_call = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
- if (needs_runtime_call) {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
- } else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)));
- locations->SetInAt(2, Location::RequiresRegister());
- if (value_type == Primitive::kPrimLong) {
- locations->SetInAt(2, Location::RegisterOrInt32LongConstant(instruction->InputAt(2)));
- } else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) {
- locations->SetInAt(2, Location::RequiresFpuRegister());
- } else {
- locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
- }
+ instruction,
+ may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
- if (needs_write_barrier) {
- // Temporary registers for the write barrier.
- locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
- locations->AddTemp(Location::RequiresRegister());
- }
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(
+ 1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ if (value_type == Primitive::kPrimLong) {
+ locations->SetInAt(2, Location::RegisterOrInt32LongConstant(instruction->InputAt(2)));
+ } else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
+ }
+
+ if (needs_write_barrier) {
+ // Temporary registers for the write barrier.
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
+ locations->AddTemp(Location::RequiresRegister());
}
}
void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister array = locations->InAt(0).AsRegister<CpuRegister>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
- bool needs_runtime_call = locations->WillCall();
+ bool may_need_runtime_call = locations->CanCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
switch (value_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.AsRegister<CpuRegister>());
- } else {
- __ movb(Address(obj, offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
+ : Address(array, index.AsRegister<CpuRegister>(), TIMES_1, offset);
+ if (value.IsRegister()) {
+ __ movb(address, value.AsRegister<CpuRegister>());
} else {
- if (value.IsRegister()) {
- __ movb(Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset),
- value.AsRegister<CpuRegister>());
- } else {
- __ movb(Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ __ movb(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
@@ -4059,154 +4174,145 @@
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.AsRegister<CpuRegister>());
- } else {
- DCHECK(value.IsConstant()) << value;
- __ movw(Address(obj, offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
+ : Address(array, index.AsRegister<CpuRegister>(), TIMES_2, offset);
+ if (value.IsRegister()) {
+ __ movw(address, value.AsRegister<CpuRegister>());
} else {
- DCHECK(index.IsRegister()) << index;
- if (value.IsRegister()) {
- __ movw(Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset),
- value.AsRegister<CpuRegister>());
- } else {
- DCHECK(value.IsConstant()) << value;
- __ movw(Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ DCHECK(value.IsConstant()) << value;
+ __ movw(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
- case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- if (!needs_runtime_call) {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- if (value.IsRegister()) {
- if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
- CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
- __ movl(temp, value.AsRegister<CpuRegister>());
- __ PoisonHeapReference(temp);
- __ movl(Address(obj, offset), temp);
- } else {
- __ movl(Address(obj, offset), value.AsRegister<CpuRegister>());
- }
- } else {
- DCHECK(value.IsConstant()) << value;
- int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
- // `value_type == Primitive::kPrimNot` implies `v == 0`.
- DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
- // Note: if heap poisoning is enabled, no need to poison
- // (negate) `v` if it is a reference, as it would be null.
- __ movl(Address(obj, offset), Immediate(v));
- }
- } else {
- DCHECK(index.IsRegister()) << index;
- if (value.IsRegister()) {
- if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
- CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
- __ movl(temp, value.AsRegister<CpuRegister>());
- __ PoisonHeapReference(temp);
- __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), temp);
- } else {
- __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
- value.AsRegister<CpuRegister>());
- }
- } else {
- DCHECK(value.IsConstant()) << value;
- int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
- // `value_type == Primitive::kPrimNot` implies `v == 0`.
- DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
- // Note: if heap poisoning is enabled, no need to poison
- // (negate) `v` if it is a reference, as it would be null.
- __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
- Immediate(v));
- }
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+ : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ if (!value.IsRegister()) {
+ // Just setting null.
+ DCHECK(instruction->InputAt(2)->IsNullConstant());
+ DCHECK(value.IsConstant()) << value;
+ __ movl(address, Immediate(0));
codegen_->MaybeRecordImplicitNullCheck(instruction);
- if (needs_write_barrier) {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
- CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
- codegen_->MarkGCCard(
- temp, card, obj, value.AsRegister<CpuRegister>(), instruction->GetValueCanBeNull());
- }
- } else {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- // Note: if heap poisoning is enabled, pAputObject takes cares
- // of poisoning the reference.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction,
- instruction->GetDexPc(),
- nullptr);
- DCHECK(!codegen_->IsLeafMethod());
+ DCHECK(!needs_write_barrier);
+ DCHECK(!may_need_runtime_call);
+ break;
}
+
+ DCHECK(needs_write_barrier);
+ CpuRegister register_value = value.AsRegister<CpuRegister>();
+ NearLabel done, not_null, do_put;
+ SlowPathCode* slow_path = nullptr;
+ CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
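+ // Runtime type check: store null directly when possible, otherwise compare
+ // the value's class with the array's component type and take the slow path
+ // on mismatch.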
+ if (may_need_runtime_call) {
+ slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86_64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (instruction->GetValueCanBeNull()) {
+ __ testl(register_value, register_value);
+ __ j(kNotEqual, &not_null);
+ __ movl(address, Immediate(0));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ jmp(&done);
+ __ Bind(&not_null);
+ }
+
+ __ movl(temp, Address(array, class_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ MaybeUnpoisonHeapReference(temp);
+ __ movl(temp, Address(temp, component_offset));
+ // No need to poison/unpoison, we're comparing two poisoned references.
+ __ cmpl(temp, Address(register_value, class_offset));
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ __ MaybeUnpoisonHeapReference(temp);
+ __ movl(temp, Address(temp, super_offset));
+ // No need to unpoison the result, we're comparing against null.
+ __ testl(temp, temp);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+ }
+
+ if (kPoisonHeapReferences) {
+ __ movl(temp, register_value);
+ __ PoisonHeapReference(temp);
+ __ movl(address, temp);
+ } else {
+ __ movl(address, register_value);
+ }
+ if (!may_need_runtime_call) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
+ codegen_->MarkGCCard(
+ temp, card, array, value.AsRegister<CpuRegister>(), instruction->GetValueCanBeNull());
+ __ Bind(&done);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ break;
+ }
+ case Primitive::kPrimInt: {
+ uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+ : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ if (value.IsRegister()) {
+ __ movl(address, value.AsRegister<CpuRegister>());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movl(address, Immediate(v));
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
case Primitive::kPrimLong: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- if (value.IsRegister()) {
- __ movq(Address(obj, offset), value.AsRegister<CpuRegister>());
- } else {
- int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
- DCHECK(IsInt<32>(v));
- int32_t v_32 = v;
- __ movq(Address(obj, offset), Immediate(v_32));
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
+ : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+ if (value.IsRegister()) {
+ __ movq(address, value.AsRegister<CpuRegister>());
} else {
- if (value.IsRegister()) {
- __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
- value.AsRegister<CpuRegister>());
- } else {
- int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
- DCHECK(IsInt<32>(v));
- int32_t v_32 = v;
- __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
- Immediate(v_32));
- }
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(address, Immediate(v_32));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
case Primitive::kPrimFloat: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- DCHECK(value.IsFpuRegister());
- __ movss(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
- } else {
- DCHECK(value.IsFpuRegister());
- __ movss(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
- value.AsFpuRegister<XmmRegister>());
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+ : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ DCHECK(value.IsFpuRegister());
+ __ movss(address, value.AsFpuRegister<XmmRegister>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
case Primitive::kPrimDouble: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- DCHECK(value.IsFpuRegister());
- __ movsd(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
- } else {
- DCHECK(value.IsFpuRegister());
- __ movsd(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
- value.AsFpuRegister<XmmRegister>());
- }
+ uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ Address address = index.IsConstant()
+ ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
+ : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+ DCHECK(value.IsFpuRegister());
+ __ movsd(address, value.AsFpuRegister<XmmRegister>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
}
@@ -4250,7 +4356,7 @@
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
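
The rewritten reference (kPrimNot) ArraySet path above replaces the unconditional pAputObject runtime call with an inline store guarded by a component-type comparison; only a mismatch falls back to ArraySetSlowPathX86_64, and the GC card mark plus heap-reference poisoning still happen on the inline path. A minimal standalone C++ sketch of the decision order (stand-in types, not ART's mirror classes):

    // Stand-in types, purely illustrative.
    struct Class { const Class* component_type; const Class* super; };
    struct Object { const Class* klass; };

    // Returns true when the store can complete inline; false means "take the
    // slow path", which does the full assignability check and may throw
    // ArrayStoreException.
    bool CanStoreInline(const Object* array, const Object* value,
                        bool static_type_is_object_array) {
      if (value == nullptr) {
        return true;  // storing null never needs a type check
      }
      const Class* component = array->klass->component_type;
      if (value->klass == component) {
        return true;  // exact component type match
      }
      // When the static type is Object[], a component type with no superclass
      // can only be java.lang.Object itself, so any reference is storable.
      if (static_type_is_object_array && component->super == nullptr) {
        return true;
      }
      return false;
    }
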
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 197ce63..ecc8630 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -70,6 +70,35 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
+class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionX86_64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(RSI);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(RDI);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::RegisterLocation(RAX);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterLocation(RDX)
+ : (is_instance
+ ? Location::RegisterLocation(RDX)
+ : Location::RegisterLocation(RSI));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(XMM0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64);
+};
+
+
class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
public:
InvokeDexCallingConventionVisitorX86_64() {}
@@ -215,6 +244,9 @@
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
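
FieldAccessCallingConventionX86_64 above fixes the registers used when an unresolved field access calls into the runtime: RDI carries the field index, RSI the object, RAX the return value, XMM0 floating-point values, and the value to store travels in RDX except for narrow static stores, which reuse RSI. A tiny standalone sketch of just the set-value choice (plain enum stand-ins, not ART code):

    enum Reg { RAX, RSI, RDI, RDX, XMM0 };

    // Mirrors GetSetValueLocation() above: wide values always travel in RDX;
    // narrow values use RDX for instance stores and RSI for static stores.
    Reg SetValueRegister(bool is_64bit, bool is_instance) {
      if (is_64bit) return RDX;
      return is_instance ? RDX : RSI;
    }
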
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 2c6c3b7..7a83662 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -398,6 +398,22 @@
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 039029a..0b65c56 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -247,12 +247,14 @@
return false;
}
- uint16_t class_def_idx = resolved_method->GetDeclaringClass()->GetDexClassDefIndex();
- if (!compiler_driver_->IsMethodVerifiedWithoutFailures(
- resolved_method->GetDexMethodIndex(), class_def_idx, *resolved_method->GetDexFile())) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
- << " couldn't be verified, so it cannot be inlined";
- return false;
+ if (!resolved_method->GetDeclaringClass()->IsVerified()) {
+ uint16_t class_def_idx = resolved_method->GetDeclaringClass()->GetDexClassDefIndex();
+ if (!compiler_driver_->IsMethodVerifiedWithoutFailures(
+ resolved_method->GetDexMethodIndex(), class_def_idx, *resolved_method->GetDexFile())) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ << " couldn't be verified, so it cannot be inlined";
+ return false;
+ }
}
if (invoke_instruction->IsInvokeStaticOrDirect() &&
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 22bca2f..3287a0a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -431,19 +431,41 @@
HInstruction* value = instruction->GetValue();
if (value->GetType() != Primitive::kPrimNot) return;
+ if (CanEnsureNotNullAt(value, instruction)) {
+ instruction->ClearValueCanBeNull();
+ }
+
if (value->IsArrayGet()) {
if (value->AsArrayGet()->GetArray() == instruction->GetArray()) {
// If the code is just swapping elements in the array, no need for a type check.
instruction->ClearNeedsTypeCheck();
+ return;
}
}
if (value->IsNullConstant()) {
instruction->ClearNeedsTypeCheck();
+ return;
}
- if (CanEnsureNotNullAt(value, instruction)) {
- instruction->ClearValueCanBeNull();
+ ScopedObjectAccess soa(Thread::Current());
+ ReferenceTypeInfo array_rti = instruction->GetArray()->GetReferenceTypeInfo();
+ ReferenceTypeInfo value_rti = value->GetReferenceTypeInfo();
+ if (!array_rti.IsValid()) {
+ return;
+ }
+
+ if (value_rti.IsValid() && array_rti.CanArrayHold(value_rti)) {
+ instruction->ClearNeedsTypeCheck();
+ return;
+ }
+
+ if (array_rti.IsObjectArray()) {
+ if (array_rti.IsExact()) {
+ instruction->ClearNeedsTypeCheck();
+ return;
+ }
+ instruction->SetStaticTypeOfArrayIsObjectArray();
}
}
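
VisitArraySet above now consults reference type info (RTI) to drop the aput-object type check when the store is provably safe, and otherwise records that the static type is Object[] so the code generator can use the cheaper superclass-is-null check shown earlier. A standalone sketch of the decision order, with plain booleans standing in for the RTI queries (not ART code):

    enum class Action { kClearTypeCheck, kMarkObjectArray, kKeepTypeCheck };

    // Stand-in answers to the RTI queries used in VisitArraySet.
    struct ArraySetFacts {
      bool value_is_null;
      bool value_loaded_from_same_array;
      bool array_rti_valid;
      bool value_rti_valid;
      bool array_can_hold_value;   // exact array whose component type can hold the value
      bool array_is_object_array;
      bool array_rti_exact;
    };

    Action Simplify(const ArraySetFacts& f) {
      if (f.value_loaded_from_same_array || f.value_is_null) return Action::kClearTypeCheck;
      if (!f.array_rti_valid) return Action::kKeepTypeCheck;
      if (f.value_rti_valid && f.array_can_hold_value) return Action::kClearTypeCheck;
      if (f.array_is_object_array) {
        // An exact Object[] can hold any reference; otherwise remember the
        // static type so codegen can emit the lighter-weight check.
        return f.array_rti_exact ? Action::kClearTypeCheck : Action::kMarkObjectArray;
      }
      return Action::kKeepTypeCheck;
    }
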
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 3e3acaf..b60905d 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -747,6 +747,24 @@
__ Sd(val, adr, 0);
}
+// Thread java.lang.Thread.currentThread()
+void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
+
+ __ LoadFromOffset(kLoadUnsignedWord,
+ out,
+ TR,
+ Thread::PeerOffset<kMips64PointerSize>().Int32Value());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -758,7 +776,6 @@
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(ThreadCurrentThread)
UNIMPLEMENTED_INTRINSIC(UnsafeGet)
UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
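
The Thread.currentThread() intrinsic above turns the call into a single load of the java.lang.Thread peer from a fixed offset off the reserved thread register (TR, aliased to S1 in the registers_mips64.h change below). A trivial standalone C++ model of the emitted code (field layout and names are assumptions, not ART's Thread layout):

    #include <cstdint>

    struct ThreadModel {
      uint32_t state_and_flags;
      uint32_t peer;  // compressed reference to the java.lang.Thread object
    };

    // Stand-in for LoadFromOffset(kLoadUnsignedWord, out, TR, Thread::PeerOffset()):
    // one zero-extended 32-bit load relative to the thread register.
    uint32_t CurrentThreadPeer(const ThreadModel* tr) {
      return tr->peer;
    }
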
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 486968c..849f876 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1067,6 +1067,10 @@
M(Shr, BinaryOperation) \
M(StaticFieldGet, Instruction) \
M(StaticFieldSet, Instruction) \
+ M(UnresolvedInstanceFieldGet, Instruction) \
+ M(UnresolvedInstanceFieldSet, Instruction) \
+ M(UnresolvedStaticFieldGet, Instruction) \
+ M(UnresolvedStaticFieldSet, Instruction) \
M(StoreLocal, Instruction) \
M(Sub, BinaryOperation) \
M(SuspendCheck, Instruction) \
@@ -1644,17 +1648,34 @@
bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) {
return IsValidHandle(type_handle_);
}
+
bool IsExact() const { return is_exact_; }
bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsObjectClass();
}
+
+ bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
+ }
+
bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsInterface();
}
+ bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return GetTypeHandle()->IsArrayClass();
+ }
+
+ bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!IsExact()) return false;
+ if (!IsArrayClass()) return false;
+ return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
+ }
+
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -2222,7 +2243,9 @@
public:
int32_t GetValue() const { return value_; }
- uint64_t GetValueAsUint64() const OVERRIDE { return static_cast<uint64_t>(value_); }
+ uint64_t GetValueAsUint64() const OVERRIDE {
+ return static_cast<uint64_t>(static_cast<uint32_t>(value_));
+ }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
DCHECK(other->IsIntConstant());
@@ -4312,7 +4335,8 @@
SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
expected_component_type_(expected_component_type),
needs_type_check_(value->GetType() == Primitive::kPrimNot),
- value_can_be_null_(true) {
+ value_can_be_null_(true),
+ static_type_of_array_is_object_array_(false) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
SetRawInputAt(2, value);
@@ -4341,8 +4365,13 @@
value_can_be_null_ = false;
}
+ void SetStaticTypeOfArrayIsObjectArray() {
+ static_type_of_array_is_object_array_ = true;
+ }
+
bool GetValueCanBeNull() const { return value_can_be_null_; }
bool NeedsTypeCheck() const { return needs_type_check_; }
+ bool StaticTypeOfArrayIsObjectArray() const { return static_type_of_array_is_object_array_; }
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
@@ -4369,6 +4398,9 @@
const Primitive::Type expected_component_type_;
bool needs_type_check_;
bool value_can_be_null_;
+ // Cached information for the reference_type_info_ so that codegen
+ // does not need to inspect the static type.
+ bool static_type_of_array_is_object_array_;
DISALLOW_COPY_AND_ASSIGN(HArraySet);
};
@@ -4707,6 +4739,112 @@
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
+class HUnresolvedInstanceFieldGet : public HExpression<1> {
+ public:
+ HUnresolvedInstanceFieldGet(HInstruction* obj,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+ field_index_(field_index) {
+ SetRawInputAt(0, obj);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return GetType(); }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet);
+
+ private:
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet);
+};
+
+class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> {
+ public:
+ HUnresolvedInstanceFieldSet(HInstruction* obj,
+ HInstruction* value,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+ field_type_(field_type),
+ field_index_(field_index) {
+ DCHECK_EQ(field_type, value->GetType());
+ SetRawInputAt(0, obj);
+ SetRawInputAt(1, value);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return field_type_; }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
+
+ private:
+ const Primitive::Type field_type_;
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
+};
+
+class HUnresolvedStaticFieldGet : public HExpression<0> {
+ public:
+ HUnresolvedStaticFieldGet(Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+ field_index_(field_index) {
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return GetType(); }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedStaticFieldGet);
+
+ private:
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet);
+};
+
+class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> {
+ public:
+ HUnresolvedStaticFieldSet(HInstruction* value,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+ field_type_(field_type),
+ field_index_(field_index) {
+ DCHECK_EQ(field_type, value->GetType());
+ SetRawInputAt(0, value);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return field_type_; }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
+
+ private:
+ const Primitive::Type field_type_;
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
+};
+
// Implement the move-exception DEX instruction.
class HLoadException : public HExpression<0> {
public:
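
Among the nodes.h changes above, the HIntConstant::GetValueAsUint64 fix is easy to miss: casting a negative int32_t straight to uint64_t sign-extends it, while going through uint32_t first zero-extends it, which is the raw-bits view a 32-bit constant should produce. A standalone illustration:

    #include <cassert>
    #include <cstdint>

    uint64_t SignExtending(int32_t v) { return static_cast<uint64_t>(v); }
    uint64_t ZeroExtending(int32_t v) { return static_cast<uint64_t>(static_cast<uint32_t>(v)); }

    int main() {
      assert(SignExtending(-1) == 0xFFFFFFFFFFFFFFFFull);  // old behavior
      assert(ZeroExtending(-1) == 0x00000000FFFFFFFFull);  // behavior after the change
      return 0;
    }
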
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index dbfbd96..3e982dc 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -829,8 +829,12 @@
return compiled_method;
}
-static bool HasOnlyUnresolvedFailures(const VerifiedMethod* verified_method) {
- uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS;
+static bool CanHandleVerificationFailure(const VerifiedMethod* verified_method) {
+ // For access errors the compiler will use the unresolved helpers (e.g. HInvokeUnresolved).
+ uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS
+ | verifier::VerifyError::VERIFY_ERROR_ACCESS_CLASS
+ | verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD
+ | verifier::VerifyError::VERIFY_ERROR_ACCESS_METHOD;
return (verified_method->GetEncounteredVerificationFailures() & (~unresolved_mask)) == 0;
}
@@ -847,7 +851,7 @@
const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
DCHECK(!verified_method->HasRuntimeThrow());
if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || HasOnlyUnresolvedFailures(verified_method)) {
+ || CanHandleVerificationFailure(verified_method)) {
method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, jclass_loader, dex_file, dex_cache);
} else {
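
CanHandleVerificationFailure above widens the set of soft verification failures the optimizing compiler accepts; anything outside the mask still blocks compilation. A standalone model of the predicate (the bit values are stand-ins, not the real VerifyError constants):

    #include <cstdint>

    constexpr uint32_t kNoClass      = 1u << 0;  // stand-in bit values
    constexpr uint32_t kAccessClass  = 1u << 1;
    constexpr uint32_t kAccessField  = 1u << 2;
    constexpr uint32_t kAccessMethod = 1u << 3;

    bool CanHandle(uint32_t encountered_failures) {
      constexpr uint32_t handled = kNoClass | kAccessClass | kAccessField | kAccessMethod;
      return (encountered_failures & ~handled) == 0u;
    }
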
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index c7701b7..df45c8e 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -34,6 +34,8 @@
kInstructionSimplifications,
kInstructionSimplificationsArch,
kUnresolvedMethod,
+ kUnresolvedField,
+ kUnresolvedFieldNotAFastAccess,
kNotCompiledBranchOutsideMethodCode,
kNotCompiledCannotBuildSSA,
kNotCompiledCantAccesType,
@@ -45,7 +47,6 @@
kNotCompiledPathological,
kNotCompiledSpaceFilter,
kNotCompiledUnhandledInstruction,
- kNotCompiledUnresolvedField,
kNotCompiledUnsupportedIsa,
kNotCompiledVerifyAtRuntime,
kNotOptimizedDisabled,
@@ -104,6 +105,8 @@
case kInstructionSimplifications: return "kInstructionSimplifications";
case kInstructionSimplificationsArch: return "kInstructionSimplificationsArch";
case kUnresolvedMethod : return "kUnresolvedMethod";
+ case kUnresolvedField : return "kUnresolvedField";
+ case kUnresolvedFieldNotAFastAccess : return "kUnresolvedFieldNotAFastAccess";
case kNotCompiledBranchOutsideMethodCode: return "kNotCompiledBranchOutsideMethodCode";
case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
@@ -115,7 +118,6 @@
case kNotCompiledPathological : return "kNotCompiledPathological";
case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
- case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime";
case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index fe837e4..d22f254 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -52,6 +52,8 @@
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
void VisitInvoke(HInvoke* instr) OVERRIDE;
void VisitArrayGet(HArrayGet* instr) OVERRIDE;
void VisitCheckCast(HCheckCast* instr) OVERRIDE;
@@ -450,6 +452,22 @@
UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
}
+void RTPVisitor::VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) {
+ // TODO: Use descriptor to get the actual type.
+ if (instr->GetFieldType() == Primitive::kPrimNot) {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+ }
+}
+
+void RTPVisitor::VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) {
+ // TODO: Use descriptor to get the actual type.
+ if (instr->GetFieldType() == Primitive::kPrimNot) {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+ }
+}
+
void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache =
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index cc32da1..680e2d7 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -235,11 +235,7 @@
UsageError(" --compiler-backend=(Quick|Optimizing): select compiler backend");
UsageError(" set.");
UsageError(" Example: --compiler-backend=Optimizing");
- if (kUseOptimizingCompiler) {
- UsageError(" Default: Optimizing");
- } else {
- UsageError(" Default: Quick");
- }
+ UsageError(" Default: Optimizing");
UsageError("");
UsageError(" --compiler-filter="
"(verify-none"
@@ -503,7 +499,7 @@
class Dex2Oat FINAL {
public:
explicit Dex2Oat(TimingLogger* timings) :
- compiler_kind_(kUseOptimizingCompiler ? Compiler::kOptimizing : Compiler::kQuick),
+ compiler_kind_(Compiler::kOptimizing),
instruction_set_(kRuntimeISA),
// Take the default set of instruction features from the build.
verification_results_(nullptr),
@@ -752,10 +748,9 @@
void ProcessOptions(ParserOptions* parser_options) {
image_ = (!image_filename_.empty());
- if (!parser_options->requested_specific_compiler && !kUseOptimizingCompiler) {
- // If no specific compiler is requested, the current behavior is
- // to compile the boot image with Quick, and the rest with Optimizing.
- compiler_kind_ = image_ ? Compiler::kQuick : Compiler::kOptimizing;
+ if (image_) {
+ // We need the boot image to always be debuggable.
+ parser_options->debuggable = true;
}
if (oat_filename_.empty() && oat_fd_ == -1) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 1ac7930..82bc8b9 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -109,11 +109,12 @@
std::string boot_image_location_;
};
-#if defined (ART_TARGET)
+#if defined (ART_TARGET) && !defined(__mips__)
TEST_F(ImgDiagTest, ImageDiffPidSelf) {
#else
// Can't run this test on the host, it will fail when trying to open /proc/kpagestats
// because it's root read-only.
+// The test also fails on mips. b/24596015.
TEST_F(ImgDiagTest, DISABLED_ImageDiffPidSelf) {
#endif
// Invoke 'img_diag' against the current process.
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index dc1cf8a..d09631b 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -839,13 +839,12 @@
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
/*
* Called by managed code to resolve a static field and store a 64-bit primitive value.
- * On entry r0 holds field index, r1:r2 hold new_val
+ * On entry r0 holds field index, r2:r3 hold new_val
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
- mov r3, r2 @ pass one half of wide argument
- mov r2, r1 @ pass other half of wide argument
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r12 @ save callee saves in case of GC
+ @ r2:r3 contain the wide argument
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
@@ -870,6 +869,7 @@
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12, lr @ save callee saves in case of GC
+ @ r2:r3 contain the wide argument
ldr r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 12
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6812178..be5a15e 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1421,9 +1421,8 @@
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- mov x3, x1 // Store value
ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
- mov x2, x3 // Put value param
+ // x2 contains the parameter
mov x3, xSELF // pass Thread::Current
bl artSet64StaticFromCode
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index ce1b2f3..68156ae 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1244,7 +1244,7 @@
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a2, $a1 # pass new_val
+ # a2 contains the new val
ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
move $a3, rSELF # pass Thread::Current
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
index cd94d5e..1d07d47 100644
--- a/runtime/arch/mips64/registers_mips64.h
+++ b/runtime/arch/mips64/registers_mips64.h
@@ -52,7 +52,6 @@
S6 = 22,
S7 = 23,
T8 = 24, // More temporaries.
- TMP = T8, // scratch register (in addition to AT)
T9 = 25,
K0 = 26, // Reserved for trap handler.
K1 = 27,
@@ -60,6 +59,8 @@
SP = 29, // Stack pointer.
S8 = 30, // Saved value/frame pointer.
RA = 31, // Return address.
+ TR = S1, // ART Thread Register
+ TMP = T8, // scratch register (in addition to AT)
kNumberOfGpuRegisters = 32,
kNoGpuRegister = -1 // Signals an illegal register.
};
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index f10799c..c3a5ce3 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -774,22 +774,6 @@
return result;
}
- // Method with 32b arg0, 64b arg1
- size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
- ArtMethod* referrer) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
- defined(__aarch64__)
- // Just pass through.
- return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
-#else
- // Need to split up arguments.
- uint32_t lower = static_cast<uint32_t>(arg1 & 0xFFFFFFFF);
- uint32_t upper = static_cast<uint32_t>((arg1 >> 32) & 0xFFFFFFFF);
-
- return Invoke3WithReferrer(arg0, lower, upper, code, self, referrer);
-#endif
- }
-
static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
int32_t offset;
#ifdef __LP64__
@@ -1974,21 +1958,23 @@
}
-// TODO: Complete these tests for 32b architectures.
+// TODO: Complete these tests for 32b architectures
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
SHARED_REQUIRES(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
- defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) \
+ || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3UWithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
- values[i],
- StubTest::GetEntrypoint(self, kQuickSet64Static),
- self,
- referrer);
+ // 64 bit FieldSet stores the set value in the second register.
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
+ 0U,
+ values[i],
+ StubTest::GetEntrypoint(self, kQuickSet64Static),
+ self,
+ referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index f3b15c9..3afc4d5 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1434,15 +1434,18 @@
// Call artSet64StaticFromCode with 3 word size arguments plus with the referrer in the 2nd position
// so that new_val is aligned on even registers were we passing arguments in registers.
DEFINE_FUNCTION art_quick_set64_static
+ // TODO: Implement SETUP_GOT_NOSAVE for got_reg = ecx to avoid moving around the registers.
+ movd %ebx, %xmm0
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
+ movd %xmm0, %ebx
+ mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass high half of new_val
- PUSH ecx // pass low half of new_val
- PUSH ebx // pass referrer
+ PUSH ebx // pass high half of new_val
+ PUSH edx // pass low half of new_val
+ PUSH ecx // pass referrer
PUSH eax // pass field_idx
call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
addl LITERAL(32), %esp // pop arguments
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 2f438a3..1133203 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1383,7 +1383,7 @@
// This is singled out as the argument order is different.
DEFINE_FUNCTION art_quick_set64_static
- movq %rsi, %rdx // pass new_val
+ // new_val is already in %rdx
movq 8(%rsp), %rsi // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// field_idx is in rdi
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index d6b2b7e..632a50f 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -35,6 +35,8 @@
#include "quick/quick_method_frame_info.h"
#include "read_barrier-inl.h"
#include "runtime-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
#include "utils.h"
namespace art {
@@ -75,9 +77,28 @@
expected_root, desired_root);
}
+// AssertSharedHeld doesn't work in GetAccessFlags, so use a NO_THREAD_SAFETY_ANALYSIS helper.
+// TODO: Figure out why ASSERT_SHARED_CAPABILITY doesn't work.
+ALWAYS_INLINE
+static inline void DoGetAccessFlagsHelper(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
+ CHECK(method->IsRuntimeMethod() || method->GetDeclaringClass()->IsIdxLoaded() ||
+ method->GetDeclaringClass()->IsErroneous());
+}
+
inline uint32_t ArtMethod::GetAccessFlags() {
- DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
- GetDeclaringClass()->IsErroneous());
+ if (kIsDebugBuild) {
+ Thread* self = Thread::Current();
+ if (!Locks::mutator_lock_->IsSharedHeld(self)) {
+ ScopedObjectAccess soa(self);
+ CHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
+ GetDeclaringClass()->IsErroneous());
+ } else {
+ // We cannot use SOA in this case. We might be holding the lock, but may not be in the
+ // runnable state (e.g., during GC).
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ DoGetAccessFlagsHelper(this);
+ }
+ }
return access_flags_;
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index f78c827..0315c3a 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -75,7 +75,9 @@
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
- ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
+ // Note: GetAccessFlags acquires the mutator lock in debug mode to check that it is not called for
+ // a proxy method.
+ ALWAYS_INLINE uint32_t GetAccessFlags();
void SetAccessFlags(uint32_t new_access_flags) {
// Not called within a transaction.
@@ -86,77 +88,78 @@
InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the method is declared public.
- bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPublic() {
return (GetAccessFlags() & kAccPublic) != 0;
}
// Returns true if the method is declared private.
- bool IsPrivate() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrivate() {
return (GetAccessFlags() & kAccPrivate) != 0;
}
// Returns true if the method is declared static.
- bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStatic() {
return (GetAccessFlags() & kAccStatic) != 0;
}
// Returns true if the method is a constructor.
- bool IsConstructor() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsConstructor() {
return (GetAccessFlags() & kAccConstructor) != 0;
}
// Returns true if the method is a class initializer.
- bool IsClassInitializer() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsClassInitializer() {
return IsConstructor() && IsStatic();
}
// Returns true if the method is static, private, or a constructor.
- bool IsDirect() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsDirect() {
return IsDirect(GetAccessFlags());
}
static bool IsDirect(uint32_t access_flags) {
- return (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0;
+ constexpr uint32_t direct = kAccStatic | kAccPrivate | kAccConstructor;
+ return (access_flags & direct) != 0;
}
// Returns true if the method is declared synchronized.
- bool IsSynchronized() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized;
+ bool IsSynchronized() {
+ constexpr uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized;
return (GetAccessFlags() & synchonized) != 0;
}
- bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsFinal() {
return (GetAccessFlags() & kAccFinal) != 0;
}
- bool IsMiranda() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsMiranda() {
return (GetAccessFlags() & kAccMiranda) != 0;
}
- bool IsNative() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsNative() {
return (GetAccessFlags() & kAccNative) != 0;
}
- bool IsFastNative() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t mask = kAccFastNative | kAccNative;
+ bool IsFastNative() {
+ constexpr uint32_t mask = kAccFastNative | kAccNative;
return (GetAccessFlags() & mask) == mask;
}
- bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsAbstract() {
return (GetAccessFlags() & kAccAbstract) != 0;
}
- bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsSynthetic() {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPreverified() {
return (GetAccessFlags() & kAccPreverified) != 0;
}
- void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetPreverified() {
DCHECK(!IsPreverified());
SetAccessFlags(GetAccessFlags() | kAccPreverified);
}
@@ -404,7 +407,7 @@
return GetNativePointer<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
}
- void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetEntryPointFromJni(const void* entrypoint) {
DCHECK(IsNative());
SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
}
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index b6ad547..beabce3 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -2463,6 +2463,9 @@
ScopedCheck sc(kFlag_Default, __FUNCTION__);
JniValueType args[2] = {{.E = env}, {.L = obj}};
if (sc.Check(soa, true, "EL", args)) {
+ if (obj != nullptr) {
+ down_cast<JNIEnvExt*>(env)->RecordMonitorEnter(obj);
+ }
JniValueType result;
result.i = baseEnv(env)->MonitorEnter(env, obj);
if (sc.Check(soa, false, "i", &result)) {
@@ -2477,6 +2480,9 @@
ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
JniValueType args[2] = {{.E = env}, {.L = obj}};
if (sc.Check(soa, true, "EL", args)) {
+ if (obj != nullptr) {
+ down_cast<JNIEnvExt*>(env)->CheckMonitorRelease(obj);
+ }
JniValueType result;
result.i = baseEnv(env)->MonitorExit(env, obj);
if (sc.Check(soa, false, "i", &result)) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index dbc5cec..b0590e2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2650,10 +2650,8 @@
const DexFile& dex_file,
bool allow_failure) {
// Search assuming unique-ness of dex file.
- JavaVMExt* const vm = self->GetJniEnv()->vm;
for (jweak weak_root : dex_caches_) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
- vm->DecodeWeakGlobal(self, weak_root));
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(self->DecodeJObject(weak_root));
if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
return dex_cache;
}
@@ -6202,10 +6200,9 @@
void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
Thread* const self = Thread::Current();
- JavaVMExt* const vm = self->GetJniEnv()->vm;
for (const ClassLoaderData& data : class_loaders_) {
- auto* const class_loader = down_cast<mirror::ClassLoader*>(
- vm->DecodeWeakGlobal(self, data.weak_root));
+ // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
+ auto* const class_loader = down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
if (class_loader != nullptr) {
visitor->Visit(class_loader);
}
@@ -6218,8 +6215,8 @@
JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
const ClassLoaderData& data = *it;
- auto* const class_loader = down_cast<mirror::ClassLoader*>(
- vm->DecodeWeakGlobal(self, data.weak_root));
+ // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
+ auto* const class_loader = down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
if (class_loader != nullptr) {
++it;
} else {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 739403f..7f3e938 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -883,6 +883,7 @@
friend class ImageWriter; // for GetClassRoots
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
+ friend class JniInternalTest; // for GetRuntimeQuickGenericJniStub
ART_FRIEND_TEST(mirror::DexCacheTest, Open); // for AllocDexCache
DISALLOW_COPY_AND_ASSIGN(ClassLinker);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 450031a..d24b4fb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -579,15 +579,11 @@
Runtime* runtime = Runtime::Current();
ScopedSuspendAll ssa(__FUNCTION__);
- Thread* self = Thread::Current();
- ThreadState old_state = self->SetStateUnsafe(kRunnable);
- CHECK_NE(old_state, kRunnable);
if (RequiresDeoptimization()) {
runtime->GetInstrumentation()->EnableDeoptimization();
}
instrumentation_events_ = 0;
gDebuggerActive = true;
- CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
LOG(INFO) << "Debugger is active";
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 8ae0b07..f66628d 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -411,12 +411,6 @@
ThrowNullPointerExceptionForMethodAccess(method_idx, type);
return nullptr; // Failure.
} else if (access_check) {
- // Incompatible class change should have been handled in resolve method.
- if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
- ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method,
- referrer);
- return nullptr; // Failure.
- }
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
mirror::Class* referring_class = referrer->GetDeclaringClass();
bool can_access_resolved_method =
@@ -426,6 +420,12 @@
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
}
+ // Incompatible class change should have been handled in resolve method.
+ if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
+ ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method,
+ referrer);
+ return nullptr; // Failure.
+ }
}
switch (type) {
case kStatic:
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index fc5c52e..58f256a 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -65,6 +65,9 @@
static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
+ if (UNLIKELY(env->check_jni)) {
+ env->CheckNoHeldMonitors();
+ }
env->locals.SetSegmentState(env->local_ref_cookie);
env->local_ref_cookie = saved_local_ref_cookie;
self->PopHandleScope();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 468179c..0a7a69f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -620,7 +620,10 @@
gc_mark_stack_->PushBack(to_ref);
} else {
CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
- static_cast<uint32_t>(kMarkStackModeGcExclusive));
+ static_cast<uint32_t>(kMarkStackModeGcExclusive))
+ << "ref=" << to_ref
+ << " self->gc_marking=" << self->GetIsGcMarking()
+ << " cc->is_marking=" << is_marking_;
CHECK(self == thread_running_gc_)
<< "Only GC-running thread should access the mark stack "
<< "in the GC exclusive mark stack mode";
diff --git a/runtime/globals.h b/runtime/globals.h
index d70f3ab..987a94e 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -58,12 +58,6 @@
static constexpr bool kIsTargetBuild = false;
#endif
-#if defined(ART_USE_OPTIMIZING_COMPILER)
-static constexpr bool kUseOptimizingCompiler = true;
-#else
-static constexpr bool kUseOptimizingCompiler = false;
-#endif
-
// Garbage collector constants.
static constexpr bool kMovingCollector = true;
static constexpr bool kMarkCompactSupport = false && kMovingCollector;
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index e9c16c1..9b9c5d2 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -111,6 +111,9 @@
DCHECK(this_object != nullptr);
ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*));
if (info != nullptr) {
+ // Since the instrumentation is marked from the declaring class we need to mark the card so
+ // that mod-union tables and card rescanning know about the update.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(caller->GetDeclaringClass());
info->AddInvokeInfo(thread, dex_pc, this_object->GetClass());
}
}
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index b18b430..4104d7a 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -16,10 +16,17 @@
#include "jni_env_ext.h"
+#include <algorithm>
+#include <vector>
+
#include "check_jni.h"
#include "indirect_reference_table.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
+#include "lock_word.h"
+#include "mirror/object-inl.h"
+#include "nth_caller_visitor.h"
+#include "thread-inl.h"
namespace art {
@@ -63,14 +70,14 @@
JNIEnvExt::~JNIEnvExt() {
}
-jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) {
if (obj == nullptr) {
return nullptr;
}
return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
}
-void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+void JNIEnvExt::DeleteLocalRef(jobject obj) {
if (obj != nullptr) {
locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
}
@@ -86,14 +93,14 @@
monitors.Dump(os);
}
-void JNIEnvExt::PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_) {
+void JNIEnvExt::PushFrame(int capacity) {
UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast.
// TODO: take 'capacity' into account.
stacked_local_ref_cookies.push_back(local_ref_cookie);
local_ref_cookie = locals.GetSegmentState();
}
-void JNIEnvExt::PopFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+void JNIEnvExt::PopFrame() {
locals.SetSegmentState(local_ref_cookie);
local_ref_cookie = stacked_local_ref_cookies.back();
stacked_local_ref_cookies.pop_back();
@@ -104,4 +111,118 @@
IndirectReferenceTable::SegmentStateOffset().Int32Value());
}
+// Use some defining part of the caller's frame as the identifying mark for the JNI segment.
+static uintptr_t GetJavaCallFrame(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ NthCallerVisitor zeroth_caller(self, 0, false);
+ zeroth_caller.WalkStack();
+ if (zeroth_caller.caller == nullptr) {
+ // No Java code, must be from pure native code.
+ return 0;
+ } else if (zeroth_caller.GetCurrentQuickFrame() == nullptr) {
+ // Shadow frame = interpreter. Use the actual shadow frame's address.
+ DCHECK(zeroth_caller.GetCurrentShadowFrame() != nullptr);
+ return reinterpret_cast<uintptr_t>(zeroth_caller.GetCurrentShadowFrame());
+ } else {
+ // Quick frame = compiled code. Use the bottom of the frame.
+ return reinterpret_cast<uintptr_t>(zeroth_caller.GetCurrentQuickFrame());
+ }
+}
+
+void JNIEnvExt::RecordMonitorEnter(jobject obj) {
+ locked_objects_.push_back(std::make_pair(GetJavaCallFrame(self), obj));
+}
+
+static std::string ComputeMonitorDescription(Thread* self,
+ jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* o = self->DecodeJObject(obj);
+ if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
+ Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ // Getting the identity hashcode here would result in lock inflation and suspension of the
+ // current thread, which isn't safe if this is the only runnable thread.
+ return StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
+ reinterpret_cast<intptr_t>(o),
+ PrettyTypeOf(o).c_str());
+ } else {
+ // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
+ // we get the pretty type before we call IdentityHashCode.
+ const std::string pretty_type(PrettyTypeOf(o));
+ return StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
+ }
+}
+
+static void RemoveMonitors(Thread* self,
+ uintptr_t frame,
+ ReferenceTable* monitors,
+ std::vector<std::pair<uintptr_t, jobject>>* locked_objects)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ auto kept_end = std::remove_if(
+ locked_objects->begin(),
+ locked_objects->end(),
+ [self, frame, monitors](const std::pair<uintptr_t, jobject>& pair)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (frame == pair.first) {
+ mirror::Object* o = self->DecodeJObject(pair.second);
+ monitors->Remove(o);
+ return true;
+ }
+ return false;
+ });
+ locked_objects->erase(kept_end, locked_objects->end());
+}
+
+void JNIEnvExt::CheckMonitorRelease(jobject obj) {
+ uintptr_t current_frame = GetJavaCallFrame(self);
+ std::pair<uintptr_t, jobject> exact_pair = std::make_pair(current_frame, obj);
+ auto it = std::find(locked_objects_.begin(), locked_objects_.end(), exact_pair);
+ bool will_abort = false;
+ if (it != locked_objects_.end()) {
+ locked_objects_.erase(it);
+ } else {
+ // Check whether this monitor was locked in another JNI "session."
+ mirror::Object* mirror_obj = self->DecodeJObject(obj);
+ for (std::pair<uintptr_t, jobject>& pair : locked_objects_) {
+ if (self->DecodeJObject(pair.second) == mirror_obj) {
+ std::string monitor_descr = ComputeMonitorDescription(self, pair.second);
+ vm->JniAbortF("<JNI MonitorExit>",
+ "Unlocking monitor that wasn't locked here: %s",
+ monitor_descr.c_str());
+ will_abort = true;
+ break;
+ }
+ }
+ }
+
+ // When we abort, also make sure that any locks from the current "session" are removed from
+ // the monitors table, otherwise we may visit local objects in GC during abort (which won't be
+ // valid anymore).
+ if (will_abort) {
+ RemoveMonitors(self, current_frame, &monitors, &locked_objects_);
+ }
+}
+
+void JNIEnvExt::CheckNoHeldMonitors() {
+ uintptr_t current_frame = GetJavaCallFrame(self);
+ // The locked_objects_ are grouped by their stack frame component, as this enforces structured
+ // locking, and the groups form a stack. So the current frame entries are at the end. Check
+ // whether the vector is empty, and when there are elements, whether the last element belongs
+  // to this call - this signals that there are monitors that were never unlocked.
+ if (!locked_objects_.empty()) {
+ std::pair<uintptr_t, jobject>& pair = locked_objects_[locked_objects_.size() - 1];
+ if (pair.first == current_frame) {
+ std::string monitor_descr = ComputeMonitorDescription(self, pair.second);
+ vm->JniAbortF("<JNI End>",
+ "Still holding a locked object on JNI end: %s",
+ monitor_descr.c_str());
+ // When we abort, also make sure that any locks from the current "session" are removed from
+ // the monitors table, otherwise we may visit local objects in GC during abort.
+ RemoveMonitors(self, current_frame, &monitors, &locked_objects_);
+ } else if (kIsDebugBuild) {
+ // Make sure there are really no other entries and our checking worked as expected.
+ for (std::pair<uintptr_t, jobject>& check_pair : locked_objects_) {
+ CHECK_NE(check_pair.first, current_frame);
+ }
+ }
+ }
+}
+
} // namespace art
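
The new CheckJNI bookkeeping above tags every MonitorEnter with the Java call frame it happened under, so MonitorExit and the end-of-native-call check can enforce structured locking. A condensed standalone model of that bookkeeping (jobjects and frames reduced to integers, JniAbortF reduced to a bool result; not ART code):

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using LockedObjects = std::vector<std::pair<uintptr_t, int>>;  // (frame, object id)

    void RecordMonitorEnter(LockedObjects& locked, uintptr_t frame, int obj) {
      locked.push_back({frame, obj});
    }

    // False means "abort": the object was not locked under this frame.
    bool CheckMonitorRelease(LockedObjects& locked, uintptr_t frame, int obj) {
      auto it = std::find(locked.begin(), locked.end(), std::make_pair(frame, obj));
      if (it == locked.end()) {
        return false;  // locked in another "session" (or never locked)
      }
      locked.erase(it);
      return true;
    }

    // False means "abort": the current frame still holds monitors at call exit.
    // Entries are grouped per frame and the groups form a stack, so only the
    // last entry needs inspecting.
    bool CheckNoHeldMonitors(const LockedObjects& locked, uintptr_t frame) {
      return locked.empty() || locked.back().first != frame;
    }
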
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 9b55536..3828ff0 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -43,8 +43,8 @@
void SetCheckJniEnabled(bool enabled);
- void PushFrame(int capacity);
- void PopFrame();
+ void PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PopFrame() SHARED_REQUIRES(Locks::mutator_lock_);
template<typename T>
T AddLocalReference(mirror::Object* obj)
@@ -89,10 +89,27 @@
// Used by -Xcheck:jni.
const JNINativeInterface* unchecked_functions;
+ // Functions to keep track of monitor lock and unlock operations. Used to ensure proper locking
+ // rules in CheckJNI mode.
+
+ // Record locking of a monitor.
+ void RecordMonitorEnter(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Check the release, that is, that the release is performed in the same JNI "segment."
+ void CheckMonitorRelease(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Check that no monitors are held that have been acquired in this JNI "segment."
+ void CheckNoHeldMonitors() SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
// The constructor should not be called directly. It may leave the object in an erronuous state,
// and the result needs to be checked.
JNIEnvExt(Thread* self, JavaVMExt* vm);
+
+ // All locked objects, with the (Java caller) stack frame that locked them. Used in CheckJNI
+ // to ensure that only monitors locked in this native frame are being unlocked, and that at
+ // the end all are unlocked.
+ std::vector<std::pair<uintptr_t, jobject>> locked_objects_;
};
// Used to save and restore the JNIEnvExt state when not going through code created by the JNI
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 2a0cb28..41b368e 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -607,11 +607,64 @@
EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
}
+ void SetUpForTest(bool direct, const char* method_name, const char* method_sig,
+ void* native_fnptr) {
+ // Initialize class loader and set generic JNI entrypoint.
+ // Note: this code is adapted from the jni_compiler_test, and taken with minimal modifications.
+ if (!runtime_->IsStarted()) {
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ class_loader_ = LoadDex("MyClassNatives");
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
+ mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
+ const auto pointer_size = class_linker_->GetImagePointerSize();
+ ArtMethod* method = direct ? c->FindDirectMethod(method_name, method_sig, pointer_size) :
+ c->FindVirtualMethod(method_name, method_sig, pointer_size);
+ ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
+ method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub());
+ }
+ // Start runtime.
+ Thread::Current()->TransitionFromSuspendedToRunnable();
+ bool started = runtime_->Start();
+ CHECK(started);
+ }
+ // JNI operations after runtime start.
+ env_ = Thread::Current()->GetJniEnv();
+ jklass_ = env_->FindClass("MyClassNatives");
+ ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig;
+
+ if (direct) {
+ jmethod_ = env_->GetStaticMethodID(jklass_, method_name, method_sig);
+ } else {
+ jmethod_ = env_->GetMethodID(jklass_, method_name, method_sig);
+ }
+ ASSERT_TRUE(jmethod_ != nullptr) << method_name << " " << method_sig;
+
+ if (native_fnptr != nullptr) {
+ JNINativeMethod methods[] = { { method_name, method_sig, native_fnptr } };
+ ASSERT_EQ(JNI_OK, env_->RegisterNatives(jklass_, methods, 1))
+ << method_name << " " << method_sig;
+ } else {
+ env_->UnregisterNatives(jklass_);
+ }
+
+ jmethodID constructor = env_->GetMethodID(jklass_, "<init>", "()V");
+ jobj_ = env_->NewObject(jklass_, constructor);
+ ASSERT_TRUE(jobj_ != nullptr) << method_name << " " << method_sig;
+ }
+
JavaVMExt* vm_;
JNIEnv* env_;
jclass aioobe_;
jclass ase_;
jclass sioobe_;
+
+ jclass jklass_;
+ jobject jobj_;
+ jobject class_loader_;
+ jmethodID jmethod_;
};
TEST_F(JniInternalTest, AllocObject) {
@@ -2111,4 +2164,38 @@
}
}
+void Java_MyClassNatives_foo_exit(JNIEnv* env, jobject thisObj) {
+ // Release the monitor on self. This should trigger an abort.
+ env->MonitorExit(thisObj);
+}
+
+TEST_F(JniInternalTest, MonitorExitLockedInDifferentCall) {
+ SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo_exit));
+ ASSERT_NE(jobj_, nullptr);
+
+ env_->MonitorEnter(jobj_);
+ EXPECT_FALSE(env_->ExceptionCheck());
+
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+ check_jni_abort_catcher.Check("Unlocking monitor that wasn't locked here");
+}
+
+void Java_MyClassNatives_foo_enter_no_exit(JNIEnv* env, jobject thisObj) {
+ // Acquire but don't release the monitor on self. This should trigger an abort on return.
+ env->MonitorEnter(thisObj);
+}
+
+TEST_F(JniInternalTest, MonitorExitNotAllUnlocked) {
+ SetUpForTest(false,
+ "foo",
+ "()V",
+ reinterpret_cast<void*>(&Java_MyClassNatives_foo_enter_no_exit));
+ ASSERT_NE(jobj_, nullptr);
+
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+ check_jni_abort_catcher.Check("Still holding a locked object on JNI end");
+}
+
} // namespace art
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 8fd6849..5da15df 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -229,6 +229,65 @@
return nullptr;
}
+static mirror::Field* GetPublicFieldRecursive(
+ Thread* self, mirror::Class* clazz, mirror::String* name)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(clazz != nullptr);
+ DCHECK(name != nullptr);
+ DCHECK(self != nullptr);
+
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> h_clazz(hs.NewHandle(clazz));
+
+ // We search the current class, its direct interfaces, then its superclass.
+ while (h_clazz.Get() != nullptr) {
+ mirror::Field* result = GetDeclaredField(self, h_clazz.Get(), name);
+ if ((result != nullptr) && (result->GetAccessFlags() & kAccPublic)) {
+ return result;
+ } else if (UNLIKELY(self->IsExceptionPending())) {
+ // Something went wrong. Bail out.
+ return nullptr;
+ }
+
+ uint32_t num_direct_interfaces = h_clazz->NumDirectInterfaces();
+ for (uint32_t i = 0; i < num_direct_interfaces; i++) {
+ mirror::Class* iface = mirror::Class::GetDirectInterface(self, h_clazz, i);
+ if (UNLIKELY(iface == nullptr)) {
+ self->AssertPendingException();
+ return nullptr;
+ }
+ result = GetPublicFieldRecursive(self, iface, name);
+ if (result != nullptr) {
+ DCHECK(result->GetAccessFlags() & kAccPublic);
+ return result;
+ } else if (UNLIKELY(self->IsExceptionPending())) {
+ // Something went wrong. Bail out.
+ return nullptr;
+ }
+ }
+
+ // We don't try the superclass if we are an interface.
+ if (h_clazz->IsInterface()) {
+ break;
+ }
+
+ // Get the next class.
+ h_clazz.Assign(h_clazz->GetSuperClass());
+ }
+ return nullptr;
+}
+
+static jobject Class_getPublicFieldRecursive(JNIEnv* env, jobject javaThis, jstring name) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto* name_string = soa.Decode<mirror::String*>(name);
+ if (UNLIKELY(name_string == nullptr)) {
+ ThrowNullPointerException("name == null");
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobject>(
+ GetPublicFieldRecursive(soa.Self(), DecodeClass(soa, javaThis), name_string));
+}
+
static jobject Class_getDeclaredFieldInternal(JNIEnv* env, jobject javaThis, jstring name) {
ScopedFastNativeObjectAccess soa(env);
auto* name_string = soa.Decode<mirror::String*>(name);
@@ -678,6 +737,7 @@
"!([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;"),
NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"),
NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getPublicFieldRecursive, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"),
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index daae401..85ac4aa 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -62,8 +62,10 @@
if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
- obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
- offset, old_ref, ref);
+ if (ref != old_ref) {
+ obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
+ offset, old_ref, ref);
+ }
}
AssertToSpaceInvariant(obj, offset, ref);
return ref;
@@ -90,17 +92,17 @@
// To be implemented.
return ref;
} else if (with_read_barrier && kUseTableLookupReadBarrier) {
- if (kMaybeDuringStartup && IsDuringStartup()) {
- // During startup, the heap may not be initialized yet. Just
- // return the given ref.
- return ref;
- }
- if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+ Thread* self = Thread::Current();
+ if (self != nullptr &&
+ self->GetIsGcMarking() &&
+ Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
MirrorType* old_ref = ref;
ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
- Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
- atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+ if (ref != old_ref) {
+ Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
+ atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+ }
}
AssertToSpaceInvariant(gc_root_source, ref);
return ref;
@@ -127,19 +129,19 @@
// To be implemented.
return ref;
} else if (with_read_barrier && kUseTableLookupReadBarrier) {
- if (kMaybeDuringStartup && IsDuringStartup()) {
- // During startup, the heap may not be initialized yet. Just
- // return the given ref.
- return ref;
- }
- if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+ Thread* self = Thread::Current();
+ if (self != nullptr &&
+ self->GetIsGcMarking() &&
+ Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
ref = reinterpret_cast<MirrorType*>(Mark(ref));
auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
- auto* atomic_root =
- reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
- atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+ if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
+ auto* atomic_root =
+ reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
+ atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+ }
}
AssertToSpaceInvariant(gc_root_source, ref);
return ref;
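The read_barrier-inl.h hunks above only issue the CAS when marking actually produced a different reference, and they tolerate a losing CAS because another mutator may have healed the field or root first. A minimal sketch of that pattern, assuming std::atomic and stand-in types in place of ART's Atomic<>, compressed references, and collector Mark():

#include <atomic>

struct Object {};

// Stand-in for the collector's Mark(): returns the marked (possibly forwarded)
// version of obj. Identity here is just a placeholder.
Object* Mark(Object* obj) { return obj; }

Object* ReadRoot(std::atomic<Object*>* root, bool gc_is_marking) {
  Object* old_ref = root->load(std::memory_order_relaxed);
  Object* ref = old_ref;
  if (gc_is_marking && old_ref != nullptr) {
    ref = Mark(old_ref);
    if (ref != old_ref) {
      // Heal the root so later reads skip the barrier work. A losing CAS is
      // fine: another thread already installed an updated reference.
      root->compare_exchange_strong(old_ref, ref);
    }
  }
  return ref;
}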
diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h
index 710c21f..8e5b187 100644
--- a/runtime/read_barrier_c.h
+++ b/runtime/read_barrier_c.h
@@ -26,10 +26,16 @@
// table-lookup read barriers.
#ifdef ART_USE_READ_BARRIER
+#if ART_READ_BARRIER_TYPE_IS_BAKER
#define USE_BAKER_READ_BARRIER
-// #define USE_BROOKS_READ_BARRIER
-// #define USE_TABLE_LOOKUP_READ_BARRIER
+#elif ART_READ_BARRIER_TYPE_IS_BROOKS
+#define USE_BROOKS_READ_BARRIER
+#elif ART_READ_BARRIER_TYPE_IS_TABLELOOKUP
+#define USE_TABLE_LOOKUP_READ_BARRIER
+#else
+#error "ART read barrier type must be set"
#endif
+#endif // ART_USE_READ_BARRIER
#ifdef ART_HEAP_POISONING
#define USE_HEAP_POISONING
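After this change, exactly one of USE_BAKER_READ_BARRIER, USE_BROOKS_READ_BARRIER, or USE_TABLE_LOOKUP_READ_BARRIER is defined whenever ART_USE_READ_BARRIER is set, and a build with an unknown type fails at the #error. Runtime C++ usually consumes such macros as compile-time booleans; a sketch of that mapping, with names chosen for illustration (ART defines constants along these lines in its read barrier headers):

#ifdef USE_BAKER_READ_BARRIER
static constexpr bool kUseBakerReadBarrier = true;
#else
static constexpr bool kUseBakerReadBarrier = false;
#endif

#ifdef USE_BROOKS_READ_BARRIER
static constexpr bool kUseBrooksReadBarrier = true;
#else
static constexpr bool kUseBrooksReadBarrier = false;
#endif

#ifdef USE_TABLE_LOOKUP_READ_BARRIER
static constexpr bool kUseTableLookupReadBarrier = true;
#else
static constexpr bool kUseTableLookupReadBarrier = false;
#endif

// Code can then branch at compile time, e.g. if (kUseTableLookupReadBarrier) {...}.
static constexpr bool kUseReadBarrier =
    kUseBakerReadBarrier || kUseBrooksReadBarrier || kUseTableLookupReadBarrier;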
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 7f72f8a..1d21a64 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -110,7 +110,7 @@
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
- ArtMethod* outer_method = *GetCurrentQuickFrame();
+ ArtMethod* outer_method = GetOuterMethod();
uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
@@ -194,11 +194,12 @@
}
bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
+ DCHECK_EQ(m, GetMethod());
// Process register map (which native and runtime methods don't have)
if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
return false;
}
- if (m->IsOptimized(sizeof(void*))) {
+ if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
return true; // TODO: Implement.
}
const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
@@ -251,7 +252,7 @@
if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
return true;
}
- if (m->IsOptimized(sizeof(void*))) {
+ if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
return GetVRegFromQuickCode(m, vreg, kind, val);
@@ -288,15 +289,15 @@
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
+ ArtMethod* outer_method = GetOuterMethod();
+ const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
DCHECK_EQ(m, GetMethod());
const DexFile::CodeItem* code_item = m->GetCodeItem();
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint16_t number_of_dex_registers = code_item->registers_size_;
DCHECK_LT(vreg, code_item->registers_size_);
- ArtMethod* outer_method = *GetCurrentQuickFrame();
- const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
@@ -405,7 +406,7 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- if (m->IsOptimized(sizeof(void*))) {
+ if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
@@ -481,7 +482,7 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (m->IsOptimized(sizeof(void*))) {
+ if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
return false;
} else {
return SetVRegFromQuickCode(m, vreg, new_value, kind);
@@ -590,7 +591,7 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (m->IsOptimized(sizeof(void*))) {
+ if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
return false;
} else {
return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
@@ -724,14 +725,14 @@
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
DCHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
CHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
diff --git a/runtime/stack.h b/runtime/stack.h
index 292c745..31acf0e 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -473,6 +473,10 @@
ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetOuterMethod() const {
+ return *GetCurrentQuickFrame();
+ }
+
bool IsShadowFrame() const {
return cur_shadow_frame_ != nullptr;
}
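GetOuterMethod() returns the ArtMethod stored in the quick frame itself, which with inlining is the outermost compiled method, while GetMethod() may report an inlined callee. The stack.cc hunks above route frame-layout queries (IsOptimized, return PC offset, CodeInfo) through the outer method for exactly that reason. A minimal sketch of the distinction, with stand-in types in place of ArtMethod and StackVisitor:

#include <cstddef>
#include <cstdint>

// Stand-in for ArtMethod: only what this sketch needs.
struct ArtMethod {
  bool IsOptimized(size_t /* pointer_size */) const { return is_optimized; }
  size_t GetReturnPcOffset() const { return return_pc_offset; }
  bool is_optimized = false;
  size_t return_pc_offset = 0;
};

// A quick frame stores the outermost compiled method at *sp; the logical
// "current" method can be an inlined callee of it.
struct QuickFrame {
  ArtMethod* outer_method;    // what *GetCurrentQuickFrame() yields
  ArtMethod* current_method;  // what GetMethod() yields
  uint8_t* sp;
};

uintptr_t GetReturnPc(const QuickFrame& frame) {
  // Frame-layout queries must use the outer method: it owns the compiled
  // frame, so its return-PC offset (not the inlined callee's) is the valid one.
  uint8_t* pc_addr = frame.sp + frame.outer_method->GetReturnPcOffset();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}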
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 8bf241b..f5d20bd 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -118,11 +118,8 @@
}
}
-inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
- AssertThreadSuspensionIsAllowable();
+inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
DCHECK_NE(new_state, kRunnable);
- DCHECK_EQ(this, Thread::Current());
- // Change to non-runnable state, thereby appearing suspended to the system.
DCHECK_EQ(GetState(), kRunnable);
union StateAndFlags old_state_and_flags;
union StateAndFlags new_state_and_flags;
@@ -145,12 +142,9 @@
break;
}
}
+}
- // Change to non-runnable state, thereby appearing suspended to the system.
- // Mark the release of the share of the mutator_lock_.
- Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
-
- // Once suspended - check the active suspend barrier flag
+inline void Thread::PassActiveSuspendBarriers() {
while (true) {
uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
if (LIKELY((current_flags & (kCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
@@ -159,11 +153,22 @@
PassActiveSuspendBarriers(this);
} else {
// Impossible
- LOG(FATAL) << "Fatal, thread transited into suspended without running the checkpoint";
+ LOG(FATAL) << "Fatal, thread transitioned into suspended without running the checkpoint";
}
}
}
+inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
+ AssertThreadSuspensionIsAllowable();
+ DCHECK_EQ(this, Thread::Current());
+ // Change to non-runnable state, thereby appearing suspended to the system.
+ TransitionToSuspendedAndRunCheckpoints(new_state);
+ // Mark the release of the share of the mutator_lock_.
+ Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
+ // Once suspended - check the active suspend barrier flag
+ PassActiveSuspendBarriers();
+}
+
inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
union StateAndFlags old_state_and_flags;
old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
@@ -191,7 +196,9 @@
PassActiveSuspendBarriers(this);
} else if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
// Impossible
- LOG(FATAL) << "Fatal, wrong checkpoint flag";
+ LOG(FATAL) << "Transitioning to runnable with checkpoint flag, "
+ << " flags=" << old_state_and_flags.as_struct.flags
+ << " state=" << old_state_and_flags.as_struct.state;
} else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
// Wait while our suspend count is non-zero.
MutexLock mu(this, *Locks::thread_suspend_count_lock_);
diff --git a/runtime/thread.h b/runtime/thread.h
index d21644d..d262c62 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -247,17 +247,15 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Transition from non-runnable to runnable state acquiring share on mutator_lock_.
- ThreadState TransitionFromSuspendedToRunnable()
+ ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- ALWAYS_INLINE;
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
// Transition from runnable into a state where mutator privileges are denied. Releases share of
// mutator lock.
- void TransitionFromRunnableToSuspended(ThreadState new_state)
+ ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
- UNLOCK_FUNCTION(Locks::mutator_lock_)
- ALWAYS_INLINE;
+ UNLOCK_FUNCTION(Locks::mutator_lock_);
// Once called thread suspension will cause an assertion failure.
const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
@@ -1017,11 +1015,15 @@
// Dbg::Disconnected.
ThreadState SetStateUnsafe(ThreadState new_state) {
ThreadState old_state = GetState();
- tls32_.state_and_flags.as_struct.state = new_state;
- // if transit to a suspended state, check the pass barrier request.
- if (UNLIKELY((new_state != kRunnable) &&
- (tls32_.state_and_flags.as_struct.flags & kActiveSuspendBarrier))) {
- PassActiveSuspendBarriers(this);
+ if (old_state == kRunnable && new_state != kRunnable) {
+ // Need to run pending checkpoint and suspend barriers. Run checkpoints in runnable state in
+ // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
+ // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
+ TransitionToSuspendedAndRunCheckpoints(new_state);
+ // Since we transitioned to a suspended state, check the pass barrier requests.
+ PassActiveSuspendBarriers();
+ } else {
+ tls32_.state_and_flags.as_struct.state = new_state;
}
return old_state;
}
@@ -1064,6 +1066,12 @@
void SetUpAlternateSignalStack();
void TearDownAlternateSignalStack();
+ ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
+ REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+
+ ALWAYS_INLINE void PassActiveSuspendBarriers()
+ REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+
// 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
// change from being Suspended to Runnable without a suspend request occurring.
union PACKED(4) StateAndFlags {
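The transition out of kRunnable is now split into two reusable steps: flip the packed state-and-flags word while running any pending checkpoints (still runnable, so a checkpoint may take a ScopedObjectAccess without deadlocking), then release the mutator lock share and drain active suspend barriers. SetStateUnsafe reuses the same two steps when leaving kRunnable. A minimal sketch of that control flow, assuming stand-in primitives for the state word, lock, and checkpoint machinery:

#include <atomic>
#include <cstdint>

enum ThreadState : uint16_t { kRunnable = 0, kSuspended = 1, kNative = 2 };

struct Thread {
  // Low 16 bits hold the state, high 16 bits the flags, mirroring the packed
  // StateAndFlags word so one CAS can change both.
  std::atomic<uint32_t> state_and_flags{kRunnable};

  static constexpr uint32_t kCheckpointRequest = 1u << 16;
  static constexpr uint32_t kActiveSuspendBarrier = 1u << 17;

  void RunCheckpointFunction() {
    // Stand-in: the real code runs the queued checkpoint and clears the flag.
    state_and_flags.fetch_and(~kCheckpointRequest);
  }
  void ReleaseMutatorLockShare() {
    // Stand-in for mutator_lock_->TransitionFromRunnableToSuspended(this).
  }
  void PassOneSuspendBarrier() {
    // Stand-in: the real code decrements the barrier and clears the flag.
    state_and_flags.fetch_and(~kActiveSuspendBarrier);
  }

  // Step 1: run pending checkpoints while still runnable (so a checkpoint can
  // take a ScopedObjectAccess without deadlocking), then CAS in the new state.
  void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
    while (true) {
      uint32_t old_word = state_and_flags.load(std::memory_order_relaxed);
      if ((old_word & kCheckpointRequest) != 0) {
        RunCheckpointFunction();
        continue;
      }
      uint32_t new_word = (old_word & 0xffff0000u) | new_state;
      if (state_and_flags.compare_exchange_weak(old_word, new_word)) {
        return;
      }
    }
  }

  // Step 2: once the thread appears suspended, drain active suspend barriers.
  void PassActiveSuspendBarriers() {
    while ((state_and_flags.load() & kActiveSuspendBarrier) != 0) {
      PassOneSuspendBarrier();
    }
  }

  void TransitionFromRunnableToSuspended(ThreadState new_state) {
    TransitionToSuspendedAndRunCheckpoints(new_state);
    ReleaseMutatorLockShare();  // release the shared mutator lock
    PassActiveSuspendBarriers();
  }
};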
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 55a77ac..285df18 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -49,7 +49,9 @@
CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ if (!m->IsOptimized(sizeof(void*))) {
+ CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ }
CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
// v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
// 0024: move-object v3, v2
@@ -63,12 +65,18 @@
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- // v5 is removed from the root set because there is a "merge" operation.
- // See 0015: if-nez v2, 001f.
- CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
+ if (!m->IsOptimized(sizeof(void*))) {
+ // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
+ CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
+ // v5 is removed from the root set because there is a "merge" operation.
+ // See 0015: if-nez v2, 001f.
+ CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
+ }
CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+
+ if (!m->IsOptimized(sizeof(void*))) {
+ CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ }
CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x2cU, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
// Note that it is OK for a compiler to not have a dex map at these two dex PCs because
diff --git a/test/024-illegal-access/expected.txt b/test/024-illegal-access/expected.txt
index 5f951f4..0ae4a77 100644
--- a/test/024-illegal-access/expected.txt
+++ b/test/024-illegal-access/expected.txt
@@ -1,2 +1,5 @@
Got expected failure 1
Got expected failure 2
+Got expected failure 3
+Got expected failure 4
+Got expected failure 5
diff --git a/test/024-illegal-access/src/Main.java b/test/024-illegal-access/src/Main.java
index bde73e9..84c7114 100644
--- a/test/024-illegal-access/src/Main.java
+++ b/test/024-illegal-access/src/Main.java
@@ -17,7 +17,7 @@
public class Main {
static public void main(String[] args) {
try {
- PublicAccess.main();
+ PublicAccess.accessStaticField();
System.err.println("ERROR: call 1 not expected to succeed");
} catch (VerifyError ve) {
// dalvik
@@ -28,14 +28,41 @@
}
try {
- CheckInstanceof.main(new Object());
+ PublicAccess.accessStaticMethod();
System.err.println("ERROR: call 2 not expected to succeed");
- } catch (VerifyError ve) {
- // dalvik
- System.out.println("Got expected failure 2");
} catch (IllegalAccessError iae) {
// reference
System.out.println("Got expected failure 2");
}
+
+ try {
+ PublicAccess.accessInstanceField();
+ System.err.println("ERROR: call 3 not expected to succeed");
+ } catch (VerifyError ve) {
+ // dalvik
+ System.out.println("Got expected failure 3");
+ } catch (IllegalAccessError iae) {
+ // reference
+ System.out.println("Got expected failure 3");
+ }
+
+ try {
+ PublicAccess.accessInstanceMethod();
+ System.err.println("ERROR: call 4 not expected to succeed");
+ } catch (IllegalAccessError iae) {
+ // reference
+ System.out.println("Got expected failure 4");
+ }
+
+ try {
+ CheckInstanceof.main(new Object());
+ System.err.println("ERROR: call 5 not expected to succeed");
+ } catch (VerifyError ve) {
+ // dalvik
+ System.out.println("Got expected failure 5");
+ } catch (IllegalAccessError iae) {
+ // reference
+ System.out.println("Got expected failure 5");
+ }
}
}
diff --git a/test/024-illegal-access/src/PublicAccess.java b/test/024-illegal-access/src/PublicAccess.java
index 4e72cd4..e3fef85 100644
--- a/test/024-illegal-access/src/PublicAccess.java
+++ b/test/024-illegal-access/src/PublicAccess.java
@@ -18,8 +18,20 @@
* Some stuff for access checks.
*/
public class PublicAccess {
- public static void main() {
- String shouldFail = SemiPrivate.mPrivvy;
+ public static void accessStaticField() {
+ String shouldFail = SemiPrivate.mStaticPrivvy;
+ System.out.println("Got " + shouldFail);
+ }
+ public static void accessStaticMethod() {
+ String shouldFail = SemiPrivate.privvyStaticMethod();
+ System.out.println("Got " + shouldFail);
+ }
+ public static void accessInstanceField() {
+ String shouldFail = new SemiPrivate().mInstancePrivvy;
+ System.out.println("Got " + shouldFail);
+ }
+ public static void accessInstanceMethod() {
+ String shouldFail = new SemiPrivate().privvyInstanceMethod();
System.out.println("Got " + shouldFail);
}
}
diff --git a/test/024-illegal-access/src/SemiPrivate.java b/test/024-illegal-access/src/SemiPrivate.java
index 06b16c4..62e0d05 100644
--- a/test/024-illegal-access/src/SemiPrivate.java
+++ b/test/024-illegal-access/src/SemiPrivate.java
@@ -18,5 +18,15 @@
* Version with package scope access.
*/
public class SemiPrivate {
- /* not private */ static String mPrivvy = "stuff";
+ /* not private */ static String mStaticPrivvy = "stuff";
+
+ /* not private */ static String privvyStaticMethod() {
+ return "stuff";
+ }
+
+ /* not private */ String mInstancePrivvy = "stuff";
+
+ /* not private */ String privvyInstanceMethod() {
+ return "stuff";
+ }
}
diff --git a/test/024-illegal-access/src2/SemiPrivate.java b/test/024-illegal-access/src2/SemiPrivate.java
index 064265a..4f36a07 100644
--- a/test/024-illegal-access/src2/SemiPrivate.java
+++ b/test/024-illegal-access/src2/SemiPrivate.java
@@ -18,5 +18,15 @@
* Version with private access.
*/
public class SemiPrivate {
- private static String mPrivvy = "stuff";
+ private static String mStaticPrivvy = "stuff";
+
+ private static String privvyStaticMethod() {
+ return "stuff";
+ }
+
+ private String mInstancePrivvy = "stuff";
+
+ private String privvyInstanceMethod() {
+ return "stuff";
+ }
}
diff --git a/test/088-monitor-verification/smali/NotStructuredOverUnlock.smali b/test/088-monitor-verification/smali/NotStructuredOverUnlock.smali
index aa0c2d5..0dc492f 100644
--- a/test/088-monitor-verification/smali/NotStructuredOverUnlock.smali
+++ b/test/088-monitor-verification/smali/NotStructuredOverUnlock.smali
@@ -5,7 +5,7 @@
.method public static run(Ljava/lang/Object;)V
.registers 3
- invoke-static {}, LMain;->assertCallerIsInterpreted()V
+ invoke-static {}, LMain;->assertIsInterpreted()V
# Lock twice, but unlock thrice.
diff --git a/test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali b/test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali
index 2c31fda..df6e168 100644
--- a/test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali
+++ b/test/088-monitor-verification/smali/NotStructuredUnderUnlock.smali
@@ -5,7 +5,7 @@
.method public static run(Ljava/lang/Object;)V
.registers 3
- invoke-static {}, LMain;->assertCallerIsInterpreted()V
+ invoke-static {}, LMain;->assertIsInterpreted()V
# Lock thrice, but only unlock twice.
diff --git a/test/088-monitor-verification/smali/OK.smali b/test/088-monitor-verification/smali/OK.smali
index 596798d..a43ecb0 100644
--- a/test/088-monitor-verification/smali/OK.smali
+++ b/test/088-monitor-verification/smali/OK.smali
@@ -20,7 +20,7 @@
.method public static runNoMonitors(Ljava/lang/Object;Ljava/lang/Object;)V
.registers 3
- invoke-static {}, LMain;->assertCallerIsManaged()V
+ invoke-static {}, LMain;->assertIsManaged()V
return-void
@@ -29,7 +29,7 @@
.method public static runStraightLine(Ljava/lang/Object;Ljava/lang/Object;)V
.registers 3
- invoke-static {}, LMain;->assertCallerIsManaged()V
+ invoke-static {}, LMain;->assertIsManaged()V
monitor-enter v1 # 1
monitor-enter v2 # 2
@@ -44,7 +44,7 @@
.method public static runBalancedJoin(Ljava/lang/Object;Ljava/lang/Object;)V
.registers 3
- invoke-static {}, LMain;->assertCallerIsManaged()V
+ invoke-static {}, LMain;->assertIsManaged()V
monitor-enter v1 # 1
diff --git a/test/088-monitor-verification/smali/TooDeep.smali b/test/088-monitor-verification/smali/TooDeep.smali
index 1a8f2f0..a1e3281 100644
--- a/test/088-monitor-verification/smali/TooDeep.smali
+++ b/test/088-monitor-verification/smali/TooDeep.smali
@@ -7,7 +7,7 @@
# Lock depth is 33, which is more than the verifier supports. This should have been punted to
# the interpreter.
- invoke-static {}, LMain;->assertCallerIsInterpreted()V
+ invoke-static {}, LMain;->assertIsInterpreted()V
monitor-enter v2 # 1
monitor-enter v2 # 2
diff --git a/test/088-monitor-verification/smali/UnbalancedJoin.smali b/test/088-monitor-verification/smali/UnbalancedJoin.smali
index da8f773..993f32c 100644
--- a/test/088-monitor-verification/smali/UnbalancedJoin.smali
+++ b/test/088-monitor-verification/smali/UnbalancedJoin.smali
@@ -5,7 +5,7 @@
.method public static run(Ljava/lang/Object;Ljava/lang/Object;)V
.registers 3
- invoke-static {}, LMain;->assertCallerIsInterpreted()V
+ invoke-static {}, LMain;->assertIsInterpreted()V
if-eqz v2, :Lnull
diff --git a/test/088-monitor-verification/smali/UnbalancedStraight.smali b/test/088-monitor-verification/smali/UnbalancedStraight.smali
index 68edb6c..cbb8bcc 100644
--- a/test/088-monitor-verification/smali/UnbalancedStraight.smali
+++ b/test/088-monitor-verification/smali/UnbalancedStraight.smali
@@ -5,7 +5,7 @@
.method public static run(Ljava/lang/Object;Ljava/lang/Object;)V
.registers 3
- invoke-static {}, LMain;->assertCallerIsInterpreted()V
+ invoke-static {}, LMain;->assertIsInterpreted()V
monitor-enter v1 # 1
monitor-enter v2 # 2
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index fc5755b..2188055 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -220,7 +220,7 @@
// Smali testing code.
private static void runSmaliTests() {
- if (!hasOatFile() || runtimeIsSoftFail() || isCallerInterpreted()) {
+ if (!hasOatFile() || runtimeIsSoftFail() || isInterpreted()) {
// Skip test, this seems to be a non-compiled code test configuration.
return;
}
@@ -277,9 +277,9 @@
}
// Helpers for the smali code.
- public static native void assertCallerIsInterpreted();
- public static native void assertCallerIsManaged();
+ public static native void assertIsInterpreted();
+ public static native void assertIsManaged();
public static native boolean hasOatFile();
public static native boolean runtimeIsSoftFail();
- public static native boolean isCallerInterpreted();
+ public static native boolean isInterpreted();
}
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
index 5cca309..425cf48 100644
--- a/test/117-nopatchoat/src/Main.java
+++ b/test/117-nopatchoat/src/Main.java
@@ -28,10 +28,10 @@
System.out.println(
"dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
- ", has oat is " + hasOat() + ", has executable oat is " + (
+ ", has oat is " + hasOatFile() + ", has executable oat is " + (
executable_correct ? "expected" : "not expected") + ".");
- if (!hasOat() && isDex2OatEnabled()) {
+ if (!hasOatFile() && isDex2OatEnabled()) {
throw new Error("Application with dex2oat enabled runs without an oat file");
}
@@ -51,7 +51,7 @@
private native static boolean isPic();
- private native static boolean hasOat();
+ private native static boolean hasOatFile();
private native static boolean hasExecutableOat();
diff --git a/test/449-checker-bce/expected.txt b/test/449-checker-bce/expected.txt
index e114c50..4665d7a 100644
--- a/test/449-checker-bce/expected.txt
+++ b/test/449-checker-bce/expected.txt
@@ -1 +1,2 @@
+JNI_OnLoad called
java.lang.ArrayIndexOutOfBoundsException: length=5; index=82
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index f06c250..22829cd 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -265,6 +265,7 @@
// A helper into which the actual throwing function should be inlined.
static void constantIndexingForward6(int[] array) {
+ assertIsManaged();
constantIndexing6(array);
}
@@ -618,13 +619,17 @@
static int foo() {
try {
+ assertIsManaged();
// This will cause AIOOBE.
constantIndexing2(new int[3]);
} catch (ArrayIndexOutOfBoundsException e) {
+ assertIsManaged(); // This is to ensure that single-frame deoptimization works.
+ // Will need to be updated if constantIndexing2 is inlined.
try {
// This will cause AIOOBE.
constantIndexingForward6(new int[3]);
} catch (ArrayIndexOutOfBoundsException e2) {
+ assertIsManaged();
return 99;
}
}
@@ -634,13 +639,13 @@
int sum;
- /// CHECK-START: void Main.foo1(int[], int, int) BCE (before)
+ /// CHECK-START: void Main.foo1(int[], int, int, boolean) BCE (before)
/// CHECK: BoundsCheck
/// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK-START: void Main.foo1(int[], int, int) BCE (after)
+ /// CHECK-START: void Main.foo1(int[], int, int, boolean) BCE (after)
/// CHECK: Phi
/// CHECK-NOT: BoundsCheck
/// CHECK: ArraySet
@@ -657,25 +662,30 @@
/// CHECK: Phi
/// CHECK: Goto
- void foo1(int[] array, int start, int end) {
+ void foo1(int[] array, int start, int end, boolean expectInterpreter) {
// Three HDeoptimize will be added. One for
// start >= 0, one for end <= array.length,
// and one for null check on array (to hoist null
// check and array.length out of loop).
for (int i = start ; i < end; i++) {
+ if (expectInterpreter) {
+ assertIsInterpreted();
+ } else {
+ assertIsManaged();
+ }
array[i] = 1;
sum += array[i];
}
}
- /// CHECK-START: void Main.foo2(int[], int, int) BCE (before)
+ /// CHECK-START: void Main.foo2(int[], int, int, boolean) BCE (before)
/// CHECK: BoundsCheck
/// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK-START: void Main.foo2(int[], int, int) BCE (after)
+ /// CHECK-START: void Main.foo2(int[], int, int, boolean) BCE (after)
/// CHECK: Phi
/// CHECK-NOT: BoundsCheck
/// CHECK: ArraySet
@@ -692,25 +702,30 @@
/// CHECK: Phi
/// CHECK: Goto
- void foo2(int[] array, int start, int end) {
+ void foo2(int[] array, int start, int end, boolean expectInterpreter) {
// Three HDeoptimize will be added. One for
// start >= 0, one for end <= array.length,
// and one for null check on array (to hoist null
// check and array.length out of loop).
for (int i = start ; i <= end; i++) {
+ if (expectInterpreter) {
+ assertIsInterpreted();
+ } else {
+ assertIsManaged();
+ }
array[i] = 1;
sum += array[i];
}
}
- /// CHECK-START: void Main.foo3(int[], int) BCE (before)
+ /// CHECK-START: void Main.foo3(int[], int, boolean) BCE (before)
/// CHECK: BoundsCheck
/// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK-START: void Main.foo3(int[], int) BCE (after)
+ /// CHECK-START: void Main.foo3(int[], int, boolean) BCE (after)
/// CHECK: Phi
/// CHECK-NOT: BoundsCheck
/// CHECK: ArraySet
@@ -726,24 +741,29 @@
/// CHECK: Phi
/// CHECK: Goto
- void foo3(int[] array, int end) {
+ void foo3(int[] array, int end, boolean expectInterpreter) {
// Two HDeoptimize will be added. One for end < array.length,
// and one for null check on array (to hoist null check
// and array.length out of loop).
for (int i = 3 ; i <= end; i++) {
+ if (expectInterpreter) {
+ assertIsInterpreted();
+ } else {
+ assertIsManaged();
+ }
array[i] = 1;
sum += array[i];
}
}
- /// CHECK-START: void Main.foo4(int[], int) BCE (before)
+ /// CHECK-START: void Main.foo4(int[], int, boolean) BCE (before)
/// CHECK: BoundsCheck
/// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK-START: void Main.foo4(int[], int) BCE (after)
+ /// CHECK-START: void Main.foo4(int[], int, boolean) BCE (after)
/// CHECK: Phi
/// CHECK-NOT: BoundsCheck
/// CHECK: ArraySet
@@ -759,18 +779,23 @@
/// CHECK: Phi
/// CHECK: Goto
- void foo4(int[] array, int end) {
+ void foo4(int[] array, int end, boolean expectInterpreter) {
// Two HDeoptimize will be added. One for end <= array.length,
// and one for null check on array (to hoist null check
// and array.length out of loop).
for (int i = end ; i > 0; i--) {
+ if (expectInterpreter) {
+ assertIsInterpreted();
+ } else {
+ assertIsManaged();
+ }
array[i - 1] = 1;
sum += array[i - 1];
}
}
- /// CHECK-START: void Main.foo5(int[], int) BCE (before)
+ /// CHECK-START: void Main.foo5(int[], int, boolean) BCE (before)
/// CHECK: BoundsCheck
/// CHECK: ArraySet
/// CHECK: BoundsCheck
@@ -780,7 +805,7 @@
/// CHECK: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK-START: void Main.foo5(int[], int) BCE (after)
+ /// CHECK-START: void Main.foo5(int[], int, boolean) BCE (after)
/// CHECK-NOT: BoundsCheck
/// CHECK: ArraySet
/// CHECK: Phi
@@ -800,7 +825,7 @@
/// CHECK-NOT: Phi
/// CHECK: Goto
- void foo5(int[] array, int end) {
+ void foo5(int[] array, int end, boolean expectInterpreter) {
// Bounds check in this loop can be eliminated without deoptimization.
for (int i = array.length - 1 ; i >= 0; i--) {
array[i] = 1;
@@ -808,6 +833,11 @@
// One HDeoptimize will be added.
// It's for (end - 2 <= array.length - 2).
for (int i = end - 2 ; i > 0; i--) {
+ if (expectInterpreter) {
+ assertIsInterpreted();
+ } else {
+ assertIsManaged();
+ }
sum += array[i - 1];
sum += array[i];
sum += array[i + 1];
@@ -815,7 +845,7 @@
}
- /// CHECK-START: void Main.foo6(int[], int, int) BCE (before)
+ /// CHECK-START: void Main.foo6(int[], int, int, boolean) BCE (before)
/// CHECK: BoundsCheck
/// CHECK: ArrayGet
/// CHECK: BoundsCheck
@@ -829,7 +859,7 @@
/// CHECK-NOT: BoundsCheck
/// CHECK: ArraySet
- /// CHECK-START: void Main.foo6(int[], int, int) BCE (after)
+ /// CHECK-START: void Main.foo6(int[], int, int, boolean) BCE (after)
/// CHECK: Phi
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
@@ -855,12 +885,17 @@
/// CHECK: Goto
/// CHECK-NOT: Deoptimize
- void foo6(int[] array, int start, int end) {
+ void foo6(int[] array, int start, int end, boolean expectInterpreter) {
// Three HDeoptimize will be added. One for
// start >= 2, one for end <= array.length - 3,
// and one for null check on array (to hoist null
// check and array.length out of loop).
for (int i = end; i >= start; i--) {
+ if (expectInterpreter) {
+ assertIsInterpreted();
+ } else {
+ assertIsManaged();
+ }
array[i] = (array[i-2] + array[i-1] + array[i] + array[i+1] + array[i+2]) / 5;
}
}
@@ -948,12 +983,12 @@
}
- /// CHECK-START: void Main.foo9(int[]) BCE (before)
+ /// CHECK-START: void Main.foo9(int[], boolean) BCE (before)
/// CHECK: NullCheck
/// CHECK: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK-START: void Main.foo9(int[]) BCE (after)
+ /// CHECK-START: void Main.foo9(int[], boolean) BCE (after)
// The loop is guaranteed to be entered. No need to transform the
// loop for loop body entry test.
/// CHECK: Deoptimize
@@ -964,10 +999,15 @@
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
- void foo9(int[] array) {
+ void foo9(int[] array, boolean expectInterpreter) {
// Two HDeoptimize will be added. One for
// 10 <= array.length, and one for null check on array.
for (int i = 0 ; i < 10; i++) {
+ if (expectInterpreter) {
+ assertIsInterpreted();
+ } else {
+ assertIsManaged();
+ }
sum += array[i];
}
}
@@ -999,7 +1039,7 @@
static void testUnknownBounds() {
boolean caught = false;
Main main = new Main();
- main.foo1(new int[10], 0, 10);
+ main.foo1(new int[10], 0, 10, false);
if (main.sum != 10) {
System.out.println("foo1 failed!");
}
@@ -1007,7 +1047,7 @@
caught = false;
main = new Main();
try {
- main.foo1(new int[10], 0, 11);
+ main.foo1(new int[10], 0, 11, true);
} catch (ArrayIndexOutOfBoundsException e) {
caught = true;
}
@@ -1016,7 +1056,7 @@
}
main = new Main();
- main.foo2(new int[10], 0, 9);
+ main.foo2(new int[10], 0, 9, false);
if (main.sum != 10) {
System.out.println("foo2 failed!");
}
@@ -1024,7 +1064,7 @@
caught = false;
main = new Main();
try {
- main.foo2(new int[10], 0, 10);
+ main.foo2(new int[10], 0, 10, true);
} catch (ArrayIndexOutOfBoundsException e) {
caught = true;
}
@@ -1033,7 +1073,7 @@
}
main = new Main();
- main.foo3(new int[10], 9);
+ main.foo3(new int[10], 9, false);
if (main.sum != 7) {
System.out.println("foo3 failed!");
}
@@ -1041,7 +1081,7 @@
caught = false;
main = new Main();
try {
- main.foo3(new int[10], 10);
+ main.foo3(new int[10], 10, true);
} catch (ArrayIndexOutOfBoundsException e) {
caught = true;
}
@@ -1050,7 +1090,7 @@
}
main = new Main();
- main.foo4(new int[10], 10);
+ main.foo4(new int[10], 10, false);
if (main.sum != 10) {
System.out.println("foo4 failed!");
}
@@ -1058,7 +1098,7 @@
caught = false;
main = new Main();
try {
- main.foo4(new int[10], 11);
+ main.foo4(new int[10], 11, true);
} catch (ArrayIndexOutOfBoundsException e) {
caught = true;
}
@@ -1067,7 +1107,7 @@
}
main = new Main();
- main.foo5(new int[10], 10);
+ main.foo5(new int[10], 10, false);
if (main.sum != 24) {
System.out.println("foo5 failed!");
}
@@ -1075,7 +1115,7 @@
caught = false;
main = new Main();
try {
- main.foo5(new int[10], 11);
+ main.foo5(new int[10], 11, true);
} catch (ArrayIndexOutOfBoundsException e) {
caught = true;
}
@@ -1084,11 +1124,11 @@
}
main = new Main();
- main.foo6(new int[10], 2, 7);
+ main.foo6(new int[10], 2, 7, false);
main = new Main();
int[] array9 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
- main.foo9(array9);
+ main.foo9(array9, false);
if (main.sum != 45) {
System.out.println("foo9 failed!");
}
@@ -1104,7 +1144,7 @@
caught = false;
main = new Main();
try {
- main.foo6(new int[10], 2, 8);
+ main.foo6(new int[10], 2, 8, true);
} catch (ArrayIndexOutOfBoundsException e) {
caught = true;
}
@@ -1115,7 +1155,7 @@
caught = false;
main = new Main();
try {
- main.foo6(new int[10], 1, 7);
+ main.foo6(new int[10], 1, 7, true);
} catch (ArrayIndexOutOfBoundsException e) {
caught = true;
}
@@ -1152,6 +1192,15 @@
/// CHECK: ParallelMove
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
+ if (!compiledWithOptimizing() ||
+ !hasOatFile() ||
+ runtimeIsSoftFail() ||
+ isInterpreted()) {
+ disableStackFrameAsserts();
+ }
+
sieve(20);
int[] array = {5, 2, 3, 7, 0, 1, 6, 4};
@@ -1190,4 +1239,11 @@
new Main().testExceptionMessage();
}
+ public static native boolean compiledWithOptimizing();
+ public static native void disableStackFrameAsserts();
+ public static native void assertIsManaged();
+ public static native void assertIsInterpreted();
+ public static native boolean hasOatFile();
+ public static native boolean runtimeIsSoftFail();
+ public static native boolean isInterpreted();
}
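The new expectInterpreter parameters encode the contract of bounds-check elimination with deoptimization: when the hoisted guards hold, the loop body runs as compiled code with no per-iteration bounds checks, and when a guard fails, the frame deoptimizes to the interpreter, which still checks every access. A rough C++ sketch of the compiled-code shape, where Deoptimize() stands in for the runtime's deoptimization entry point (the real transform rewrites ART's HIR, not source):

#include <cstddef>
#include <stdexcept>

[[noreturn]] void Deoptimize() {
  // Stand-in: transfer to the interpreter, which re-executes with full checks.
  throw std::runtime_error("deoptimized");
}

void Foo1Compiled(int* array, size_t length, int start, int end, int& sum) {
  // Guards hoisted out of the loop: array != null, start >= 0, end <= length.
  if (array == nullptr || start < 0 || static_cast<size_t>(end) > length) {
    Deoptimize();
  }
  for (int i = start; i < end; ++i) {
    array[i] = 1;  // no bounds check inside the loop
    sum += array[i];
  }
}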
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 6f04797..adb5ada 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -44,6 +44,76 @@
super.superMethod();
}
+ /// CHECK-START: void Main.callUnresolvedStaticFieldAccess() register (before)
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimByte
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimChar
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimInt
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimLong
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimFloat
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimDouble
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimNot
+
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimByte
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimChar
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimInt
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimLong
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimFloat
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimDouble
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimNot
+ static public void callUnresolvedStaticFieldAccess() {
+ Object o = new Object();
+ UnresolvedClass.staticByte = (byte)1;
+ UnresolvedClass.staticChar = '1';
+ UnresolvedClass.staticInt = 123456789;
+ UnresolvedClass.staticLong = 123456789123456789l;
+ UnresolvedClass.staticFloat = 123456789123456789f;
+ UnresolvedClass.staticDouble = 123456789123456789d;
+ UnresolvedClass.staticObject = o;
+
+ expectEquals((byte)1, UnresolvedClass.staticByte);
+ expectEquals('1', UnresolvedClass.staticChar);
+ expectEquals(123456789, UnresolvedClass.staticInt);
+ expectEquals(123456789123456789l, UnresolvedClass.staticLong);
+ expectEquals(123456789123456789f, UnresolvedClass.staticFloat);
+ expectEquals(123456789123456789d, UnresolvedClass.staticDouble);
+ expectEquals(o, UnresolvedClass.staticObject);
+ }
+
+ /// CHECK-START: void Main.callUnresolvedInstanceFieldAccess(UnresolvedClass) register (before)
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimByte
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimChar
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimInt
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimLong
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimFloat
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimDouble
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimNot
+
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimByte
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimChar
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimInt
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimLong
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimFloat
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimDouble
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimNot
+ static public void callUnresolvedInstanceFieldAccess(UnresolvedClass c) {
+ Object o = new Object();
+ c.instanceByte = (byte)1;
+ c.instanceChar = '1';
+ c.instanceInt = 123456789;
+ c.instanceLong = 123456789123456789l;
+ c.instanceFloat = 123456789123456789f;
+ c.instanceDouble = 123456789123456789d;
+ c.instanceObject = o;
+
+ expectEquals((byte)1, c.instanceByte);
+ expectEquals('1', c.instanceChar);
+ expectEquals(123456789, c.instanceInt);
+ expectEquals(123456789123456789l, c.instanceLong);
+ expectEquals(123456789123456789f, c.instanceFloat);
+ expectEquals(123456789123456789d, c.instanceDouble);
+ expectEquals(o, c.instanceObject);
+ }
+
/// CHECK-START: void Main.main(java.lang.String[]) register (before)
/// CHECK: InvokeUnresolved invoke_type:direct
static public void main(String[] args) {
@@ -52,5 +122,49 @@
callInvokeUnresolvedVirtual(c);
callInvokeUnresolvedInterface(c);
callInvokeUnresolvedSuper(new Main());
+ callUnresolvedStaticFieldAccess();
+ callUnresolvedInstanceFieldAccess(c);
+ }
+
+ public static void expectEquals(byte expected, byte result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(char expected, char result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(Object expected, Object result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
}
}
diff --git a/test/529-checker-unresolved/src/Unresolved.java b/test/529-checker-unresolved/src/Unresolved.java
index 5bf92dd..03ceb68 100644
--- a/test/529-checker-unresolved/src/Unresolved.java
+++ b/test/529-checker-unresolved/src/Unresolved.java
@@ -40,6 +40,22 @@
public void interfaceMethod() {
System.out.println("UnresolvedClass.interfaceMethod()");
}
+
+ public static byte staticByte;
+ public static char staticChar;
+ public static int staticInt;
+ public static long staticLong;
+ public static float staticFloat;
+ public static double staticDouble;
+ public static Object staticObject;
+
+ public byte instanceByte;
+ public char instanceChar;
+ public int instanceInt;
+ public long instanceLong;
+ public float instanceFloat;
+ public double instanceDouble;
+ public Object instanceObject;
}
final class UnresolvedFinalClass {
diff --git a/test/535-deopt-and-inlining/expected.txt b/test/535-deopt-and-inlining/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/535-deopt-and-inlining/expected.txt
diff --git a/test/535-deopt-and-inlining/info.txt b/test/535-deopt-and-inlining/info.txt
new file mode 100644
index 0000000..717612a
--- /dev/null
+++ b/test/535-deopt-and-inlining/info.txt
@@ -0,0 +1,2 @@
+Stress test for deoptimization and JIT, to ensure the
+stack visitor uses the right ArtMethod when deopting.
diff --git a/test/535-deopt-and-inlining/src/Main.java b/test/535-deopt-and-inlining/src/Main.java
new file mode 100644
index 0000000..c231bf0
--- /dev/null
+++ b/test/535-deopt-and-inlining/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void run() {
+ // Loop enough to get JIT compilation.
+ for (int i = 0; i < 10000; ++i) {
+ doCall(new int[0]);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ run();
+ }
+
+ public static void doCall(int[] array) {
+ try {
+ deopt(array);
+ } catch (IndexOutOfBoundsException ioobe) {
+ // Expected
+ }
+ }
+
+ public static void deopt(int[] array) {
+ // Invoke `deopt` much more than `$inline$deopt` so that only `deopt` gets
+ // initially JITted.
+ if (call == 100) {
+ call = 0;
+ $inline$deopt(array);
+ } else {
+ call++;
+ }
+ }
+
+ public static void $inline$deopt(int[] array) {
+ array[0] = 1;
+ array[1] = 1;
+ }
+
+ static int call = 0;
+}
diff --git a/test/535-regression-const-val/expected.txt b/test/535-regression-const-val/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/535-regression-const-val/expected.txt
diff --git a/test/535-regression-const-val/info.txt b/test/535-regression-const-val/info.txt
new file mode 100644
index 0000000..ea3e67b
--- /dev/null
+++ b/test/535-regression-const-val/info.txt
@@ -0,0 +1,2 @@
+Test a regression where SsaChecker would fail when comparing the raw value of an
+IntConstant with that of a FloatConstant, due to a static_cast sign extension.
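A short, self-contained illustration of the underlying pitfall: -0.25f has bit pattern 0xbe800000, and widening that pattern through a signed static_cast sign-extends it, so two constants with identical 32-bit raw values compare unequal. Names here are illustrative; the actual fix is in SsaChecker's constant comparison:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  float f = -0.25f;
  uint32_t float_bits;
  std::memcpy(&float_bits, &f, sizeof(float_bits));       // 0xbe800000

  int32_t int_value = static_cast<int32_t>(float_bits);   // same 32-bit pattern

  // Widening the signed value sign-extends, the unsigned one does not:
  int64_t widened_signed = static_cast<int64_t>(int_value);    // 0xffffffffbe800000
  int64_t widened_unsigned = static_cast<int64_t>(float_bits); // 0x00000000be800000
  assert(widened_signed != widened_unsigned);

  // Comparing the raw values consistently (e.g. only the low 32 bits) avoids
  // the spurious mismatch.
  assert((widened_signed & 0xffffffffLL) == (widened_unsigned & 0xffffffffLL));
  return 0;
}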
diff --git a/test/535-regression-const-val/smali/TestCase.smali b/test/535-regression-const-val/smali/TestCase.smali
new file mode 100644
index 0000000..f42f173
--- /dev/null
+++ b/test/535-regression-const-val/smali/TestCase.smali
@@ -0,0 +1,36 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+.method public static testCase(ZZ)I
+ .registers 5
+
+ # Create Phi [ 0.0f, -0.25f ].
+ # Binary representation of -0.25f has the most significant bit set.
+ if-eqz p0, :else
+ :then
+ const v0, 0x0
+ goto :merge
+ :else
+ const/high16 v0, 0xbe800000
+ :merge
+
+ # Now use as either float or int.
+ if-eqz p1, :return
+ float-to-int v0, v0
+ :return
+ return v0
+.end method
diff --git a/test/535-regression-const-val/src/Main.java b/test/535-regression-const-val/src/Main.java
new file mode 100644
index 0000000..858770f
--- /dev/null
+++ b/test/535-regression-const-val/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) {}
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e915357..a103eac 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -467,6 +467,7 @@
530-checker-loops \
530-checker-regression-reftype-final \
532-checker-nonnull-arrayset \
+ 534-checker-bce-deoptimization \
ifeq (mips,$(TARGET_ARCH))
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
@@ -489,14 +490,6 @@
$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
-# If ART_USE_OPTIMIZING_COMPILER is set to true, then the default core.art has been
-# compiled with the optimizing compiler.
-ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
# Tests that should fail when the optimizing compiler compiles them non-debuggable.
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 042b03b..082c9b3 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -66,4 +66,54 @@
return Runtime::Current()->IsImageDex2OatEnabled();
}
+// public static native boolean compiledWithOptimizing();
+// Did we use the optimizing compiler to compile this?
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_compiledWithOptimizing(JNIEnv* env, jclass cls) {
+ ScopedObjectAccess soa(env);
+
+ mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
+ const DexFile& dex_file = klass->GetDexFile();
+ const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ if (oat_dex_file == nullptr) {
+ // Could be JIT, which also uses optimizing, but conservatively say no.
+ return JNI_FALSE;
+ }
+ const OatFile* oat_file = oat_dex_file->GetOatFile();
+ CHECK(oat_file != nullptr);
+
+ const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
+ CHECK(cmd_line != nullptr); // Huh? This should not happen.
+
+ // Check the backend.
+ constexpr const char* kCompilerBackend = "--compiler-backend=";
+ const char* backend = strstr(cmd_line, kCompilerBackend);
+ if (backend != nullptr) {
+ // If it's set, make sure it's optimizing.
+ backend += strlen(kCompilerBackend);
+ if (strncmp(backend, "Optimizing", strlen("Optimizing")) != 0) {
+ return JNI_FALSE;
+ }
+ }
+
+ // Check the filter.
+ constexpr const char* kCompilerFilter = "--compiler-filter=";
+ const char* filter = strstr(cmd_line, kCompilerFilter);
+ if (filter != nullptr) {
+ // If it's set, make sure it's not interpret-only|verify-none|verify-at-runtime.
+ // Note: The 'space' compiler filter might have an impact on the test, but ignore that for now.
+ filter += strlen(kCompilerFilter);
+ constexpr const char* kInterpretOnly = "interpret-only";
+ constexpr const char* kVerifyNone = "verify-none";
+ constexpr const char* kVerifyAtRuntime = "verify-at-runtime";
+ if (strncmp(filter, kInterpretOnly, strlen(kInterpretOnly)) == 0 ||
+ strncmp(filter, kVerifyNone, strlen(kVerifyNone)) == 0 ||
+ strncmp(filter, kVerifyAtRuntime, strlen(kVerifyAtRuntime)) == 0) {
+ return JNI_FALSE;
+ }
+ }
+
+ return JNI_TRUE;
+}
+
} // namespace art
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index d22cf52..922eae6 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -27,9 +27,20 @@
namespace art {
-// public static native boolean isCallerInterpreted();
+static bool asserts_enabled = true;
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isCallerInterpreted(JNIEnv* env, jclass) {
+// public static native void disableStackFrameAsserts();
+// Note: to globally disable asserts in unsupported configurations.
+
+extern "C" JNIEXPORT void JNICALL Java_Main_disableStackFrameAsserts(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass cls ATTRIBUTE_UNUSED) {
+ asserts_enabled = false;
+}
+
+
+// public static native boolean isInterpreted();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpreted(JNIEnv* env, jclass) {
ScopedObjectAccess soa(env);
NthCallerVisitor caller(soa.Self(), 1, false);
caller.WalkStack();
@@ -37,16 +48,18 @@
return caller.GetCurrentShadowFrame() != nullptr ? JNI_TRUE : JNI_FALSE;
}
-// public static native void assertCallerIsInterpreted();
+// public static native void assertIsInterpreted();
-extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsInterpreted(JNIEnv* env, jclass klass) {
- CHECK(Java_Main_isCallerInterpreted(env, klass));
+extern "C" JNIEXPORT void JNICALL Java_Main_assertIsInterpreted(JNIEnv* env, jclass klass) {
+ if (asserts_enabled) {
+ CHECK(Java_Main_isInterpreted(env, klass));
+ }
}
-// public static native boolean isCallerManaged();
+// public static native boolean isManaged();
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isCallerManaged(JNIEnv* env, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isManaged(JNIEnv* env, jclass cls) {
ScopedObjectAccess soa(env);
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
@@ -65,10 +78,12 @@
return caller.GetCurrentShadowFrame() != nullptr ? JNI_FALSE : JNI_TRUE;
}
-// public static native void assertCallerIsManaged();
+// public static native void assertIsManaged();
-extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsManaged(JNIEnv* env, jclass cls) {
- CHECK(Java_Main_isCallerManaged(env, cls));
+extern "C" JNIEXPORT void JNICALL Java_Main_assertIsManaged(JNIEnv* env, jclass cls) {
+ if (asserts_enabled) {
+ CHECK(Java_Main_isManaged(env, cls));
+ }
}
} // namespace art
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 3c1522c..71366c1 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -16,6 +16,8 @@
LOCAL_PATH := $(call my-dir)
+include art/build/Android.common_test.mk
+
# --- ahat.jar ----------------
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(call all-java-files-under, src)
@@ -44,7 +46,7 @@
ahat: $(LOCAL_BUILT_MODULE)
-# --- ahat-test.jar --------------
+# --- ahat-tests.jar --------------
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(call all-java-files-under, test)
LOCAL_JAR_MANIFEST := test/manifest.txt
@@ -53,6 +55,42 @@
LOCAL_MODULE_TAGS := tests
LOCAL_MODULE := ahat-tests
include $(BUILD_HOST_JAVA_LIBRARY)
+AHAT_TEST_JAR := $(LOCAL_BUILT_MODULE)
-ahat-test: $(LOCAL_BUILT_MODULE)
- java -jar $<
+# --- ahat-test-dump.jar --------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := ahat-test-dump
+LOCAL_MODULE_TAGS := tests
+LOCAL_SRC_FILES := $(call all-java-files-under, test-dump)
+include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
+
+# Determine the location of the test-dump.jar and test-dump.hprof files.
+# These use variables set implicitly by the include of
+# BUILD_HOST_DALVIK_JAVA_LIBRARY above.
+AHAT_TEST_DUMP_JAR := $(LOCAL_BUILT_MODULE)
+AHAT_TEST_DUMP_HPROF := $(intermediates.COMMON)/test-dump.hprof
+
+# Run ahat-test-dump.jar to generate test-dump.hprof
+AHAT_TEST_DUMP_DEPENDENCIES := \
+ $(ART_HOST_EXECUTABLES) \
+ $(HOST_OUT_EXECUTABLES)/art \
+ $(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
+
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_DEPENDENCIES := $(AHAT_TEST_DUMP_DEPENDENCIES)
+$(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
+ $(PRIVATE_AHAT_TEST_ART) -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
+
+.PHONY: ahat-test
+ahat-test: PRIVATE_AHAT_TEST_DUMP_HPROF := $(AHAT_TEST_DUMP_HPROF)
+ahat-test: PRIVATE_AHAT_TEST_JAR := $(AHAT_TEST_JAR)
+ahat-test: $(AHAT_TEST_JAR) $(AHAT_TEST_DUMP_HPROF)
+ java -Dahat.test.dump.hprof=$(PRIVATE_AHAT_TEST_DUMP_HPROF) -jar $(PRIVATE_AHAT_TEST_JAR)
+
+# Clean up local variables.
+AHAT_TEST_DUMP_DEPENDENCIES :=
+AHAT_TEST_DUMP_HPROF :=
+AHAT_TEST_DUMP_JAR :=
+AHAT_TEST_JAR :=
+
diff --git a/tools/ahat/src/AhatSnapshot.java b/tools/ahat/src/AhatSnapshot.java
index 2437d03..3035ef7 100644
--- a/tools/ahat/src/AhatSnapshot.java
+++ b/tools/ahat/src/AhatSnapshot.java
@@ -18,13 +18,18 @@
import com.android.tools.perflib.heap.ClassObj;
import com.android.tools.perflib.heap.Heap;
+import com.android.tools.perflib.heap.HprofParser;
import com.android.tools.perflib.heap.Instance;
import com.android.tools.perflib.heap.RootObj;
import com.android.tools.perflib.heap.Snapshot;
import com.android.tools.perflib.heap.StackFrame;
import com.android.tools.perflib.heap.StackTrace;
+import com.android.tools.perflib.heap.io.HprofBuffer;
+import com.android.tools.perflib.heap.io.MemoryMappedFileBuffer;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -47,7 +52,22 @@
private Site mRootSite;
private Map<Heap, Long> mHeapSizes;
- public AhatSnapshot(Snapshot snapshot) {
+ /**
+ * Create an AhatSnapshot from an hprof file.
+ */
+ public static AhatSnapshot fromHprof(File hprof) throws IOException {
+ HprofBuffer buffer = new MemoryMappedFileBuffer(hprof);
+ Snapshot snapshot = (new HprofParser(buffer)).parse();
+ snapshot.computeDominators();
+ return new AhatSnapshot(snapshot);
+ }
+
+ /**
+ * Construct an AhatSnapshot for the given perflib snapshot.
+   * The user is responsible for calling snapshot.computeDominators() before
+   * calling this AhatSnapshot constructor.
+ */
+ private AhatSnapshot(Snapshot snapshot) {
mSnapshot = snapshot;
mHeaps = new ArrayList<Heap>(mSnapshot.getHeaps());
mDominated = new HashMap<Instance, List<Instance>>();
@@ -92,6 +112,11 @@
}
}
+ // Note: This method is exposed for testing purposes.
+ public ClassObj findClass(String name) {
+ return mSnapshot.findClass(name);
+ }
+
public Instance findInstance(long id) {
return mSnapshot.findInstance(id);
}
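
To make the new entry point concrete, here is a minimal sketch of loading a snapshot through the fromHprof factory and looking up a class with the newly exposed findClass; the package, hprof path, and class name are placeholders, not part of this change.

package com.android.ahat;

import com.android.tools.perflib.heap.ClassObj;
import java.io.File;
import java.io.IOException;

// Illustrative probe only.
class SnapshotProbe {
  public static void main(String[] args) throws IOException {
    // fromHprof parses the file and runs computeDominators before wrapping it.
    AhatSnapshot snapshot = AhatSnapshot.fromHprof(new File("test-dump.hprof"));
    ClassObj main = snapshot.findClass("Main");
    System.out.println(main == null ? "Main not found" : "found " + main.getClassName());
  }
}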
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index 7ee3ff2..a6ac3b8 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -32,7 +32,7 @@
* given name.
*/
public static boolean isInstanceOfClass(Instance inst, String className) {
- ClassObj cls = inst.getClassObj();
+ ClassObj cls = (inst == null) ? null : inst.getClassObj();
return (cls != null && className.equals(cls.getClassName()));
}
@@ -132,7 +132,7 @@
* Read a field of an instance.
* Returns null if the field value is null or if the field couldn't be read.
*/
- private static Object getField(Instance inst, String fieldName) {
+ public static Object getField(Instance inst, String fieldName) {
if (!(inst instanceof ClassInstance)) {
return null;
}
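
The null check added to isInstanceOfClass matters because a value read through the now-public getField may legitimately be null. A small hedged sketch of the intended calling pattern; the package, helper class, and field semantics are placeholders.

package com.android.ahat;

import com.android.tools.perflib.heap.Instance;

// Illustrative helper, not part of the change: tolerate a missing field value.
final class FieldReadExample {
  static String readStringField(Instance owner, String fieldName) {
    Object value = InstanceUtils.getField(owner, fieldName);
    Instance inst = (value instanceof Instance) ? (Instance) value : null;
    // With the added null check, a null instance simply yields false here
    // instead of a NullPointerException.
    if (InstanceUtils.isInstanceOfClass(inst, "java.lang.String")) {
      return InstanceUtils.asString(inst);
    }
    return null;
  }
}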
diff --git a/tools/ahat/src/Main.java b/tools/ahat/src/Main.java
index 2e2ddd2..1563aa0 100644
--- a/tools/ahat/src/Main.java
+++ b/tools/ahat/src/Main.java
@@ -16,10 +16,6 @@
package com.android.ahat;
-import com.android.tools.perflib.heap.HprofParser;
-import com.android.tools.perflib.heap.Snapshot;
-import com.android.tools.perflib.heap.io.HprofBuffer;
-import com.android.tools.perflib.heap.io.MemoryMappedFileBuffer;
import com.sun.net.httpserver.HttpServer;
import java.io.File;
import java.io.IOException;
@@ -71,15 +67,8 @@
return;
}
- System.out.println("Reading hprof file...");
- HprofBuffer buffer = new MemoryMappedFileBuffer(hprof);
- Snapshot snapshot = (new HprofParser(buffer)).parse();
-
- System.out.println("Computing Dominators...");
- snapshot.computeDominators();
-
- System.out.println("Processing snapshot for ahat...");
- AhatSnapshot ahat = new AhatSnapshot(snapshot);
+ System.out.println("Processing hprof file...");
+ AhatSnapshot ahat = AhatSnapshot.fromHprof(hprof);
InetAddress loopback = InetAddress.getLoopbackAddress();
InetSocketAddress addr = new InetSocketAddress(loopback, port);
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
new file mode 100644
index 0000000..cea1dc1
--- /dev/null
+++ b/tools/ahat/test-dump/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.VMDebug;
+import java.io.IOException;
+
+/**
+ * Program used to create a heap dump for test purposes.
+ */
+public class Main {
+ // Keep a reference to the DumpedStuff instance so that it is not garbage
+ // collected before we take the heap dump.
+ public static DumpedStuff stuff;
+
+ // We will take a heap dump that includes a single instance of this
+ // DumpedStuff class. Objects stored as fields in this class can be easily
+ // found in the hprof dump by searching for the instance of the DumpedStuff
+ // class and reading the desired field.
+ public static class DumpedStuff {
+ public String basicString = "hello, world";
+ public String nullString = null;
+ public Object anObject = new Object();
+ }
+
+ public static void main(String[] args) throws IOException {
+ if (args.length < 1) {
+ System.err.println("no output file specified");
+ return;
+ }
+ String file = args[0];
+
+ // Allocate the instance of DumpedStuff.
+ stuff = new DumpedStuff();
+
+ // Take a heap dump that will include that instance of DumpedStuff.
+ System.err.println("Dumping hprof data to " + file);
+ VMDebug.dumpHprofData(file);
+ }
+}
diff --git a/tools/ahat/test/InstanceUtilsTest.java b/tools/ahat/test/InstanceUtilsTest.java
new file mode 100644
index 0000000..7613df4
--- /dev/null
+++ b/tools/ahat/test/InstanceUtilsTest.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.tools.perflib.heap.Instance;
+import java.io.IOException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import org.junit.Test;
+
+public class InstanceUtilsTest {
+ @Test
+ public void basicString() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance str = (Instance)dump.getDumpedThing("basicString");
+ assertEquals("hello, world", InstanceUtils.asString(str));
+ }
+
+ @Test
+ public void nullString() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance obj = (Instance)dump.getDumpedThing("nullString");
+ assertNull(InstanceUtils.asString(obj));
+ }
+
+ @Test
+ public void notString() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ Instance obj = (Instance)dump.getDumpedThing("anObject");
+ assertNotNull(obj);
+ assertNull(InstanceUtils.asString(obj));
+ }
+}
diff --git a/tools/ahat/test/TestDump.java b/tools/ahat/test/TestDump.java
new file mode 100644
index 0000000..c3a76e4
--- /dev/null
+++ b/tools/ahat/test/TestDump.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.tools.perflib.heap.ClassObj;
+import com.android.tools.perflib.heap.Field;
+import com.android.tools.perflib.heap.Instance;
+import java.io.File;
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * The TestDump class is used to get an AhatSnapshot for the test-dump
+ * program.
+ */
+public class TestDump {
+ // It can take on the order of a second to parse and process the test-dump
+ // hprof. To avoid repeating this overhead for each test case, we cache the
+ // loaded instance of TestDump and reuse it when possible. In theory the
+ // test cases should not be able to modify the cached snapshot in a way that
+ // is visible to other test cases.
+ private static TestDump mCachedTestDump = null;
+
+ private AhatSnapshot mSnapshot = null;
+
+ /**
+ * Load the test-dump.hprof file.
+ * The location of the file is read from the system property
+ * "ahat.test.dump.hprof", which is expected to be set on the command line.
+ * For example:
+ * java -Dahat.test.dump.hprof=test-dump.hprof -jar ahat-tests.jar
+ *
+ * An IOException is thrown if there is a failure reading the hprof file.
+ */
+ private TestDump() throws IOException {
+ String hprof = System.getProperty("ahat.test.dump.hprof");
+ mSnapshot = AhatSnapshot.fromHprof(new File(hprof));
+ }
+
+ /**
+ * Get the AhatSnapshot for the test dump program.
+ */
+ public AhatSnapshot getAhatSnapshot() {
+ return mSnapshot;
+ }
+
+ /**
+ * Return the value of a field in the DumpedStuff instance in the
+ * snapshot for the test-dump program.
+ */
+ public Object getDumpedThing(String name) {
+ ClassObj main = mSnapshot.findClass("Main");
+ Instance stuff = null;
+ for (Map.Entry<Field, Object> fields : main.getStaticFieldValues().entrySet()) {
+ if ("stuff".equals(fields.getKey().getName())) {
+ stuff = (Instance) fields.getValue();
+ }
+ }
+ return InstanceUtils.getField(stuff, name);
+ }
+
+ /**
+ * Get the test dump.
+ * An IOException is thrown if there is an error reading the test dump hprof
+ * file.
+   * To improve performance, this returns a cached instance of TestDump
+   * when possible.
+ */
+ public static synchronized TestDump getTestDump() throws IOException {
+ if (mCachedTestDump == null) {
+ mCachedTestDump = new TestDump();
+ }
+ return mCachedTestDump;
+ }
+}
diff --git a/tools/ahat/test/Tests.java b/tools/ahat/test/Tests.java
index fb53d90..bab7121 100644
--- a/tools/ahat/test/Tests.java
+++ b/tools/ahat/test/Tests.java
@@ -22,6 +22,7 @@
public static void main(String[] args) {
if (args.length == 0) {
args = new String[]{
+ "com.android.ahat.InstanceUtilsTest",
"com.android.ahat.QueryTest",
"com.android.ahat.SortTest"
};
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 972e827..de9b35d 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -20,21 +20,11 @@
fi
common_targets="vogar vogar.jar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests"
-android_root="/data/local/tmp/system"
-linker="linker"
mode="target"
j_arg="-j$(nproc)"
showcommands=
make_command=
-case "$TARGET_PRODUCT" in
- (armv8|mips64r6) linker="linker64";;
-esac
-
-if [[ "$ART_TEST_ANDROID_ROOT" != "" ]]; then
- android_root="$ART_TEST_ANDROID_ROOT"
-fi
-
while true; do
if [[ "$1" == "--host" ]]; then
mode="host"
@@ -42,16 +32,6 @@
elif [[ "$1" == "--target" ]]; then
mode="target"
shift
- elif [[ "$1" == "--32" ]]; then
- linker="linker"
- shift
- elif [[ "$1" == "--64" ]]; then
- linker="linker64"
- shift
- elif [[ "$1" == "--android-root" ]]; then
- shift
- android_root=$1
- shift
elif [[ "$1" == -j* ]]; then
j_arg=$1
shift
@@ -64,19 +44,10 @@
done
if [[ $mode == "host" ]]; then
- make_command="make $j_arg build-art-host-tests $common_targets out/host/linux-x86/lib/libjavacoretests.so out/host/linux-x86/lib64/libjavacoretests.so"
- echo "Executing $make_command"
- $make_command
+ make_command="make $j_arg $showcommands build-art-host-tests $common_targets out/host/linux-x86/lib/libjavacoretests.so out/host/linux-x86/lib64/libjavacoretests.so"
elif [[ $mode == "target" ]]; then
- # Disable NINJA for building on target, it does not support setting environment variables
- # within the make command.
- env="$env USE_NINJA=false"
- # Build extra tools that will be used by tests, so that
- # they are compiled with our own linker.
- # We need to provide our own linker in case the linker on the device
- # is out of date.
- make_command="make TARGET_LINKER=$android_root/bin/$linker $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh out/host/linux-x86/bin/adb"
- echo "Executing env $env $make_command"
- env $env $make_command
+ make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh out/host/linux-x86/bin/adb"
fi
+echo "Executing $make_command"
+$make_command