Merge "Revert "[optimizing] Improve x86 parallel moves/swaps""
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index d9d09bc..7283710 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -67,6 +67,7 @@
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
ART_GTEST_oat_file_assistant_test_DEX_DEPS := Main MainStripped MultiDex Nested
+ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex
ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
ART_GTEST_proxy_test_DEX_DEPS := Interfaces
ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 9181792..cdb1b9e 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -46,7 +46,9 @@
constexpr bool is64bit = false;
dwarf::DebugFrameOpCodeWriter<> initial_opcodes;
dwarf::WriteEhFrameCIE(is64bit, dwarf::Reg(8), initial_opcodes, &eh_frame_data_);
- dwarf::WriteEhFrameFDE(is64bit, 0, 0, actual_asm.size(), &actual_cfi, &eh_frame_data_);
+ std::vector<uintptr_t> eh_frame_patches;
+ dwarf::WriteEhFrameFDE(is64bit, 0, 0, actual_asm.size(), &actual_cfi,
+ &eh_frame_data_, &eh_frame_patches);
ReformatCfi(Objdump(false, "-W"), &lines);
// Pretty-print assembly.
auto* opts = new DisassemblerOptions(false, actual_asm.data(), true);
diff --git a/compiler/compiler.h b/compiler/compiler.h
index a04641e..94b0fe3 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -63,13 +63,6 @@
virtual uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
- virtual bool WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
-
uint64_t GetMaximumCompilationTimeBeforeWarning() const {
return maximum_compilation_time_before_warning_;
}
@@ -107,9 +100,6 @@
return driver_;
}
- // Whether to produce 64-bit ELF files for 64-bit targets. Leave this off for now.
- static constexpr bool kProduce64BitELFFiles = false;
-
private:
CompilerDriver* const driver_;
const uint64_t maximum_compilation_time_before_warning_;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 548b6f8..ef94d8b 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -14,13 +14,13 @@
* limitations under the License.
*/
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index a9ab3bb..4dfec17 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -56,7 +56,7 @@
// definition) we still want to resolve fields and record all available info.
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
uint32_t field_idx;
- mirror::ArtField* resolved_field;
+ ArtField* resolved_field;
if (!it->IsQuickened()) {
field_idx = it->field_idx_;
resolved_field = compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit,
@@ -121,7 +121,7 @@
for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
uint32_t field_idx = it->field_idx_;
- mirror::ArtField* resolved_field =
+ ArtField* resolved_field =
compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, true);
if (UNLIKELY(resolved_field == nullptr)) {
continue;
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index f6fa938..5bf77aa 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -36,8 +36,7 @@
* the linker, by the trampolines and other stubs (the backend uses
* these as temporary registers).
* r18 : (rxSELF) is reserved (pointer to thread-local storage).
- * r19 : (rwSUSPEND) is reserved (suspend check/debugger assist).
- * r20-r29: Callee save registers (promotion targets).
+ * r19-r29: Callee save registers (promotion targets).
* r30 : (lr) is reserved (the link register).
* rsp : (sp) is reserved (the stack pointer).
* rzr : (zr) is reserved (the zero register).
@@ -146,7 +145,6 @@
// Aliases which are not defined in "ARM Architecture Reference, register names".
rxIP0 = rx16,
rxIP1 = rx17,
- rxSUSPEND = rx19,
rxSELF = rx18,
rxLR = rx30,
/*
@@ -156,7 +154,6 @@
*/
rwIP0 = rw16,
rwIP1 = rw17,
- rwSUSPEND = rw19,
rwSELF = rw18,
rwLR = rw30,
};
@@ -176,12 +173,10 @@
constexpr RegStorage rs_xIP1(RegStorage::kValid | rxIP1);
constexpr RegStorage rs_wIP1(RegStorage::kValid | rwIP1);
// Reserved registers.
-constexpr RegStorage rs_xSUSPEND(RegStorage::kValid | rxSUSPEND);
constexpr RegStorage rs_xSELF(RegStorage::kValid | rxSELF);
constexpr RegStorage rs_sp(RegStorage::kValid | rsp);
constexpr RegStorage rs_xLR(RegStorage::kValid | rxLR);
// TODO: eliminate the need for these.
-constexpr RegStorage rs_wSUSPEND(RegStorage::kValid | rwSUSPEND);
constexpr RegStorage rs_wSELF(RegStorage::kValid | rwSELF);
constexpr RegStorage rs_wsp(RegStorage::kValid | rwsp);
constexpr RegStorage rs_wLR(RegStorage::kValid | rwLR);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 20f61f2..b7dbd0a 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -1010,8 +1010,12 @@
// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
- NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
- return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+ RegStorage r_tmp = AllocTemp();
+ LoadBaseDisp(rs_xSELF, Thread::ThreadFlagsOffset<kArm64PointerSize>().Int32Value(), r_tmp,
+ kUnsignedHalf, kNotVolatile);
+ LIR* cmp_branch = OpCmpImmBranch(target == nullptr ? kCondNe : kCondEq, r_tmp, 0, target);
+ FreeTemp(r_tmp);
+ return cmp_branch;
}
// Decrement register and branch on condition
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index c5c0dc5..fc32ecd 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -53,10 +53,9 @@
rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
// Note: we are not able to call C functions, since rs_xSELF is a special register that needs
// to be preserved but would be scratched by native functions following aapcs64.
-static constexpr RegStorage reserved_regs_arr[] =
- {rs_wSUSPEND, rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
-static constexpr RegStorage reserved64_regs_arr[] =
- {rs_xSUSPEND, rs_xSELF, rs_sp, rs_xLR, rs_xzr};
+static constexpr RegStorage reserved_regs_arr[] = {rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
+static constexpr RegStorage reserved64_regs_arr[] = {rs_xSELF, rs_sp, rs_xLR, rs_xzr};
+
static constexpr RegStorage core_temps_arr[] =
{rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15, rs_w16,
@@ -110,7 +109,7 @@
RegStorage res_reg = RegStorage::InvalidReg();
switch (reg) {
case kSelf: res_reg = rs_wSELF; break;
- case kSuspend: res_reg = rs_wSUSPEND; break;
+ case kSuspend: res_reg = RegStorage::InvalidReg(); break;
case kLr: res_reg = rs_wLR; break;
case kPc: res_reg = RegStorage::InvalidReg(); break;
case kSp: res_reg = rs_wsp; break;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 5ea36c2..9f4a318 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1071,7 +1071,7 @@
pc_rel_temp_(nullptr),
dex_cache_arrays_min_offset_(std::numeric_limits<uint32_t>::max()),
cfi_(&last_lir_insn_,
- cu->compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols(),
+ cu->compiler_driver->GetCompilerOptions().GetIncludeCFI(),
arena),
in_to_reg_storage_mapping_(arena) {
switch_tables_.reserve(4);
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 2e62166..2db5a36 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -56,11 +56,11 @@
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
- true, // generate_gdb_information.
false,
CompilerOptions::kDefaultTopKProfileThreshold,
false,
true, // include_debug_symbols.
+ true, // include_cfi.
false,
false,
false,
diff --git a/compiler/dex/quick/quick_cfi_test_expected.inc b/compiler/dex/quick/quick_cfi_test_expected.inc
index 634fdee..48109d2 100644
--- a/compiler/dex/quick/quick_cfi_test_expected.inc
+++ b/compiler/dex/quick/quick_cfi_test_expected.inc
@@ -33,15 +33,15 @@
// 0x00000014: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xFF, 0x03, 0x01, 0xD1, 0xE8, 0xA7, 0x01, 0x6D, 0xF4, 0xD7, 0x02, 0xA9,
+ 0xFF, 0x03, 0x01, 0xD1, 0xE8, 0xA7, 0x01, 0x6D, 0xF3, 0xD3, 0x02, 0xA9,
0xFE, 0x1F, 0x00, 0xF9, 0xE0, 0x03, 0x00, 0xB9, 0xE8, 0xA7, 0x41, 0x6D,
- 0xF4, 0xD7, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91,
+ 0xF3, 0xD3, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91,
0xC0, 0x03, 0x5F, 0xD6,
};
static constexpr uint8_t expected_cfi_kArm64[] = {
- 0x44, 0x0E, 0x40, 0x44, 0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x44, 0x94,
- 0x06, 0x95, 0x04, 0x44, 0x9E, 0x02, 0x44, 0x0A, 0x44, 0x06, 0x48, 0x06,
- 0x49, 0x44, 0xD4, 0xD5, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E,
+ 0x44, 0x0E, 0x40, 0x44, 0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x44, 0x93,
+ 0x06, 0x94, 0x04, 0x44, 0x9E, 0x02, 0x44, 0x0A, 0x44, 0x06, 0x48, 0x06,
+ 0x49, 0x44, 0xD3, 0xD4, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E,
0x40,
};
// 0x00000000: sub sp, sp, #0x40 (64)
@@ -49,9 +49,9 @@
// 0x00000004: stp d8, d9, [sp, #24]
// 0x00000008: .cfi_offset_extended: r72 at cfa-40
// 0x00000008: .cfi_offset_extended: r73 at cfa-32
-// 0x00000008: stp x20, x21, [sp, #40]
-// 0x0000000c: .cfi_offset: r20 at cfa-24
-// 0x0000000c: .cfi_offset: r21 at cfa-16
+// 0x00000008: stp x19, x20, [sp, #40]
+// 0x0000000c: .cfi_offset: r19 at cfa-24
+// 0x0000000c: .cfi_offset: r20 at cfa-16
// 0x0000000c: str lr, [sp, #56]
// 0x00000010: .cfi_offset: r30 at cfa-8
// 0x00000010: str w0, [sp]
@@ -59,9 +59,9 @@
// 0x00000014: ldp d8, d9, [sp, #24]
// 0x00000018: .cfi_restore_extended: r72
// 0x00000018: .cfi_restore_extended: r73
-// 0x00000018: ldp x20, x21, [sp, #40]
+// 0x00000018: ldp x19, x20, [sp, #40]
+// 0x0000001c: .cfi_restore: r19
// 0x0000001c: .cfi_restore: r20
-// 0x0000001c: .cfi_restore: r21
// 0x0000001c: ldr lr, [sp, #56]
// 0x00000020: .cfi_restore: r30
// 0x00000020: add sp, sp, #0x40 (64)
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 2c0bd47..fc3e687 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -793,20 +793,6 @@
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
-bool QuickCompiler::WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host) const {
- if (kProduce64BitELFFiles && Is64BitInstructionSet(GetCompilerDriver()->GetInstructionSet())) {
- return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host,
- *GetCompilerDriver());
- } else {
- return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
- *GetCompilerDriver());
- }
-}
-
Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) {
UNUSED(compilation_unit);
Mir2Lir* mir_to_lir = nullptr;
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 09b08ac..8d2c324 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -52,14 +52,6 @@
uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host) const
- OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit);
void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 5b90ba9..ae814b4 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -222,7 +222,7 @@
} else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) {
uint32_t dex_pc = inst->GetDexPc(insns);
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- mirror::ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
+ ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
if (field == nullptr) {
// It can be null if the line wasn't verified since it was unreachable.
return false;
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 8babc28..b4d4695 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -19,12 +19,11 @@
#include "compiler_driver.h"
+#include "art_field-inl.h"
#include "dex_compilation_unit.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
-#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
@@ -65,12 +64,12 @@
return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
}
-inline mirror::ArtField* CompilerDriver::ResolveFieldWithDexFile(
+inline ArtField* CompilerDriver::ResolveFieldWithDexFile(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
uint32_t field_idx, bool is_static) {
DCHECK_EQ(dex_cache->GetDexFile(), dex_file);
- mirror::ArtField* resolved_field = Runtime::Current()->GetClassLinker()->ResolveField(
+ ArtField* resolved_field = Runtime::Current()->GetClassLinker()->ResolveField(
*dex_file, field_idx, dex_cache, class_loader, is_static);
DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
if (UNLIKELY(resolved_field == nullptr)) {
@@ -90,7 +89,7 @@
return Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file);
}
-inline mirror::ArtField* CompilerDriver::ResolveField(
+inline ArtField* CompilerDriver::ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
@@ -100,7 +99,7 @@
}
inline void CompilerDriver::GetResolvedFieldDexFileLocation(
- mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
mirror::Class* declaring_class = resolved_field->GetDeclaringClass();
*declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
@@ -108,17 +107,17 @@
*declaring_field_idx = resolved_field->GetDexFieldIndex();
}
-inline bool CompilerDriver::IsFieldVolatile(mirror::ArtField* field) {
+inline bool CompilerDriver::IsFieldVolatile(ArtField* field) {
return field->IsVolatile();
}
-inline MemberOffset CompilerDriver::GetFieldOffset(mirror::ArtField* field) {
+inline MemberOffset CompilerDriver::GetFieldOffset(ArtField* field) {
return field->GetOffset();
}
inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- mirror::ArtField* resolved_field, uint16_t field_idx) {
+ ArtField* resolved_field, uint16_t field_idx) {
DCHECK(!resolved_field->IsStatic());
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
bool fast_get = referrer_class != nullptr &&
@@ -130,7 +129,7 @@
inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- mirror::ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) {
+ ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) {
DCHECK(resolved_field->IsStatic());
if (LIKELY(referrer_class != nullptr)) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
@@ -177,14 +176,14 @@
}
inline bool CompilerDriver::IsStaticFieldInReferrerClass(mirror::Class* referrer_class,
- mirror::ArtField* resolved_field) {
+ ArtField* resolved_field) {
DCHECK(resolved_field->IsStatic());
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
return referrer_class == fields_class;
}
inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
- mirror::ArtField* resolved_field) {
+ ArtField* resolved_field) {
DCHECK(resolved_field->IsStatic());
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
return fields_class == referrer_class || fields_class->IsInitialized();
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c2b8375..ef47377 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -27,9 +27,10 @@
#include <malloc.h> // For mallinfo
#endif
+#include "art_field-inl.h"
#include "base/stl_util.h"
#include "base/timing_logger.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "compiled_class.h"
#include "compiled_method.h"
#include "compiler.h"
@@ -40,6 +41,7 @@
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "driver/compiler_options.h"
+#include "elf_writer_quick.h"
#include "jni_internal.h"
#include "object_lock.h"
#include "profiler.h"
@@ -47,7 +49,6 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/space/space.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/class-inl.h"
@@ -72,6 +73,9 @@
static constexpr bool kTimeCompileMethod = !kIsDebugBuild;
+// Whether to produce 64-bit ELF files for 64-bit targets. Leave this off for now.
+static constexpr bool kProduce64BitELFFiles = false;
+
static double Percentage(size_t x, size_t y) {
return 100.0 * (static_cast<double>(x)) / (static_cast<double>(x + y));
}
@@ -1179,7 +1183,7 @@
DexCacheArraysLayout CompilerDriver::GetDexCacheArraysLayout(const DexFile* dex_file) {
// Currently only image dex caches have fixed array layout.
return IsImage() && GetSupportBootImageFixup()
- ? DexCacheArraysLayout(dex_file)
+ ? DexCacheArraysLayout(GetInstructionSetPointerSize(instruction_set_), dex_file)
: DexCacheArraysLayout();
}
@@ -1205,12 +1209,11 @@
stats_->ProcessedInvoke(invoke_type, flags);
}
-mirror::ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
- const DexCompilationUnit* mUnit,
- bool is_put,
- const ScopedObjectAccess& soa) {
+ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
+ const DexCompilationUnit* mUnit, bool is_put,
+ const ScopedObjectAccess& soa) {
// Try to resolve the field and compiling method's class.
- mirror::ArtField* resolved_field;
+ ArtField* resolved_field;
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
@@ -1219,11 +1222,10 @@
hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
Handle<mirror::ClassLoader> class_loader_handle(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
- Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
- ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, false)));
- referrer_class = (resolved_field_handle.Get() != nullptr)
+ resolved_field =
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, false);
+ referrer_class = resolved_field != nullptr
? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
- resolved_field = resolved_field_handle.Get();
dex_cache = dex_cache_handle.Get();
}
bool can_link = false;
@@ -1240,11 +1242,9 @@
bool is_put, MemberOffset* field_offset,
bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ArtField> resolved_field =
- hs.NewHandle(ComputeInstanceFieldInfo(field_idx, mUnit, is_put, soa));
+ ArtField* resolved_field = ComputeInstanceFieldInfo(field_idx, mUnit, is_put, soa);
- if (resolved_field.Get() == nullptr) {
+ if (resolved_field == nullptr) {
// Conservative defaults.
*is_volatile = true;
*field_offset = MemberOffset(static_cast<size_t>(-1));
@@ -1263,20 +1263,19 @@
Primitive::Type* type) {
ScopedObjectAccess soa(Thread::Current());
// Try to resolve the field and compiling method's class.
- mirror::ArtField* resolved_field;
+ ArtField* resolved_field;
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache_handle(
hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
Handle<mirror::ClassLoader> class_loader_handle(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
- Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
- ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, true)));
- referrer_class = (resolved_field_handle.Get() != nullptr)
+ resolved_field =
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, true);
+ referrer_class = resolved_field != nullptr
? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
- resolved_field = resolved_field_handle.Get();
dex_cache = dex_cache_handle.Get();
}
bool result = false;
@@ -1724,7 +1723,7 @@
ClassDataItemIterator it(dex_file, class_data);
while (it.HasNextStaticField()) {
if (resolve_fields_and_methods) {
- mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+ ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
dex_cache, class_loader, true);
if (field == nullptr) {
CheckAndClearResolveException(soa.Self());
@@ -1739,7 +1738,7 @@
requires_constructor_barrier = true;
}
if (resolve_fields_and_methods) {
- mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
+ ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
dex_cache, class_loader, false);
if (field == nullptr) {
CheckAndClearResolveException(soa.Self());
@@ -2368,7 +2367,11 @@
OatWriter* oat_writer,
art::File* file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return compiler_->WriteElf(file, oat_writer, dex_files, android_root, is_host);
+ if (kProduce64BitELFFiles && Is64BitInstructionSet(GetInstructionSet())) {
+ return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host, *this);
+ } else {
+ return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host, *this);
+ }
}
bool CompilerDriver::SkipCompilation(const std::string& method_name) {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index a6ed559..f1066a5 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -239,14 +239,14 @@
// Resolve a field. Returns nullptr on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
- mirror::ArtField* ResolveField(
+ ArtField* ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a field with a given dex file.
- mirror::ArtField* ResolveFieldWithDexFile(
+ ArtField* ResolveFieldWithDexFile(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
uint32_t field_idx, bool is_static)
@@ -254,12 +254,12 @@
// Get declaration location of a resolved field.
void GetResolvedFieldDexFileLocation(
- mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- MemberOffset GetFieldOffset(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsFieldVolatile(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetFieldOffset(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find a dex cache for a dex file.
inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
@@ -268,23 +268,23 @@
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- mirror::ArtField* resolved_field, uint16_t field_idx)
+ ArtField* resolved_field, uint16_t field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
// of the declaring class in the referrer's dex file.
std::pair<bool, bool> IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- mirror::ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
+ ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Is the static field in the referrer's class?
- bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, mirror::ArtField* resolved_field)
+ bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Is static field's class initialized?
bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
- mirror::ArtField* resolved_field)
+ ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a method. Returns nullptr on failure, including incompatible class change.
@@ -331,7 +331,7 @@
void ComputeFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
const ScopedObjectAccess& soa, bool is_static,
- mirror::ArtField** resolved_field,
+ ArtField** resolved_field,
mirror::Class** referrer_class,
mirror::DexCache** dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -341,7 +341,7 @@
MemberOffset* field_offset, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- mirror::ArtField* ComputeInstanceFieldInfo(uint32_t field_idx,
+ ArtField* ComputeInstanceFieldInfo(uint32_t field_idx,
const DexCompilationUnit* mUnit,
bool is_put,
const ScopedObjectAccess& soa)
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 5ebc029..e78ff90 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -20,7 +20,7 @@
#include <stdio.h>
#include <memory>
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "common_compiler_test.h"
#include "dex_file.h"
#include "gc/heap.h"
@@ -132,7 +132,7 @@
}
EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- mirror::ArtField* field = dex_cache->GetResolvedField(i);
+ ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
EXPECT_TRUE(field != NULL) << "field_idx=" << i
<< " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
<< " " << dex.GetFieldName(dex.GetFieldId(i));
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index fc00c92..c5fc98a 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -27,11 +27,11 @@
small_method_threshold_(kDefaultSmallMethodThreshold),
tiny_method_threshold_(kDefaultTinyMethodThreshold),
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
- generate_gdb_information_(false),
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
include_debug_symbols_(kDefaultIncludeDebugSymbols),
+ include_cfi_(false),
implicit_null_checks_(true),
implicit_so_checks_(true),
implicit_suspend_checks_(false),
@@ -53,11 +53,11 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
- bool generate_gdb_information,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
bool include_debug_symbols,
+ bool include_cfi,
bool implicit_null_checks,
bool implicit_so_checks,
bool implicit_suspend_checks,
@@ -73,11 +73,11 @@
small_method_threshold_(small_method_threshold),
tiny_method_threshold_(tiny_method_threshold),
num_dex_methods_threshold_(num_dex_methods_threshold),
- generate_gdb_information_(generate_gdb_information),
include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
debuggable_(debuggable),
include_debug_symbols_(include_debug_symbols),
+ include_cfi_(include_cfi),
implicit_null_checks_(implicit_null_checks),
implicit_so_checks_(implicit_so_checks),
implicit_suspend_checks_(implicit_suspend_checks),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index f7ea385..bf3f8ec 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -61,11 +61,11 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
- bool generate_gdb_information,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
bool include_debug_symbols,
+ bool include_cfi,
bool implicit_null_checks,
bool implicit_so_checks,
bool implicit_suspend_checks,
@@ -150,6 +150,11 @@
return include_debug_symbols_;
}
+ bool GetIncludeCFI() const {
+ // include-debug-symbols implies include-cfi.
+ return include_cfi_ || include_debug_symbols_;
+ }
+
bool GetImplicitNullChecks() const {
return implicit_null_checks_;
}
@@ -162,10 +167,6 @@
return implicit_suspend_checks_;
}
- bool GetGenerateGDBInformation() const {
- return generate_gdb_information_;
- }
-
bool GetIncludePatchInformation() const {
return include_patch_information_;
}
@@ -207,12 +208,12 @@
const size_t small_method_threshold_;
const size_t tiny_method_threshold_;
const size_t num_dex_methods_threshold_;
- const bool generate_gdb_information_;
const bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
const double top_k_profile_threshold_;
const bool debuggable_;
const bool include_debug_symbols_;
+ const bool include_cfi_;
const bool implicit_null_checks_;
const bool implicit_so_checks_;
const bool implicit_suspend_checks_;
diff --git a/compiler/dwarf/debug_frame_opcode_writer.h b/compiler/dwarf/debug_frame_opcode_writer.h
index d0d1821..4112c84 100644
--- a/compiler/dwarf/debug_frame_opcode_writer.h
+++ b/compiler/dwarf/debug_frame_opcode_writer.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
#define ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
-#include "dwarf.h"
-#include "register.h"
-#include "writer.h"
+#include "dwarf/dwarf_constants.h"
+#include "dwarf/register.h"
+#include "dwarf/writer.h"
#include "utils.h"
namespace art {
diff --git a/compiler/dwarf/debug_info_entry_writer.h b/compiler/dwarf/debug_info_entry_writer.h
index c0350b6..f5b9ca5 100644
--- a/compiler/dwarf/debug_info_entry_writer.h
+++ b/compiler/dwarf/debug_info_entry_writer.h
@@ -17,11 +17,12 @@
#ifndef ART_COMPILER_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
#define ART_COMPILER_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
+#include <cstdint>
#include <unordered_map>
-#include "dwarf.h"
+#include "dwarf/dwarf_constants.h"
+#include "dwarf/writer.h"
#include "leb128.h"
-#include "writer.h"
namespace art {
namespace dwarf {
@@ -88,6 +89,7 @@
void WriteAddr(Attribute attrib, uint64_t value) {
AddAbbrevAttribute(attrib, DW_FORM_addr);
+ patch_locations_.push_back(this->data()->size());
if (is64bit_) {
this->PushUint64(value);
} else {
@@ -168,7 +170,11 @@
this->PushUint32(address);
}
- bool is64bit() const { return is64bit_; }
+ bool Is64bit() const { return is64bit_; }
+
+ const std::vector<uintptr_t>& GetPatchLocations() const {
+ return patch_locations_;
+ }
using Writer<Allocator>::data;
@@ -240,6 +246,7 @@
size_t abbrev_code_offset_ = 0; // Location to patch once we know the code.
bool inside_entry_ = false; // Entry ends at first child (if any).
bool has_children = true;
+ std::vector<uintptr_t> patch_locations_;
};
} // namespace dwarf
diff --git a/compiler/dwarf/debug_line_opcode_writer.h b/compiler/dwarf/debug_line_opcode_writer.h
index f34acee..bdc25e4 100644
--- a/compiler/dwarf/debug_line_opcode_writer.h
+++ b/compiler/dwarf/debug_line_opcode_writer.h
@@ -17,8 +17,10 @@
#ifndef ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
#define ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
-#include "dwarf.h"
-#include "writer.h"
+#include <cstdint>
+
+#include "dwarf/dwarf_constants.h"
+#include "dwarf/writer.h"
namespace art {
namespace dwarf {
@@ -119,10 +121,12 @@
if (use_64bit_address_) {
this->PushUleb128(1 + 8);
this->PushUint8(DW_LNE_set_address);
+ patch_locations_.push_back(this->data()->size());
this->PushUint64(absolute_address);
} else {
this->PushUleb128(1 + 4);
this->PushUint8(DW_LNE_set_address);
+ patch_locations_.push_back(this->data()->size());
this->PushUint32(absolute_address);
}
current_address_ = absolute_address;
@@ -204,6 +208,10 @@
return current_line_;
}
+ const std::vector<uintptr_t>& GetPatchLocations() const {
+ return patch_locations_;
+ }
+
using Writer<Allocator>::data;
DebugLineOpCodeWriter(bool use64bitAddress,
@@ -233,6 +241,7 @@
uint64_t current_address_;
int current_file_;
int current_line_;
+ std::vector<uintptr_t> patch_locations_;
DISALLOW_COPY_AND_ASSIGN(DebugLineOpCodeWriter);
};
diff --git a/runtime/dwarf.h b/compiler/dwarf/dwarf_constants.h
similarity index 98%
rename from runtime/dwarf.h
rename to compiler/dwarf/dwarf_constants.h
index b491f47..8e39ca7 100644
--- a/runtime/dwarf.h
+++ b/compiler/dwarf/dwarf_constants.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DWARF_H_
-#define ART_RUNTIME_DWARF_H_
+#ifndef ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
+#define ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
namespace art {
namespace dwarf {
@@ -661,4 +661,4 @@
} // namespace dwarf
} // namespace art
-#endif // ART_RUNTIME_DWARF_H_
+#endif // ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index ec18e96..98f691a 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -120,19 +120,61 @@
DebugFrameOpCodeWriter<> initial_opcodes;
WriteEhFrameCIE(is64bit, Reg(is64bit ? 16 : 8), initial_opcodes, &eh_frame_data_);
- WriteEhFrameFDE(is64bit, 0, 0x01000000, 0x01000000, opcodes.data(), &eh_frame_data_);
+ std::vector<uintptr_t> eh_frame_patches;
+ std::vector<uintptr_t> expected_patches { 28 }; // NOLINT
+ WriteEhFrameFDE(is64bit, 0, 0x01000000, 0x01000000, opcodes.data(),
+ &eh_frame_data_, &eh_frame_patches);
+
+ EXPECT_EQ(expected_patches, eh_frame_patches);
CheckObjdumpOutput(is64bit, "-W");
}
-// TODO: objdump seems to have trouble with 64bit CIE length.
-TEST_F(DwarfTest, DISABLED_DebugFrame64) {
+TEST_F(DwarfTest, DebugFrame64) {
constexpr bool is64bit = true;
DebugFrameOpCodeWriter<> initial_opcodes;
WriteEhFrameCIE(is64bit, Reg(16), initial_opcodes, &eh_frame_data_);
DebugFrameOpCodeWriter<> opcodes;
+ std::vector<uintptr_t> eh_frame_patches;
+ std::vector<uintptr_t> expected_patches { 32 }; // NOLINT
WriteEhFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
- opcodes.data(), &eh_frame_data_);
+ opcodes.data(), &eh_frame_data_, &eh_frame_patches);
DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000");
+
+ EXPECT_EQ(expected_patches, eh_frame_patches);
+ CheckObjdumpOutput(is64bit, "-W");
+}
+
+// Test x86_64 register mapping. It is the only non-trivial architecture.
+// ARM, X86, and Mips have: dwarf_reg = art_reg + constant.
+TEST_F(DwarfTest, x86_64_RegisterMapping) {
+ constexpr bool is64bit = true;
+ DebugFrameOpCodeWriter<> opcodes;
+ for (int i = 0; i < 16; i++) {
+ opcodes.RelOffset(Reg::X86_64Core(i), 0);
+ }
+ DW_CHECK("FDE");
+ DW_CHECK_NEXT("DW_CFA_offset: r0 (rax)");
+ DW_CHECK_NEXT("DW_CFA_offset: r2 (rcx)");
+ DW_CHECK_NEXT("DW_CFA_offset: r1 (rdx)");
+ DW_CHECK_NEXT("DW_CFA_offset: r3 (rbx)");
+ DW_CHECK_NEXT("DW_CFA_offset: r7 (rsp)");
+ DW_CHECK_NEXT("DW_CFA_offset: r6 (rbp)");
+ DW_CHECK_NEXT("DW_CFA_offset: r4 (rsi)");
+ DW_CHECK_NEXT("DW_CFA_offset: r5 (rdi)");
+ DW_CHECK_NEXT("DW_CFA_offset: r8 (r8)");
+ DW_CHECK_NEXT("DW_CFA_offset: r9 (r9)");
+ DW_CHECK_NEXT("DW_CFA_offset: r10 (r10)");
+ DW_CHECK_NEXT("DW_CFA_offset: r11 (r11)");
+ DW_CHECK_NEXT("DW_CFA_offset: r12 (r12)");
+ DW_CHECK_NEXT("DW_CFA_offset: r13 (r13)");
+ DW_CHECK_NEXT("DW_CFA_offset: r14 (r14)");
+ DW_CHECK_NEXT("DW_CFA_offset: r15 (r15)");
+ DebugFrameOpCodeWriter<> initial_opcodes;
+ WriteEhFrameCIE(is64bit, Reg(16), initial_opcodes, &eh_frame_data_);
+ std::vector<uintptr_t> eh_frame_patches;
+ WriteEhFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
+ opcodes.data(), &eh_frame_data_, &eh_frame_patches);
+
CheckObjdumpOutput(is64bit, "-W");
}
@@ -184,7 +226,12 @@
DW_CHECK_NEXT("Entry\tDir\tTime\tSize\tName");
DW_CHECK_NEXT("1\t0\t1000\t2000\tfile.c");
- WriteDebugLineTable(include_directories, files, opcodes, &debug_line_data_);
+ std::vector<uintptr_t> debug_line_patches;
+ std::vector<uintptr_t> expected_patches { 87 }; // NOLINT
+ WriteDebugLineTable(include_directories, files, opcodes,
+ &debug_line_data_, &debug_line_patches);
+
+ EXPECT_EQ(expected_patches, debug_line_patches);
CheckObjdumpOutput(is64bit, "-W");
}
@@ -219,7 +266,10 @@
std::vector<std::string> directories;
std::vector<FileEntry> files { { "file.c", 0, 1000, 2000 } }; // NOLINT
- WriteDebugLineTable(directories, files, opcodes, &debug_line_data_);
+ std::vector<uintptr_t> debug_line_patches;
+ WriteDebugLineTable(directories, files, opcodes,
+ &debug_line_data_, &debug_line_patches);
+
CheckObjdumpOutput(is64bit, "-W -WL");
}
@@ -271,7 +321,12 @@
DW_CHECK_NEXT("DW_AT_high_pc DW_FORM_addr");
DW_CHECK("3 DW_TAG_compile_unit [has children]");
- dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info, &debug_info_data_);
+ std::vector<uintptr_t> debug_info_patches;
+ std::vector<uintptr_t> expected_patches { 16, 20, 29, 33, 42, 46 }; // NOLINT
+ dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info,
+ &debug_info_data_, &debug_info_patches);
+
+ EXPECT_EQ(expected_patches, debug_info_patches);
CheckObjdumpOutput(is64bit, "-W");
}
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index d866b91..760f53c 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -17,15 +17,23 @@
#ifndef ART_COMPILER_DWARF_HEADERS_H_
#define ART_COMPILER_DWARF_HEADERS_H_
-#include "debug_frame_opcode_writer.h"
-#include "debug_info_entry_writer.h"
-#include "debug_line_opcode_writer.h"
-#include "register.h"
-#include "writer.h"
+#include <cstdint>
+
+#include "dwarf/debug_frame_opcode_writer.h"
+#include "dwarf/debug_info_entry_writer.h"
+#include "dwarf/debug_line_opcode_writer.h"
+#include "dwarf/register.h"
+#include "dwarf/writer.h"
namespace art {
namespace dwarf {
+// Note that all headers start with a 32-bit length.
+// DWARF also supports 64-bit lengths, but we never use that.
+// It is intended to support very large debug sections (>4GB),
+// and compilers are expected *not* to use it by default.
+// In particular, it is not related to machine architecture.
+
// Write common information entry (CIE) to .eh_frame section.
template<typename Allocator>
void WriteEhFrameCIE(bool is64bit, Reg return_address_register,
@@ -33,15 +41,8 @@
std::vector<uint8_t>* eh_frame) {
Writer<> writer(eh_frame);
size_t cie_header_start_ = writer.data()->size();
- if (is64bit) {
- // TODO: This is not related to being 64bit.
- writer.PushUint32(0xffffffff);
- writer.PushUint64(0); // Length placeholder.
- writer.PushUint64(0); // CIE id.
- } else {
- writer.PushUint32(0); // Length placeholder.
- writer.PushUint32(0); // CIE id.
- }
+ writer.PushUint32(0); // Length placeholder.
+ writer.PushUint32(0); // CIE id.
writer.PushUint8(1); // Version.
writer.PushString("zR");
writer.PushUleb128(DebugFrameOpCodeWriter<Allocator>::kCodeAlignmentFactor);
@@ -55,11 +56,7 @@
}
writer.PushData(opcodes.data());
writer.Pad(is64bit ? 8 : 4);
- if (is64bit) {
- writer.UpdateUint64(cie_header_start_ + 4, writer.data()->size() - cie_header_start_ - 12);
- } else {
- writer.UpdateUint32(cie_header_start_, writer.data()->size() - cie_header_start_ - 4);
- }
+ writer.UpdateUint32(cie_header_start_, writer.data()->size() - cie_header_start_ - 4);
}
// Write frame description entry (FDE) to .eh_frame section.
@@ -67,20 +64,15 @@
void WriteEhFrameFDE(bool is64bit, size_t cie_offset,
uint64_t initial_address, uint64_t address_range,
const std::vector<uint8_t, Allocator>* opcodes,
- std::vector<uint8_t>* eh_frame) {
+ std::vector<uint8_t>* eh_frame,
+ std::vector<uintptr_t>* eh_frame_patches) {
Writer<> writer(eh_frame);
size_t fde_header_start = writer.data()->size();
- if (is64bit) {
- // TODO: This is not related to being 64bit.
- writer.PushUint32(0xffffffff);
- writer.PushUint64(0); // Length placeholder.
- uint64_t cie_pointer = writer.data()->size() - cie_offset;
- writer.PushUint64(cie_pointer);
- } else {
- writer.PushUint32(0); // Length placeholder.
- uint32_t cie_pointer = writer.data()->size() - cie_offset;
- writer.PushUint32(cie_pointer);
- }
+ writer.PushUint32(0); // Length placeholder.
+ uint32_t cie_pointer = writer.data()->size() - cie_offset;
+ writer.PushUint32(cie_pointer);
+ // Relocate initial_address, but not address_range (it is a size).
+ eh_frame_patches->push_back(writer.data()->size());
if (is64bit) {
writer.PushUint64(initial_address);
writer.PushUint64(address_range);
@@ -91,26 +83,28 @@
writer.PushUleb128(0); // Augmentation data size.
writer.PushData(opcodes);
writer.Pad(is64bit ? 8 : 4);
- if (is64bit) {
- writer.UpdateUint64(fde_header_start + 4, writer.data()->size() - fde_header_start - 12);
- } else {
- writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4);
- }
+ writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4);
}
// Write compilation unit (CU) to .debug_info section.
template<typename Allocator>
void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
const DebugInfoEntryWriter<Allocator>& entries,
- std::vector<uint8_t>* debug_info) {
+ std::vector<uint8_t>* debug_info,
+ std::vector<uintptr_t>* debug_info_patches) {
Writer<> writer(debug_info);
size_t start = writer.data()->size();
writer.PushUint32(0); // Length placeholder.
writer.PushUint16(3); // Version.
writer.PushUint32(debug_abbrev_offset);
- writer.PushUint8(entries.is64bit() ? 8 : 4);
+ writer.PushUint8(entries.Is64bit() ? 8 : 4);
+ size_t entries_offset = writer.data()->size();
writer.PushData(entries.data());
writer.UpdateUint32(start, writer.data()->size() - start - 4);
+ // Copy patch locations and make them relative to .debug_info section.
+ for (uintptr_t patch_location : entries.GetPatchLocations()) {
+ debug_info_patches->push_back(entries_offset + patch_location);
+ }
}
struct FileEntry {
@@ -125,7 +119,8 @@
void WriteDebugLineTable(const std::vector<std::string>& include_directories,
const std::vector<FileEntry>& files,
const DebugLineOpCodeWriter<Allocator>& opcodes,
- std::vector<uint8_t>* debug_line) {
+ std::vector<uint8_t>* debug_line,
+ std::vector<uintptr_t>* debug_line_patches) {
Writer<> writer(debug_line);
size_t header_start = writer.data()->size();
writer.PushUint32(0); // Section-length placeholder.
@@ -157,8 +152,13 @@
}
writer.PushUint8(0); // Terminate file list.
writer.UpdateUint32(header_length_pos, writer.data()->size() - header_length_pos - 4);
- writer.PushData(opcodes.data()->data(), opcodes.data()->size());
+ size_t opcodes_offset = writer.data()->size();
+ writer.PushData(opcodes.data());
writer.UpdateUint32(header_start, writer.data()->size() - header_start - 4);
+ // Copy patch locations and make them relative to .debug_line section.
+ for (uintptr_t patch_location : opcodes.GetPatchLocations()) {
+ debug_line_patches->push_back(opcodes_offset + patch_location);
+ }
}
} // namespace dwarf
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 5e8e24b..39233ce 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -16,6 +16,8 @@
#include "elf_writer_debug.h"
+#include <unordered_set>
+
#include "compiled_method.h"
#include "driver/compiler_driver.h"
#include "dex_file-inl.h"
@@ -149,6 +151,25 @@
UNREACHABLE();
}
+void WriteEhFrame(const CompilerDriver* compiler,
+ OatWriter* oat_writer,
+ uint32_t text_section_offset,
+ std::vector<uint8_t>* eh_frame) {
+ const auto& method_infos = oat_writer->GetMethodDebugInfo();
+ const InstructionSet isa = compiler->GetInstructionSet();
+ size_t cie_offset = eh_frame->size();
+ auto* eh_frame_patches = oat_writer->GetAbsolutePatchLocationsFor(".eh_frame");
+ WriteEhFrameCIE(isa, eh_frame);
+ for (const OatWriter::DebugInfo& mi : method_infos) {
+ const SwapVector<uint8_t>* opcodes = mi.compiled_method_->GetCFIInfo();
+ if (opcodes != nullptr) {
+ WriteEhFrameFDE(Is64BitInstructionSet(isa), cie_offset,
+ text_section_offset + mi.low_pc_, mi.high_pc_ - mi.low_pc_,
+ opcodes, eh_frame, eh_frame_patches);
+ }
+ }
+}
+
/*
* @brief Generate the DWARF sections.
* @param oat_writer The Oat file Writer.
@@ -159,201 +180,205 @@
* @param debug_line Line number table.
*/
void WriteDebugSections(const CompilerDriver* compiler,
- const OatWriter* oat_writer,
+ OatWriter* oat_writer,
uint32_t text_section_offset,
- std::vector<uint8_t>* eh_frame,
std::vector<uint8_t>* debug_info,
std::vector<uint8_t>* debug_abbrev,
std::vector<uint8_t>* debug_str,
std::vector<uint8_t>* debug_line) {
const std::vector<OatWriter::DebugInfo>& method_infos = oat_writer->GetMethodDebugInfo();
const InstructionSet isa = compiler->GetInstructionSet();
- uint32_t cunit_low_pc = static_cast<uint32_t>(-1);
- uint32_t cunit_high_pc = 0;
- for (auto method_info : method_infos) {
- cunit_low_pc = std::min(cunit_low_pc, method_info.low_pc_);
- cunit_high_pc = std::max(cunit_high_pc, method_info.high_pc_);
+
+ // Find all addresses (low_pc) which contain deduped methods.
+ // The first instance of a method is not marked deduped_, but the rest are.
+ std::unordered_set<uint32_t> deduped_addresses;
+ for (auto it = method_infos.begin(); it != method_infos.end(); ++it) {
+ if (it->deduped_) {
+ deduped_addresses.insert(it->low_pc_);
+ }
}
- // Write .eh_frame section.
- size_t cie_offset = eh_frame->size();
- WriteEhFrameCIE(isa, eh_frame);
- for (const OatWriter::DebugInfo& mi : method_infos) {
- const SwapVector<uint8_t>* opcodes = mi.compiled_method_->GetCFIInfo();
- if (opcodes != nullptr) {
- WriteEhFrameFDE(Is64BitInstructionSet(isa), cie_offset,
- text_section_offset + mi.low_pc_, mi.high_pc_ - mi.low_pc_,
- opcodes, eh_frame);
+ // Group the methods into compilation units based on source file.
+ std::vector<std::vector<const OatWriter::DebugInfo*>> compilation_units;
+ const char* last_source_file = nullptr;
+ for (const auto& mi : method_infos) {
+ // Attribute a given instruction range to only a single method.
+ // Otherwise the debugger might get really confused.
+ if (!mi.deduped_) {
+ auto& dex_class_def = mi.dex_file_->GetClassDef(mi.class_def_index_);
+ const char* source_file = mi.dex_file_->GetSourceFile(dex_class_def);
+ if (compilation_units.empty() || source_file != last_source_file) {
+ compilation_units.push_back(std::vector<const OatWriter::DebugInfo*>());
+ }
+ compilation_units.back().push_back(&mi);
+ last_source_file = source_file;
}
}
// Write .debug_info section.
- size_t debug_abbrev_offset = debug_abbrev->size();
- DebugInfoEntryWriter<> info(false /* 32 bit */, debug_abbrev);
- info.StartTag(DW_TAG_compile_unit, DW_CHILDREN_yes);
- info.WriteStrp(DW_AT_producer, "Android dex2oat", debug_str);
- info.WriteData1(DW_AT_language, DW_LANG_Java);
- info.WriteAddr(DW_AT_low_pc, cunit_low_pc + text_section_offset);
- info.WriteAddr(DW_AT_high_pc, cunit_high_pc + text_section_offset);
- info.WriteData4(DW_AT_stmt_list, debug_line->size());
- for (auto method_info : method_infos) {
- std::string method_name = PrettyMethod(method_info.dex_method_index_,
- *method_info.dex_file_, true);
- if (method_info.deduped_) {
- // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
- // so that it will show up in a debuggerd crash report.
- method_name += " [ DEDUPED ]";
- }
- info.StartTag(DW_TAG_subprogram, DW_CHILDREN_no);
- info.WriteStrp(DW_AT_name, method_name.data(), debug_str);
- info.WriteAddr(DW_AT_low_pc, method_info.low_pc_ + text_section_offset);
- info.WriteAddr(DW_AT_high_pc, method_info.high_pc_ + text_section_offset);
- info.EndTag(); // DW_TAG_subprogram
- }
- info.EndTag(); // DW_TAG_compile_unit
- WriteDebugInfoCU(debug_abbrev_offset, info, debug_info);
-
- // TODO: in gdb info functions <regexp> - reports Java functions, but
- // source file is <unknown> because .debug_line is formed as one
- // compilation unit. To fix this it is possible to generate
- // a separate compilation unit for every distinct Java source.
- // Each of the these compilation units can have several non-adjacent
- // method ranges.
-
- // Write .debug_line section.
- std::vector<FileEntry> files;
- std::unordered_map<std::string, size_t> files_map;
- std::vector<std::string> directories;
- std::unordered_map<std::string, size_t> directories_map;
- int code_factor_bits_ = 0;
- int dwarf_isa = -1;
- switch (isa) {
- case kArm: // arm actually means thumb2.
- case kThumb2:
- code_factor_bits_ = 1; // 16-bit instuctions
- dwarf_isa = 1; // DW_ISA_ARM_thumb.
- break;
- case kArm64:
- case kMips:
- case kMips64:
- code_factor_bits_ = 2; // 32-bit instructions
- break;
- case kNone:
- case kX86:
- case kX86_64:
- break;
- }
- DebugLineOpCodeWriter<> opcodes(false /* 32bit */, code_factor_bits_);
- opcodes.SetAddress(text_section_offset + cunit_low_pc);
- if (dwarf_isa != -1) {
- opcodes.SetISA(dwarf_isa);
- }
- for (const OatWriter::DebugInfo& mi : method_infos) {
- // Addresses in the line table should be unique and increasing.
- if (mi.deduped_) {
- continue;
+ for (const auto& compilation_unit : compilation_units) {
+ uint32_t cunit_low_pc = 0xFFFFFFFFU;
+ uint32_t cunit_high_pc = 0;
+ for (auto method_info : compilation_unit) {
+ cunit_low_pc = std::min(cunit_low_pc, method_info->low_pc_);
+ cunit_high_pc = std::max(cunit_high_pc, method_info->high_pc_);
}
- struct DebugInfoCallbacks {
- static bool NewPosition(void* ctx, uint32_t address, uint32_t line) {
- auto* context = reinterpret_cast<DebugInfoCallbacks*>(ctx);
- context->dex2line_.push_back({address, static_cast<int32_t>(line)});
- return false;
+ size_t debug_abbrev_offset = debug_abbrev->size();
+ DebugInfoEntryWriter<> info(false /* 32 bit */, debug_abbrev);
+ info.StartTag(DW_TAG_compile_unit, DW_CHILDREN_yes);
+ info.WriteStrp(DW_AT_producer, "Android dex2oat", debug_str);
+ info.WriteData1(DW_AT_language, DW_LANG_Java);
+ info.WriteAddr(DW_AT_low_pc, cunit_low_pc + text_section_offset);
+ info.WriteAddr(DW_AT_high_pc, cunit_high_pc + text_section_offset);
+ info.WriteData4(DW_AT_stmt_list, debug_line->size());
+ for (auto method_info : compilation_unit) {
+ std::string method_name = PrettyMethod(method_info->dex_method_index_,
+ *method_info->dex_file_, true);
+ if (deduped_addresses.find(method_info->low_pc_) != deduped_addresses.end()) {
+ method_name += " [DEDUPED]";
}
- DefaultSrcMap dex2line_;
- } debug_info_callbacks;
-
- const DexFile* dex = mi.dex_file_;
- if (mi.code_item_ != nullptr) {
- dex->DecodeDebugInfo(mi.code_item_,
- (mi.access_flags_ & kAccStatic) != 0,
- mi.dex_method_index_,
- DebugInfoCallbacks::NewPosition,
- nullptr,
- &debug_info_callbacks);
+ info.StartTag(DW_TAG_subprogram, DW_CHILDREN_no);
+ info.WriteStrp(DW_AT_name, method_name.data(), debug_str);
+ info.WriteAddr(DW_AT_low_pc, method_info->low_pc_ + text_section_offset);
+ info.WriteAddr(DW_AT_high_pc, method_info->high_pc_ + text_section_offset);
+ info.EndTag(); // DW_TAG_subprogram
}
+ info.EndTag(); // DW_TAG_compile_unit
+ auto* debug_info_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_info");
+ WriteDebugInfoCU(debug_abbrev_offset, info, debug_info, debug_info_patches);
- // Get and deduplicate directory and filename.
- int file_index = 0; // 0 - primary source file of the compilation.
- auto& dex_class_def = dex->GetClassDef(mi.class_def_index_);
- const char* source_file = dex->GetSourceFile(dex_class_def);
- if (source_file != nullptr) {
- std::string file_name(source_file);
- size_t file_name_slash = file_name.find_last_of('/');
- std::string class_name(dex->GetClassDescriptor(dex_class_def));
- size_t class_name_slash = class_name.find_last_of('/');
- std::string full_path(file_name);
-
- // Guess directory from package name.
- int directory_index = 0; // 0 - current directory of the compilation.
- if (file_name_slash == std::string::npos && // Just filename.
- class_name.front() == 'L' && // Type descriptor for a class.
- class_name_slash != std::string::npos) { // Has package name.
- std::string package_name = class_name.substr(1, class_name_slash - 1);
- auto it = directories_map.find(package_name);
- if (it == directories_map.end()) {
- directory_index = 1 + directories.size();
- directories_map.emplace(package_name, directory_index);
- directories.push_back(package_name);
- } else {
- directory_index = it->second;
+ // Write .debug_line section.
+ std::vector<FileEntry> files;
+ std::unordered_map<std::string, size_t> files_map;
+ std::vector<std::string> directories;
+ std::unordered_map<std::string, size_t> directories_map;
+ int code_factor_bits_ = 0;
+ int dwarf_isa = -1;
+ switch (isa) {
+ case kArm: // arm actually means thumb2.
+ case kThumb2:
+ code_factor_bits_ = 1; // 16-bit instructions
+ dwarf_isa = 1; // DW_ISA_ARM_thumb.
+ break;
+ case kArm64:
+ case kMips:
+ case kMips64:
+ code_factor_bits_ = 2; // 32-bit instructions
+ break;
+ case kNone:
+ case kX86:
+ case kX86_64:
+ break;
+ }
+ DebugLineOpCodeWriter<> opcodes(false /* 32bit */, code_factor_bits_);
+ opcodes.SetAddress(text_section_offset + cunit_low_pc);
+ if (dwarf_isa != -1) {
+ opcodes.SetISA(dwarf_isa);
+ }
+ for (const OatWriter::DebugInfo* mi : compilation_unit) {
+ struct DebugInfoCallbacks {
+ static bool NewPosition(void* ctx, uint32_t address, uint32_t line) {
+ auto* context = reinterpret_cast<DebugInfoCallbacks*>(ctx);
+ context->dex2line_.push_back({address, static_cast<int32_t>(line)});
+ return false;
}
- full_path = package_name + "/" + file_name;
+ DefaultSrcMap dex2line_;
+ } debug_info_callbacks;
+
+ const DexFile* dex = mi->dex_file_;
+ if (mi->code_item_ != nullptr) {
+ dex->DecodeDebugInfo(mi->code_item_,
+ (mi->access_flags_ & kAccStatic) != 0,
+ mi->dex_method_index_,
+ DebugInfoCallbacks::NewPosition,
+ nullptr,
+ &debug_info_callbacks);
}
- // Add file entry.
- auto it2 = files_map.find(full_path);
- if (it2 == files_map.end()) {
- file_index = 1 + files.size();
- files_map.emplace(full_path, file_index);
- files.push_back(FileEntry {
- file_name,
- directory_index,
- 0, // Modification time - NA.
- 0, // File size - NA.
- });
- } else {
- file_index = it2->second;
- }
- }
- opcodes.SetFile(file_index);
+ // Get and deduplicate directory and filename.
+ int file_index = 0; // 0 - primary source file of the compilation.
+ auto& dex_class_def = dex->GetClassDef(mi->class_def_index_);
+ const char* source_file = dex->GetSourceFile(dex_class_def);
+ if (source_file != nullptr) {
+ std::string file_name(source_file);
+ size_t file_name_slash = file_name.find_last_of('/');
+ std::string class_name(dex->GetClassDescriptor(dex_class_def));
+ size_t class_name_slash = class_name.find_last_of('/');
+ std::string full_path(file_name);
- // Generate mapping opcodes from PC to Java lines.
- const DefaultSrcMap& dex2line_map = debug_info_callbacks.dex2line_;
- uint32_t low_pc = text_section_offset + mi.low_pc_;
- if (file_index != 0 && !dex2line_map.empty()) {
- bool first = true;
- for (SrcMapElem pc2dex : mi.compiled_method_->GetSrcMappingTable()) {
- uint32_t pc = pc2dex.from_;
- int dex_pc = pc2dex.to_;
- auto dex2line = dex2line_map.Find(static_cast<uint32_t>(dex_pc));
- if (dex2line.first) {
- int line = dex2line.second;
- if (first) {
- first = false;
- if (pc > 0) {
- // Assume that any preceding code is prologue.
- int first_line = dex2line_map.front().to_;
- // Prologue is not a sensible place for a breakpoint.
- opcodes.NegateStmt();
- opcodes.AddRow(low_pc, first_line);
- opcodes.NegateStmt();
- opcodes.SetPrologueEnd();
+ // Guess directory from package name.
+ int directory_index = 0; // 0 - current directory of the compilation.
+ if (file_name_slash == std::string::npos && // Just filename.
+ class_name.front() == 'L' && // Type descriptor for a class.
+ class_name_slash != std::string::npos) { // Has package name.
+ std::string package_name = class_name.substr(1, class_name_slash - 1);
+ auto it = directories_map.find(package_name);
+ if (it == directories_map.end()) {
+ directory_index = 1 + directories.size();
+ directories_map.emplace(package_name, directory_index);
+ directories.push_back(package_name);
+ } else {
+ directory_index = it->second;
+ }
+ full_path = package_name + "/" + file_name;
+ }
+
+ // Add file entry.
+ auto it2 = files_map.find(full_path);
+ if (it2 == files_map.end()) {
+ file_index = 1 + files.size();
+ files_map.emplace(full_path, file_index);
+ files.push_back(FileEntry {
+ file_name,
+ directory_index,
+ 0, // Modification time - NA.
+ 0, // File size - NA.
+ });
+ } else {
+ file_index = it2->second;
+ }
+ }
+ opcodes.SetFile(file_index);
+
+ // Generate mapping opcodes from PC to Java lines.
+ const DefaultSrcMap& dex2line_map = debug_info_callbacks.dex2line_;
+ uint32_t low_pc = text_section_offset + mi->low_pc_;
+ if (file_index != 0 && !dex2line_map.empty()) {
+ bool first = true;
+ for (SrcMapElem pc2dex : mi->compiled_method_->GetSrcMappingTable()) {
+ uint32_t pc = pc2dex.from_;
+ int dex_pc = pc2dex.to_;
+ auto dex2line = dex2line_map.Find(static_cast<uint32_t>(dex_pc));
+ if (dex2line.first) {
+ int line = dex2line.second;
+ if (first) {
+ first = false;
+ if (pc > 0) {
+ // Assume that any preceding code is prologue.
+ int first_line = dex2line_map.front().to_;
+ // Prologue is not a sensible place for a breakpoint.
+ opcodes.NegateStmt();
+ opcodes.AddRow(low_pc, first_line);
+ opcodes.NegateStmt();
+ opcodes.SetPrologueEnd();
+ }
+ opcodes.AddRow(low_pc + pc, line);
+ } else if (line != opcodes.CurrentLine()) {
+ opcodes.AddRow(low_pc + pc, line);
}
- opcodes.AddRow(low_pc + pc, line);
- } else if (line != opcodes.CurrentLine()) {
- opcodes.AddRow(low_pc + pc, line);
}
}
+ } else {
+ // line 0 - instruction cannot be attributed to any source line.
+ opcodes.AddRow(low_pc, 0);
}
- } else {
- // line 0 - instruction cannot be attributed to any source line.
- opcodes.AddRow(low_pc, 0);
}
+ opcodes.AdvancePC(text_section_offset + cunit_high_pc);
+ opcodes.EndSequence();
+ auto* debug_line_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_line");
+ WriteDebugLineTable(directories, files, opcodes, debug_line, debug_line_patches);
}
- opcodes.AdvancePC(text_section_offset + cunit_high_pc);
- opcodes.EndSequence();
- WriteDebugLineTable(directories, files, opcodes, debug_line);
}
} // namespace dwarf
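
The .debug_line generation above follows the standard DWARF line-number-program shape: set the start address, pick a file, emit one row per PC/line pair (marking the end of the prologue first), then advance to the end of the range and close the sequence. A minimal sketch of that call order, using the DebugLineOpCodeWriter interface seen above (addresses and line numbers are made up):

// Illustrative only: mirrors the opcode sequence emitted per compilation unit.
void EmitLineTableSketch() {
  dwarf::DebugLineOpCodeWriter<> opcodes(false /* 32-bit */, 1 /* code factor */);
  opcodes.SetAddress(0x1000);  // text_section_offset + cunit_low_pc.
  opcodes.SetFile(1);          // Index into the file table; 0 is the primary file.
  opcodes.SetPrologueEnd();    // First sensible breakpoint location.
  opcodes.AddRow(0x1000, 42);  // PC 0x1000 maps to source line 42.
  opcodes.AddRow(0x1008, 43);
  opcodes.AdvancePC(0x1040);   // text_section_offset + cunit_high_pc.
  opcodes.EndSequence();
  // WriteDebugLineTable() then serializes directories, files and opcodes.
}
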
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 39a99d6..2c03b98 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -24,14 +24,18 @@
namespace art {
namespace dwarf {
+void WriteEhFrame(const CompilerDriver* compiler,
+ OatWriter* oat_writer,
+ uint32_t text_section_offset,
+ std::vector<uint8_t>* eh_frame);
+
void WriteDebugSections(const CompilerDriver* compiler,
- const OatWriter* oat_writer,
+ OatWriter* oat_writer,
uint32_t text_section_offset,
- std::vector<uint8_t>* eh_frame_data,
- std::vector<uint8_t>* debug_info_data,
- std::vector<uint8_t>* debug_abbrev_data,
- std::vector<uint8_t>* debug_str_data,
- std::vector<uint8_t>* debug_line_data);
+ std::vector<uint8_t>* debug_info,
+ std::vector<uint8_t>* debug_abbrev,
+ std::vector<uint8_t>* debug_str,
+ std::vector<uint8_t>* debug_line);
} // namespace dwarf
} // namespace art
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index e9af25f..429cd85 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -17,6 +17,7 @@
#include "elf_writer_quick.h"
#include <unordered_map>
+#include <unordered_set>
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
@@ -74,6 +75,40 @@
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
OatWriter* oat_writer);
+// Encode patch locations in .oat_patches format.
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+ typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+ typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn, Elf_Sym, Elf_Ehdr,
+ Elf_Phdr, Elf_Shdr>::EncodeOatPatches(const OatWriter::PatchLocationsMap& sections,
+ std::vector<uint8_t>* buffer) {
+ for (const auto& section : sections) {
+ const std::string& name = section.first;
+ std::vector<uintptr_t>* locations = section.second.get();
+ DCHECK(!name.empty());
+ std::sort(locations->begin(), locations->end());
+ // Reserve buffer space - guess 2 bytes per ULEB128.
+ buffer->reserve(buffer->size() + name.size() + locations->size() * 2);
+ // Write null-terminated section name.
+ const uint8_t* name_data = reinterpret_cast<const uint8_t*>(name.c_str());
+ buffer->insert(buffer->end(), name_data, name_data + name.size() + 1);
+ // Write placeholder for data length.
+ size_t length_pos = buffer->size();
+ EncodeUnsignedLeb128(buffer, UINT32_MAX);
+ // Write LEB128-encoded list of advances (deltas between consecutive addresses).
+ size_t data_pos = buffer->size();
+ uintptr_t address = 0; // relative to start of section.
+ for (uintptr_t location : *locations) {
+ DCHECK_LT(location - address, UINT32_MAX) << "Large gap between patch locations";
+ EncodeUnsignedLeb128(buffer, location - address);
+ address = location;
+ }
+ // Update length.
+ UpdateUnsignedLeb128(buffer->data() + length_pos, buffer->size() - data_pos);
+ }
+ buffer->push_back(0); // End of sections.
+}
+
template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
typename Elf_Phdr, typename Elf_Shdr>
@@ -110,21 +145,28 @@
return false;
}
+ if (compiler_driver_->GetCompilerOptions().GetIncludeCFI() &&
+ !oat_writer->GetMethodDebugInfo().empty()) {
+ ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> eh_frame(
+ ".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+ dwarf::WriteEhFrame(compiler_driver_, oat_writer,
+ builder->GetTextBuilder().GetSection()->sh_addr,
+ eh_frame.GetBuffer());
+ builder->RegisterRawSection(eh_frame);
+ }
+
if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols() &&
!oat_writer->GetMethodDebugInfo().empty()) {
WriteDebugSymbols(compiler_driver_, builder.get(), oat_writer);
}
- if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation()) {
+ if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation() ||
+ // ElfWriter::Fixup will be called regardless and it needs to be able
+ // to patch debug sections so we have to include patches for them.
+ compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> oat_patches(
- ".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, sizeof(uintptr_t), sizeof(uintptr_t));
- const std::vector<uintptr_t>& locations = oat_writer->GetAbsolutePatchLocations();
- const uint8_t* begin = reinterpret_cast<const uint8_t*>(&locations[0]);
- const uint8_t* end = begin + locations.size() * sizeof(locations[0]);
- oat_patches.GetBuffer()->assign(begin, end);
- if (debug) {
- LOG(INFO) << "Prepared .oat_patches for " << locations.size() << " patches.";
- }
+ ".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, 1, 0);
+ EncodeOatPatches(oat_writer->GetAbsolutePatchLocations(), oat_patches.GetBuffer());
builder->RegisterRawSection(oat_patches);
}
@@ -140,16 +182,23 @@
ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
OatWriter* oat_writer) {
- // Iterate over the compiled methods.
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetMethodDebugInfo();
+
+ // Find all addresses (low_pc) which contain deduped methods.
+ // The first instance of a method is not marked deduped_, but the rest are.
+ std::unordered_set<uint32_t> deduped_addresses;
+ for (auto it = method_info.begin(); it != method_info.end(); ++it) {
+ if (it->deduped_) {
+ deduped_addresses.insert(it->low_pc_);
+ }
+ }
+
ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* symtab =
builder->GetSymtabBuilder();
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
std::string name = PrettyMethod(it->dex_method_index_, *it->dex_file_, true);
- if (it->deduped_) {
- // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
- // so that it will show up in a debuggerd crash report.
- name += " [ DEDUPED ]";
+ if (deduped_addresses.find(it->low_pc_) != deduped_addresses.end()) {
+ name += " [DEDUPED]";
}
uint32_t low_pc = it->low_pc_;
@@ -167,7 +216,6 @@
}
typedef ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> Section;
- Section eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
Section debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
Section debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
Section debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
@@ -176,13 +224,11 @@
dwarf::WriteDebugSections(compiler_driver,
oat_writer,
builder->GetTextBuilder().GetSection()->sh_addr,
- eh_frame.GetBuffer(),
debug_info.GetBuffer(),
debug_abbrev.GetBuffer(),
debug_str.GetBuffer(),
debug_line.GetBuffer());
- builder->RegisterRawSection(eh_frame);
builder->RegisterRawSection(debug_info);
builder->RegisterRawSection(debug_abbrev);
builder->RegisterRawSection(debug_str);
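
For reference, the .oat_patches stream written by EncodeOatPatches is a list of records (null-terminated section name, ULEB128 byte length, then ULEB128 deltas between consecutive patch locations) terminated by an empty name. A self-contained sketch of just the delta decoding, independent of ART's Leb128 helpers:

#include <cstdint>
#include <vector>

// Decodes one unsigned LEB128 value and advances *p. Sketch only: no bounds checks.
static uint32_t DecodeUleb128(const uint8_t** p) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*p)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

// Rebuilds absolute patch locations (relative to section start) from the deltas.
static std::vector<uint32_t> DecodeDeltas(const uint8_t* data, const uint8_t* end) {
  std::vector<uint32_t> locations;
  uint32_t address = 0;  // Matches the encoder's starting point.
  while (data < end) {
    address += DecodeUleb128(&data);
    locations.push_back(address);
  }
  return locations;
}
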
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index 4990ed0..811beb4 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -19,6 +19,7 @@
#include "elf_utils.h"
#include "elf_writer.h"
+#include "oat_writer.h"
namespace art {
@@ -36,6 +37,9 @@
const CompilerDriver& driver)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void EncodeOatPatches(const OatWriter::PatchLocationsMap& sections,
+ std::vector<uint8_t>* buffer);
+
protected:
bool Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 8e2d175..3e5ad7b 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -19,6 +19,9 @@
#include "base/stringprintf.h"
#include "base/unix_file/fd_file.h"
#include "common_compiler_test.h"
+#include "elf_file.h"
+#include "elf_file_impl.h"
+#include "elf_writer_quick.h"
#include "oat.h"
#include "utils.h"
@@ -85,4 +88,73 @@
}
}
+// Run only on host since we do unaligned memory accesses.
+#ifndef HAVE_ANDROID_OS
+
+static void PatchSection(const std::vector<uintptr_t>& patch_locations,
+ std::vector<uint8_t>* section, int32_t delta) {
+ for (uintptr_t location : patch_locations) {
+ *reinterpret_cast<int32_t*>(section->data() + location) += delta;
+ }
+}
+
+TEST_F(ElfWriterTest, EncodeDecodeOatPatches) {
+ std::vector<uint8_t> oat_patches; // Encoded patches.
+
+ // Encode patch locations for a few sections.
+ OatWriter::PatchLocationsMap sections;
+ std::vector<uintptr_t> patches0 { 0, 4, 8, 15, 128, 200 }; // NOLINT
+ sections.emplace(".section0", std::unique_ptr<std::vector<uintptr_t>>(
+ new std::vector<uintptr_t> { patches0 }));
+ std::vector<uintptr_t> patches1 { 8, 127 }; // NOLINT
+ sections.emplace(".section1", std::unique_ptr<std::vector<uintptr_t>>(
+ new std::vector<uintptr_t> { patches1 }));
+ std::vector<uintptr_t> patches2 { }; // NOLINT
+ sections.emplace(".section2", std::unique_ptr<std::vector<uintptr_t>>(
+ new std::vector<uintptr_t> { patches2 }));
+ ElfWriterQuick32::EncodeOatPatches(sections, &oat_patches);
+
+ // Create buffers to be patched.
+ std::vector<uint8_t> initial_data(256);
+ for (size_t i = 0; i < initial_data.size(); i++) {
+ initial_data[i] = i;
+ }
+ std::vector<uint8_t> section0_expected = initial_data;
+ std::vector<uint8_t> section1_expected = initial_data;
+ std::vector<uint8_t> section2_expected = initial_data;
+ std::vector<uint8_t> section0_actual = initial_data;
+ std::vector<uint8_t> section1_actual = initial_data;
+ std::vector<uint8_t> section2_actual = initial_data;
+
+ // Patch manually.
+ constexpr int32_t delta = 0x11235813;
+ PatchSection(patches0, &section0_expected, delta);
+ PatchSection(patches1, &section1_expected, delta);
+ PatchSection(patches2, &section2_expected, delta);
+
+ // Decode and apply patch locations.
+ bool section0_successful = ElfFileImpl32::ApplyOatPatches(
+ oat_patches.data(), oat_patches.data() + oat_patches.size(),
+ ".section0", delta,
+ section0_actual.data(), section0_actual.data() + section0_actual.size());
+ EXPECT_TRUE(section0_successful);
+ EXPECT_EQ(section0_expected, section0_actual);
+
+ bool section1_successful = ElfFileImpl32::ApplyOatPatches(
+ oat_patches.data(), oat_patches.data() + oat_patches.size(),
+ ".section1", delta,
+ section1_actual.data(), section1_actual.data() + section1_actual.size());
+ EXPECT_TRUE(section1_successful);
+ EXPECT_EQ(section1_expected, section1_actual);
+
+ bool section2_successful = ElfFileImpl32::ApplyOatPatches(
+ oat_patches.data(), oat_patches.data() + oat_patches.size(),
+ ".section2", delta,
+ section2_actual.data(), section2_actual.data() + section2_actual.size());
+ EXPECT_TRUE(section2_successful);
+ EXPECT_EQ(section2_expected, section2_actual);
+}
+
+#endif
+
} // namespace art
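
PatchSection in the test above performs unaligned int32 reads and writes, which is exactly why the test is restricted to the host. A memcpy-based variant would be alignment-safe on any target; a sketch:

#include <cstdint>
#include <cstring>
#include <vector>

// Alignment-safe equivalent of the test's PatchSection: memcpy avoids the
// unaligned accesses that keep the original test host-only.
static void PatchSectionSafe(const std::vector<uintptr_t>& patch_locations,
                             std::vector<uint8_t>* section, int32_t delta) {
  for (uintptr_t location : patch_locations) {
    int32_t value;
    std::memcpy(&value, section->data() + location, sizeof(value));
    value += delta;
    std::memcpy(section->data() + location, &value, sizeof(value));
  }
}
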
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index cf97943..cfd525c 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -21,7 +21,7 @@
#include <vector>
#include "base/unix_file/fd_file.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "common_compiler_test.h"
#include "elf_writer.h"
#include "gc/space/image_space.h"
@@ -205,6 +205,7 @@
uint32_t oat_file_end = ART_BASE_ADDRESS + (10 * KB);
ImageHeader image_header(image_begin,
image_size_,
+ 0u, 0u,
image_bitmap_offset,
image_bitmap_size,
image_roots,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 1ede228..2420254 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -22,9 +22,10 @@
#include <numeric>
#include <vector>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
@@ -40,8 +41,8 @@
#include "globals.h"
#include "image.h"
#include "intern_table.h"
+#include "linear_alloc.h"
#include "lock_word.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
@@ -57,7 +58,6 @@
#include "handle_scope-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
-using ::art::mirror::ArtField;
using ::art::mirror::ArtMethod;
using ::art::mirror::Class;
using ::art::mirror::DexCache;
@@ -164,6 +164,9 @@
Thread::Current()->TransitionFromSuspendedToRunnable();
CreateHeader(oat_loaded_size, oat_data_offset);
+ // TODO: heap validation can't handle these fix up passes.
+ Runtime::Current()->GetHeap()->DisableObjectValidation();
+ CopyAndFixupNativeData();
CopyAndFixupObjects();
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
@@ -186,9 +189,10 @@
return EXIT_FAILURE;
}
- // Write out the image.
+ // Write out the image + fields.
+ const auto write_count = image_header->GetImageSize() + image_header->GetArtFieldsSize();
CHECK_EQ(image_end_, image_header->GetImageSize());
- if (!image_file->WriteFully(image_->Begin(), image_end_)) {
+ if (!image_file->WriteFully(image_->Begin(), write_count)) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
image_file->Erase();
return false;
@@ -204,6 +208,8 @@
return false;
}
+ CHECK_EQ(image_header->GetImageBitmapOffset() + image_header->GetImageBitmapSize(),
+ static_cast<size_t>(image_file->GetLength()));
if (image_file->FlushCloseOrErase() != 0) {
PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
return false;
@@ -219,6 +225,8 @@
mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
DCHECK_ALIGNED(obj, kObjectAlignment);
+ static size_t max_offset = 0;
+ max_offset = std::max(max_offset, offset);
image_bitmap_->Set(obj); // Mark the obj as mutated, since we will end up changing it.
{
// Remember the object-inside-of-the-image's hash code so we can restore it after the copy.
@@ -302,13 +310,26 @@
DexCache* dex_cache = class_linker->GetDexCache(idx);
const DexFile* dex_file = dex_cache->GetDexFile();
dex_cache_array_starts_.Put(dex_file, size);
- DexCacheArraysLayout layout(dex_file);
+ DexCacheArraysLayout layout(target_ptr_size_, dex_file);
DCHECK(layout.Valid());
- dex_cache_array_indexes_.Put(dex_cache->GetResolvedTypes(), size + layout.TypesOffset());
- dex_cache_array_indexes_.Put(dex_cache->GetResolvedMethods(), size + layout.MethodsOffset());
- dex_cache_array_indexes_.Put(dex_cache->GetResolvedFields(), size + layout.FieldsOffset());
- dex_cache_array_indexes_.Put(dex_cache->GetStrings(), size + layout.StringsOffset());
+ auto types_size = layout.TypesSize(dex_file->NumTypeIds());
+ auto methods_size = layout.MethodsSize(dex_file->NumMethodIds());
+ auto fields_size = layout.FieldsSize(dex_file->NumFieldIds());
+ auto strings_size = layout.StringsSize(dex_file->NumStringIds());
+ dex_cache_array_indexes_.Put(
+ dex_cache->GetResolvedTypes(),
+ DexCacheArrayLocation {size + layout.TypesOffset(), types_size});
+ dex_cache_array_indexes_.Put(
+ dex_cache->GetResolvedMethods(),
+ DexCacheArrayLocation {size + layout.MethodsOffset(), methods_size});
+ dex_cache_array_indexes_.Put(
+ dex_cache->GetResolvedFields(),
+ DexCacheArrayLocation {size + layout.FieldsOffset(), fields_size});
+ dex_cache_array_indexes_.Put(
+ dex_cache->GetStrings(),
+ DexCacheArrayLocation {size + layout.StringsOffset(), strings_size});
size += layout.Size();
+ CHECK_EQ(layout.Size(), types_size + methods_size + fields_size + strings_size);
}
// Set the slot size early to avoid DCHECK() failures in IsImageBinSlotAssigned()
// when AssignImageBinSlot() assigns their indexes out of order.
@@ -405,12 +426,20 @@
}
} else if (object->GetClass<kVerifyNone>()->IsStringClass()) {
bin = kBinString; // Strings are almost always immutable (except for object header).
- } else if (object->IsObjectArray()) {
- auto it = dex_cache_array_indexes_.find(object);
- if (it != dex_cache_array_indexes_.end()) {
- bin = kBinDexCacheArray;
- current_offset = it->second; // Use prepared offset defined by the DexCacheLayout.
- } // else bin = kBinRegular
+ } else if (object->IsArrayInstance()) {
+ mirror::Class* klass = object->GetClass<kVerifyNone>();
+ auto* component_type = klass->GetComponentType();
+ if (!component_type->IsPrimitive() || component_type->IsPrimitiveInt() ||
+ component_type->IsPrimitiveLong()) {
+ auto it = dex_cache_array_indexes_.find(object);
+ if (it != dex_cache_array_indexes_.end()) {
+ bin = kBinDexCacheArray;
+ // Use prepared offset defined by the DexCacheLayout.
+ current_offset = it->second.offset_;
+ // Override in case of cross compilation.
+ object_size = it->second.length_;
+ } // else bin = kBinRegular
+ }
} // else bin = kBinRegular
}
@@ -465,7 +494,10 @@
}
bool ImageWriter::AllocMemory() {
- size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
+ auto* runtime = Runtime::Current();
+ const size_t heap_size = runtime->GetHeap()->GetTotalMemory();
+ // Add linear alloc usage since we need to have room for the ArtFields.
+ const size_t length = RoundUp(heap_size + runtime->GetLinearAlloc()->GetUsedMemory(), kPageSize);
std::string error_msg;
image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length, PROT_READ | PROT_WRITE,
false, false, &error_msg));
@@ -476,7 +508,7 @@
// Create the image bitmap.
image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap", image_->Begin(),
- length));
+ RoundUp(length, kPageSize)));
if (image_bitmap_.get() == nullptr) {
LOG(ERROR) << "Failed to allocate memory for image bitmap";
return false;
@@ -698,9 +730,9 @@
}
}
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- ArtField* field = dex_cache->GetResolvedField(i);
- if (field != NULL && !IsImageClass(field->GetDeclaringClass())) {
- dex_cache->SetResolvedField(i, NULL);
+ ArtField* field = dex_cache->GetResolvedField(i, sizeof(void*));
+ if (field != nullptr && !IsImageClass(field->GetDeclaringClass())) {
+ dex_cache->SetResolvedField(i, nullptr, sizeof(void*));
}
}
// Clean the dex field. It might have been populated during the initialization phase, but
@@ -786,7 +818,7 @@
// caches. We check that the number of dex caches does not change.
size_t dex_cache_count;
{
- ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
+ ReaderMutexLock mu(self, *class_linker->DexLock());
dex_cache_count = class_linker->GetDexCacheCount();
}
Handle<ObjectArray<Object>> dex_caches(
@@ -794,7 +826,7 @@
dex_cache_count)));
CHECK(dex_caches.Get() != nullptr) << "Failed to allocate a dex cache array.";
{
- ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
+ ReaderMutexLock mu(self, *class_linker->DexLock());
CHECK_EQ(dex_cache_count, class_linker->GetDexCacheCount())
<< "The number of dex caches changed.";
for (size_t i = 0; i < dex_cache_count; ++i) {
@@ -861,9 +893,9 @@
WalkInstanceFields(h_obj.Get(), klass.Get());
// Walk static fields of a Class.
if (h_obj->IsClass()) {
- size_t num_static_fields = klass->NumReferenceStaticFields();
+ size_t num_reference_static_fields = klass->NumReferenceStaticFields();
MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset();
- for (size_t i = 0; i < num_static_fields; ++i) {
+ for (size_t i = 0; i < num_reference_static_fields; ++i) {
mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
@@ -871,6 +903,21 @@
field_offset = MemberOffset(field_offset.Uint32Value() +
sizeof(mirror::HeapReference<mirror::Object>));
}
+
+ // Visit and assign offsets for fields.
+ ArtField* fields[2] = { h_obj->AsClass()->GetSFields(), h_obj->AsClass()->GetIFields() };
+ size_t num_fields[2] = { h_obj->AsClass()->NumStaticFields(),
+ h_obj->AsClass()->NumInstanceFields() };
+ for (size_t i = 0; i < 2; ++i) {
+ for (size_t j = 0; j < num_fields[i]; ++j) {
+ auto* field = fields[i] + j;
+ auto it = art_field_reloc_.find(field);
+ CHECK(it == art_field_reloc_.end()) << "Field at index " << i << ":" << j
+ << " already assigned " << PrettyField(field);
+ art_field_reloc_.emplace(field, bin_slot_sizes_[kBinArtField]);
+ bin_slot_sizes_[kBinArtField] += sizeof(ArtField);
+ }
+ }
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
@@ -921,7 +968,6 @@
// know where image_roots is going to end up
image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- // TODO: Image spaces only?
DCHECK_LT(image_end_, image_->Size());
image_objects_offset_begin_ = image_end_;
// Prepare bin slots for dex cache arrays.
@@ -935,34 +981,47 @@
previous_sizes += bin_slot_sizes_[i];
}
DCHECK_EQ(previous_sizes, GetBinSizeSum());
+ DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
+
// Transform each object's bin slot into an offset which will be used to do the final copy.
heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
DCHECK(saved_hashes_map_.empty()); // All binslot hashes should've been put into vector by now.
- DCHECK_GT(image_end_, GetBinSizeSum());
+ DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots.Get()));
- // Note that image_end_ is left at end of used space
+ // Note that image_end_ is left at end of used mirror space
}
void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
const uint8_t* oat_file_begin = GetOatFileBegin();
const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
-
oat_data_begin_ = oat_file_begin + oat_data_offset;
const uint8_t* oat_data_end = oat_data_begin_ + oat_file_->Size();
-
+ // Write out sections.
+ size_t cur_pos = image_end_;
+ // Add fields.
+ auto fields_offset = cur_pos;
+ CHECK_EQ(image_objects_offset_begin_ + GetBinSizeSum(kBinArtField), fields_offset);
+ auto fields_size = bin_slot_sizes_[kBinArtField];
+ cur_pos += fields_size;
// Return to write header at start of image with future location of image_roots. At this point,
- // image_end_ is the size of the image (excluding bitmaps).
+ // image_end_ is the size of the image (excluding bitmaps, ArtFields).
+ /*
const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment;
const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) /
heap_bytes_per_bitmap_byte;
+ */
+ const size_t bitmap_bytes = image_bitmap_->Size();
+ auto bitmap_offset = RoundUp(cur_pos, kPageSize);
+ auto bitmap_size = RoundUp(bitmap_bytes, kPageSize);
+ cur_pos += bitmap_size;
new (image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_begin_),
static_cast<uint32_t>(image_end_),
- RoundUp(image_end_, kPageSize),
- RoundUp(bitmap_bytes, kPageSize),
+ fields_offset, fields_size,
+ bitmap_offset, bitmap_size,
image_roots_address_,
oat_file_->GetOatHeader().GetChecksum(),
PointerToLowMemUInt32(oat_file_begin),
@@ -972,11 +1031,21 @@
compile_pic_);
}
+void ImageWriter::CopyAndFixupNativeData() {
+ // Copy ArtFields to their locations and update the array for convenience.
+ auto fields_offset = image_objects_offset_begin_ + GetBinSizeSum(kBinArtField);
+ for (auto& pair : art_field_reloc_) {
+ pair.second += fields_offset;
+ auto* dest = image_->Begin() + pair.second;
+ DCHECK_GE(dest, image_->Begin() + image_end_);
+ memcpy(dest, pair.first, sizeof(ArtField));
+ reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
+ down_cast<Class*>(GetImageAddress(pair.first->GetDeclaringClass())));
+ }
+}
+
void ImageWriter::CopyAndFixupObjects() {
gc::Heap* heap = Runtime::Current()->GetHeap();
- // TODO: heap validation can't handle this fix up pass
- heap->DisableObjectValidation();
- // TODO: Image spaces only?
heap->VisitObjects(CopyAndFixupObjectsCallback, this);
// Fix up the objects that previously had hash codes.
for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
@@ -990,26 +1059,88 @@
void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
- ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
+ reinterpret_cast<ImageWriter*>(arg)->CopyAndFixupObject(obj);
+}
+
+bool ImageWriter::CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::Object* obj,
+ mirror::Class* klass) {
+ if (!klass->IsArrayClass()) {
+ return false;
+ }
+ auto* component_type = klass->GetComponentType();
+ bool is_int_arr = component_type->IsPrimitiveInt();
+ bool is_long_arr = component_type->IsPrimitiveLong();
+ if (!is_int_arr && !is_long_arr) {
+ return false;
+ }
+ auto it = dex_cache_array_indexes_.find(obj); // Is this a dex cache array?
+ if (it == dex_cache_array_indexes_.end()) {
+ return false;
+ }
+ mirror::Array* arr = obj->AsArray();
+ CHECK_EQ(reinterpret_cast<Object*>(
+ image_->Begin() + it->second.offset_ + image_objects_offset_begin_), dst);
+ dex_cache_array_indexes_.erase(it);
+ // Fixup int pointers for the field array.
+ CHECK(!arr->IsObjectArray());
+ const size_t num_elements = arr->GetLength();
+ if (target_ptr_size_ == 4) {
+ // Will get fixed up by fixup object.
+ dst->SetClass(down_cast<mirror::Class*>(
+ GetImageAddress(mirror::IntArray::GetArrayClass())));
+ } else {
+ DCHECK_EQ(target_ptr_size_, 8u);
+ dst->SetClass(down_cast<mirror::Class*>(
+ GetImageAddress(mirror::LongArray::GetArrayClass())));
+ }
+ mirror::Array* dest_array = down_cast<mirror::Array*>(dst);
+ dest_array->SetLength(num_elements);
+ for (size_t i = 0, count = num_elements; i < count; ++i) {
+ ArtField* field = reinterpret_cast<ArtField*>(is_int_arr ?
+ arr->AsIntArray()->GetWithoutChecks(i) : arr->AsLongArray()->GetWithoutChecks(i));
+ uint8_t* fixup_location = nullptr;
+ if (field != nullptr) {
+ auto it2 = art_field_reloc_.find(field);
+ CHECK(it2 != art_field_reloc_.end()) << "No relocation for field " << PrettyField(field);
+ fixup_location = image_begin_ + it2->second;
+ }
+ if (target_ptr_size_ == 4) {
+ down_cast<mirror::IntArray*>(dest_array)->SetWithoutChecks<kVerifyNone>(
+ i, static_cast<uint32_t>(reinterpret_cast<uint64_t>(fixup_location)));
+ } else {
+ down_cast<mirror::LongArray*>(dest_array)->SetWithoutChecks<kVerifyNone>(
+ i, reinterpret_cast<uint64_t>(fixup_location));
+ }
+ }
+ dst->SetLockWord(LockWord::Default(), false);
+ return true;
+}
+
+void ImageWriter::CopyAndFixupObject(Object* obj) {
// see GetLocalAddress for similar computation
- size_t offset = image_writer->GetImageOffset(obj);
- uint8_t* dst = image_writer->image_->Begin() + offset;
+ size_t offset = GetImageOffset(obj);
+ auto* dst = reinterpret_cast<Object*>(image_->Begin() + offset);
const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
size_t n;
- if (obj->IsArtMethod()) {
+ mirror::Class* klass = obj->GetClass();
+
+ if (CopyAndFixupIfDexCacheFieldArray(dst, obj, klass)) {
+ return;
+ }
+ if (klass->IsArtMethodClass()) {
// Size without pointer fields since we don't want to overrun the buffer if target art method
// is 32 bits but source is 64 bits.
- n = mirror::ArtMethod::SizeWithoutPointerFields(image_writer->target_ptr_size_);
+ n = mirror::ArtMethod::SizeWithoutPointerFields(target_ptr_size_);
} else {
n = obj->SizeOf();
}
- DCHECK_LT(offset + n, image_writer->image_->Size());
+ DCHECK_LE(offset + n, image_->Size());
memcpy(dst, src, n);
- Object* copy = reinterpret_cast<Object*>(dst);
+
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
// word.
- copy->SetLockWord(LockWord::Default(), false);
- image_writer->FixupObject(obj, copy);
+ dst->SetLockWord(LockWord::Default(), false);
+ FixupObject(obj, dst);
}
// Rewrite all the references in the copied object to point to their image address equivalent
@@ -1045,15 +1176,10 @@
FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
}
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj->IsClass());
FixupVisitor::operator()(obj, offset, /*is_static*/false);
-
- // TODO: Remove dead code
- if (offset.Uint32Value() < mirror::Class::EmbeddedVTableOffset().Uint32Value()) {
- return;
- }
}
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
@@ -1064,6 +1190,31 @@
}
};
+void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
+ // Copy and fix up ArtFields in the class.
+ ArtField* fields[2] = { orig->AsClass()->GetSFields(), orig->AsClass()->GetIFields() };
+ size_t num_fields[2] = { orig->AsClass()->NumStaticFields(),
+ orig->AsClass()->NumInstanceFields() };
+ // Update the arrays.
+ for (size_t i = 0; i < 2; ++i) {
+ if (num_fields[i] == 0) {
+ CHECK(fields[i] == nullptr);
+ continue;
+ }
+ auto it = art_field_reloc_.find(fields[i]);
+ CHECK(it != art_field_reloc_.end()) << PrettyClass(orig->AsClass()) << " : "
+ << PrettyField(fields[i]);
+ auto* image_fields = reinterpret_cast<ArtField*>(image_begin_ + it->second);
+ if (i == 0) {
+ down_cast<Class*>(copy)->SetSFieldsUnchecked(image_fields);
+ } else {
+ down_cast<Class*>(copy)->SetIFieldsUnchecked(image_fields);
+ }
+ }
+ FixupClassVisitor visitor(this, copy);
+ static_cast<mirror::Object*>(orig)->VisitReferences<true /*visit class*/>(visitor, visitor);
+}
+
void ImageWriter::FixupObject(Object* orig, Object* copy) {
DCHECK(orig != nullptr);
DCHECK(copy != nullptr);
@@ -1075,9 +1226,8 @@
DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
}
}
- if (orig->IsClass() && orig->AsClass()->ShouldHaveEmbeddedImtAndVTable()) {
- FixupClassVisitor visitor(this, copy);
- orig->VisitReferences<true /*visit class*/>(visitor, visitor);
+ if (orig->IsClass()) {
+ FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy));
} else {
FixupVisitor visitor(this, copy);
orig->VisitReferences<true /*visit class*/>(visitor, visitor);
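
CopyAndFixupNativeData above is a two-step relocation: memcpy the native struct into the image, then rewrite its internal pointers through the relocation map. A generic sketch of that shape (the Node type and names are illustrative, not ART types):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <unordered_map>

struct Node {       // Stand-in for ArtField: plain data plus one pointer to fix up.
  Node* declaring;
  int data;
};

// Copies 'src' to image_base + offset and redirects its pointer through the
// relocation map, as CopyAndFixupNativeData does for the declaring class.
static void RelocateNode(uint8_t* image_base, size_t offset, const Node* src,
                         const std::unordered_map<const Node*, size_t>& reloc) {
  Node* dest = reinterpret_cast<Node*>(image_base + offset);
  std::memcpy(dest, src, sizeof(Node));
  auto it = reloc.find(src->declaring);
  if (it != reloc.end()) {
    dest->declaring = reinterpret_cast<Node*>(image_base + it->second);
  }
}
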
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 71044f7..a2d99ee 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -90,7 +90,7 @@
}
uint8_t* GetOatFileBegin() const {
- return image_begin_ + RoundUp(image_end_, kPageSize);
+ return image_begin_ + RoundUp(image_end_ + bin_slot_sizes_[kBinArtField], kPageSize);
}
bool Write(const std::string& image_filename,
@@ -127,12 +127,16 @@
kBinArtMethodNative, // Art method that is actually native
kBinArtMethodNotInitialized, // Art method with a declaring class that wasn't initialized
// Add more bins here if we add more segregation code.
+ // Non-mirror fields must be below. ArtFields should always be clean.
+ kBinArtField,
kBinSize,
+ // Number of bins which are for mirror objects.
+ kBinMirrorCount = kBinArtField,
};
friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
- static constexpr size_t kBinBits = MinimumBitsToStore(kBinSize - 1);
+ static constexpr size_t kBinBits = MinimumBitsToStore(kBinMirrorCount - 1);
// uint32 = typeof(lockword_)
static constexpr size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits;
// 111000.....0
@@ -251,11 +255,18 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Creates the contiguous image in memory and adjusts pointers.
+ void CopyAndFixupNativeData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CopyAndFixupIfDexCacheFieldArray(mirror::Object* dst, mirror::Object* obj,
+ mirror::Class* klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupMethod(mirror::ArtMethod* orig, mirror::ArtMethod* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FixupClass(mirror::Class* orig, mirror::Class* copy)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -295,8 +306,13 @@
// Memory mapped for generating the image.
std::unique_ptr<MemMap> image_;
- // Indexes for dex cache arrays (objects are inside of the image so that they don't move).
- SafeMap<mirror::Object*, size_t> dex_cache_array_indexes_;
+ // Indexes and lengths for dex cache arrays (objects are inside the image so that
+ // they don't move).
+ struct DexCacheArrayLocation {
+ size_t offset_;
+ size_t length_;
+ };
+ SafeMap<mirror::Object*, DexCacheArrayLocation> dex_cache_array_indexes_;
// The start offsets of the dex cache arrays.
SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
@@ -331,6 +347,11 @@
size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins.
size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
+ // ArtField relocation map. ArtFields are allocated as an array of structs, but we want
+ // one entry per ArtField for convenience.
+ // ArtFields are placed right after the end of the image objects (aka sum of bin_slot_sizes_).
+ std::unordered_map<ArtField*, uintptr_t> art_field_reloc_;
+
void* string_data_array_; // The backing for the interned strings.
friend class FixupVisitor;
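
The bin-slot scheme above packs the bin index into the top kBinBits of the 32-bit lock word and the offset into the remaining low bits; narrowing kBinBits to cover only the mirror bins frees bits for larger offsets. A small sketch of the packing under that scheme (the 3-bit width here is an assumption, not the actual kBinBits value):

#include <cstdint>

constexpr uint32_t kSketchBinBits = 3;  // Assumed width; ART derives it from kBinMirrorCount.
constexpr uint32_t kSketchBinShift = 32 - kSketchBinBits;
constexpr uint32_t kSketchOffsetMask = (1u << kSketchBinShift) - 1;

constexpr uint32_t PackBinSlot(uint32_t bin, uint32_t offset) {
  return (bin << kSketchBinShift) | (offset & kSketchOffsetMask);
}

constexpr uint32_t BinOf(uint32_t slot) { return slot >> kSketchBinShift; }
constexpr uint32_t OffsetOf(uint32_t slot) { return slot & kSketchOffsetMask; }

static_assert(BinOf(PackBinSlot(5, 0x1234)) == 5, "bin round-trips");
static_assert(OffsetOf(PackBinSlot(5, 0x1234)) == 0x1234, "offset round-trips");
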
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index df5d5cc..be2c8c6 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -71,13 +71,13 @@
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
false,
- false,
CompilerOptions::kDefaultTopKProfileThreshold,
false, // TODO: Think about debuggability of JIT-compiled code.
false,
false,
false,
false,
+ false,
false, // pic
nullptr,
pass_manager_options,
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 47e6f10..eaf7872 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -81,132 +81,109 @@
// 0x0000002a: .cfi_def_cfa_offset: 128
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xFF, 0x03, 0x03, 0xD1, 0xFE, 0x5F, 0x00, 0xF9, 0xFD, 0x5B, 0x00, 0xF9,
- 0xFC, 0x57, 0x00, 0xF9, 0xFB, 0x53, 0x00, 0xF9, 0xFA, 0x4F, 0x00, 0xF9,
- 0xF9, 0x4B, 0x00, 0xF9, 0xF8, 0x47, 0x00, 0xF9, 0xF7, 0x43, 0x00, 0xF9,
- 0xF6, 0x3F, 0x00, 0xF9, 0xF5, 0x3B, 0x00, 0xF9, 0xF4, 0x37, 0x00, 0xF9,
- 0xEF, 0x33, 0x00, 0xFD, 0xEE, 0x2F, 0x00, 0xFD, 0xED, 0x2B, 0x00, 0xFD,
- 0xEC, 0x27, 0x00, 0xFD, 0xEB, 0x23, 0x00, 0xFD, 0xEA, 0x1F, 0x00, 0xFD,
- 0xE9, 0x1B, 0x00, 0xFD, 0xE8, 0x17, 0x00, 0xFD, 0xF5, 0x03, 0x12, 0xAA,
+ 0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9,
+ 0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9,
+ 0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D,
+ 0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xF5, 0x03, 0x12, 0xAA,
0xE0, 0x03, 0x00, 0xB9, 0xE1, 0xC7, 0x00, 0xB9, 0xE0, 0xCB, 0x00, 0xBD,
0xE2, 0xCF, 0x00, 0xB9, 0xE3, 0xD3, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1,
- 0xFF, 0x83, 0x00, 0x91, 0xF2, 0x03, 0x15, 0xAA, 0xFE, 0x5F, 0x40, 0xF9,
- 0xFD, 0x5B, 0x40, 0xF9, 0xFC, 0x57, 0x40, 0xF9, 0xFB, 0x53, 0x40, 0xF9,
- 0xFA, 0x4F, 0x40, 0xF9, 0xF9, 0x4B, 0x40, 0xF9, 0xF8, 0x47, 0x40, 0xF9,
- 0xF7, 0x43, 0x40, 0xF9, 0xF6, 0x3F, 0x40, 0xF9, 0xF5, 0x3B, 0x40, 0xF9,
- 0xF4, 0x37, 0x40, 0xF9, 0xEF, 0x33, 0x40, 0xFD, 0xEE, 0x2F, 0x40, 0xFD,
- 0xED, 0x2B, 0x40, 0xFD, 0xEC, 0x27, 0x40, 0xFD, 0xEB, 0x23, 0x40, 0xFD,
- 0xEA, 0x1F, 0x40, 0xFD, 0xE9, 0x1B, 0x40, 0xFD, 0xE8, 0x17, 0x40, 0xFD,
+ 0xFF, 0x83, 0x00, 0x91, 0xF2, 0x03, 0x15, 0xAA, 0xF3, 0x53, 0x46, 0xA9,
+ 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9, 0xF9, 0x6B, 0x49, 0xA9,
+ 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9, 0xE8, 0x27, 0x42, 0x6D,
+ 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D, 0xEE, 0x3F, 0x45, 0x6D,
0xFF, 0x03, 0x03, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
static constexpr uint8_t expected_cfi_kArm64[] = {
- 0x44, 0x0E, 0xC0, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x9D, 0x04, 0x44, 0x9C,
- 0x06, 0x44, 0x9B, 0x08, 0x44, 0x9A, 0x0A, 0x44, 0x99, 0x0C, 0x44, 0x98,
- 0x0E, 0x44, 0x97, 0x10, 0x44, 0x96, 0x12, 0x44, 0x95, 0x14, 0x44, 0x94,
- 0x16, 0x44, 0x05, 0x4F, 0x18, 0x44, 0x05, 0x4E, 0x1A, 0x44, 0x05, 0x4D,
- 0x1C, 0x44, 0x05, 0x4C, 0x1E, 0x44, 0x05, 0x4B, 0x20, 0x44, 0x05, 0x4A,
- 0x22, 0x44, 0x05, 0x49, 0x24, 0x44, 0x05, 0x48, 0x26, 0x5C, 0x0E, 0xE0,
- 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x48, 0xDE, 0x44, 0xDD, 0x44, 0xDC,
- 0x44, 0xDB, 0x44, 0xDA, 0x44, 0xD9, 0x44, 0xD8, 0x44, 0xD7, 0x44, 0xD6,
- 0x44, 0xD5, 0x44, 0xD4, 0x44, 0x06, 0x4F, 0x44, 0x06, 0x4E, 0x44, 0x06,
- 0x4D, 0x44, 0x06, 0x4C, 0x44, 0x06, 0x4B, 0x44, 0x06, 0x4A, 0x44, 0x06,
- 0x49, 0x44, 0x06, 0x48, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
+ 0x44, 0x0E, 0xC0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14,
+ 0x96, 0x12, 0x44, 0x97, 0x10, 0x98, 0x0E, 0x44, 0x99, 0x0C, 0x9A, 0x0A,
+ 0x44, 0x9B, 0x08, 0x9C, 0x06, 0x44, 0x9D, 0x04, 0x9E, 0x02, 0x44, 0x05,
+ 0x48, 0x28, 0x05, 0x49, 0x26, 0x44, 0x05, 0x4A, 0x24, 0x05, 0x4B, 0x22,
+ 0x44, 0x05, 0x4C, 0x20, 0x05, 0x4D, 0x1E, 0x44, 0x05, 0x4E, 0x1C, 0x05,
+ 0x4F, 0x1A, 0x5C, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x44, 0x0A,
+ 0x44, 0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA,
+ 0x44, 0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44,
+ 0x06, 0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E,
+ 0x06, 0x4F, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
};
// 0x00000000: sub sp, sp, #0xc0 (192)
// 0x00000004: .cfi_def_cfa_offset: 192
-// 0x00000004: str lr, [sp, #184]
-// 0x00000008: .cfi_offset: r30 at cfa-8
-// 0x00000008: str x29, [sp, #176]
-// 0x0000000c: .cfi_offset: r29 at cfa-16
-// 0x0000000c: str x28, [sp, #168]
-// 0x00000010: .cfi_offset: r28 at cfa-24
-// 0x00000010: str x27, [sp, #160]
-// 0x00000014: .cfi_offset: r27 at cfa-32
-// 0x00000014: str x26, [sp, #152]
-// 0x00000018: .cfi_offset: r26 at cfa-40
-// 0x00000018: str x25, [sp, #144]
-// 0x0000001c: .cfi_offset: r25 at cfa-48
-// 0x0000001c: str x24, [sp, #136]
-// 0x00000020: .cfi_offset: r24 at cfa-56
-// 0x00000020: str x23, [sp, #128]
-// 0x00000024: .cfi_offset: r23 at cfa-64
-// 0x00000024: str x22, [sp, #120]
-// 0x00000028: .cfi_offset: r22 at cfa-72
-// 0x00000028: str x21, [sp, #112]
-// 0x0000002c: .cfi_offset: r21 at cfa-80
-// 0x0000002c: str x20, [sp, #104]
-// 0x00000030: .cfi_offset: r20 at cfa-88
-// 0x00000030: str d15, [sp, #96]
-// 0x00000034: .cfi_offset_extended: r79 at cfa-96
-// 0x00000034: str d14, [sp, #88]
-// 0x00000038: .cfi_offset_extended: r78 at cfa-104
-// 0x00000038: str d13, [sp, #80]
-// 0x0000003c: .cfi_offset_extended: r77 at cfa-112
-// 0x0000003c: str d12, [sp, #72]
-// 0x00000040: .cfi_offset_extended: r76 at cfa-120
-// 0x00000040: str d11, [sp, #64]
-// 0x00000044: .cfi_offset_extended: r75 at cfa-128
-// 0x00000044: str d10, [sp, #56]
-// 0x00000048: .cfi_offset_extended: r74 at cfa-136
-// 0x00000048: str d9, [sp, #48]
-// 0x0000004c: .cfi_offset_extended: r73 at cfa-144
-// 0x0000004c: str d8, [sp, #40]
-// 0x00000050: .cfi_offset_extended: r72 at cfa-152
-// 0x00000050: mov x21, tr
-// 0x00000054: str w0, [sp]
-// 0x00000058: str w1, [sp, #196]
-// 0x0000005c: str s0, [sp, #200]
-// 0x00000060: str w2, [sp, #204]
-// 0x00000064: str w3, [sp, #208]
-// 0x00000068: sub sp, sp, #0x20 (32)
-// 0x0000006c: .cfi_def_cfa_offset: 224
-// 0x0000006c: add sp, sp, #0x20 (32)
-// 0x00000070: .cfi_def_cfa_offset: 192
-// 0x00000070: .cfi_remember_state
-// 0x00000070: mov tr, x21
-// 0x00000074: ldr lr, [sp, #184]
-// 0x00000078: .cfi_restore: r30
-// 0x00000078: ldr x29, [sp, #176]
-// 0x0000007c: .cfi_restore: r29
-// 0x0000007c: ldr x28, [sp, #168]
-// 0x00000080: .cfi_restore: r28
-// 0x00000080: ldr x27, [sp, #160]
-// 0x00000084: .cfi_restore: r27
-// 0x00000084: ldr x26, [sp, #152]
-// 0x00000088: .cfi_restore: r26
-// 0x00000088: ldr x25, [sp, #144]
-// 0x0000008c: .cfi_restore: r25
-// 0x0000008c: ldr x24, [sp, #136]
-// 0x00000090: .cfi_restore: r24
-// 0x00000090: ldr x23, [sp, #128]
-// 0x00000094: .cfi_restore: r23
-// 0x00000094: ldr x22, [sp, #120]
-// 0x00000098: .cfi_restore: r22
-// 0x00000098: ldr x21, [sp, #112]
-// 0x0000009c: .cfi_restore: r21
-// 0x0000009c: ldr x20, [sp, #104]
-// 0x000000a0: .cfi_restore: r20
-// 0x000000a0: ldr d15, [sp, #96]
-// 0x000000a4: .cfi_restore_extended: r79
-// 0x000000a4: ldr d14, [sp, #88]
-// 0x000000a8: .cfi_restore_extended: r78
-// 0x000000a8: ldr d13, [sp, #80]
-// 0x000000ac: .cfi_restore_extended: r77
-// 0x000000ac: ldr d12, [sp, #72]
-// 0x000000b0: .cfi_restore_extended: r76
-// 0x000000b0: ldr d11, [sp, #64]
-// 0x000000b4: .cfi_restore_extended: r75
-// 0x000000b4: ldr d10, [sp, #56]
-// 0x000000b8: .cfi_restore_extended: r74
-// 0x000000b8: ldr d9, [sp, #48]
-// 0x000000bc: .cfi_restore_extended: r73
-// 0x000000bc: ldr d8, [sp, #40]
-// 0x000000c0: .cfi_restore_extended: r72
-// 0x000000c0: add sp, sp, #0xc0 (192)
-// 0x000000c4: .cfi_def_cfa_offset: 0
-// 0x000000c4: ret
-// 0x000000c8: .cfi_restore_state
-// 0x000000c8: .cfi_def_cfa_offset: 192
+// 0x00000004: stp x19, x20, [sp, #96]
+// 0x00000008: .cfi_offset: r19 at cfa-96
+// 0x00000008: .cfi_offset: r20 at cfa-88
+// 0x00000008: stp x21, x22, [sp, #112]
+// 0x0000000c: .cfi_offset: r21 at cfa-80
+// 0x0000000c: .cfi_offset: r22 at cfa-72
+// 0x0000000c: stp x23, x24, [sp, #128]
+// 0x00000010: .cfi_offset: r23 at cfa-64
+// 0x00000010: .cfi_offset: r24 at cfa-56
+// 0x00000010: stp x25, x26, [sp, #144]
+// 0x00000014: .cfi_offset: r25 at cfa-48
+// 0x00000014: .cfi_offset: r26 at cfa-40
+// 0x00000014: stp x27, x28, [sp, #160]
+// 0x00000018: .cfi_offset: r27 at cfa-32
+// 0x00000018: .cfi_offset: r28 at cfa-24
+// 0x00000018: stp x29, lr, [sp, #176]
+// 0x0000001c: .cfi_offset: r29 at cfa-16
+// 0x0000001c: .cfi_offset: r30 at cfa-8
+// 0x0000001c: stp d8, d9, [sp, #32]
+// 0x00000020: .cfi_offset_extended: r72 at cfa-160
+// 0x00000020: .cfi_offset_extended: r73 at cfa-152
+// 0x00000020: stp d10, d11, [sp, #48]
+// 0x00000024: .cfi_offset_extended: r74 at cfa-144
+// 0x00000024: .cfi_offset_extended: r75 at cfa-136
+// 0x00000024: stp d12, d13, [sp, #64]
+// 0x00000028: .cfi_offset_extended: r76 at cfa-128
+// 0x00000028: .cfi_offset_extended: r77 at cfa-120
+// 0x00000028: stp d14, d15, [sp, #80]
+// 0x0000002c: .cfi_offset_extended: r78 at cfa-112
+// 0x0000002c: .cfi_offset_extended: r79 at cfa-104
+// 0x0000002c: mov x21, tr
+// 0x00000030: str w0, [sp]
+// 0x00000034: str w1, [sp, #196]
+// 0x00000038: str s0, [sp, #200]
+// 0x0000003c: str w2, [sp, #204]
+// 0x00000040: str w3, [sp, #208]
+// 0x00000044: sub sp, sp, #0x20 (32)
+// 0x00000048: .cfi_def_cfa_offset: 224
+// 0x00000048: add sp, sp, #0x20 (32)
+// 0x0000004c: .cfi_def_cfa_offset: 192
+// 0x0000004c: mov tr, x21
+// 0x00000050: .cfi_remember_state
+// 0x00000050: ldp x19, x20, [sp, #96]
+// 0x00000054: .cfi_restore: r19
+// 0x00000054: .cfi_restore: r20
+// 0x00000054: ldp x21, x22, [sp, #112]
+// 0x00000058: .cfi_restore: r21
+// 0x00000058: .cfi_restore: r22
+// 0x00000058: ldp x23, x24, [sp, #128]
+// 0x0000005c: .cfi_restore: r23
+// 0x0000005c: .cfi_restore: r24
+// 0x0000005c: ldp x25, x26, [sp, #144]
+// 0x00000060: .cfi_restore: r25
+// 0x00000060: .cfi_restore: r26
+// 0x00000060: ldp x27, x28, [sp, #160]
+// 0x00000064: .cfi_restore: r27
+// 0x00000064: .cfi_restore: r28
+// 0x00000064: ldp x29, lr, [sp, #176]
+// 0x00000068: .cfi_restore: r29
+// 0x00000068: .cfi_restore: r30
+// 0x00000068: ldp d8, d9, [sp, #32]
+// 0x0000006c: .cfi_restore_extended: r72
+// 0x0000006c: .cfi_restore_extended: r73
+// 0x0000006c: ldp d10, d11, [sp, #48]
+// 0x00000070: .cfi_restore_extended: r74
+// 0x00000070: .cfi_restore_extended: r75
+// 0x00000070: ldp d12, d13, [sp, #64]
+// 0x00000074: .cfi_restore_extended: r76
+// 0x00000074: .cfi_restore_extended: r77
+// 0x00000074: ldp d14, d15, [sp, #80]
+// 0x00000078: .cfi_restore_extended: r78
+// 0x00000078: .cfi_restore_extended: r79
+// 0x00000078: add sp, sp, #0xc0 (192)
+// 0x0000007c: .cfi_def_cfa_offset: 0
+// 0x0000007c: ret
+// 0x00000080: .cfi_restore_state
+// 0x00000080: .cfi_def_cfa_offset: 192
static constexpr uint8_t expected_asm_kX86[] = {
0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3,
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 70bfb81..4186891 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -164,6 +164,7 @@
void CheckParameterAlignImpl();
void MaxParamNumberImpl();
void WithoutImplementationImpl();
+ void WithoutImplementationRefReturnImpl();
void StackArgsIntsFirstImpl();
void StackArgsFloatsFirstImpl();
void StackArgsMixedImpl();
@@ -1494,6 +1495,20 @@
JNI_TEST(WithoutImplementation)
+void JniCompilerTest::WithoutImplementationRefReturnImpl() {
+ // This will lead to error messages in the log.
+ ScopedLogSeverity sls(LogSeverity::FATAL);
+
+ SetUpForTest(false, "withoutImplementationRefReturn", "()Ljava/lang/Object;", nullptr);
+
+ env_->CallObjectMethod(jobj_, jmethod_);
+
+ EXPECT_TRUE(Thread::Current()->IsExceptionPending());
+ EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
+}
+
+JNI_TEST(WithoutImplementationRefReturn)
+
void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3,
jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 05eb80a..a6caff1 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -157,27 +157,25 @@
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
: JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
- // TODO: Ugly hard code...
- // Should generate these according to the spill mask automatically.
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X20));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X21));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X22));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X23));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X24));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X25));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X26));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X27));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X28));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X29));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromXRegister(X30));
+ uint32_t core_spill_mask = CoreSpillMask();
+ for (int x_reg = 0; x_reg < kNumberOfXRegisters; ++x_reg) {
+ if (((1 << x_reg) & core_spill_mask) != 0) {
+ callee_save_regs_.push_back(
+ Arm64ManagedRegister::FromXRegister(static_cast<XRegister>(x_reg)));
+ }
+ }
- for (size_t i = 0; i < arraysize(kDCalleeSaveRegisters); ++i) {
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(kDCalleeSaveRegisters[i]));
+ uint32_t fp_spill_mask = FpSpillMask();
+ for (int d_reg = 0; d_reg < kNumberOfDRegisters; ++d_reg) {
+ if (((1 << d_reg) & fp_spill_mask) != 0) {
+ callee_save_regs_.push_back(
+ Arm64ManagedRegister::FromDRegister(static_cast<DRegister>(d_reg)));
+ }
}
}
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
- // Compute spill mask to agree with callee saves initialized in the constructor
+ // Compute spill mask to agree with callee saves initialized in the constructor.
// Note: The native jni function may call to some VM runtime functions which may suspend
// or trigger GC. And the jni method frame will become top quick frame in those cases.
// So we need to satisfy GC to save LR and callee-save registers which is similar to
@@ -186,12 +184,14 @@
// Jni method is the method that is compiled by the jni compiler.
// Call chain: managed code(java) --> jni method --> jni function.
// Thread register(X18, scratched by aapcs64) is not saved on stack, it is saved in ETR(X21).
- // Suspend register(x19) is preserved by aapcs64 and it is not used in Jni method.
- return 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 | 1 << X25 |
- 1 << X26 | 1 << X27 | 1 << X28 | 1 << X29 | 1 << LR;
+ return 1 << X19 | 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 |
+ 1 << X25 | 1 << X26 | 1 << X27 | 1 << X28 | 1 << X29 | 1 << LR;
}
uint32_t Arm64JniCallingConvention::FpSpillMask() const {
+ // Consider the case java_method_1 --> jni method --> jni function --> java_method_2: we may
+ // break on java_method_2 and still need to find out the values of DEX registers in
+ // java_method_1. So all callee-saves (in managed code) need to be saved.
uint32_t result = 0;
for (size_t i = 0; i < arraysize(kDCalleeSaveRegisters); ++i) {
result |= (1 << kDCalleeSaveRegisters[i]);
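
The constructor change above derives the callee-save list from CoreSpillMask()/FpSpillMask() instead of hard-coding registers, so the two can no longer drift apart. The underlying pattern is generic: enumerate the set bits of a mask as register indices, and the spill-area size falls out of the same mask. A standalone sketch (assumes a GCC/Clang builtin for popcount):

#include <cstddef>
#include <cstdint>
#include <vector>

// Enumerates the set bits of a spill mask as register indices, mirroring the
// constructor loops above (bit n set means register n is callee-save).
static std::vector<int> RegistersFromMask(uint32_t spill_mask) {
  std::vector<int> regs;
  for (int reg = 0; reg < 32; ++reg) {
    if ((spill_mask & (1u << reg)) != 0) {
      regs.push_back(reg);
    }
  }
  return regs;
}

// Spill area size in bytes, directly from the mask.
static size_t SpillSizeBytes(uint32_t spill_mask, size_t reg_size) {
  return static_cast<size_t>(__builtin_popcount(spill_mask)) * reg_size;
}
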
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 8a14038..2402ea5 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -94,7 +94,7 @@
// Assembler that holds generated instructions
std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set));
- jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GetIncludeDebugSymbols());
+ jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GetIncludeCFI());
// Offsets into data structures
// TODO: if cross compiling these offsets are for the host not the target
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5b4cc54..5abd204 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -36,6 +36,7 @@
#include "mirror/art_method-inl.h"
#include "mirror/array.h"
#include "mirror/class_loader.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "output_stream.h"
@@ -349,8 +350,10 @@
class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
public:
InitCodeMethodVisitor(OatWriter* writer, size_t offset)
- : OatDexMethodVisitor(writer, offset) {
- writer_->absolute_patch_locations_.reserve(
+ : OatDexMethodVisitor(writer, offset),
+ text_absolute_patch_locations_(writer->GetAbsolutePatchLocationsFor(".text")),
+ debuggable_(writer->GetCompilerDriver()->GetCompilerOptions().GetDebuggable()) {
+ text_absolute_patch_locations_->reserve(
writer_->compiler_driver_->GetNonRelativeLinkerPatchCount());
}
@@ -377,20 +380,19 @@
CHECK_NE(code_size, 0U);
uint32_t thumb_offset = compiled_method->CodeDelta();
- // Deduplicate code arrays.
+ // Deduplicate code arrays if we are not producing debuggable code.
bool deduped = false;
- auto lb = dedupe_map_.lower_bound(compiled_method);
- if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) {
- quick_code_offset = lb->second;
- deduped = true;
+ if (debuggable_) {
+ quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
} else {
- offset_ = writer_->relative_patcher_->ReserveSpace(
- offset_, compiled_method, MethodReference(dex_file_, it.GetMemberIndex()));
- offset_ = compiled_method->AlignCode(offset_);
- DCHECK_ALIGNED_PARAM(offset_,
- GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
- quick_code_offset = offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
- dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset);
+ auto lb = dedupe_map_.lower_bound(compiled_method);
+ if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) {
+ quick_code_offset = lb->second;
+ deduped = true;
+ } else {
+ quick_code_offset = NewQuickCodeOffset(compiled_method, it, thumb_offset);
+ dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset);
+ }
}
MethodReference method_ref(dex_file_, it.GetMemberIndex());
@@ -442,13 +444,14 @@
uintptr_t base_loc = offset_ - code_size - writer_->oat_header_->GetExecutableOffset();
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
if (!patch.IsPcRelative()) {
- writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
+ text_absolute_patch_locations_->push_back(base_loc + patch.LiteralOffset());
}
}
}
}
- if (writer_->compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ if (writer_->compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols() ||
+ writer_->compiler_driver_->GetCompilerOptions().GetIncludeCFI()) {
// Record debug information for this function if we are doing that.
const uint32_t quick_code_start = quick_code_offset -
writer_->oat_header_->GetExecutableOffset() - thumb_offset;
@@ -529,9 +532,26 @@
}
};
+ uint32_t NewQuickCodeOffset(CompiledMethod* compiled_method,
+ const ClassDataItemIterator& it,
+ uint32_t thumb_offset) {
+ offset_ = writer_->relative_patcher_->ReserveSpace(
+ offset_, compiled_method, MethodReference(dex_file_, it.GetMemberIndex()));
+ offset_ = compiled_method->AlignCode(offset_);
+ DCHECK_ALIGNED_PARAM(offset_,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
+ return offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
+ }
+
// Deduplication is already done on a pointer basis by the compiler driver,
// so we can simply compare the pointers to find out if things are duplicated.
SafeMap<const CompiledMethod*, uint32_t, CodeOffsetsKeyComparator> dedupe_map_;
+
+ // Patch locations for the .text section.
+ std::vector<uintptr_t>* const text_absolute_patch_locations_;
+
+ // Cache of compiler's --debuggable option.
+ const bool debuggable_;
};
template <typename DataAccess>
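
The dedupe path above uses the classic ordered-map hint idiom: lower_bound finds the insertion point, key_comp checks for an exact match, and passing the hint back makes the eventual insert cheap. A generic sketch of the same pattern with std::map (SafeMap::PutBefore plays the role of emplace_hint):

#include <cstdint>
#include <map>

// Returns the cached value for 'key', computing and caching it on a miss.
// Mirrors the lower_bound / key_comp / insert-with-hint pattern used above.
template <typename Key, typename Compute>
uint32_t GetOrCompute(std::map<Key, uint32_t>* cache, const Key& key, Compute compute) {
  auto lb = cache->lower_bound(key);
  if (lb != cache->end() && !cache->key_comp()(key, lb->first)) {
    return lb->second;  // Exact match: deduplicated.
  }
  uint32_t value = compute(key);
  cache->emplace_hint(lb, key, value);  // Hint keeps the insert O(1) amortized.
  return value;
}
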
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 51bc9b4..cc2b39a 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -19,6 +19,7 @@
#include <stdint.h>
#include <cstddef>
+#include <map>
#include <memory>
#include "linker/relative_patcher.h" // For linker::RelativePatcherTargetProvider.
@@ -81,6 +82,8 @@
//
class OatWriter {
public:
+ typedef std::map<std::string, std::unique_ptr<std::vector<uintptr_t>>> PatchLocationsMap;
+
OatWriter(const std::vector<const DexFile*>& dex_files,
uint32_t image_file_location_oat_checksum,
uintptr_t image_file_location_oat_begin,
@@ -102,10 +105,19 @@
return bss_size_;
}
- const std::vector<uintptr_t>& GetAbsolutePatchLocations() const {
+ const PatchLocationsMap& GetAbsolutePatchLocations() const {
return absolute_patch_locations_;
}
+ std::vector<uintptr_t>* GetAbsolutePatchLocationsFor(const char* section_name) {
+ auto it = absolute_patch_locations_.emplace(
+ std::string(section_name), std::unique_ptr<std::vector<uintptr_t>>());
+ if (it.second) { // Inserted new item.
+ it.first->second.reset(new std::vector<uintptr_t>());
+ }
+ return it.first->second.get();
+ }
+
void SetOatDataOffset(size_t oat_data_offset) {
oat_data_offset_ = oat_data_offset;
}
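GetAbsolutePatchLocationsFor relies on std::map::emplace reporting whether an insertion actually happened: a null unique_ptr is emplaced first, and the vector is allocated only when the key was genuinely new. A self-contained sketch of that lazy-create pattern (names are illustrative):

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    using PatchLocationsMap =
        std::map<std::string, std::unique_ptr<std::vector<std::uintptr_t>>>;

    // Returns the per-section vector, creating it on first use.
    std::vector<std::uintptr_t>* GetOrCreate(PatchLocationsMap* m,
                                             const char* section_name) {
      auto result = m->emplace(std::string(section_name), nullptr);
      if (result.second) {  // Inserted a new (still null) entry.
        result.first->second.reset(new std::vector<std::uintptr_t>());
      }
      return result.first->second.get();
    }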
@@ -330,8 +342,9 @@
std::unique_ptr<linker::RelativePatcher> relative_patcher_;
- // The locations of absolute patches relative to the start of the executable section.
- std::vector<uintptr_t> absolute_patch_locations_;
+ // The locations of absolute patches relative to the start of the section.
+ // The map's key is the ELF's section name (including the dot).
+ PatchLocationsMap absolute_patch_locations_;
// Map method reference to assigned offset.
// Wrap the map in a class implementing linker::RelativePatcherTargetProvider.
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index be432c5..06328f2 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -73,8 +73,8 @@
}
} else {
// General case when 'cond' is another instruction of type boolean.
- // Negate with 'cond == 0'.
- return new (allocator) HEqual(cond, graph->GetIntConstant(0));
+ DCHECK_EQ(cond->GetType(), Primitive::Type::kPrimBoolean);
+ return new (allocator) HBooleanNot(cond);
}
}
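For a boolean materialized as 0 or 1, `cond == 0`, `1 - cond`, and `cond ^ 1` all compute the same value; introducing a dedicated HBooleanNot lets each backend pick the cheapest lowering (the ARM, x86, and x86-64 visitors later in this change use a single EOR/XOR with 1). A quick check of the equivalence, as a sketch:

    #include <cassert>

    int NotViaCompare(int b) { return b == 0 ? 1 : 0; }
    int NotViaXor(int b)     { return b ^ 1; }
    int NotViaSub(int b)     { return 1 - b; }

    int main() {
      for (int b = 0; b <= 1; ++b) {  // Booleans are only ever 0 or 1 here.
        assert(NotViaCompare(b) == NotViaXor(b));
        assert(NotViaXor(b) == NotViaSub(b));
      }
      return 0;
    }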
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a912d4c..8a64d81 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -16,16 +16,13 @@
#include "builder.h"
+#include "art_field-inl.h"
#include "base/logging.h"
#include "class_linker.h"
-#include "dex_file.h"
#include "dex_file-inl.h"
-#include "dex_instruction.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
-#include "mirror/art_field.h"
-#include "mirror/art_field-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -656,11 +653,10 @@
uint16_t field_index = instruction.VRegC_22c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ArtField> resolved_field(hs.NewHandle(
- compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa)));
+ ArtField* resolved_field =
+ compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
- if (resolved_field.Get() == nullptr) {
+ if (resolved_field == nullptr) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
return false;
}
@@ -728,15 +724,15 @@
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<5> hs(soa.Self());
+ StackHandleScope<4> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
dex_compilation_unit_->GetClassLinker()->FindDexCache(*dex_compilation_unit_->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
- Handle<mirror::ArtField> resolved_field(hs.NewHandle(compiler_driver_->ResolveField(
- soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true)));
+ ArtField* resolved_field = compiler_driver_->ResolveField(
+ soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
- if (resolved_field.Get() == nullptr) {
+ if (resolved_field == nullptr) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
return false;
}
@@ -758,7 +754,7 @@
std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
outer_dex_cache.Get(),
referrer_class.Get(),
- resolved_field.Get(),
+ resolved_field,
field_index,
&storage_index);
bool can_easily_access = is_put ? pair.second : pair.first;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8736374..f7fa5db 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -802,10 +802,15 @@
}
}
-void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
+void CodeGenerator::EmitParallelMoves(Location from1,
+ Location to1,
+ Primitive::Type type1,
+ Location from2,
+ Location to2,
+ Primitive::Type type2) {
HParallelMove parallel_move(GetGraph()->GetArena());
- parallel_move.AddMove(from1, to1, nullptr);
- parallel_move.AddMove(from2, to2, nullptr);
+ parallel_move.AddMove(from1, to1, type1, nullptr);
+ parallel_move.AddMove(from2, to2, type2, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b888aca..e536b2d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -244,7 +244,12 @@
// of the architecture.
static size_t GetCacheOffset(uint32_t index);
- void EmitParallelMoves(Location from1, Location to1, Location from2, Location to2);
+ void EmitParallelMoves(Location from1,
+ Location to1,
+ Primitive::Type type1,
+ Location from2,
+ Location to2,
+ Primitive::Type type2);
static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) {
// Check that null value is not represented as an integer constant.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a799a51..507b3cd 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -141,8 +141,10 @@
codegen->EmitParallelMoves(
index_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimInt,
length_location_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt);
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
}
@@ -262,8 +264,10 @@
codegen->EmitParallelMoves(
class_to_check_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm_codegen->InvokeRuntime(
@@ -514,11 +518,11 @@
}
static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::ArmCore(static_cast<int>(reg));
+ return dwarf::Reg::ArmCore(static_cast<int>(reg));
}
static dwarf::Reg DWARFReg(SRegister reg) {
- return dwarf::Reg::ArmFp(static_cast<int>(reg));
+ return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
void CodeGeneratorARM::GenerateFrameEntry() {
@@ -542,12 +546,12 @@
uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
__ PushList(push_mask);
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
- __ cfi().RelOffsetForMany(DWARFReg(Register(0)), 0, push_mask, kArmWordSize);
+ __ cfi().RelOffsetForMany(DWARFReg(R0), 0, push_mask, kArmWordSize);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
- __ cfi().RelOffsetForMany(DWARFReg(SRegister(0)), 0, fpu_spill_mask_, kArmWordSize);
+ __ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
}
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
@@ -560,6 +564,7 @@
__ bx(LR);
return;
}
+ __ cfi().RememberState();
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, adjust);
__ cfi().AdjustCFAOffset(-adjust);
@@ -570,6 +575,8 @@
__ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
__ PopList(core_spill_mask_);
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(GetFrameSize());
}
void CodeGeneratorARM::Bind(HBasicBlock* block) {
@@ -747,8 +754,10 @@
EmitParallelMoves(
Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
+ Primitive::kPrimInt,
Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
- Location::RegisterLocation(destination.AsRegisterPairLow<Register>()));
+ Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
+ Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else {
@@ -786,8 +795,10 @@
EmitParallelMoves(
Location::StackSlot(source.GetStackIndex()),
Location::StackSlot(destination.GetStackIndex()),
+ Primitive::kPrimInt,
Location::StackSlot(source.GetHighStackIndex(kArmWordSize)),
- Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)));
+ Location::StackSlot(destination.GetHighStackIndex(kArmWordSize)),
+ Primitive::kPrimInt);
}
}
}
@@ -1209,10 +1220,7 @@
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
UNUSED(ret);
- __ cfi().RememberState();
codegen_->GenerateFrameExit();
- __ cfi().RestoreState();
- __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
@@ -1223,10 +1231,7 @@
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
UNUSED(ret);
- __ cfi().RememberState();
codegen_->GenerateFrameExit();
- __ cfi().RestoreState();
- __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -2660,6 +2665,21 @@
}
}
+void LocationsBuilderARM::VisitBooleanNot(HBooleanNot* bool_not) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM::VisitBooleanNot(HBooleanNot* bool_not) {
+ DCHECK_EQ(bool_not->InputAt(0)->GetType(), Primitive::kPrimBoolean);
+ LocationSummary* locations = bool_not->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ __ eor(out.AsRegister<Register>(), in.AsRegister<Register>(), ShifterOperand(1));
+}
+
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5fe8adc..f6ec729 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -122,8 +122,8 @@
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
- length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
+ index_location_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
+ length_location_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
@@ -322,8 +322,8 @@
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
- class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
- object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));
+ class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
+ object_class_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm64_codegen->InvokeRuntime(
@@ -466,64 +466,27 @@
// sp[0] : current method.
__ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
GetAssembler()->cfi().AdjustCFAOffset(frame_size);
- SpillRegisters(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
- SpillRegisters(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
+ GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
+ frame_size - GetCoreSpillSize());
+ GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
+ frame_size - FrameEntrySpillSize());
}
}
void CodeGeneratorARM64::GenerateFrameExit() {
+ GetAssembler()->cfi().RememberState();
if (!HasEmptyFrame()) {
int frame_size = GetFrameSize();
- UnspillRegisters(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
- UnspillRegisters(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
+ GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
+ frame_size - FrameEntrySpillSize());
+ GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
+ frame_size - GetCoreSpillSize());
__ Drop(frame_size);
GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
}
-}
-
-static inline dwarf::Reg DWARFReg(CPURegister reg) {
- if (reg.IsFPRegister()) {
- return dwarf::Reg::Arm64Fp(reg.code());
- } else {
- DCHECK_LT(reg.code(), 31u); // X0 - X30.
- return dwarf::Reg::Arm64Core(reg.code());
- }
-}
-
-void CodeGeneratorARM64::SpillRegisters(vixl::CPURegList registers, int offset) {
- int size = registers.RegisterSizeInBytes();
- while (registers.Count() >= 2) {
- const CPURegister& dst0 = registers.PopLowestIndex();
- const CPURegister& dst1 = registers.PopLowestIndex();
- __ Stp(dst0, dst1, MemOperand(__ StackPointer(), offset));
- GetAssembler()->cfi().RelOffset(DWARFReg(dst0), offset);
- GetAssembler()->cfi().RelOffset(DWARFReg(dst1), offset + size);
- offset += 2 * size;
- }
- if (!registers.IsEmpty()) {
- const CPURegister& dst0 = registers.PopLowestIndex();
- __ Str(dst0, MemOperand(__ StackPointer(), offset));
- GetAssembler()->cfi().RelOffset(DWARFReg(dst0), offset);
- }
- DCHECK(registers.IsEmpty());
-}
-
-void CodeGeneratorARM64::UnspillRegisters(vixl::CPURegList registers, int offset) {
- int size = registers.RegisterSizeInBytes();
- while (registers.Count() >= 2) {
- const CPURegister& dst0 = registers.PopLowestIndex();
- const CPURegister& dst1 = registers.PopLowestIndex();
- __ Ldp(dst0, dst1, MemOperand(__ StackPointer(), offset));
- GetAssembler()->cfi().Restore(DWARFReg(dst0));
- GetAssembler()->cfi().Restore(DWARFReg(dst1));
- offset += 2 * size;
- }
- if (!registers.IsEmpty()) {
- const CPURegister& dst0 = registers.PopLowestIndex();
- __ Ldr(dst0, MemOperand(__ StackPointer(), offset));
- GetAssembler()->cfi().Restore(DWARFReg(dst0));
- }
- DCHECK(registers.IsEmpty());
+ __ Ret();
+ GetAssembler()->cfi().RestoreState();
+ GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}
void CodeGeneratorARM64::Bind(HBasicBlock* block) {
@@ -1376,7 +1339,7 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -2316,6 +2279,17 @@
}
}
+void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
+ DCHECK_EQ(instruction->InputAt(0)->GetType(), Primitive::kPrimBoolean);
+ __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
+}
+
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -2465,11 +2439,7 @@
void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
UNUSED(instruction);
- GetAssembler()->cfi().RememberState();
codegen_->GenerateFrameExit();
- __ Ret();
- GetAssembler()->cfi().RestoreState();
- GetAssembler()->cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
@@ -2478,11 +2448,7 @@
void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
UNUSED(instruction);
- GetAssembler()->cfi().RememberState();
codegen_->GenerateFrameExit();
- __ Ret();
- GetAssembler()->cfi().RestoreState();
- GetAssembler()->cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM64::VisitShl(HShl* shl) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 9430e31..07c6dd0 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -46,14 +46,11 @@
const vixl::Register tr = vixl::x18; // Thread Register
static const vixl::Register kArtMethodRegister = vixl::w0; // Method register on invoke.
-const vixl::Register kQuickSuspendRegister = vixl::x19;
const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
-// TODO: When the runtime does not use kQuickSuspendRegister as a suspend
-// counter remove it from the reserved registers list.
-const vixl::CPURegList runtime_reserved_core_registers(tr, kQuickSuspendRegister, vixl::lr);
+const vixl::CPURegList runtime_reserved_core_registers(tr, vixl::lr);
// Callee-saved registers defined by AAPCS64.
const vixl::CPURegList callee_saved_core_registers(vixl::CPURegister::kRegister,
@@ -227,8 +224,6 @@
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
- void SpillRegisters(vixl::CPURegList registers, int offset);
- void UnspillRegisters(vixl::CPURegList registers, int offset);
vixl::CPURegList GetFramePreservedCoreRegisters() const {
return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
diff --git a/compiler/optimizing/code_generator_utils.cc b/compiler/optimizing/code_generator_utils.cc
index 26cab2f..921c1d8 100644
--- a/compiler/optimizing/code_generator_utils.cc
+++ b/compiler/optimizing/code_generator_utils.cc
@@ -18,13 +18,15 @@
#include "base/logging.h"
+namespace art {
+
void CalculateMagicAndShiftForDivRem(int64_t divisor, bool is_long,
int64_t* magic, int* shift) {
// It does not make sense to calculate magic and shift for zero divisor.
DCHECK_NE(divisor, 0);
- /* According to implementation from H.S.Warren's "Hacker's Delight" (Addison Wesley, 2002)
- * Chapter 10 and T,Grablund, P.L.Montogomery's "Division by Invariant Integers Using
+ /* Implementation according to H.S.Warren's "Hacker's Delight" (Addison Wesley, 2002)
+ * Chapter 10 and T.Grablund, P.L.Montogomery's "Division by Invariant Integers Using
* Multiplication" (PLDI 1994).
* The magic number M and shift S can be calculated in the following way:
* Let nc be the most positive value of numerator(n) such that nc = kd - 1,
@@ -39,11 +41,11 @@
* 2^p > nc * (d - 2^p % d), where d >= 2
* 2^p > nc * (d + 2^p % d), where d <= -2.
*
- * The magic number M is calcuated by
+ * The magic number M is calculated by
* M = (2^p + d - 2^p % d) / d, where d >= 2
* M = (2^p - d - 2^p % d) / d, where d <= -2.
*
- * Notice that p is always bigger than or equal to 32 (resp. 64), so we just return 32-p
+ * Notice that p is always bigger than or equal to 32 (resp. 64), so we just return 32 - p
* (resp. 64 - p) as the shift number S.
*/
@@ -52,9 +54,10 @@
// Initialize the computations.
uint64_t abs_d = (divisor >= 0) ? divisor : -divisor;
- uint64_t tmp = exp + (is_long ? static_cast<uint64_t>(divisor) >> 63 :
- static_cast<uint32_t>(divisor) >> 31);
- uint64_t abs_nc = tmp - 1 - tmp % abs_d;
+ uint64_t sign_bit = is_long ? static_cast<uint64_t>(divisor) >> 63 :
+ static_cast<uint32_t>(divisor) >> 31;
+ uint64_t tmp = exp + sign_bit;
+ uint64_t abs_nc = tmp - 1 - (tmp % abs_d);
uint64_t quotient1 = exp / abs_nc;
uint64_t remainder1 = exp % abs_nc;
uint64_t quotient2 = exp / abs_d;
@@ -91,3 +94,4 @@
*shift = is_long ? p - 64 : p - 32;
}
+} // namespace art
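To see the computed constants in action: for 32-bit division by 7, the algorithm above yields the well-known Hacker's Delight pair magic M = 0x92492493 and shift S = 2. The sketch below shows how a backend then divides with no division instruction; because M is negative while the divisor is positive, the numerator is added back after the high multiply (arithmetic right shift of a negative value is assumed, which holds on mainstream compilers):

    #include <cassert>
    #include <cstdint>

    int32_t DivideBy7(int32_t n) {
      const int32_t magic = static_cast<int32_t>(0x92492493);  // M for d = 7.
      const int shift = 2;                                     // S for d = 7.
      // High 32 bits of the 64-bit signed product M * n.
      int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);
      q += n;                               // M < 0, d > 0: add the numerator back.
      q >>= shift;                          // Arithmetic shift by S.
      q += static_cast<uint32_t>(n) >> 31;  // Fix up truncation for negative n.
      return q;
    }

    int main() {
      for (int32_t n = -1000; n <= 1000; ++n) {
        assert(DivideBy7(n) == n / 7);
      }
      return 0;
    }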
diff --git a/compiler/optimizing/code_generator_utils.h b/compiler/optimizing/code_generator_utils.h
index 742d675..59b495c 100644
--- a/compiler/optimizing/code_generator_utils.h
+++ b/compiler/optimizing/code_generator_utils.h
@@ -19,7 +19,12 @@
#include <cstdint>
-// Computes the magic number and the shift needed in the div/rem by constant algorithm
+namespace art {
+
+// Computes the magic number and the shift needed in the div/rem by constant algorithm, as out
+// arguments `magic` and `shift`.
void CalculateMagicAndShiftForDivRem(int64_t divisor, bool is_long, int64_t* magic, int* shift);
+} // namespace art
+
#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_UTILS_H_
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4d74683..0cc377c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -113,8 +113,10 @@
x86_codegen->EmitParallelMoves(
index_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimInt,
length_location_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt);
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
}
@@ -266,8 +268,10 @@
x86_codegen->EmitParallelMoves(
class_to_check_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize,
@@ -461,7 +465,7 @@
codegen_(codegen) {}
static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86Core(static_cast<int>(reg));
+ return dwarf::Reg::X86Core(static_cast<int>(reg));
}
void CodeGeneratorX86::GenerateFrameEntry() {
@@ -496,22 +500,24 @@
}
void CodeGeneratorX86::GenerateFrameExit() {
- if (HasEmptyFrame()) {
- return;
- }
+ __ cfi().RememberState();
+ if (!HasEmptyFrame()) {
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ addl(ESP, Immediate(adjust));
+ __ cfi().AdjustCFAOffset(-adjust);
- int adjust = GetFrameSize() - FrameEntrySpillSize();
- __ addl(ESP, Immediate(adjust));
- __ cfi().AdjustCFAOffset(-adjust);
-
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- Register reg = kCoreCalleeSaves[i];
- if (allocated_registers_.ContainsCoreRegister(reg)) {
- __ popl(reg);
- __ cfi().AdjustCFAOffset(-static_cast<int>(kX86WordSize));
- __ cfi().Restore(DWARFReg(reg));
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ popl(reg);
+ __ cfi().AdjustCFAOffset(-static_cast<int>(kX86WordSize));
+ __ cfi().Restore(DWARFReg(reg));
+ }
}
}
+ __ ret();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(GetFrameSize());
}
void CodeGeneratorX86::Bind(HBasicBlock* block) {
@@ -653,8 +659,10 @@
EmitParallelMoves(
Location::RegisterLocation(source.AsRegisterPairHigh<Register>()),
Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()),
+ Primitive::kPrimInt,
Location::RegisterLocation(source.AsRegisterPairLow<Register>()),
- Location::RegisterLocation(destination.AsRegisterPairLow<Register>()));
+ Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
+ Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
@@ -697,8 +705,10 @@
EmitParallelMoves(
Location::StackSlot(source.GetStackIndex()),
Location::StackSlot(destination.GetStackIndex()),
+ Primitive::kPrimInt,
Location::StackSlot(source.GetHighStackIndex(kX86WordSize)),
- Location::StackSlot(destination.GetHighStackIndex(kX86WordSize)));
+ Location::StackSlot(destination.GetHighStackIndex(kX86WordSize)),
+ Primitive::kPrimInt);
}
}
}
@@ -1116,11 +1126,7 @@
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
UNUSED(ret);
- __ cfi().RememberState();
codegen_->GenerateFrameExit();
- __ ret();
- __ cfi().RestoreState();
- __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderX86::VisitReturn(HReturn* ret) {
@@ -1178,11 +1184,7 @@
LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
}
}
- __ cfi().RememberState();
codegen_->GenerateFrameExit();
- __ ret();
- __ cfi().RestoreState();
- __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -2304,10 +2306,11 @@
LocationSummary* locations = instruction->GetLocations();
DCHECK(locations->InAt(1).IsConstant());
+ DCHECK(locations->InAt(1).GetConstant()->IsIntConstant());
Register out_register = locations->Out().AsRegister<Register>();
Register input_register = locations->InAt(0).AsRegister<Register>();
- int imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
DCHECK(imm == 1 || imm == -1);
@@ -2322,16 +2325,14 @@
}
-void InstructionCodeGeneratorX86::DivByPowerOfTwo(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv());
-
+void InstructionCodeGeneratorX86::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register out_register = locations->Out().AsRegister<Register>();
Register input_register = locations->InAt(0).AsRegister<Register>();
- int imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
- DCHECK(instruction->IsDiv() && IsPowerOfTwo(std::abs(imm)));
+ DCHECK(IsPowerOfTwo(std::abs(imm)));
Register num = locations->GetTemp(0).AsRegister<Register>();
__ leal(num, Address(input_register, std::abs(imm) - 1));
@@ -2440,15 +2441,15 @@
DCHECK_EQ(EAX, first.AsRegister<Register>());
DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>());
- if (second.IsConstant()) {
- int imm = second.GetConstant()->AsIntConstant()->GetValue();
+ if (instruction->InputAt(1)->IsIntConstant()) {
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
if (imm == 0) {
// Do not generate anything for 0. DivZeroCheck would forbid any generated code.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
} else if (is_div && IsPowerOfTwo(std::abs(imm))) {
- DivByPowerOfTwo(instruction);
+ DivByPowerOfTwo(instruction->AsDiv());
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
@@ -2519,7 +2520,7 @@
// We need to save the numerator while we tweak eax and edx. As we are using imul in a way
// which enforces results to be in EAX and EDX, things are simpler if we use EAX also as
// output and request another temp.
- if (div->InputAt(1)->IsConstant()) {
+ if (div->InputAt(1)->IsIntConstant()) {
locations->AddTemp(Location::RequiresRegister());
}
break;
@@ -2593,7 +2594,7 @@
// We need to save the numerator while we tweak eax and edx. As we are using imul in a way
// which enforces results to be in EAX and EDX, things are simpler if we use EDX also as
// output and request another temp.
- if (rem->InputAt(1)->IsConstant()) {
+ if (rem->InputAt(1)->IsIntConstant()) {
locations->AddTemp(Location::RequiresRegister());
}
break;
@@ -2713,16 +2714,16 @@
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location::Any());
- // The shift count needs to be in CL or a constant.
+ locations->SetInAt(0, Location::RequiresRegister());
+ // The shift count needs to be in CL.
locations->SetInAt(1, Location::ByteRegisterOrConstant(ECX, op->InputAt(1)));
locations->SetOut(Location::SameAsFirstInput());
break;
}
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- // The shift count needs to be in CL or a constant.
- locations->SetInAt(1, Location::ByteRegisterOrConstant(ECX, op->InputAt(1)));
+ // The shift count needs to be in CL.
+ locations->SetInAt(1, Location::RegisterLocation(ECX));
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2741,115 +2742,46 @@
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
- if (first.IsRegister()) {
- Register first_reg = first.AsRegister<Register>();
- if (second.IsRegister()) {
- Register second_reg = second.AsRegister<Register>();
- DCHECK_EQ(ECX, second_reg);
- if (op->IsShl()) {
- __ shll(first_reg, second_reg);
- } else if (op->IsShr()) {
- __ sarl(first_reg, second_reg);
- } else {
- __ shrl(first_reg, second_reg);
- }
- } else {
- int32_t shift = second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue;
- if (shift == 0) {
- return;
- }
- Immediate imm(shift);
- if (op->IsShl()) {
- __ shll(first_reg, imm);
- } else if (op->IsShr()) {
- __ sarl(first_reg, imm);
- } else {
- __ shrl(first_reg, imm);
- }
- }
- } else {
- DCHECK(first.IsStackSlot()) << first;
- Address addr(ESP, first.GetStackIndex());
- if (second.IsRegister()) {
- Register second_reg = second.AsRegister<Register>();
- DCHECK_EQ(ECX, second_reg);
- if (op->IsShl()) {
- __ shll(addr, second_reg);
- } else if (op->IsShr()) {
- __ sarl(addr, second_reg);
- } else {
- __ shrl(addr, second_reg);
- }
- } else {
- int32_t shift = second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue;
- if (shift == 0) {
- return;
- }
- Immediate imm(shift);
- if (op->IsShl()) {
- __ shll(addr, imm);
- } else if (op->IsShr()) {
- __ sarl(addr, imm);
- } else {
- __ shrl(addr, imm);
- }
- }
- }
-
- break;
- }
- case Primitive::kPrimLong: {
+ Register first_reg = first.AsRegister<Register>();
if (second.IsRegister()) {
Register second_reg = second.AsRegister<Register>();
DCHECK_EQ(ECX, second_reg);
if (op->IsShl()) {
- GenerateShlLong(first, second_reg);
+ __ shll(first_reg, second_reg);
} else if (op->IsShr()) {
- GenerateShrLong(first, second_reg);
+ __ sarl(first_reg, second_reg);
} else {
- GenerateUShrLong(first, second_reg);
+ __ shrl(first_reg, second_reg);
}
} else {
- // Shift by a constant.
- int shift = second.GetConstant()->AsIntConstant()->GetValue() & kMaxLongShiftValue;
- // Nothing to do if the shift is 0, as the input is already the output.
- if (shift != 0) {
- if (op->IsShl()) {
- GenerateShlLong(first, shift);
- } else if (op->IsShr()) {
- GenerateShrLong(first, shift);
- } else {
- GenerateUShrLong(first, shift);
- }
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue);
+ if (op->IsShl()) {
+ __ shll(first_reg, imm);
+ } else if (op->IsShr()) {
+ __ sarl(first_reg, imm);
+ } else {
+ __ shrl(first_reg, imm);
}
}
break;
}
+ case Primitive::kPrimLong: {
+ Register second_reg = second.AsRegister<Register>();
+ DCHECK_EQ(ECX, second_reg);
+ if (op->IsShl()) {
+ GenerateShlLong(first, second_reg);
+ } else if (op->IsShr()) {
+ GenerateShrLong(first, second_reg);
+ } else {
+ GenerateUShrLong(first, second_reg);
+ }
+ break;
+ }
default:
LOG(FATAL) << "Unexpected op type " << op->GetResultType();
}
}
-void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, int shift) {
- Register low = loc.AsRegisterPairLow<Register>();
- Register high = loc.AsRegisterPairHigh<Register>();
- if (shift == 32) {
- // Shift by 32 is easy. High gets low, and low gets 0.
- codegen_->EmitParallelMoves(
- loc.ToLow(), loc.ToHigh(),
- Location::ConstantLocation(GetGraph()->GetIntConstant(0)), loc.ToLow());
- } else if (shift > 32) {
- // Low part becomes 0. High part is low part << (shift-32).
- __ movl(high, low);
- __ shll(high, Immediate(shift - 32));
- __ xorl(low, low);
- } else {
- // Between 1 and 31.
- __ shld(high, low, Immediate(shift));
- __ shll(low, Immediate(shift));
- }
-}
-
void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, Register shifter) {
Label done;
__ shld(loc.AsRegisterPairHigh<Register>(), loc.AsRegisterPairLow<Register>(), shifter);
@@ -2861,27 +2793,6 @@
__ Bind(&done);
}
-void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, int shift) {
- Register low = loc.AsRegisterPairLow<Register>();
- Register high = loc.AsRegisterPairHigh<Register>();
- if (shift == 32) {
- // Need to copy the sign.
- DCHECK_NE(low, high);
- __ movl(low, high);
- __ sarl(high, Immediate(31));
- } else if (shift > 32) {
- DCHECK_NE(low, high);
- // High part becomes sign. Low part is shifted by shift - 32.
- __ movl(low, high);
- __ sarl(high, Immediate(31));
- __ shrl(low, Immediate(shift - 32));
- } else {
- // Between 1 and 31.
- __ shrd(low, high, Immediate(shift));
- __ sarl(high, Immediate(shift));
- }
-}
-
void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, Register shifter) {
Label done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
@@ -2893,26 +2804,6 @@
__ Bind(&done);
}
-void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, int shift) {
- Register low = loc.AsRegisterPairLow<Register>();
- Register high = loc.AsRegisterPairHigh<Register>();
- if (shift == 32) {
- // Shift by 32 is easy. Low gets high, and high gets 0.
- codegen_->EmitParallelMoves(
- loc.ToHigh(), loc.ToLow(),
- Location::ConstantLocation(GetGraph()->GetIntConstant(0)), loc.ToHigh());
- } else if (shift > 32) {
- // Low part is high >> (shift - 32). High part becomes 0.
- __ movl(low, high);
- __ shrl(low, Immediate(shift - 32));
- __ xorl(high, high);
- } else {
- // Between 1 and 31.
- __ shrd(low, high, Immediate(shift));
- __ shrl(high, Immediate(shift));
- }
-}
-
void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, Register shifter) {
Label done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
@@ -3032,6 +2923,22 @@
}
}
+void LocationsBuilderX86::VisitBooleanNot(HBooleanNot* bool_not) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86::VisitBooleanNot(HBooleanNot* bool_not) {
+ DCHECK_EQ(bool_not->InputAt(0)->GetType(), Primitive::kPrimBoolean);
+ LocationSummary* locations = bool_not->GetLocations();
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+ DCHECK(in.Equals(out));
+ __ xorl(out.AsRegister<Register>(), Immediate(1));
+}
+
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index e6e7fb7..368ae0f 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -164,16 +164,13 @@
void HandleBitwiseOperation(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
- void DivByPowerOfTwo(HBinaryOperation* instruction);
+ void DivByPowerOfTwo(HDiv* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateRemFP(HRem *rem);
void HandleShift(HBinaryOperation* instruction);
void GenerateShlLong(const Location& loc, Register shifter);
void GenerateShrLong(const Location& loc, Register shifter);
void GenerateUShrLong(const Location& loc, Register shifter);
- void GenerateShlLong(const Location& loc, int shift);
- void GenerateShrLong(const Location& loc, int shift);
- void GenerateUShrLong(const Location& loc, int shift);
void GenerateMemoryBarrier(MemBarrierKind kind);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5710ec5..fb0f4ca 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -161,8 +161,10 @@
codegen->EmitParallelMoves(
index_location_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimInt,
length_location_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt);
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
@@ -285,8 +287,10 @@
codegen->EmitParallelMoves(
class_to_check_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
__ gs()->call(
@@ -484,10 +488,11 @@
}
static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86_64Core(static_cast<int>(reg));
+ return dwarf::Reg::X86_64Core(static_cast<int>(reg));
}
+
static dwarf::Reg DWARFReg(FloatRegister reg) {
- return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
+ return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
}
void CodeGeneratorX86_64::GenerateFrameEntry() {
@@ -534,31 +539,34 @@
}
void CodeGeneratorX86_64::GenerateFrameExit() {
- if (HasEmptyFrame()) {
- return;
- }
- uint32_t xmm_spill_location = GetFpuSpillStart();
- size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize();
- for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
- if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) {
- int offset = xmm_spill_location + (xmm_spill_slot_size * i);
- __ movsd(XmmRegister(kFpuCalleeSaves[i]), Address(CpuRegister(RSP), offset));
- __ cfi().Restore(DWARFReg(kFpuCalleeSaves[i]));
+ __ cfi().RememberState();
+ if (!HasEmptyFrame()) {
+ uint32_t xmm_spill_location = GetFpuSpillStart();
+ size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize();
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) {
+ int offset = xmm_spill_location + (xmm_spill_slot_size * i);
+ __ movsd(XmmRegister(kFpuCalleeSaves[i]), Address(CpuRegister(RSP), offset));
+ __ cfi().Restore(DWARFReg(kFpuCalleeSaves[i]));
+ }
+ }
+
+ int adjust = GetFrameSize() - GetCoreSpillSize();
+ __ addq(CpuRegister(RSP), Immediate(adjust));
+ __ cfi().AdjustCFAOffset(-adjust);
+
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ popq(CpuRegister(reg));
+ __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
+ __ cfi().Restore(DWARFReg(reg));
+ }
}
}
-
- int adjust = GetFrameSize() - GetCoreSpillSize();
- __ addq(CpuRegister(RSP), Immediate(adjust));
- __ cfi().AdjustCFAOffset(-adjust);
-
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- Register reg = kCoreCalleeSaves[i];
- if (allocated_registers_.ContainsCoreRegister(reg)) {
- __ popq(CpuRegister(reg));
- __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
- __ cfi().Restore(DWARFReg(reg));
- }
- }
+ __ ret();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(GetFrameSize());
}
void CodeGeneratorX86_64::Bind(HBasicBlock* block) {
@@ -1143,11 +1151,7 @@
void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
UNUSED(ret);
- __ cfi().RememberState();
codegen_->GenerateFrameExit();
- __ ret();
- __ cfi().RestoreState();
- __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderX86_64::VisitReturn(HReturn* ret) {
@@ -1198,11 +1202,7 @@
LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
}
}
- __ cfi().RememberState();
codegen_->GenerateFrameExit();
- __ ret();
- __ cfi().RestoreState();
- __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
@@ -2348,12 +2348,7 @@
CpuRegister output_register = locations->Out().AsRegister<CpuRegister>();
CpuRegister input_register = locations->InAt(0).AsRegister<CpuRegister>();
- int64_t imm;
- if (second.GetConstant()->IsLongConstant()) {
- imm = second.GetConstant()->AsLongConstant()->GetValue();
- } else {
- imm = second.GetConstant()->AsIntConstant()->GetValue();
- }
+ int64_t imm = Int64FromConstant(second.GetConstant());
DCHECK(imm == 1 || imm == -1);
@@ -2383,25 +2378,18 @@
}
default:
- LOG(FATAL) << "Unreachable";
+ LOG(FATAL) << "Unexpected type for div by (-)1 " << instruction->GetResultType();
}
}
-void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv());
-
+void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location second = locations->InAt(1);
CpuRegister output_register = locations->Out().AsRegister<CpuRegister>();
CpuRegister numerator = locations->InAt(0).AsRegister<CpuRegister>();
- int64_t imm;
- if (instruction->GetResultType() == Primitive::kPrimLong) {
- imm = second.GetConstant()->AsLongConstant()->GetValue();
- } else {
- imm = second.GetConstant()->AsIntConstant()->GetValue();
- }
+ int64_t imm = Int64FromConstant(second.GetConstant());
DCHECK(IsPowerOfTwo(std::abs(imm)));
@@ -2462,7 +2450,7 @@
int64_t magic;
int shift;
- // TODO: can these branch be written as one?
+ // TODO: can these branches be written as one?
if (instruction->GetResultType() == Primitive::kPrimInt) {
int imm = second.GetConstant()->AsIntConstant()->GetValue();
@@ -2526,7 +2514,7 @@
__ imulq(numerator);
if (imm > 0 && magic < 0) {
- // RDX += numeratorerator
+ // RDX += numerator
__ addq(rdx, numerator);
} else if (imm < 0 && magic > 0) {
// RDX -= numerator
@@ -2576,19 +2564,14 @@
DCHECK_EQ(is_div ? RAX : RDX, out.AsRegister());
if (second.IsConstant()) {
- int64_t imm;
- if (second.GetConstant()->AsLongConstant()) {
- imm = second.GetConstant()->AsLongConstant()->GetValue();
- } else {
- imm = second.GetConstant()->AsIntConstant()->GetValue();
- }
+ int64_t imm = Int64FromConstant(second.GetConstant());
if (imm == 0) {
// Do not generate anything. DivZeroCheck would prevent any code to be executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
} else if (instruction->IsDiv() && IsPowerOfTwo(std::abs(imm))) {
- DivByPowerOfTwo(instruction);
+ DivByPowerOfTwo(instruction->AsDiv());
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
@@ -2995,6 +2978,22 @@
}
}
+void LocationsBuilderX86_64::VisitBooleanNot(HBooleanNot* bool_not) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86_64::VisitBooleanNot(HBooleanNot* bool_not) {
+ DCHECK_EQ(bool_not->InputAt(0)->GetType(), Primitive::kPrimBoolean);
+ LocationSummary* locations = bool_not->GetLocations();
+ DCHECK_EQ(locations->InAt(0).AsRegister<CpuRegister>().AsRegister(),
+ locations->Out().AsRegister<CpuRegister>().AsRegister());
+ Location out = locations->Out();
+ __ xorl(out.AsRegister<CpuRegister>(), Immediate(1));
+}
+
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index aae7de0..b4876ef 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -174,7 +174,7 @@
void HandleBitwiseOperation(HBinaryOperation* operation);
void GenerateRemFP(HRem *rem);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
- void DivByPowerOfTwo(HBinaryOperation* instruction);
+ void DivByPowerOfTwo(HDiv* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleShift(HBinaryOperation* operation);
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 2be117b..afcff1e 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -152,7 +152,7 @@
bool has_result,
Expected expected) {
graph->BuildDominatorTree();
- SsaLivenessAnalysis liveness(*graph, codegen);
+ SsaLivenessAnalysis liveness(graph, codegen);
liveness.Analyze();
RegisterAllocator register_allocator(graph->GetArena(), codegen, liveness);
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 966165b..53f1f3c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -194,7 +194,8 @@
int64_t value = CodeGenerator::GetInt64ValueOf(constant);
- if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() || instr->IsCompare()) {
+ if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
+ instr->IsCompare() || instr->IsBoundsCheck()) {
// Uses aliases of ADD/SUB instructions.
return vixl::Assembler::IsImmAddSub(value);
} else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
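Context for the IsImmAddSub check above (stated as an assumption about the A64 encoding, not a quote from vixl): ADD/SUB immediates are 12-bit unsigned values, optionally shifted left by 12, which is why BoundsCheck constants of this form can avoid occupying a register. A sketch of the encodability test:

    #include <cassert>
    #include <cstdint>

    // A64 ADD/SUB immediate: imm12, optionally LSL #12.
    bool IsAddSubImmediate(uint64_t value) {
      return (value & ~UINT64_C(0xFFF)) == 0 ||        // Fits in the low 12 bits.
             (value & ~(UINT64_C(0xFFF) << 12)) == 0;  // Fits after LSL #12.
    }

    int main() {
      assert(IsAddSubImmediate(0));
      assert(IsAddSubImmediate(0xFFF));
      assert(IsAddSubImmediate(0x7FF000));
      assert(!IsAddSubImmediate(0x1001));  // Bits in both halves: not encodable.
      return 0;
    }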
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 7623e42..61a7697 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -36,7 +36,13 @@
ASSERT_EQ(graph->GetBlocks().Size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
if (blocks[i] == -1) {
- ASSERT_EQ(nullptr, graph->GetBlocks().Get(i)->GetDominator());
+ if (graph->GetBlocks().Get(i) == nullptr) {
+ // Dead block.
+ } else {
+ // Only the entry block has no dominator.
+ ASSERT_EQ(nullptr, graph->GetBlocks().Get(i)->GetDominator());
+ ASSERT_TRUE(graph->GetBlocks().Get(i)->IsEntryBlock());
+ }
} else {
ASSERT_NE(nullptr, graph->GetBlocks().Get(i)->GetDominator());
ASSERT_EQ(blocks[i], graph->GetBlocks().Get(i)->GetDominator()->GetBlockId());
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 56ec8a7..afbc490 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -24,9 +24,21 @@
class InstructionSimplifierVisitor : public HGraphVisitor {
public:
InstructionSimplifierVisitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph), stats_(stats) {}
+ : HGraphVisitor(graph),
+ stats_(stats) {}
+
+ void Run();
private:
+ void RecordSimplification() {
+ simplification_occurred_ = true;
+ simplifications_at_current_position_++;
+ if (stats_) {
+ stats_->RecordStat(kInstructionSimplifications);
+ }
+ }
+
+ bool TryMoveNegOnInputsAfterBinop(HBinaryOperation* binop);
void VisitShift(HBinaryOperation* shift);
void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
@@ -40,6 +52,8 @@
void VisitAnd(HAnd* instruction) OVERRIDE;
void VisitDiv(HDiv* instruction) OVERRIDE;
void VisitMul(HMul* instruction) OVERRIDE;
+ void VisitNeg(HNeg* instruction) OVERRIDE;
+ void VisitNot(HNot* instruction) OVERRIDE;
void VisitOr(HOr* instruction) OVERRIDE;
void VisitShl(HShl* instruction) OVERRIDE;
void VisitShr(HShr* instruction) OVERRIDE;
@@ -48,11 +62,38 @@
void VisitXor(HXor* instruction) OVERRIDE;
OptimizingCompilerStats* stats_;
+ bool simplification_occurred_ = false;
+ int simplifications_at_current_position_ = 0;
+ // We ensure we do not loop infinitely. The value is a finger in the air guess
+ // that should allow enough simplification.
+ static constexpr int kMaxSamePositionSimplifications = 10;
};
void InstructionSimplifier::Run() {
InstructionSimplifierVisitor visitor(graph_, stats_);
- visitor.VisitInsertionOrder();
+ visitor.Run();
+}
+
+void InstructionSimplifierVisitor::Run() {
+ for (HReversePostOrderIterator it(*GetGraph()); !it.Done();) {
+ // The simplification of an instruction to another instruction may yield
+ // possibilities for other simplifications. So although we perform a reverse
+ // post order visit, we sometimes need to revisit an instruction index.
+ simplification_occurred_ = false;
+ VisitBasicBlock(it.Current());
+ if (simplification_occurred_ &&
+ (simplifications_at_current_position_ < kMaxSamePositionSimplifications)) {
+ // New simplifications may be applicable to the instruction at the
+ // current index, so don't advance the iterator.
+ continue;
+ }
+ if (simplifications_at_current_position_ >= kMaxSamePositionSimplifications) {
+ LOG(WARNING) << "Too many simplifications (" << simplifications_at_current_position_
+ << ") occurred at the current position.";
+ }
+ simplifications_at_current_position_ = 0;
+ it.Advance();
+ }
}
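The Run loop above is a bounded local fixed point: it stays at the current block while simplifications keep firing, but caps the revisits so a pathological rewrite cycle cannot spin forever. Reduced to its skeleton (illustrative, not the ART API):

    #include <cstddef>

    // `simplify(i)` returns true if it rewrote something at position `i`.
    template <typename Fn>
    void RunToLocalFixedPoint(std::size_t count, Fn simplify,
                              int max_same_position = 10) {
      int at_current_position = 0;
      for (std::size_t i = 0; i < count;) {
        if (simplify(i) && at_current_position < max_same_position) {
          ++at_current_position;  // Revisit: the rewrite may enable another one.
          continue;
        }
        at_current_position = 0;
        ++i;
      }
    }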
namespace {
@@ -63,6 +104,35 @@
} // namespace
+// Returns true if the code was simplified to use only one negation operation
+// after the binary operation instead of one on each of the inputs.
+bool InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop(HBinaryOperation* binop) {
+ DCHECK(binop->IsAdd() || binop->IsSub());
+ DCHECK(binop->GetLeft()->IsNeg() && binop->GetRight()->IsNeg());
+ HNeg* left_neg = binop->GetLeft()->AsNeg();
+ HNeg* right_neg = binop->GetRight()->AsNeg();
+ if (!left_neg->HasOnlyOneNonEnvironmentUse() ||
+ !right_neg->HasOnlyOneNonEnvironmentUse()) {
+ return false;
+ }
+ // Replace code looking like
+ // NEG tmp1, a
+ // NEG tmp2, b
+ // ADD dst, tmp1, tmp2
+ // with
+ // ADD tmp, a, b
+ // NEG dst, tmp
+ binop->ReplaceInput(left_neg->GetInput(), 0);
+ binop->ReplaceInput(right_neg->GetInput(), 1);
+ left_neg->GetBlock()->RemoveInstruction(left_neg);
+ right_neg->GetBlock()->RemoveInstruction(right_neg);
+ HNeg* neg = new (GetGraph()->GetArena()) HNeg(binop->GetType(), binop);
+ binop->GetBlock()->InsertInstructionBefore(neg, binop->GetNext());
+ binop->ReplaceWithExceptInReplacementAtIndex(neg, 0);
+ RecordSimplification();
+ return true;
+}
+
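The rewrite is justified by the wrap-around identity (-a) + (-b) == -(a + b). Java integer arithmetic wraps, which unsigned arithmetic models exactly in C++ (signed overflow would be undefined), so a quick check as a sketch:

    #include <cassert>
    #include <cstdint>

    uint32_t NegEachThenAdd(uint32_t a, uint32_t b) { return (0u - a) + (0u - b); }
    uint32_t AddThenNeg(uint32_t a, uint32_t b)     { return 0u - (a + b); }

    int main() {
      const uint32_t samples[] = {0u, 1u, 0xFFFFFFFFu, 42u, 0x80000000u};
      for (uint32_t a : samples) {
        for (uint32_t b : samples) {
          assert(NegEachThenAdd(a, b) == AddThenNeg(a, b));
        }
      }
      return 0;
    }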
void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) {
DCHECK(instruction->IsShl() || instruction->IsShr() || instruction->IsUShr());
HConstant* input_cst = instruction->GetConstantRight();
@@ -182,6 +252,36 @@
// src
instruction->ReplaceWith(input_other);
instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ bool left_is_neg = left->IsNeg();
+ bool right_is_neg = right->IsNeg();
+
+ if (left_is_neg && right_is_neg) {
+ if (TryMoveNegOnInputsAfterBinop(instruction)) {
+ return;
+ }
+ }
+
+ HNeg* neg = left_is_neg ? left->AsNeg() : right->AsNeg();
+ if ((left_is_neg ^ right_is_neg) && neg->HasOnlyOneNonEnvironmentUse()) {
+ // Replace code looking like
+ // NEG tmp, b
+ // ADD dst, a, tmp
+ // with
+ // SUB dst, a, b
+ // We do not perform the optimization if the input negation has environment
+ // uses or multiple non-environment uses as it could lead to worse code. In
+ // particular, we do not want the live range of `b` to be extended if we are
+ // not sure the initial 'NEG' instruction can be removed.
+ HInstruction* other = left_is_neg ? right : left;
+ HSub* sub = new(GetGraph()->GetArena()) HSub(instruction->GetType(), other, neg->GetInput());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, sub);
+ RecordSimplification();
+ neg->GetBlock()->RemoveInstruction(neg);
}
}
@@ -201,7 +301,7 @@
// We assume that GVN has run before, so we only perform a pointer comparison.
// If for some reason the values are equal but the pointers are different, we
- // are still correct and only miss an optimisation opportunity.
+ // are still correct and only miss an optimization opportunity.
if (instruction->GetLeft() == instruction->GetRight()) {
// Replace code looking like
// AND dst, src, src
@@ -235,6 +335,7 @@
// NEG dst, src
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
instruction, (new (GetGraph()->GetArena()) HNeg(type, input_other)));
+ RecordSimplification();
}
}
@@ -267,6 +368,7 @@
// NEG dst, src
HNeg* neg = new (allocator) HNeg(type, input_other);
block->ReplaceAndRemoveInstructionWith(instruction, neg);
+ RecordSimplification();
return;
}
@@ -280,6 +382,7 @@
// The 'int' and 'long' cases are handled below.
block->ReplaceAndRemoveInstructionWith(instruction,
new (allocator) HAdd(type, input_other, input_other));
+ RecordSimplification();
return;
}
@@ -295,10 +398,75 @@
HIntConstant* shift = GetGraph()->GetIntConstant(WhichPowerOf2(factor));
HShl* shl = new(allocator) HShl(type, input_other, shift);
block->ReplaceAndRemoveInstructionWith(instruction, shl);
+ RecordSimplification();
}
}
}
+void InstructionSimplifierVisitor::VisitNeg(HNeg* instruction) {
+ HInstruction* input = instruction->GetInput();
+ if (input->IsNeg()) {
+ // Replace code looking like
+ // NEG tmp, src
+ // NEG dst, tmp
+ // with
+ // src
+ HNeg* previous_neg = input->AsNeg();
+ instruction->ReplaceWith(previous_neg->GetInput());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ // We perform the optimization even if the input negation has environment
+ // uses since it allows removing the current instruction. But we only
+ // delete the input negation if it does not have any uses left.
+ if (!previous_neg->HasUses()) {
+ previous_neg->GetBlock()->RemoveInstruction(previous_neg);
+ }
+ RecordSimplification();
+ return;
+ }
+
+ if (input->IsSub() && input->HasOnlyOneNonEnvironmentUse()) {
+ // Replace code looking like
+ // SUB tmp, a, b
+ // NEG dst, tmp
+ // with
+ // SUB dst, b, a
+ // We do not perform the optimization if the input subtraction has
+ // environment uses or multiple non-environment uses as it could lead to
+ // worse code. In particular, we do not want the live ranges of `a` and `b`
+ // to be extended if we are not sure the initial 'SUB' instruction can be
+ // removed.
+ HSub* sub = input->AsSub();
+ HSub* new_sub =
+ new (GetGraph()->GetArena()) HSub(instruction->GetType(), sub->GetRight(), sub->GetLeft());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_sub);
+ if (!sub->HasUses()) {
+ sub->GetBlock()->RemoveInstruction(sub);
+ }
+ RecordSimplification();
+ }
+}
+
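The SUB-reversal in VisitNeg relies on -(a - b) == b - a, which again holds under wrap-around arithmetic; a sketch in the same style:

    #include <cassert>
    #include <cstdint>

    uint32_t NegOfSub(uint32_t a, uint32_t b)   { return 0u - (a - b); }
    uint32_t SwappedSub(uint32_t a, uint32_t b) { return b - a; }

    int main() {
      const uint32_t samples[] = {0u, 1u, 0xFFFFFFFFu, 42u, 0x80000000u};
      for (uint32_t a : samples) {
        for (uint32_t b : samples) {
          assert(NegOfSub(a, b) == SwappedSub(a, b));
        }
      }
      return 0;
    }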
+void InstructionSimplifierVisitor::VisitNot(HNot* instruction) {
+ HInstruction* input = instruction->GetInput();
+ if (input->IsNot()) {
+ // Replace code looking like
+ // NOT tmp, src
+ // NOT dst, tmp
+ // with
+ // src
+ // We perform the optimization even if the input negation has environment
+ // uses since it allows removing the current instruction. But we only
+ // delete the input negation if it does not have any uses left.
+ HNot* previous_not = input->AsNot();
+ instruction->ReplaceWith(previous_not->GetInput());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ if (!previous_not->HasUses()) {
+ previous_not->GetBlock()->RemoveInstruction(previous_not);
+ }
+ RecordSimplification();
+ }
+}
+
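Both NEG and NOT are involutions (applying them twice is the identity: -(-x) == x and ~~x == x), which is what lets these visitors collapse the pair back to the original source value. A one-line check:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t samples[] = {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu};
      for (uint32_t x : samples) {
        assert(0u - (0u - x) == x);  // Double negation.
        assert(~~x == x);            // Double bitwise not.
      }
      return 0;
    }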
void InstructionSimplifierVisitor::VisitOr(HOr* instruction) {
HConstant* input_cst = instruction->GetConstantRight();
HInstruction* input_other = instruction->GetLeastConstantLeft();
@@ -315,7 +483,7 @@
// We assume that GVN has run before, so we only perform a pointer comparison.
// If for some reason the values are equal but the pointers are different, we
- // are still correct and only miss an optimisation opportunity.
+ // are still correct and only miss an optimization opportunity.
if (instruction->GetLeft() == instruction->GetRight()) {
// Replace code looking like
// OR dst, src, src
@@ -356,20 +524,61 @@
HBasicBlock* block = instruction->GetBlock();
ArenaAllocator* allocator = GetGraph()->GetArena();
- if (instruction->GetLeft()->IsConstant()) {
- int64_t left = Int64FromConstant(instruction->GetLeft()->AsConstant());
- if (left == 0) {
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ if (left->IsConstant()) {
+ if (Int64FromConstant(left->AsConstant()) == 0) {
// Replace code looking like
// SUB dst, 0, src
// with
// NEG dst, src
- // Note that we cannot optimise `0.0 - x` to `-x` for floating-point. When
+ // Note that we cannot optimize `0.0 - x` to `-x` for floating-point. When
// `x` is `0.0`, the former expression yields `0.0`, while the later
// yields `-0.0`.
- HNeg* neg = new (allocator) HNeg(type, instruction->GetRight());
+ HNeg* neg = new (allocator) HNeg(type, right);
block->ReplaceAndRemoveInstructionWith(instruction, neg);
+ RecordSimplification();
+ return;
}
}
+
+ if (left->IsNeg() && right->IsNeg()) {
+ if (TryMoveNegOnInputsAfterBinop(instruction)) {
+ return;
+ }
+ }
+
+ if (right->IsNeg() && right->HasOnlyOneNonEnvironmentUse()) {
+ // Replace code looking like
+ // NEG tmp, b
+ // SUB dst, a, tmp
+ // with
+ // ADD dst, a, b
+    HAdd* add = new (GetGraph()->GetArena()) HAdd(type, left, right->AsNeg()->GetInput());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add);
+ RecordSimplification();
+ right->GetBlock()->RemoveInstruction(right);
+ return;
+ }
+
+ if (left->IsNeg() && left->HasOnlyOneNonEnvironmentUse()) {
+ // Replace code looking like
+ // NEG tmp, a
+ // SUB dst, tmp, b
+ // with
+ // ADD tmp, a, b
+ // NEG dst, tmp
+ // The second version is not intrinsically better, but enables more
+ // transformations.
+    HAdd* add = new (GetGraph()->GetArena()) HAdd(type, left->AsNeg()->GetInput(), right);
+ instruction->GetBlock()->InsertInstructionBefore(add, instruction);
+ HNeg* neg = new (GetGraph()->GetArena()) HNeg(instruction->GetType(), add);
+ instruction->GetBlock()->InsertInstructionBefore(neg, instruction);
+ instruction->ReplaceWith(neg);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ RecordSimplification();
+ left->GetBlock()->RemoveInstruction(left);
+ }
}
void InstructionSimplifierVisitor::VisitUShr(HUShr* instruction) {
@@ -397,6 +606,7 @@
// NOT dst, src
HNot* bitwise_not = new (GetGraph()->GetArena()) HNot(instruction->GetType(), input_other);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, bitwise_not);
+ RecordSimplification();
return;
}
}
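
The rewrites above exploit integer identities that hold for two's-complement arithmetic (and, as the in-code comments note, deliberately not for floating-point). A minimal standalone check of those identities, added here for illustration only and not part of the patch:

#include <cassert>
#include <cstdint>

int main() {
  int32_t a = 123;
  int32_t b = -7;
  assert(-(-a) == a);              // NEG(NEG(src))  -> src
  assert(~(~a) == a);              // NOT(NOT(src))  -> src
  assert(-(a - b) == (b - a));     // NEG(SUB(a, b)) -> SUB(b, a)
  assert((a - (-b)) == (a + b));   // SUB(a, NEG(b)) -> ADD(a, b)
  assert(((-a) - b) == -(a + b));  // SUB(NEG(a), b) -> NEG(ADD(a, b))
  assert((0 - a) == -a);           // SUB(0, src)    -> NEG(src), integers only
  return 0;
}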
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 94e27e9..9a6062f 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -94,7 +94,7 @@
Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
Location actual_loc = locations->InAt(i);
- parallel_move.AddMove(actual_loc, cc_loc, nullptr);
+ parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
}
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d1176c4..d3a4e6c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -103,7 +103,7 @@
Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
Location actual_loc = locations->InAt(i);
- parallel_move.AddMove(actual_loc, cc_loc, nullptr);
+ parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
}
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index aec2d19..3c7a266 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -128,7 +128,7 @@
Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
Location actual_loc = locations->InAt(i);
- parallel_move.AddMove(actual_loc, cc_loc, nullptr);
+ parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
}
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index cbf94f0..d9a1c31 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -120,7 +120,7 @@
Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
Location actual_loc = locations->InAt(i);
- parallel_move.AddMove(actual_loc, cc_loc, nullptr);
+ parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
}
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 28c5555..7818c60 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -50,12 +50,12 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- ASSERT_EQ(liveness.GetLinearOrder().Size(), number_of_blocks);
+ ASSERT_EQ(graph->GetLinearOrder().Size(), number_of_blocks);
for (size_t i = 0; i < number_of_blocks; ++i) {
- ASSERT_EQ(liveness.GetLinearOrder().Get(i)->GetBlockId(), expected_order[i]);
+ ASSERT_EQ(graph->GetLinearOrder().Get(i)->GetBlockId(), expected_order[i]);
}
}
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 61d6593..5236773 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -69,7 +69,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -117,7 +117,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -168,7 +168,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Test for the 4 constant.
@@ -247,7 +247,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Test for the 0 constant.
@@ -327,7 +327,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Test for the 0 constant.
@@ -405,7 +405,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Test for the 0 constant.
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 81250ca..8a96ee9 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -57,7 +57,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
std::ostringstream buffer;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d8a8554..ada3487 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -51,9 +51,7 @@
for (size_t i = 0; i < blocks_.Size(); ++i) {
if (!visited.IsBitSet(i)) {
HBasicBlock* block = blocks_.Get(i);
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- RemoveAsUser(it.Current());
- }
+ DCHECK(block->GetPhis().IsEmpty()) << "Phis are not inserted at this stage";
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
RemoveAsUser(it.Current());
}
@@ -61,19 +59,17 @@
}
}
-void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) const {
+void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
for (size_t i = 0; i < blocks_.Size(); ++i) {
if (!visited.IsBitSet(i)) {
HBasicBlock* block = blocks_.Get(i);
+      // We only need to update the successors, which might still be live.
for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
block->GetSuccessors().Get(j)->RemovePredecessor(block);
}
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- block->RemovePhi(it.Current()->AsPhi(), /*ensure_safety=*/ false);
- }
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- block->RemoveInstruction(it.Current(), /*ensure_safety=*/ false);
- }
+ // Remove the block from the list of blocks, so that further analyses
+ // never see it.
+ blocks_.Put(i, nullptr);
}
}
}
@@ -258,6 +254,7 @@
// (2): Simplify loops by having only one back edge, and one preheader.
for (size_t i = 0; i < blocks_.Size(); ++i) {
HBasicBlock* block = blocks_.Get(i);
+ if (block == nullptr) continue;
if (block->GetSuccessors().Size() > 1) {
for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
HBasicBlock* successor = block->GetSuccessors().Get(j);
@@ -274,8 +271,9 @@
}
bool HGraph::AnalyzeNaturalLoops() const {
- for (size_t i = 0; i < blocks_.Size(); ++i) {
- HBasicBlock* block = blocks_.Get(i);
+ // Order does not matter.
+ for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
if (block->IsLoopHeader()) {
HLoopInformation* info = block->GetLoopInformation();
if (!info->Populate()) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index f764eb4..fe47939 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -112,6 +112,7 @@
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
+ linear_order_(arena, kDefaultNumberOfBlocks),
entry_block_(nullptr),
exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
@@ -216,6 +217,10 @@
return reverse_post_order_;
}
+ const GrowableArray<HBasicBlock*>& GetLinearOrder() const {
+ return linear_order_;
+ }
+
bool HasArrayAccesses() const {
return has_array_accesses_;
}
@@ -248,7 +253,7 @@
ArenaBitVector* visited,
ArenaBitVector* visiting);
void RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const;
- void RemoveDeadBlocks(const ArenaBitVector& visited) const;
+ void RemoveDeadBlocks(const ArenaBitVector& visited);
template <class InstType, typename ValueType>
InstType* CreateConstant(ValueType value, ArenaSafeMap<ValueType, InstType*>* cache);
@@ -262,6 +267,9 @@
// List of blocks to perform a reverse post order tree traversal.
GrowableArray<HBasicBlock*> reverse_post_order_;
+ // List of blocks to perform a linear order tree traversal.
+ GrowableArray<HBasicBlock*> linear_order_;
+
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
@@ -293,6 +301,7 @@
ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
+ friend class SsaLivenessAnalysis; // For the linear order.
ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -676,6 +685,7 @@
M(ArrayGet, Instruction) \
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
+ M(BooleanNot, UnaryOperation) \
M(BoundsCheck, Instruction) \
M(BoundType, Instruction) \
M(CheckCast, Instruction) \
@@ -1177,6 +1187,9 @@
bool HasUses() const { return !uses_.IsEmpty() || !env_uses_.IsEmpty(); }
bool HasEnvironmentUses() const { return !env_uses_.IsEmpty(); }
bool HasNonEnvironmentUses() const { return !uses_.IsEmpty(); }
+ bool HasOnlyOneNonEnvironmentUse() const {
+ return !HasEnvironmentUses() && GetUses().HasOnlyOneUse();
+ }
// Does this instruction strictly dominate `other_instruction`?
// Returns false if this instruction and `other_instruction` are the same.
@@ -1214,6 +1227,13 @@
void ReplaceWith(HInstruction* instruction);
void ReplaceInput(HInstruction* replacement, size_t index);
+ // This is almost the same as doing `ReplaceWith()`. But in this helper, the
+ // uses of this instruction by `other` are *not* updated.
+ void ReplaceWithExceptInReplacementAtIndex(HInstruction* other, size_t use_index) {
+ ReplaceWith(other);
+ other->ReplaceInput(this, use_index);
+ }
+
// Move `this` instruction before `cursor`.
void MoveBefore(HInstruction* cursor);
@@ -2633,6 +2653,33 @@
DISALLOW_COPY_AND_ASSIGN(HNot);
};
+class HBooleanNot : public HUnaryOperation {
+ public:
+ explicit HBooleanNot(HInstruction* input)
+ : HUnaryOperation(Primitive::Type::kPrimBoolean, input) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
+ int32_t Evaluate(int32_t x) const OVERRIDE {
+ DCHECK(IsUint<1>(x));
+ return !x;
+ }
+
+ int64_t Evaluate(int64_t x ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " cannot be used with 64-bit values";
+ UNREACHABLE();
+ }
+
+ DECLARE_INSTRUCTION(BooleanNot);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HBooleanNot);
+};
+
class HTypeConversion : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
@@ -3408,8 +3455,11 @@
class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
- MoveOperands(Location source, Location destination, HInstruction* instruction)
- : source_(source), destination_(destination), instruction_(instruction) {}
+ MoveOperands(Location source,
+ Location destination,
+ Primitive::Type type,
+ HInstruction* instruction)
+ : source_(source), destination_(destination), type_(type), instruction_(instruction) {}
Location GetSource() const { return source_; }
Location GetDestination() const { return destination_; }
@@ -3457,11 +3507,17 @@
return source_.IsInvalid();
}
+ bool Is64BitMove() const {
+ return Primitive::Is64BitType(type_);
+ }
+
HInstruction* GetInstruction() const { return instruction_; }
private:
Location source_;
Location destination_;
+ // The type this move is for.
+ Primitive::Type type_;
// The instruction this move is associated with. Null when this move is
// for moving an input in the expected locations of its user (including a phi user).
// This is only used in debug mode, to ensure we do not connect interval siblings
@@ -3476,7 +3532,10 @@
explicit HParallelMove(ArenaAllocator* arena)
: HTemplateInstruction(SideEffects::None()), moves_(arena, kDefaultNumberOfMoves) {}
- void AddMove(Location source, Location destination, HInstruction* instruction) {
+ void AddMove(Location source,
+ Location destination,
+ Primitive::Type type,
+ HInstruction* instruction) {
DCHECK(source.IsValid());
DCHECK(destination.IsValid());
if (kIsDebugBuild) {
@@ -3502,7 +3561,7 @@
<< "Same destination for two moves in a parallel move.";
}
}
- moves_.Add(MoveOperands(source, destination, instruction));
+ moves_.Add(MoveOperands(source, destination, type, instruction));
}
MoveOperands* MoveOperandsAt(size_t index) const {
@@ -3618,6 +3677,43 @@
DISALLOW_COPY_AND_ASSIGN(HPostOrderIterator);
};
+class HLinearPostOrderIterator : public ValueObject {
+ public:
+ explicit HLinearPostOrderIterator(const HGraph& graph)
+ : order_(graph.GetLinearOrder()), index_(graph.GetLinearOrder().Size()) {}
+
+ bool Done() const { return index_ == 0; }
+
+  HBasicBlock* Current() const { return order_.Get(index_ - 1); }
+
+ void Advance() {
+ --index_;
+ DCHECK_GE(index_, 0U);
+ }
+
+ private:
+ const GrowableArray<HBasicBlock*>& order_;
+ size_t index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLinearPostOrderIterator);
+};
+
+class HLinearOrderIterator : public ValueObject {
+ public:
+ explicit HLinearOrderIterator(const HGraph& graph)
+ : order_(graph.GetLinearOrder()), index_(0) {}
+
+ bool Done() const { return index_ == order_.Size(); }
+ HBasicBlock* Current() const { return order_.Get(index_); }
+ void Advance() { ++index_; }
+
+ private:
+ const GrowableArray<HBasicBlock*>& order_;
+ size_t index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLinearOrderIterator);
+};
+
// Iterator over the blocks that are part of the loop. Includes blocks part
// of an inner loop. The order in which the blocks are iterated is on their
// block id.
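
The two iterators added above simply walk the graph's stored linear order forwards and backwards. A standalone analogue over std::vector (hypothetical names; the real classes operate on GrowableArray<HBasicBlock*>) to illustrate the traversal contract:

#include <cstddef>
#include <iostream>
#include <vector>

// Forward walk, analogous to HLinearOrderIterator.
class ForwardIterator {
 public:
  explicit ForwardIterator(const std::vector<int>& order) : order_(order), index_(0) {}
  bool Done() const { return index_ == order_.size(); }
  int Current() const { return order_[index_]; }
  void Advance() { ++index_; }

 private:
  const std::vector<int>& order_;
  size_t index_;
};

// Backward walk, analogous to HLinearPostOrderIterator.
class BackwardIterator {
 public:
  explicit BackwardIterator(const std::vector<int>& order)
      : order_(order), index_(order.size()) {}
  bool Done() const { return index_ == 0; }
  int Current() const { return order_[index_ - 1]; }
  void Advance() { --index_; }

 private:
  const std::vector<int>& order_;
  size_t index_;
};

int main() {
  std::vector<int> order = {0, 1, 2, 3};
  for (ForwardIterator it(order); !it.Done(); it.Advance()) std::cout << it.Current() << ' ';
  std::cout << '\n';  // Prints: 0 1 2 3
  for (BackwardIterator it(order); !it.Done(); it.Advance()) std::cout << it.Current() << ' ';
  std::cout << '\n';  // Prints: 3 2 1 0
  return 0;
}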
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 6d986ba..b2c13ad 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -73,7 +73,7 @@
code_gen->ComputeSpillMask();
code_gen->SetFrameSize(frame_size);
code_gen->GenerateFrameEntry();
- code_gen->GetInstructionVisitor()->VisitReturnVoid(new (&allocator) HReturnVoid());
+ code_gen->GenerateFrameExit();
// Get the outputs.
InternalCodeAllocator code_allocator;
code_gen->Finalize(&code_allocator);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0e02212..efca1a5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -200,20 +200,6 @@
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
- bool WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (kProduce64BitELFFiles && Is64BitInstructionSet(GetCompilerDriver()->GetInstructionSet())) {
- return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host,
- *GetCompilerDriver());
- } else {
- return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
- *GetCompilerDriver());
- }
- }
-
void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
void Init() OVERRIDE;
@@ -371,9 +357,20 @@
return ArrayRef<const uint8_t>(vector);
}
-// TODO: The function below uses too much stack space.
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wframe-larger-than="
+static void AllocateRegisters(HGraph* graph,
+ CodeGenerator* codegen,
+ PassInfoPrinter* pass_info_printer) {
+ PrepareForRegisterAllocation(graph).Run();
+ SsaLivenessAnalysis liveness(graph, codegen);
+ {
+ PassInfo pass_info(SsaLivenessAnalysis::kLivenessPassName, pass_info_printer);
+ liveness.Analyze();
+ }
+ {
+ PassInfo pass_info(RegisterAllocator::kRegisterAllocatorPassName, pass_info_printer);
+ RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
+ }
+}
CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
CodeGenerator* codegen,
@@ -385,16 +382,7 @@
RunOptimizations(graph, compiler_driver, &compilation_stats_,
dex_file, dex_compilation_unit, pass_info_printer, &handles);
- PrepareForRegisterAllocation(graph).Run();
- SsaLivenessAnalysis liveness(*graph, codegen);
- {
- PassInfo pass_info(SsaLivenessAnalysis::kLivenessPassName, pass_info_printer);
- liveness.Analyze();
- }
- {
- PassInfo pass_info(RegisterAllocator::kRegisterAllocatorPassName, pass_info_printer);
- RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
- }
+ AllocateRegisters(graph, codegen, pass_info_printer);
CodeVectorAllocator allocator;
codegen->CompileOptimized(&allocator);
@@ -427,8 +415,6 @@
ArrayRef<const LinkerPatch>());
}
-#pragma GCC diagnostic pop
-
CompiledMethod* OptimizingCompiler::CompileBaseline(
CodeGenerator* codegen,
CompilerDriver* compiler_driver,
@@ -530,7 +516,7 @@
return nullptr;
}
codegen->GetAssembler()->cfi().SetEnabled(
- compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols());
+ compiler_driver->GetCompilerOptions().GetIncludeCFI());
PassInfoPrinter pass_info_printer(graph,
method_name.c_str(),
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index b97a667..4d5b8d0 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -47,6 +47,7 @@
kNotCompiledUnhandledInstruction,
kRemovedCheckedCast,
kRemovedNullCheck,
+ kInstructionSimplifications,
kLastStat
};
@@ -110,6 +111,7 @@
case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
case kRemovedCheckedCast: return "kRemovedCheckedCast";
case kRemovedNullCheck: return "kRemovedNullCheck";
+ case kInstructionSimplifications: return "kInstructionSimplifications";
default: LOG(FATAL) << "invalid stat";
}
return "";
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 9df8f56..ad92ca5 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -189,9 +189,9 @@
const MoveOperands& other_move = *moves_.Get(i);
if (other_move.Blocks(destination)) {
DCHECK(other_move.IsPending());
- if (!destination.IsPair() && other_move.GetSource().IsPair()) {
- // We swap pairs before swapping non-pairs. Go back from the
- // cycle by returning the pair that must be swapped.
+ if (!move->Is64BitMove() && other_move.Is64BitMove()) {
+        // We swap 64-bit moves before swapping 32-bit moves. Go back from the
+ // cycle by returning the move that must be swapped.
return moves_.Get(i);
}
do_swap = true;
@@ -216,7 +216,7 @@
UpdateSourceOf(moves_.Get(i), swap_destination, source);
}
}
- // If the swap was required because of a pair in the middle of a cycle,
+  // If the swap was required because of a 64-bit move in the middle of a cycle,
// we return the swapped move, so that the caller knows it needs to re-iterate
// its dependency loop.
return required_swap;
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index 3fa1b37..95f8ad5 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -85,12 +85,18 @@
// other moves to satisfy dependencies).
//
// Return whether another move in the dependency cycle needs to swap. This
- // is to handle pair swaps, where we want the pair to swap first to avoid
- // building pairs that are unexpected by the code generator. For example, if
- // we were to swap R1 with R2, we would need to update all locations using
- // R2 to R1. So a (R2,R3) pair register could become (R1,R3). We could make
- // the code generator understand such pairs, but it's easier and cleaner to
- // just not create such pairs and exchange pairs in priority.
+  // is to handle 64-bit swaps:
+ // 1) In the case of register pairs, where we want the pair to swap first to avoid
+ // building pairs that are unexpected by the code generator. For example, if
+ // we were to swap R1 with R2, we would need to update all locations using
+ // R2 to R1. So a (R2,R3) pair register could become (R1,R3). We could make
+ // the code generator understand such pairs, but it's easier and cleaner to
+ // just not create such pairs and exchange pairs in priority.
+  // 2) Even when the architecture does not have pairs, we must handle 64-bit swaps
+ // first. Consider the case: (R0->R1) (R1->S) (S->R0), where 'S' is a single
+ // stack slot. If we end up swapping S and R0, S will only contain the low bits
+  // of R0. If R0->R1 is for a 64-bit instruction, R1 will therefore not contain
+ // the right value.
MoveOperands* PerformMove(size_t index);
DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolver);
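
The ordering rule above can be demonstrated with the cycle from the header comment: (R0->R1) (R1->S) (S->R0), where R0 carries a 64-bit value and S is a single 32-bit stack slot. A self-contained sketch (toy values, not the resolver itself) showing why the 64-bit move must be resolved first:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t r0 = 0xAAAABBBBCCCCDDDDull;  // Must reach R1 intact (64-bit move).
  const uint64_t r1 = 0x1111111122222222ull;
  const uint32_t s  = 0x33333333u;

  // Wrong order: resolve the 32-bit end of the cycle first by swapping S <-> R0.
  // A 32-bit swap only exchanges the low half, so R0's high bits are lost.
  {
    uint64_t bad_r0 = r0, bad_r1 = r1;
    uint32_t bad_s = s;
    uint32_t tmp = bad_s;
    bad_s = static_cast<uint32_t>(bad_r0);  // S now holds only low32(R0).
    bad_r0 = tmp;                           // R0's high half is gone.
    bad_r1 = bad_r0;                        // 64-bit move R0 -> R1: too late.
    (void)bad_s;
    assert(bad_r1 != 0xAAAABBBBCCCCDDDDull);  // R1 did not receive the full value.
  }

  // Right order: perform the 64-bit swap R0 <-> R1 first; the remaining 32-bit
  // moves then read from the surviving copy of the old R1 value.
  {
    uint64_t good_r0 = r0, good_r1 = r1;
    uint32_t good_s = s;
    uint64_t tmp = good_r0;
    good_r0 = good_r1;
    good_r1 = tmp;                                  // 64-bit swap.
    uint32_t low = static_cast<uint32_t>(good_r0);  // Old R1 value, destined for S.
    good_r0 = good_s;                               // S -> R0.
    good_s = low;                                   // R1 -> S.
    assert(good_r1 == 0xAAAABBBBCCCCDDDDull);
    assert(good_s == 0x22222222u);
    assert(good_r0 == 0x33333333u);
  }
  return 0;
}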
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 5c502f7..95cca51 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -87,6 +87,7 @@
moves->AddMove(
Location::RegisterLocation(operands[i][0]),
Location::RegisterLocation(operands[i][1]),
+ Primitive::kPrimInt,
nullptr);
}
return moves;
@@ -145,10 +146,12 @@
moves->AddMove(
Location::ConstantLocation(new (&allocator) HIntConstant(0)),
Location::RegisterLocation(0),
+ Primitive::kPrimInt,
nullptr);
moves->AddMove(
Location::RegisterLocation(1),
Location::RegisterLocation(2),
+ Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(1 -> 2) (C -> 0)", resolver.GetMessage().c_str());
@@ -164,10 +167,12 @@
moves->AddMove(
Location::RegisterLocation(2),
Location::RegisterLocation(4),
+ Primitive::kPrimInt,
nullptr);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(2 -> 4) (0,1 -> 2,3)", resolver.GetMessage().c_str());
@@ -179,10 +184,12 @@
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
moves->AddMove(
Location::RegisterLocation(2),
Location::RegisterLocation(4),
+ Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(2 -> 4) (0,1 -> 2,3)", resolver.GetMessage().c_str());
@@ -194,10 +201,12 @@
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
moves->AddMove(
Location::RegisterLocation(2),
Location::RegisterLocation(0),
+ Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
@@ -208,14 +217,17 @@
moves->AddMove(
Location::RegisterLocation(2),
Location::RegisterLocation(7),
+ Primitive::kPrimInt,
nullptr);
moves->AddMove(
Location::RegisterLocation(7),
Location::RegisterLocation(1),
+ Primitive::kPrimInt,
nullptr);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
@@ -226,14 +238,17 @@
moves->AddMove(
Location::RegisterLocation(2),
Location::RegisterLocation(7),
+ Primitive::kPrimInt,
nullptr);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
moves->AddMove(
Location::RegisterLocation(7),
Location::RegisterLocation(1),
+ Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
@@ -244,14 +259,17 @@
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
moves->AddMove(
Location::RegisterLocation(2),
Location::RegisterLocation(7),
+ Primitive::kPrimInt,
nullptr);
moves->AddMove(
Location::RegisterLocation(7),
Location::RegisterLocation(1),
+ Primitive::kPrimInt,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(0,1 <-> 2,3) (7 -> 1) (0 -> 7)", resolver.GetMessage().c_str());
@@ -262,10 +280,12 @@
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
moves->AddMove(
Location::RegisterPairLocation(2, 3),
Location::RegisterPairLocation(0, 1),
+ Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(2,3 <-> 0,1)", resolver.GetMessage().c_str());
@@ -276,10 +296,12 @@
moves->AddMove(
Location::RegisterPairLocation(2, 3),
Location::RegisterPairLocation(0, 1),
+ Primitive::kPrimLong,
nullptr);
moves->AddMove(
Location::RegisterPairLocation(0, 1),
Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
@@ -292,18 +314,71 @@
moves->AddMove(
Location::RegisterLocation(10),
Location::RegisterLocation(5),
+ Primitive::kPrimInt,
nullptr);
moves->AddMove(
Location::RegisterPairLocation(4, 5),
Location::DoubleStackSlot(32),
+ Primitive::kPrimLong,
nullptr);
moves->AddMove(
Location::DoubleStackSlot(32),
Location::RegisterPairLocation(10, 11),
+ Primitive::kPrimLong,
nullptr);
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(2x32(sp) <-> 10,11) (4,5 <-> 2x32(sp)) (4 -> 5)", resolver.GetMessage().c_str());
}
}
+// Test that we do 64-bit moves before 32-bit moves.
+TEST(ParallelMoveTest, CyclesWith64BitsMoves) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+
+ {
+ TestParallelMoveResolver resolver(&allocator);
+ HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
+ moves->AddMove(
+ Location::RegisterLocation(0),
+ Location::RegisterLocation(1),
+ Primitive::kPrimLong,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterLocation(1),
+ Location::StackSlot(48),
+ Primitive::kPrimInt,
+ nullptr);
+ moves->AddMove(
+ Location::StackSlot(48),
+ Location::RegisterLocation(0),
+ Primitive::kPrimInt,
+ nullptr);
+ resolver.EmitNativeCode(moves);
+ ASSERT_STREQ("(0 <-> 1) (48(sp) <-> 0)", resolver.GetMessage().c_str());
+ }
+
+ {
+ TestParallelMoveResolver resolver(&allocator);
+ HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
+ moves->AddMove(
+ Location::RegisterPairLocation(0, 1),
+ Location::RegisterPairLocation(2, 3),
+ Primitive::kPrimLong,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterPairLocation(2, 3),
+ Location::DoubleStackSlot(32),
+ Primitive::kPrimLong,
+ nullptr);
+ moves->AddMove(
+ Location::DoubleStackSlot(32),
+ Location::RegisterPairLocation(0, 1),
+ Primitive::kPrimLong,
+ nullptr);
+ resolver.EmitNativeCode(moves);
+ ASSERT_STREQ("(2x32(sp) <-> 0,1) (2,3 <-> 2x32(sp))", resolver.GetMessage().c_str());
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc
index c20c8a1..af93438 100644
--- a/compiler/optimizing/primitive_type_propagation.cc
+++ b/compiler/optimizing/primitive_type_propagation.cc
@@ -65,6 +65,10 @@
if (equivalent->IsPhi()) {
equivalent->AsPhi()->SetLive();
AddToWorklist(equivalent->AsPhi());
+ } else if (equivalent == input) {
+ // The input has changed its type. It can be an input of other phis,
+    // so we need to put its phi users in the worklist.
+ AddDependentInstructionsToWorklist(equivalent);
}
}
}
@@ -117,10 +121,10 @@
worklist_.Add(instruction);
}
-void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HPhi* instruction) {
+void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
HPhi* phi = it.Current()->GetUser()->AsPhi();
- if (phi != nullptr && phi->IsLive()) {
+ if (phi != nullptr && phi->IsLive() && phi->GetType() != instruction->GetType()) {
AddToWorklist(phi);
}
}
diff --git a/compiler/optimizing/primitive_type_propagation.h b/compiler/optimizing/primitive_type_propagation.h
index 1374cbb..6d370ed 100644
--- a/compiler/optimizing/primitive_type_propagation.h
+++ b/compiler/optimizing/primitive_type_propagation.h
@@ -33,7 +33,7 @@
void VisitBasicBlock(HBasicBlock* block);
void ProcessWorklist();
void AddToWorklist(HPhi* phi);
- void AddDependentInstructionsToWorklist(HPhi* phi);
+ void AddDependentInstructionsToWorklist(HInstruction* instruction);
bool UpdateType(HPhi* phi);
HGraph* const graph_;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 4bca434..2fbd051 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -103,7 +103,7 @@
// Since only parallel moves have been inserted during the register allocation,
// these checks are mostly for making sure these moves have been added correctly.
size_t current_liveness = 0;
- for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
+ for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
HInstruction* instruction = inst_it.Current();
@@ -147,7 +147,7 @@
void RegisterAllocator::AllocateRegistersInternal() {
// Iterate post-order, to ensure the list is sorted, and the last added interval
// is the one with the lowest start position.
- for (HLinearPostOrderIterator it(liveness_); !it.Done(); it.Advance()) {
+ for (HLinearPostOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
back_it.Advance()) {
@@ -224,7 +224,7 @@
temp_intervals_.Add(interval);
interval->AddTempUse(instruction, i);
if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
- interval->AddHighInterval(true);
+ interval->AddHighInterval(/* is_temp */ true);
LiveInterval* high = interval->GetHighInterval();
temp_intervals_.Add(high);
unhandled_fp_intervals_.Add(high);
@@ -310,6 +310,29 @@
current->AddHighInterval();
}
+ for (size_t safepoint_index = safepoints_.Size(); safepoint_index > 0; --safepoint_index) {
+ HInstruction* safepoint = safepoints_.Get(safepoint_index - 1);
+ size_t safepoint_position = safepoint->GetLifetimePosition();
+
+ // Test that safepoints are ordered in the optimal way.
+ DCHECK(safepoint_index == safepoints_.Size()
+ || safepoints_.Get(safepoint_index)->GetLifetimePosition() < safepoint_position);
+
+ if (safepoint_position == current->GetStart()) {
+ // The safepoint is for this instruction, so the location of the instruction
+ // does not need to be saved.
+ DCHECK_EQ(safepoint_index, safepoints_.Size());
+ DCHECK_EQ(safepoint, instruction);
+ continue;
+ } else if (current->IsDeadAt(safepoint_position)) {
+ break;
+ } else if (!current->Covers(safepoint_position)) {
+ // Hole in the interval.
+ continue;
+ }
+ current->AddSafepoint(safepoint);
+ }
+
// Some instructions define their output in fixed register/stack slot. We need
// to ensure we know these locations before doing register allocation. For a
// given register, we create an interval that covers these locations. The register
@@ -1204,10 +1227,10 @@
&& codegen_->ShouldSplitLongMoves()
// The parallel move resolver knows how to deal with long constants.
&& !source.IsConstant()) {
- move->AddMove(source.ToLow(), destination.ToLow(), instruction);
- move->AddMove(source.ToHigh(), destination.ToHigh(), nullptr);
+ move->AddMove(source.ToLow(), destination.ToLow(), Primitive::kPrimInt, instruction);
+ move->AddMove(source.ToHigh(), destination.ToHigh(), Primitive::kPrimInt, nullptr);
} else {
- move->AddMove(source, destination, instruction);
+ move->AddMove(source, destination, type, instruction);
}
}
@@ -1399,7 +1422,7 @@
: Location::StackSlot(interval->GetParent()->GetSpillSlot()));
}
UsePosition* use = current->GetFirstUse();
- size_t safepoint_index = safepoints_.Size();
+ SafepointPosition* safepoint_position = interval->GetFirstSafepoint();
// Walk over all siblings, updating locations of use positions, and
// connecting them when they are adjacent.
@@ -1450,28 +1473,13 @@
InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
}
- // At each safepoint, we record stack and register information.
- // We iterate backwards to test safepoints in ascending order of positions,
- // which is what LiveInterval::Covers is optimized for.
- for (; safepoint_index > 0; --safepoint_index) {
- HInstruction* safepoint = safepoints_.Get(safepoint_index - 1);
- size_t position = safepoint->GetLifetimePosition();
-
- // Test that safepoints are ordered in the optimal way.
- DCHECK(safepoint_index == safepoints_.Size()
- || safepoints_.Get(safepoint_index)->GetLifetimePosition() <= position);
-
- if (current->IsDeadAt(position)) {
+ for (; safepoint_position != nullptr; safepoint_position = safepoint_position->GetNext()) {
+ if (!current->Covers(safepoint_position->GetPosition())) {
+ DCHECK(next_sibling != nullptr);
break;
- } else if (!current->Covers(position)) {
- continue;
- } else if (interval->GetStart() == position) {
- // The safepoint is for this instruction, so the location of the instruction
- // does not need to be saved.
- continue;
}
- LocationSummary* locations = safepoint->GetLocations();
+ LocationSummary* locations = safepoint_position->GetLocations();
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
}
@@ -1515,6 +1523,7 @@
} while (current != nullptr);
if (kIsDebugBuild) {
+ DCHECK(safepoint_position == nullptr);
// Following uses can only be environment uses. The location for
// these environments will be none.
while (use != nullptr) {
@@ -1589,7 +1598,7 @@
maximum_number_of_live_core_registers_,
maximum_number_of_live_fp_registers_,
reserved_out_slots_,
- liveness_.GetLinearOrder());
+ codegen_->GetGraph()->GetLinearOrder());
// Adjust the Out Location of instructions.
// TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
@@ -1669,7 +1678,7 @@
}
// Resolve non-linear control flow across branches. Order does not matter.
- for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
+ for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
BitVector* live = liveness_.GetLiveInSet(*block);
for (uint32_t idx : live->Indexes()) {
@@ -1682,7 +1691,7 @@
}
// Resolve phi inputs. Order does not matter.
- for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
+ for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
HInstruction* phi = inst_it.Current();
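
The safepoint recording added above boils down to filtering a position-sorted safepoint list against an interval's live ranges: stop once the interval is dead, skip positions that fall in a hole, and record the rest (the patch additionally skips the safepoint of the defining instruction itself). A small standalone model of that filtering, with illustrative names only:

#include <cassert>
#include <cstddef>
#include <vector>

struct Range { size_t start, end; };  // Half-open [start, end).

static bool Covers(const std::vector<Range>& ranges, size_t pos) {
  for (const Range& r : ranges) {
    if (pos >= r.start && pos < r.end) return true;
  }
  return false;
}

int main() {
  // An interval live over [4, 10) and [14, 20), with a hole in [10, 14).
  std::vector<Range> interval = {{4, 10}, {14, 20}};
  std::vector<size_t> safepoints = {2, 6, 12, 16, 24};  // Ascending positions.

  std::vector<size_t> recorded;
  for (size_t pos : safepoints) {
    if (pos >= 20) break;                  // Interval is dead from here on.
    if (!Covers(interval, pos)) continue;  // Before the start or in a hole.
    recorded.push_back(pos);
  }
  assert((recorded == std::vector<size_t>{6, 16}));
  return 0;
}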
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 3951439..c307d98 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -46,7 +46,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -306,7 +306,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -340,7 +340,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
HXor* first_xor = graph->GetBlocks().Get(1)->GetFirstInstruction()->AsXor();
@@ -395,7 +395,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -419,7 +419,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
@@ -523,7 +523,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Check that the register allocator is deterministic.
@@ -540,7 +540,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Set the phi to a specific register, and check that the inputs get allocated
@@ -559,7 +559,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Set input1 to a specific register, and check that the phi and other input get allocated
@@ -578,7 +578,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Set input2 to a specific register, and check that the phi and other input get allocated
@@ -632,7 +632,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
@@ -647,7 +647,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
@@ -699,7 +699,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
@@ -715,7 +715,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
// check that both adds get the same register.
@@ -766,7 +766,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
@@ -856,7 +856,7 @@
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(*graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen);
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.unhandled_core_intervals_.Add(fourth);
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 95da6ef..302df2a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -69,9 +69,9 @@
// current reverse post order in the graph, but it would require making
// order queries to a GrowableArray, which is not the best data structure
// for it.
- GrowableArray<uint32_t> forward_predecessors(graph_.GetArena(), graph_.GetBlocks().Size());
- forward_predecessors.SetSize(graph_.GetBlocks().Size());
- for (HReversePostOrderIterator it(graph_); !it.Done(); it.Advance()) {
+ GrowableArray<uint32_t> forward_predecessors(graph_->GetArena(), graph_->GetBlocks().Size());
+ forward_predecessors.SetSize(graph_->GetBlocks().Size());
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
size_t number_of_forward_predecessors = block->GetPredecessors().Size();
if (block->IsLoopHeader()) {
@@ -86,11 +86,11 @@
// iterate over the successors. When all non-back edge predecessors of a
// successor block are visited, the successor block is added in the worklist
// following an order that satisfies the requirements to build our linear graph.
- GrowableArray<HBasicBlock*> worklist(graph_.GetArena(), 1);
- worklist.Add(graph_.GetEntryBlock());
+ GrowableArray<HBasicBlock*> worklist(graph_->GetArena(), 1);
+ worklist.Add(graph_->GetEntryBlock());
do {
HBasicBlock* current = worklist.Pop();
- linear_order_.Add(current);
+ graph_->linear_order_.Add(current);
for (size_t i = 0, e = current->GetSuccessors().Size(); i < e; ++i) {
HBasicBlock* successor = current->GetSuccessors().Get(i);
int block_id = successor->GetBlockId();
@@ -115,7 +115,7 @@
// to differentiate between the start and end of an instruction. Adding 2 to
// the lifetime position for each instruction ensures the start of an
// instruction is different than the end of the previous instruction.
- for (HLinearOrderIterator it(*this); !it.Done(); it.Advance()) {
+ for (HLinearOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
block->SetLifetimeStart(lifetime_position);
@@ -127,7 +127,7 @@
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -145,7 +145,7 @@
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
}
instructions_from_lifetime_position_.Add(current);
current->SetLifetimePosition(lifetime_position);
@@ -158,11 +158,11 @@
}
void SsaLivenessAnalysis::ComputeLiveness() {
- for (HLinearOrderIterator it(*this); !it.Done(); it.Advance()) {
+ for (HLinearOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
block_infos_.Put(
block->GetBlockId(),
- new (graph_.GetArena()) BlockInfo(graph_.GetArena(), *block, number_of_ssa_values_));
+ new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_));
}
// Compute the live ranges, as well as the initial live_in, live_out, and kill sets.
@@ -179,7 +179,7 @@
void SsaLivenessAnalysis::ComputeLiveRanges() {
// Do a post order visit, adding inputs of instructions live in the block where
// that instruction is defined, and killing instructions that are being visited.
- for (HLinearPostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ for (HLinearPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
BitVector* kill = GetKillSet(*block);
@@ -281,7 +281,7 @@
do {
changed = false;
- for (HPostOrderIterator it(graph_); !it.Done(); it.Advance()) {
+ for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
const HBasicBlock& block = *it.Current();
// The live_in set depends on the kill set (which does not
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index d2da84c..2b51f94 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -149,6 +149,39 @@
DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
+class SafepointPosition : public ArenaObject<kArenaAllocMisc> {
+ public:
+ explicit SafepointPosition(HInstruction* instruction)
+ : instruction_(instruction),
+ next_(nullptr) {}
+
+ void SetNext(SafepointPosition* next) {
+ next_ = next;
+ }
+
+ size_t GetPosition() const {
+ return instruction_->GetLifetimePosition();
+ }
+
+ SafepointPosition* GetNext() const {
+ return next_;
+ }
+
+ LocationSummary* GetLocations() const {
+ return instruction_->GetLocations();
+ }
+
+ HInstruction* GetInstruction() const {
+ return instruction_;
+ }
+
+ private:
+ HInstruction* const instruction_;
+ SafepointPosition* next_;
+
+ DISALLOW_COPY_AND_ASSIGN(SafepointPosition);
+};
+
/**
* An interval is a list of disjoint live ranges where an instruction is live.
* Each instruction that has uses gets an interval.
@@ -703,6 +736,22 @@
UNREACHABLE();
}
+ void AddSafepoint(HInstruction* instruction) {
+ SafepointPosition* safepoint = new (allocator_) SafepointPosition(instruction);
+ if (first_safepoint_ == nullptr) {
+ first_safepoint_ = last_safepoint_ = safepoint;
+ } else {
+ DCHECK_LT(last_safepoint_->GetPosition(), safepoint->GetPosition());
+ last_safepoint_->SetNext(safepoint);
+ last_safepoint_ = safepoint;
+ }
+ }
+
+ SafepointPosition* GetFirstSafepoint() const {
+ DCHECK_EQ(GetParent(), this) << "Only the first sibling lists safepoints";
+ return first_safepoint_;
+ }
+
private:
LiveInterval(ArenaAllocator* allocator,
Primitive::Type type,
@@ -715,6 +764,8 @@
: allocator_(allocator),
first_range_(nullptr),
last_range_(nullptr),
+ first_safepoint_(nullptr),
+ last_safepoint_(nullptr),
last_visited_range_(nullptr),
first_use_(nullptr),
type_(type),
@@ -771,6 +822,10 @@
LiveRange* first_range_;
LiveRange* last_range_;
+ // Safepoints where this interval is live. Only set in the parent interval.
+ SafepointPosition* first_safepoint_;
+ SafepointPosition* last_safepoint_;
+
// Last visited range. This is a range search optimization leveraging the fact
// that the register allocator does a linear scan through the intervals.
LiveRange* last_visited_range_;
@@ -838,15 +893,14 @@
*/
class SsaLivenessAnalysis : public ValueObject {
public:
- SsaLivenessAnalysis(const HGraph& graph, CodeGenerator* codegen)
+ SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen)
: graph_(graph),
codegen_(codegen),
- linear_order_(graph.GetArena(), graph.GetBlocks().Size()),
- block_infos_(graph.GetArena(), graph.GetBlocks().Size()),
- instructions_from_ssa_index_(graph.GetArena(), 0),
- instructions_from_lifetime_position_(graph.GetArena(), 0),
+ block_infos_(graph->GetArena(), graph->GetBlocks().Size()),
+ instructions_from_ssa_index_(graph->GetArena(), 0),
+ instructions_from_lifetime_position_(graph->GetArena(), 0),
number_of_ssa_values_(0) {
- block_infos_.SetSize(graph.GetBlocks().Size());
+ block_infos_.SetSize(graph->GetBlocks().Size());
}
void Analyze();
@@ -863,10 +917,6 @@
return &block_infos_.Get(block.GetBlockId())->kill_;
}
- const GrowableArray<HBasicBlock*>& GetLinearOrder() const {
- return linear_order_;
- }
-
HInstruction* GetInstructionFromSsaIndex(size_t index) const {
return instructions_from_ssa_index_.Get(index);
}
@@ -934,9 +984,8 @@
return instruction->GetType() == Primitive::kPrimNot;
}
- const HGraph& graph_;
+ HGraph* const graph_;
CodeGenerator* const codegen_;
- GrowableArray<HBasicBlock*> linear_order_;
GrowableArray<BlockInfo*> block_infos_;
// Temporary array used when computing live_in, live_out, and kill sets.
@@ -949,43 +998,6 @@
DISALLOW_COPY_AND_ASSIGN(SsaLivenessAnalysis);
};
-class HLinearPostOrderIterator : public ValueObject {
- public:
- explicit HLinearPostOrderIterator(const SsaLivenessAnalysis& liveness)
- : order_(liveness.GetLinearOrder()), index_(liveness.GetLinearOrder().Size()) {}
-
- bool Done() const { return index_ == 0; }
-
- HBasicBlock* Current() const { return order_.Get(index_ -1); }
-
- void Advance() {
- --index_;
- DCHECK_GE(index_, 0U);
- }
-
- private:
- const GrowableArray<HBasicBlock*>& order_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HLinearPostOrderIterator);
-};
-
-class HLinearOrderIterator : public ValueObject {
- public:
- explicit HLinearOrderIterator(const SsaLivenessAnalysis& liveness)
- : order_(liveness.GetLinearOrder()), index_(0) {}
-
- bool Done() const { return index_ == order_.Size(); }
- HBasicBlock* Current() const { return order_.Get(index_); }
- void Advance() { ++index_; }
-
- private:
- const GrowableArray<HBasicBlock*>& order_;
- size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HLinearOrderIterator);
-};
-
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_SSA_LIVENESS_ANALYSIS_H_
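
AddSafepoint above appends through a first_/last_ pointer pair and asserts that positions arrive in ascending order, so consumers can walk the list front to back via GetFirstSafepoint()/GetNext(). A minimal standalone sketch of that list discipline (illustrative types, not the ART classes):

#include <cassert>
#include <cstddef>

struct Node {
  explicit Node(size_t position) : position(position), next(nullptr) {}
  size_t position;
  Node* next;
};

struct AppendOnlyList {
  Node* first = nullptr;
  Node* last = nullptr;

  void Append(Node* node) {
    if (first == nullptr) {
      first = last = node;
    } else {
      assert(last->position < node->position);  // Mirrors the DCHECK_LT above.
      last->next = node;
      last = node;
    }
  }
};

int main() {
  Node a(4), b(10), c(18);
  AppendOnlyList list;
  list.Append(&a);
  list.Append(&b);
  list.Append(&c);

  size_t prev = 0;
  for (Node* n = list.first; n != nullptr; n = n->next) {
    assert(n->position > prev);  // Positions come back in ascending order.
    prev = n->position;
  }
  return 0;
}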
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index ddc0c81..fbbfd84 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,6 +16,7 @@
#include "arena_bit_vector.h"
+#include "base/allocator.h"
#include "base/arena_allocator.h"
namespace art {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index fbd0411..98702a2 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -640,56 +640,89 @@
___ Brk();
}
-static dwarf::Reg DWARFReg(XRegister reg) {
- return dwarf::Reg::Arm64Core(static_cast<int>(reg));
+static inline dwarf::Reg DWARFReg(CPURegister reg) {
+ if (reg.IsFPRegister()) {
+ return dwarf::Reg::Arm64Fp(reg.code());
+ } else {
+ DCHECK_LT(reg.code(), 31u); // X0 - X30.
+ return dwarf::Reg::Arm64Core(reg.code());
+ }
}
-static dwarf::Reg DWARFReg(DRegister reg) {
- return dwarf::Reg::Arm64Fp(static_cast<int>(reg));
+void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
+ int size = registers.RegisterSizeInBytes();
+ const Register sp = vixl_masm_->StackPointer();
+ while (registers.Count() >= 2) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ ___ Stp(dst0, dst1, MemOperand(sp, offset));
+ cfi_.RelOffset(DWARFReg(dst0), offset);
+ cfi_.RelOffset(DWARFReg(dst1), offset + size);
+ offset += 2 * size;
+ }
+ if (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ ___ Str(dst0, MemOperand(sp, offset));
+ cfi_.RelOffset(DWARFReg(dst0), offset);
+ }
+ DCHECK(registers.IsEmpty());
}
-constexpr size_t kFramePointerSize = 8;
-constexpr unsigned int kJniRefSpillRegsSize = 11 + 8;
+void Arm64Assembler::UnspillRegisters(vixl::CPURegList registers, int offset) {
+ int size = registers.RegisterSizeInBytes();
+ const Register sp = vixl_masm_->StackPointer();
+ while (registers.Count() >= 2) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ ___ Ldp(dst0, dst1, MemOperand(sp, offset));
+ cfi_.Restore(DWARFReg(dst0));
+ cfi_.Restore(DWARFReg(dst1));
+ offset += 2 * size;
+ }
+ if (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ ___ Ldr(dst0, MemOperand(sp, offset));
+ cfi_.Restore(DWARFReg(dst0));
+ }
+ DCHECK(registers.IsEmpty());
+}
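
SpillRegisters and UnspillRegisters pop registers two at a time so that adjacent slots can be covered by a single Stp/Ldp, falling back to one Str/Ldr for an odd leftover. A toy model of the offset bookkeeping (assumed buffer layout, not the assembler API):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  std::vector<uint64_t> regs = {1, 2, 3, 4, 5};  // Five callee-saves: odd count.
  std::vector<uint64_t> stack(8, 0);             // Stack modeled as 64-bit slots.
  const size_t size = sizeof(uint64_t);

  size_t offset = 16;  // Byte offset of the first spill slot.
  size_t i = 0;
  while (regs.size() - i >= 2) {
    stack[offset / size] = regs[i];      // Stp dst0, dst1, [sp, #offset]
    stack[offset / size + 1] = regs[i + 1];
    offset += 2 * size;
    i += 2;
  }
  if (i < regs.size()) {
    stack[offset / size] = regs[i];      // Str dst0, [sp, #offset]
  }

  // Registers land in consecutive slots starting at offset 16 (slot index 2).
  assert(stack[2] == 1 && stack[3] == 2 && stack[4] == 3 &&
         stack[5] == 4 && stack[6] == 5);
  return 0;
}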
void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
- const std::vector<ManagedRegister>& callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- CHECK(X0 == method_reg.AsArm64().AsXRegister());
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ // Setup VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
+ }
+ }
+ size_t core_reg_size = core_reg_list.TotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();
- // TODO: *create APCS FP - end of FP chain;
- // *add support for saving a different set of callee regs.
- // For now we check that the size of callee regs vector is 11 core registers and 8 fp registers.
- CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
- // Increase frame to required size - must be at least space to push StackReference<Method>.
- CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);
+ // Increase frame to required size.
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
IncreaseFrameSize(frame_size);
- // TODO: Ugly hard code...
- // Should generate these according to the spill mask automatically.
- // TUNING: Use stp.
- // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
- size_t reg_offset = frame_size;
- static constexpr XRegister x_spills[] = {
- LR, X29, X28, X27, X26, X25, X24, X23, X22, X21, X20 };
- for (size_t i = 0; i < arraysize(x_spills); i++) {
- XRegister reg = x_spills[i];
- reg_offset -= 8;
- StoreToOffset(reg, SP, reg_offset);
- cfi_.RelOffset(DWARFReg(reg), reg_offset);
- }
- for (int d = 15; d >= 8; d--) {
- DRegister reg = static_cast<DRegister>(d);
- reg_offset -= 8;
- StoreDToOffset(reg, SP, reg_offset);
- cfi_.RelOffset(DWARFReg(reg), reg_offset);
- }
+ // Save callee-saves.
+ SpillRegisters(core_reg_list, frame_size - core_reg_size);
+ SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
- // Move TR(Caller saved) to ETR(Callee saved). The original (ETR)X21 has been saved on stack.
- // This way we make sure that TR is not trashed by native code.
+ // Note: This is specific to the JNI method frame.
+ // We need to move TR (caller-saved in AAPCS) to ETR (callee-saved in AAPCS). The original
+ // (ETR) X21 has been saved on the stack, so TR can be restored later.
+ DCHECK(!core_reg_list.IncludesAliasOf(reg_x(TR)));
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR)));
___ Mov(reg_x(ETR), reg_x(TR));
// Write StackReference<Method>.
+ DCHECK(X0 == method_reg.AsArm64().AsXRegister());
DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);
@@ -717,37 +750,39 @@
}
}
-void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
+void Arm64Assembler::RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs) {
+ // Set up VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
+ }
+ }
+ size_t core_reg_size = core_reg_list.TotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();
- // For now we only check that the size of the frame is greater than the spill size.
- CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
- CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);
+ // For now we only check that the frame is large enough to hold the spills and the
+ // method reference.
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
- // We move ETR(aapcs64 callee saved) back to TR(aapcs64 caller saved) which might have
- // been trashed in the native call. The original ETR(X21) is restored from stack.
+ // Note: This is specific to the JNI method frame.
+ // Restore TR (caller-saved in AAPCS) from ETR (callee-saved in AAPCS).
+ DCHECK(!core_reg_list.IncludesAliasOf(reg_x(TR)));
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR)));
___ Mov(reg_x(TR), reg_x(ETR));
- // TODO: Ugly hard code...
- // Should generate these according to the spill mask automatically.
- // TUNING: Use ldp.
- // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
- size_t reg_offset = frame_size;
- static constexpr XRegister x_spills[] = {
- LR, X29, X28, X27, X26, X25, X24, X23, X22, X21, X20 };
- for (size_t i = 0; i < arraysize(x_spills); i++) {
- XRegister reg = x_spills[i];
- reg_offset -= 8;
- LoadFromOffset(reg, SP, reg_offset);
- cfi_.Restore(DWARFReg(reg));
- }
- for (int d = 15; d >= 8; d--) {
- DRegister reg = static_cast<DRegister>(d);
- reg_offset -= 8;
- LoadDFromOffset(reg, SP, reg_offset);
- cfi_.Restore(DWARFReg(reg));
- }
+ cfi_.RememberState();
+
+ // Restore callee-saves.
+ UnspillRegisters(core_reg_list, frame_size - core_reg_size);
+ UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
// Decrease frame size to start of callee saved regs.
DecreaseFrameSize(frame_size);
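
The rewritten BuildFrame/RemoveFrame lean on the new SpillRegisters/UnspillRegisters helpers: the CPURegList is consumed two registers at a time so each pair becomes a single stp/ldp, CFI is recorded per register, and a trailing odd register falls back to str/ldr. A minimal sketch of that pairing strategy, with plain ints standing in for vixl CPURegisters:

// Sketch of the pair-wise spill strategy used by SpillRegisters: consume the
// list two registers at a time so each iteration emits one store-pair, then
// handle a possible odd register with a single store. Simplified stand-ins.
#include <cstdio>
#include <vector>

void SpillSketch(std::vector<int> regs, int offset, int size) {
  while (regs.size() >= 2) {
    int r0 = regs[0], r1 = regs[1];
    regs.erase(regs.begin(), regs.begin() + 2);
    std::printf("stp x%d, x%d, [sp, #%d]\n", r0, r1, offset);  // one store-pair
    // CFI: r0 is saved at offset, r1 at offset + size.
    offset += 2 * size;
  }
  if (!regs.empty()) {
    std::printf("str x%d, [sp, #%d]\n", regs[0], offset);  // odd register left over
  }
}
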
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 8973b9c..b7715af 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -83,6 +83,9 @@
// Copy instructions out of assembly buffer into the given region of memory.
void FinalizeInstructions(const MemoryRegion& region);
+ void SpillRegisters(vixl::CPURegList registers, int offset);
+ void UnspillRegisters(vixl::CPURegList registers, int offset);
+
// Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
diff --git a/compiler/utils/dex_cache_arrays_layout-inl.h b/compiler/utils/dex_cache_arrays_layout-inl.h
index 7d02ce3..2c50c96 100644
--- a/compiler/utils/dex_cache_arrays_layout-inl.h
+++ b/compiler/utils/dex_cache_arrays_layout-inl.h
@@ -26,7 +26,6 @@
#include "utils.h"
namespace mirror {
-class ArtField;
class ArtMethod;
class Class;
class String;
@@ -34,40 +33,55 @@
namespace art {
-inline DexCacheArraysLayout::DexCacheArraysLayout(const DexFile* dex_file)
+inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
: /* types_offset_ is always 0u */
- methods_offset_(types_offset_ + ArraySize<mirror::Class>(dex_file->NumTypeIds())),
- strings_offset_(methods_offset_ + ArraySize<mirror::ArtMethod>(dex_file->NumMethodIds())),
- fields_offset_(strings_offset_ + ArraySize<mirror::String>(dex_file->NumStringIds())),
- size_(fields_offset_ + ArraySize<mirror::ArtField>(dex_file->NumFieldIds())) {
+ pointer_size_(pointer_size),
+ methods_offset_(types_offset_ + TypesSize(dex_file->NumTypeIds())),
+ strings_offset_(methods_offset_ + MethodsSize(dex_file->NumMethodIds())),
+ fields_offset_(strings_offset_ + StringsSize(dex_file->NumStringIds())),
+ size_(fields_offset_ + FieldsSize(dex_file->NumFieldIds())) {
+ DCHECK(pointer_size == 4u || pointer_size == 8u);
}
inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
- return types_offset_ + ElementOffset<mirror::Class>(type_idx);
+ return types_offset_ + ElementOffset(sizeof(mirror::HeapReference<mirror::Class>), type_idx);
+}
+
+inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
+ return ArraySize(sizeof(mirror::HeapReference<mirror::Class>), num_elements);
}
inline size_t DexCacheArraysLayout::MethodOffset(uint32_t method_idx) const {
- return methods_offset_ + ElementOffset<mirror::ArtMethod>(method_idx);
+ return methods_offset_ + ElementOffset(
+ sizeof(mirror::HeapReference<mirror::ArtMethod>), method_idx);
+}
+
+inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
+ return ArraySize(sizeof(mirror::HeapReference<mirror::ArtMethod>), num_elements);
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
- return strings_offset_ + ElementOffset<mirror::String>(string_idx);
+ return strings_offset_ + ElementOffset(sizeof(mirror::HeapReference<mirror::String>), string_idx);
+}
+
+inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const {
+ return ArraySize(sizeof(mirror::HeapReference<mirror::String>), num_elements);
}
inline size_t DexCacheArraysLayout::FieldOffset(uint32_t field_idx) const {
- return fields_offset_ + ElementOffset<mirror::ArtField>(field_idx);
+ return fields_offset_ + ElementOffset(pointer_size_, field_idx);
}
-template <typename MirrorType>
-inline size_t DexCacheArraysLayout::ElementOffset(uint32_t idx) {
- return mirror::Array::DataOffset(sizeof(mirror::HeapReference<MirrorType>)).Uint32Value() +
- sizeof(mirror::HeapReference<MirrorType>) * idx;
+inline size_t DexCacheArraysLayout::FieldsSize(size_t num_elements) const {
+ return ArraySize(pointer_size_, num_elements);
}
-template <typename MirrorType>
-inline size_t DexCacheArraysLayout::ArraySize(uint32_t num_elements) {
- size_t array_size = mirror::ComputeArraySize(
- num_elements, ComponentSizeShiftWidth<sizeof(mirror::HeapReference<MirrorType>)>());
+inline size_t DexCacheArraysLayout::ElementOffset(size_t element_size, uint32_t idx) {
+ return mirror::Array::DataOffset(element_size).Uint32Value() + element_size * idx;
+}
+
+inline size_t DexCacheArraysLayout::ArraySize(size_t element_size, uint32_t num_elements) {
+ size_t array_size = mirror::ComputeArraySize(num_elements, ComponentSizeShiftWidth(element_size));
DCHECK_NE(array_size, 0u); // No overflow expected for dex cache arrays.
return RoundUp(array_size, kObjectAlignment);
}
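
The behavioural change here is that resolved fields are now laid out as native pointers (pointer_size_ per slot) while types, methods and strings keep HeapReference-sized slots. A worked sketch of the arithmetic, under assumed constants (12-byte array data offset, 8-byte object alignment), showing how pointer size changes the fields array footprint:

// Worked sketch of the element arithmetic; kDataOffset and kObjectAlign are
// assumed values for illustration, not the authoritative ART constants.
#include <cstddef>
#include <cstdint>

constexpr size_t kDataOffset = 12;   // assumed mirror::Array data offset
constexpr size_t kObjectAlign = 8;   // assumed kObjectAlignment

constexpr size_t RoundUpTo(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

constexpr size_t ElementOffset(size_t element_size, uint32_t idx) {
  return kDataOffset + element_size * idx;
}
constexpr size_t ArraySize(size_t element_size, uint32_t n) {
  return RoundUpTo(kDataOffset + element_size * n, kObjectAlign);
}

// With pointer_size == 8, 100 resolved fields occupy ArraySize(8, 100) == 816
// bytes, versus ArraySize(4, 100) == 416 with 4-byte slots.
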
diff --git a/compiler/utils/dex_cache_arrays_layout.h b/compiler/utils/dex_cache_arrays_layout.h
index b461256..8f98ea1 100644
--- a/compiler/utils/dex_cache_arrays_layout.h
+++ b/compiler/utils/dex_cache_arrays_layout.h
@@ -29,6 +29,7 @@
// Construct an invalid layout.
DexCacheArraysLayout()
: /* types_offset_ is always 0u */
+ pointer_size_(0u),
methods_offset_(0u),
strings_offset_(0u),
fields_offset_(0u),
@@ -36,7 +37,7 @@
}
// Construct a layout for a particular dex file.
- explicit DexCacheArraysLayout(const DexFile* dex_file);
+ explicit DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file);
bool Valid() const {
return Size() != 0u;
@@ -52,36 +53,43 @@
size_t TypeOffset(uint32_t type_idx) const;
+ size_t TypesSize(size_t num_elements) const;
+
size_t MethodsOffset() const {
return methods_offset_;
}
size_t MethodOffset(uint32_t method_idx) const;
+ size_t MethodsSize(size_t num_elements) const;
+
size_t StringsOffset() const {
return strings_offset_;
}
size_t StringOffset(uint32_t string_idx) const;
+ size_t StringsSize(size_t num_elements) const;
+
size_t FieldsOffset() const {
return fields_offset_;
}
size_t FieldOffset(uint32_t field_idx) const;
+ size_t FieldsSize(size_t num_elements) const;
+
private:
static constexpr size_t types_offset_ = 0u;
+ const size_t pointer_size_; // Must be first for construction initialization order.
const size_t methods_offset_;
const size_t strings_offset_;
const size_t fields_offset_;
const size_t size_;
- template <typename MirrorType>
- static size_t ElementOffset(uint32_t idx);
+ static size_t ElementOffset(size_t element_size, uint32_t idx);
- template <typename MirrorType>
- static size_t ArraySize(uint32_t num_elements);
+ static size_t ArraySize(size_t element_size, uint32_t num_elements);
};
} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f8bba07..329698c 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -883,6 +883,13 @@
}
+void X86Assembler::filds(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDB);
+ EmitOperand(0, src);
+}
+
+
void X86Assembler::fincstp() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xD9);
@@ -1285,62 +1292,32 @@
void X86Assembler::shll(Register reg, const Immediate& imm) {
- EmitGenericShift(4, Operand(reg), imm);
+ EmitGenericShift(4, reg, imm);
}
void X86Assembler::shll(Register operand, Register shifter) {
- EmitGenericShift(4, Operand(operand), shifter);
-}
-
-
-void X86Assembler::shll(const Address& address, const Immediate& imm) {
- EmitGenericShift(4, address, imm);
-}
-
-
-void X86Assembler::shll(const Address& address, Register shifter) {
- EmitGenericShift(4, address, shifter);
+ EmitGenericShift(4, operand, shifter);
}
void X86Assembler::shrl(Register reg, const Immediate& imm) {
- EmitGenericShift(5, Operand(reg), imm);
+ EmitGenericShift(5, reg, imm);
}
void X86Assembler::shrl(Register operand, Register shifter) {
- EmitGenericShift(5, Operand(operand), shifter);
-}
-
-
-void X86Assembler::shrl(const Address& address, const Immediate& imm) {
- EmitGenericShift(5, address, imm);
-}
-
-
-void X86Assembler::shrl(const Address& address, Register shifter) {
- EmitGenericShift(5, address, shifter);
+ EmitGenericShift(5, operand, shifter);
}
void X86Assembler::sarl(Register reg, const Immediate& imm) {
- EmitGenericShift(7, Operand(reg), imm);
+ EmitGenericShift(7, reg, imm);
}
void X86Assembler::sarl(Register operand, Register shifter) {
- EmitGenericShift(7, Operand(operand), shifter);
-}
-
-
-void X86Assembler::sarl(const Address& address, const Immediate& imm) {
- EmitGenericShift(7, address, imm);
-}
-
-
-void X86Assembler::sarl(const Address& address, Register shifter) {
- EmitGenericShift(7, address, shifter);
+ EmitGenericShift(7, operand, shifter);
}
@@ -1353,15 +1330,6 @@
}
-void X86Assembler::shld(Register dst, Register src, const Immediate& imm) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x0F);
- EmitUint8(0xA4);
- EmitRegisterOperand(src, dst);
- EmitUint8(imm.value() & 0xFF);
-}
-
-
void X86Assembler::shrd(Register dst, Register src, Register shifter) {
DCHECK_EQ(ECX, shifter);
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1371,15 +1339,6 @@
}
-void X86Assembler::shrd(Register dst, Register src, const Immediate& imm) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitUint8(0x0F);
- EmitUint8(0xAC);
- EmitRegisterOperand(src, dst);
- EmitUint8(imm.value() & 0xFF);
-}
-
-
void X86Assembler::negl(Register reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF7);
@@ -1663,28 +1622,28 @@
void X86Assembler::EmitGenericShift(int reg_or_opcode,
- const Operand& operand,
+ Register reg,
const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int8());
if (imm.value() == 1) {
EmitUint8(0xD1);
- EmitOperand(reg_or_opcode, operand);
+ EmitOperand(reg_or_opcode, Operand(reg));
} else {
EmitUint8(0xC1);
- EmitOperand(reg_or_opcode, operand);
+ EmitOperand(reg_or_opcode, Operand(reg));
EmitUint8(imm.value() & 0xFF);
}
}
void X86Assembler::EmitGenericShift(int reg_or_opcode,
- const Operand& operand,
+ Register operand,
Register shifter) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK_EQ(shifter, ECX);
EmitUint8(0xD3);
- EmitOperand(reg_or_opcode, operand);
+ EmitOperand(reg_or_opcode, Operand(operand));
}
static dwarf::Reg DWARFReg(Register reg) {
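
With the Address overloads gone, EmitGenericShift only ever encodes register-direct forms. The underlying x86 rule: opcode 0xD1 shifts by one, 0xC1 takes an imm8, 0xD3 shifts by CL, and the sub-opcode (4 = shl, 5 = shr, 7 = sar) occupies the ModR/M reg field. A standalone sketch of the immediate case:

// Sketch of the x86 shift encoding the helpers implement. Emits into a byte
// vector instead of an AssemblerBuffer; register numbers are raw 0-7 codes.
#include <cstdint>
#include <vector>

void EmitShift(std::vector<uint8_t>* out, int sub_opcode, int reg, int imm) {
  uint8_t modrm = 0xC0 | (sub_opcode << 3) | reg;  // mod=11: register-direct
  if (imm == 1) {
    out->push_back(0xD1);            // shift r/m32 by 1
    out->push_back(modrm);
  } else {
    out->push_back(0xC1);            // shift r/m32 by imm8
    out->push_back(modrm);
    out->push_back(imm & 0xFF);
  }
}
// EmitShift(&buf, 4, 0 /*EAX*/, 3) encodes shll $3, %eax as C1 E0 03.
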
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 37acb6e..a933474 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -349,6 +349,7 @@
void fistpl(const Address& dst);
void fistps(const Address& dst);
void fildl(const Address& src);
+ void filds(const Address& src);
void fincstp();
void ffree(const Immediate& index);
@@ -429,20 +430,12 @@
void shll(Register reg, const Immediate& imm);
void shll(Register operand, Register shifter);
- void shll(const Address& address, const Immediate& imm);
- void shll(const Address& address, Register shifter);
void shrl(Register reg, const Immediate& imm);
void shrl(Register operand, Register shifter);
- void shrl(const Address& address, const Immediate& imm);
- void shrl(const Address& address, Register shifter);
void sarl(Register reg, const Immediate& imm);
void sarl(Register operand, Register shifter);
- void sarl(const Address& address, const Immediate& imm);
- void sarl(const Address& address, Register shifter);
void shld(Register dst, Register src, Register shifter);
- void shld(Register dst, Register src, const Immediate& imm);
void shrd(Register dst, Register src, Register shifter);
- void shrd(Register dst, Register src, const Immediate& imm);
void negl(Register reg);
void notl(Register reg);
@@ -627,8 +620,8 @@
void EmitLabelLink(Label* label);
void EmitNearLabelLink(Label* label);
- void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
- void EmitGenericShift(int rm, const Operand& operand, Register shifter);
+ void EmitGenericShift(int rm, Register reg, const Immediate& imm);
+ void EmitGenericShift(int rm, Register operand, Register shifter);
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
};
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index dba3b6b..f326e49 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -172,4 +172,22 @@
DriverStr(expected, "lock_cmpxchg8b");
}
+TEST_F(AssemblerX86Test, FPUIntegerLoad) {
+ GetAssembler()->filds(x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->fildl(x86::Address(x86::Register(x86::ESP), 12));
+ const char* expected =
+ "fildl 0x4(%ESP)\n"
+ "fildll 0xc(%ESP)\n";
+ DriverStr(expected, "FPUIntegerLoad");
+}
+
+TEST_F(AssemblerX86Test, FPUIntegerStore) {
+ GetAssembler()->fistps(x86::Address(x86::Register(x86::ESP), 16));
+ GetAssembler()->fistpl(x86::Address(x86::Register(x86::ESP), 24));
+ const char* expected =
+ "fistpl 0x10(%ESP)\n"
+ "fistpll 0x18(%ESP)\n";
+ DriverStr(expected, "FPUIntegerStore");
+}
+
} // namespace art
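
The expected disassembly pins down a naming quirk worth noting: ART's filds/fistps are the 32-bit integer forms (AT&T fildl/fistpl), while ART's fildl/fistpl are the 64-bit forms (AT&T fildll/fistpll). A sketch of the underlying x87 opcodes, assuming the standard /digit encodings (only DB /0 for filds is confirmed by this diff):

// Minimal sketch, assuming the standard x87 integer load/store encodings:
// emit the opcode byte plus a ModR/M byte whose reg field is the /digit
// sub-opcode. The memory operand is elided; a real emitter encodes it here.
#include <cstdint>
#include <vector>

void EmitX87(std::vector<uint8_t>* out, uint8_t opcode, int digit) {
  out->push_back(opcode);
  out->push_back(static_cast<uint8_t>(digit << 3));  // mod=00, r/m elided
}

void filds(std::vector<uint8_t>* b)  { EmitX87(b, 0xDB, 0); }  // AT&T fildl
void fildl(std::vector<uint8_t>* b)  { EmitX87(b, 0xDF, 5); }  // AT&T fildll
void fistps(std::vector<uint8_t>* b) { EmitX87(b, 0xDB, 3); }  // AT&T fistpl
void fistpl(std::vector<uint8_t>* b) { EmitX87(b, 0xDF, 7); }  // AT&T fistpll
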
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 638659d..32204a9 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -988,6 +988,13 @@
}
+void X86_64Assembler::filds(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDB);
+ EmitOperand(0, src);
+}
+
+
void X86_64Assembler::fincstp() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xD9);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 15b8b15..16ef70b 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -448,6 +448,7 @@
void fistpl(const Address& dst);
void fistps(const Address& dst);
void fildl(const Address& src);
+ void filds(const Address& src);
void fincstp();
void ffree(const Immediate& index);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 116190a..5ca0373 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -849,6 +849,24 @@
DriverFn(&x87_fn, "x87");
}
+TEST_F(AssemblerX86_64Test, FPUIntegerLoad) {
+ GetAssembler()->filds(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 4));
+ GetAssembler()->fildl(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 12));
+ const char* expected =
+ "fildl 0x4(%RSP)\n"
+ "fildll 0xc(%RSP)\n";
+ DriverStr(expected, "FPUIntegerLoad");
+}
+
+TEST_F(AssemblerX86_64Test, FPUIntegerStore) {
+ GetAssembler()->fistps(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 16));
+ GetAssembler()->fistpl(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 24));
+ const char* expected =
+ "fistpl 0x10(%RSP)\n"
+ "fistpll 0x18(%RSP)\n";
+ DriverStr(expected, "FPUIntegerStore");
+}
+
////////////////
// CALL / JMP //
////////////////
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 10949e4..7e32b43 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -109,6 +109,11 @@
UsageError("Usage: dex2oat [options]...");
UsageError("");
+ UsageError(" -j<number>: specifies the number of threads used for compilation.");
+ UsageError(" Default is the number of detected hardware threads available on the");
+ UsageError(" host system.");
+ UsageError(" Example: -j12");
+ UsageError("");
UsageError(" --dex-file=<dex-file>: specifies a .dex, .jar, or .apk file to compile.");
UsageError(" Example: --dex-file=/system/framework/core.jar");
UsageError("");
@@ -188,11 +193,6 @@
UsageError(" Example: --compiler-filter=everything");
UsageError(" Default: speed");
UsageError("");
- UsageError(" --huge-method-max=<method-instruction-count>: the threshold size for a huge");
- UsageError(" method for compiler filter tuning.");
- UsageError(" Example: --huge-method-max=%d", CompilerOptions::kDefaultHugeMethodThreshold);
- UsageError(" Default: %d", CompilerOptions::kDefaultHugeMethodThreshold);
- UsageError("");
UsageError(" --huge-method-max=<method-instruction-count>: threshold size for a huge");
UsageError(" method for compiler filter tuning.");
UsageError(" Example: --huge-method-max=%d", CompilerOptions::kDefaultHugeMethodThreshold);
@@ -231,6 +231,11 @@
UsageError("");
UsageError(" --no-include-debug-symbols: Do not include ELF symbols in this oat file");
UsageError("");
+ UsageError(" --include-cfi: Include call frame information in the .eh_frame section.");
+ UsageError(" The --include-debug-symbols option implies --include-cfi.");
+ UsageError("");
+ UsageError(" --no-include-cfi: Do not include call frame information in the .eh_frame section.");
+ UsageError("");
UsageError(" --runtime-arg <argument>: used to specify various arguments for the runtime,");
UsageError(" such as initial heap size, maximum heap size, and verbose output.");
UsageError(" Use a separate --runtime-arg switch for each argument.");
@@ -496,8 +501,8 @@
bool debuggable = false;
bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
bool include_debug_symbols = kIsDebugBuild;
+ bool include_cfi = kIsDebugBuild;
bool watch_dog_enabled = true;
- bool generate_gdb_information = kIsDebugBuild;
bool abort_on_hard_verifier_error = false;
bool requested_specific_compiler = false;
@@ -541,12 +546,6 @@
watch_dog_enabled = true;
} else if (option == "--no-watch-dog") {
watch_dog_enabled = false;
- } else if (option == "--gen-gdb-info") {
- generate_gdb_information = true;
- // Debug symbols are needed for gdb information.
- include_debug_symbols = true;
- } else if (option == "--no-gen-gdb-info") {
- generate_gdb_information = false;
} else if (option.starts_with("-j")) {
const char* thread_count_str = option.substr(strlen("-j")).data();
if (!ParseUint(thread_count_str, &thread_count_)) {
@@ -684,7 +683,10 @@
include_debug_symbols = true;
} else if (option == "--no-include-debug-symbols" || option == "--strip-symbols") {
include_debug_symbols = false;
- generate_gdb_information = false; // Depends on debug symbols, see above.
+ } else if (option == "--include-cfi") {
+ include_cfi = true;
+ } else if (option == "--no-include-cfi") {
+ include_cfi = false;
} else if (option == "--debuggable") {
debuggable = true;
} else if (option.starts_with("--profile-file=")) {
@@ -936,11 +938,11 @@
small_method_threshold,
tiny_method_threshold,
num_dex_methods_threshold,
- generate_gdb_information,
include_patch_information,
top_k_profile_threshold,
debuggable,
include_debug_symbols,
+ include_cfi,
implicit_null_checks,
implicit_so_checks,
implicit_suspend_checks,
@@ -1235,6 +1237,11 @@
for (auto& class_path_file : class_path_files_) {
class_path_files.push_back(class_path_file.get());
}
+
+ // Store the classpath we have right now.
+ key_value_store_->Put(OatHeader::kClassPathKey,
+ OatFile::EncodeDexFileDependencies(class_path_files));
+
// Then the dex files we'll compile. Thus we'll resolve the class-path first.
class_path_files.insert(class_path_files.end(), dex_files_.begin(), dex_files_.end());
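
Taken together with the usage text above, the new options are exercised with an invocation along these lines (hypothetical paths; --oat-file is part of the pre-existing dex2oat interface, not this change):

  dex2oat -j12 --include-cfi --dex-file=/system/framework/core.jar --oat-file=/data/core.oat
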
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 9b57ecb..34a4c14 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -168,7 +168,10 @@
<< "\n\n";
const uint8_t* image_begin_unaligned = boot_image_header.GetImageBegin();
- const uint8_t* image_end_unaligned = image_begin_unaligned + boot_image_header.GetImageSize();
+ const uint8_t* image_mirror_end_unaligned = image_begin_unaligned +
+ boot_image_header.GetImageSize();
+ const uint8_t* image_end_unaligned = image_mirror_end_unaligned +
+ boot_image_header.GetArtFieldsSize();
// Adjust range to nearest page
const uint8_t* image_begin = AlignDown(image_begin_unaligned, kPageSize);
@@ -350,7 +353,7 @@
size_t dirty_object_bytes = 0;
{
const uint8_t* begin_image_ptr = image_begin_unaligned;
- const uint8_t* end_image_ptr = image_end_unaligned;
+ const uint8_t* end_image_ptr = image_mirror_end_unaligned;
const uint8_t* current = begin_image_ptr + RoundUp(sizeof(ImageHeader), kObjectAlignment);
while (reinterpret_cast<const uintptr_t>(current)
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 322d3aa..a36e5b1 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,6 +26,7 @@
#include <vector>
#include "arch/instruction_set_features.h"
+#include "art_field-inl.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "class_linker-inl.h"
@@ -40,7 +41,6 @@
#include "image.h"
#include "indenter.h"
#include "mapping_table.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
@@ -1549,9 +1549,6 @@
} else if (type->IsClassClass()) {
mirror::Class* klass = value->AsClass();
os << StringPrintf("%p Class: %s\n", klass, PrettyDescriptor(klass).c_str());
- } else if (type->IsArtFieldClass()) {
- mirror::ArtField* field = value->AsArtField();
- os << StringPrintf("%p Field: %s\n", field, PrettyField(field).c_str());
} else if (type->IsArtMethodClass()) {
mirror::ArtMethod* method = value->AsArtMethod();
os << StringPrintf("%p Method: %s\n", method, PrettyMethod(method).c_str());
@@ -1560,7 +1557,7 @@
}
}
- static void PrintField(std::ostream& os, mirror::ArtField* field, mirror::Object* obj)
+ static void PrintField(std::ostream& os, ArtField* field, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
os << StringPrintf("%s: ", field->GetName());
switch (field->GetTypeAsPrimitiveType()) {
@@ -1619,12 +1616,9 @@
if (super != nullptr) {
DumpFields(os, obj, super);
}
- mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
- if (fields != nullptr) {
- for (int32_t i = 0; i < fields->GetLength(); i++) {
- mirror::ArtField* field = fields->Get(i);
- PrintField(os, field, obj);
- }
+ ArtField* fields = klass->GetIFields();
+ for (size_t i = 0, count = klass->NumInstanceFields(); i < count; i++) {
+ PrintField(os, &fields[i], obj);
}
}
@@ -1686,9 +1680,6 @@
mirror::Class* klass = obj->AsClass();
os << StringPrintf("%p: java.lang.Class \"%s\" (", obj, PrettyDescriptor(klass).c_str())
<< klass->GetStatus() << ")\n";
- } else if (obj->IsArtField()) {
- os << StringPrintf("%p: java.lang.reflect.ArtField %s\n", obj,
- PrettyField(obj->AsArtField()).c_str());
} else if (obj->IsArtMethod()) {
os << StringPrintf("%p: java.lang.reflect.ArtMethod %s\n", obj,
PrettyMethod(obj->AsArtMethod()).c_str());
@@ -1725,14 +1716,15 @@
PrettyObjectValue(indent_os, value_class, value);
}
} else if (obj->IsClass()) {
- mirror::ObjectArray<mirror::ArtField>* sfields = obj->AsClass()->GetSFields();
- if (sfields != nullptr) {
+ mirror::Class* klass = obj->AsClass();
+ ArtField* sfields = klass->GetSFields();
+ const size_t num_fields = klass->NumStaticFields();
+ if (num_fields != 0) {
indent_os << "STATICS:\n";
Indenter indent2_filter(indent_os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent2_os(&indent2_filter);
- for (int32_t i = 0; i < sfields->GetLength(); i++) {
- mirror::ArtField* field = sfields->Get(i);
- PrintField(indent2_os, field, field->GetDeclaringClass());
+ for (size_t i = 0; i < num_fields; i++) {
+ PrintField(indent2_os, &sfields[i], sfields[i].GetDeclaringClass());
}
}
} else if (obj->IsArtMethod()) {
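
Since ArtField is no longer a mirror object, oatdump now walks a contiguous native array bounded by NumInstanceFields()/NumStaticFields() rather than a managed ObjectArray. A sketch of that iteration shape, with simplified stand-in types:

// Stand-in types to show the flat-array iteration the dump code now uses.
#include <cstddef>
#include <cstdio>

struct ArtField { const char* name; };
struct Class {
  ArtField* ifields;   // contiguous native array, not a managed ObjectArray
  size_t num_ifields;
  ArtField* GetIFields() { return ifields; }
  size_t NumInstanceFields() const { return num_ifields; }
};

void DumpFields(Class* klass) {
  ArtField* fields = klass->GetIFields();
  for (size_t i = 0, count = klass->NumInstanceFields(); i < count; ++i) {
    std::printf("%s\n", fields[i].name);  // cf. PrintField(os, &fields[i], obj)
  }
}
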
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 9584064..4dc0967 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -24,6 +24,7 @@
#include <string>
#include <vector>
+#include "art_field-inl.h"
#include "base/dumpable.h"
#include "base/scoped_flock.h"
#include "base/stringpiece.h"
@@ -34,7 +35,6 @@
#include "elf_file_impl.h"
#include "gc/space/image_space.h"
#include "image.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference.h"
@@ -415,13 +415,64 @@
return true;
}
+void PatchOat::PatchArtFields(const ImageHeader* image_header) {
+ const size_t art_field_size = image_header->GetArtFieldsSize();
+ const size_t art_field_offset = image_header->GetArtFieldsOffset();
+ for (size_t pos = 0; pos < art_field_size; pos += sizeof(ArtField)) {
+ auto* field = reinterpret_cast<ArtField*>(heap_->Begin() + art_field_offset + pos);
+ auto* dest_field = RelocatedCopyOf(field);
+ dest_field->SetDeclaringClass(RelocatedAddressOfPointer(field->GetDeclaringClass()));
+ }
+}
+
+void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
+ auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
+ img_roots->Get(ImageHeader::kDexCaches));
+ for (size_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
+ auto* dex_cache = dex_caches->GetWithoutChecks(i);
+ auto* fields = dex_cache->GetResolvedFields();
+ if (fields == nullptr) {
+ continue;
+ }
+ CHECK(!fields->IsObjectArray());
+ CHECK(fields->IsArrayInstance());
+ auto* component_type = fields->GetClass()->GetComponentType();
+ if (component_type->IsPrimitiveInt()) {
+ mirror::IntArray* arr = fields->AsIntArray();
+ mirror::IntArray* copy_arr = down_cast<mirror::IntArray*>(RelocatedCopyOf(arr));
+ for (size_t j = 0, count2 = arr->GetLength(); j < count2; ++j) {
+ auto f = arr->GetWithoutChecks(j);
+ if (f != 0) {
+ copy_arr->SetWithoutChecks<false>(j, f + delta_);
+ }
+ }
+ } else {
+ CHECK(component_type->IsPrimitiveLong());
+ mirror::LongArray* arr = fields->AsLongArray();
+ mirror::LongArray* copy_arr = down_cast<mirror::LongArray*>(RelocatedCopyOf(arr));
+ for (size_t j = 0, count2 = arr->GetLength(); j < count2; ++j) {
+ auto f = arr->GetWithoutChecks(j);
+ if (f != 0) {
+ copy_arr->SetWithoutChecks<false>(j, f + delta_);
+ }
+ }
+ }
+ }
+}
+
bool PatchOat::PatchImage() {
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
CHECK_GT(image_->Size(), sizeof(ImageHeader));
// These are the roots from the original file.
- mirror::Object* img_roots = image_header->GetImageRoots();
+ auto* img_roots = image_header->GetImageRoots();
image_header->RelocateImage(delta_);
+ // Patch and update ArtFields.
+ PatchArtFields(image_header);
+
+ // Patch dex file int/long arrays which point to ArtFields.
+ PatchDexFileArrays(img_roots);
+
VisitObject(img_roots);
if (!image_header->IsValid()) {
LOG(ERROR) << "reloction renders image header invalid";
@@ -448,7 +499,7 @@
bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
- mirror::Object* moved_object = patcher_->RelocatedAddressOf(referent);
+ mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
@@ -457,30 +508,10 @@
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
- mirror::Object* moved_object = patcher_->RelocatedAddressOf(referent);
+ mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-mirror::Object* PatchOat::RelocatedCopyOf(mirror::Object* obj) {
- if (obj == nullptr) {
- return nullptr;
- }
- DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
- DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
- uintptr_t heap_off =
- reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
- DCHECK_LT(heap_off, image_->Size());
- return reinterpret_cast<mirror::Object*>(image_->Begin() + heap_off);
-}
-
-mirror::Object* PatchOat::RelocatedAddressOf(mirror::Object* obj) {
- if (obj == nullptr) {
- return nullptr;
- } else {
- return reinterpret_cast<mirror::Object*>(reinterpret_cast<uint8_t*>(obj) + delta_);
- }
-}
-
const OatHeader* PatchOat::GetOatHeader(const ElfFile* elf_file) {
if (elf_file->Is64Bit()) {
return GetOatHeader<ElfFileImpl64>(elf_file->GetImpl64());
@@ -507,7 +538,7 @@
if (kUseBakerOrBrooksReadBarrier) {
object->AssertReadBarrierPointer();
if (kUseBrooksReadBarrier) {
- mirror::Object* moved_to = RelocatedAddressOf(object);
+ mirror::Object* moved_to = RelocatedAddressOfPointer(object);
copy->SetReadBarrierPointer(moved_to);
DCHECK_EQ(copy->GetReadBarrierPointer(), moved_to);
}
@@ -516,6 +547,12 @@
object->VisitReferences<true, kVerifyNone>(visitor, visitor);
if (object->IsArtMethod<kVerifyNone>()) {
FixupMethod(down_cast<mirror::ArtMethod*>(object), down_cast<mirror::ArtMethod*>(copy));
+ } else if (object->IsClass<kVerifyNone>()) {
+ mirror::Class* klass = down_cast<mirror::Class*>(object);
+ down_cast<mirror::Class*>(copy)->SetSFieldsUnchecked(
+ RelocatedAddressOfPointer(klass->GetSFields()));
+ down_cast<mirror::Class*>(copy)->SetIFieldsUnchecked(
+ RelocatedAddressOfPointer(klass->GetIFields()));
}
}
@@ -588,35 +625,6 @@
return true;
}
-template <typename ElfFileImpl, typename ptr_t>
-bool PatchOat::CheckOatFile(ElfFileImpl* oat_file) {
- auto patches_sec = oat_file->FindSectionByName(".oat_patches");
- if (patches_sec->sh_type != SHT_OAT_PATCH) {
- return false;
- }
- ptr_t* patches = reinterpret_cast<ptr_t*>(oat_file->Begin() + patches_sec->sh_offset);
- ptr_t* patches_end = patches + (patches_sec->sh_size / sizeof(ptr_t));
- auto oat_data_sec = oat_file->FindSectionByName(".rodata");
- auto oat_text_sec = oat_file->FindSectionByName(".text");
- if (oat_data_sec == nullptr) {
- return false;
- }
- if (oat_text_sec == nullptr) {
- return false;
- }
- if (oat_text_sec->sh_offset <= oat_data_sec->sh_offset) {
- return false;
- }
-
- for (; patches < patches_end; patches++) {
- if (oat_text_sec->sh_size <= *patches) {
- return false;
- }
- }
-
- return true;
-}
-
template <typename ElfFileImpl>
bool PatchOat::PatchOatHeader(ElfFileImpl* oat_file) {
auto rodata_sec = oat_file->FindSectionByName(".rodata");
@@ -642,7 +650,7 @@
template <typename ElfFileImpl>
bool PatchOat::PatchElf(ElfFileImpl* oat_file) {
TimingLogger::ScopedTiming t("Fixup Elf Text Section", timings_);
- if (!PatchTextSection<ElfFileImpl>(oat_file)) {
+ if (!oat_file->ApplyOatPatchesTo(".text", delta_)) {
return false;
}
@@ -694,51 +702,6 @@
return true;
}
-template <typename ElfFileImpl>
-bool PatchOat::PatchTextSection(ElfFileImpl* oat_file) {
- auto patches_sec = oat_file->FindSectionByName(".oat_patches");
- if (patches_sec == nullptr) {
- LOG(ERROR) << ".oat_patches section not found. Aborting patch";
- return false;
- }
- if (patches_sec->sh_type != SHT_OAT_PATCH) {
- LOG(ERROR) << "Unexpected type of .oat_patches";
- return false;
- }
-
- switch (patches_sec->sh_entsize) {
- case sizeof(uint32_t):
- return PatchTextSection<ElfFileImpl, uint32_t>(oat_file);
- case sizeof(uint64_t):
- return PatchTextSection<ElfFileImpl, uint64_t>(oat_file);
- default:
- LOG(ERROR) << ".oat_patches Entsize of " << patches_sec->sh_entsize << "bits "
- << "is not valid";
- return false;
- }
-}
-
-template <typename ElfFileImpl, typename patch_loc_t>
-bool PatchOat::PatchTextSection(ElfFileImpl* oat_file) {
- bool oat_file_valid = CheckOatFile<ElfFileImpl, patch_loc_t>(oat_file);
- CHECK(oat_file_valid) << "Oat file invalid";
- auto patches_sec = oat_file->FindSectionByName(".oat_patches");
- patch_loc_t* patches = reinterpret_cast<patch_loc_t*>(oat_file->Begin() + patches_sec->sh_offset);
- patch_loc_t* patches_end = patches + (patches_sec->sh_size / sizeof(patch_loc_t));
- auto oat_text_sec = oat_file->FindSectionByName(".text");
- CHECK(oat_text_sec != nullptr);
- uint8_t* to_patch = oat_file->Begin() + oat_text_sec->sh_offset;
- uintptr_t to_patch_end = reinterpret_cast<uintptr_t>(to_patch) + oat_text_sec->sh_size;
-
- for (; patches < patches_end; patches++) {
- CHECK_LT(*patches, oat_text_sec->sh_size) << "Bad Patch";
- uint32_t* patch_loc = reinterpret_cast<uint32_t*>(to_patch + *patches);
- CHECK_LT(reinterpret_cast<uintptr_t>(patch_loc), to_patch_end);
- *patch_loc += delta_;
- }
- return true;
-}
-
static int orig_argc;
static char** orig_argv;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 578df3a..86f9118 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -109,20 +109,34 @@
template <typename ElfFileImpl>
bool PatchElf(ElfFileImpl* oat_file);
template <typename ElfFileImpl>
- bool PatchTextSection(ElfFileImpl* oat_file);
- // Templatized version to actually do the patching with the right sized offsets.
- template <typename ElfFileImpl, typename patch_loc_t> bool PatchTextSection(ElfFileImpl* oat_file);
- template <typename ElfFileImpl, typename patch_loc_t> bool CheckOatFile(ElfFileImpl* oat_filec);
- template <typename ElfFileImpl>
bool PatchOatHeader(ElfFileImpl* oat_file);
bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool WriteElf(File* out);
bool WriteImage(File* out);
- mirror::Object* RelocatedCopyOf(mirror::Object*);
- mirror::Object* RelocatedAddressOf(mirror::Object* obj);
+ template <typename T>
+ T* RelocatedCopyOf(T* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
+ DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
+ uintptr_t heap_off =
+ reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
+ DCHECK_LT(heap_off, image_->Size());
+ return reinterpret_cast<T*>(image_->Begin() + heap_off);
+ }
+
+ template <typename T>
+ T* RelocatedAddressOfPointer(T* obj) {
+ return obj == nullptr ? nullptr :
+ reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(obj) + delta_);
+ }
// Look up the oat header from any elf file.
static const OatHeader* GetOatHeader(const ElfFile* elf_file);
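
Templatizing the two helpers is what lets PatchArtFields and PatchDexFileArrays reuse them for native ArtField* values. Their contract, restated as a self-contained sketch: RelocatedCopyOf maps an address in the mapped source heap to the same offset in the output image buffer, while RelocatedAddressOfPointer merely shifts by the load delta:

// Sketch of the two relocation mappings, with raw byte ranges standing in for
// the heap_ and image_ MemMaps.
#include <cstddef>
#include <cstdint>

struct Relocator {
  uint8_t* heap_begin;   // where the source image is currently mapped
  uint8_t* image_begin;  // output buffer being written
  intptr_t delta;        // requested load-address shift

  // Same object, but located in the output buffer (same heap offset).
  template <typename T>
  T* RelocatedCopyOf(T* obj) {
    if (obj == nullptr) return nullptr;
    size_t off = reinterpret_cast<uint8_t*>(obj) - heap_begin;
    return reinterpret_cast<T*>(image_begin + off);
  }

  // The address the object will have once the image is loaded at +delta.
  template <typename T>
  T* RelocatedAddressOfPointer(T* obj) {
    if (obj == nullptr) return nullptr;
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(obj) + delta);
  }
};
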
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c0e7f47..d3488fc 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -19,6 +19,7 @@
include art/build/Android.common_build.mk
LIBART_COMMON_SRC_FILES := \
+ art_field.cc \
atomic.cc.arm \
barrier.cc \
base/allocator.cc \
@@ -96,9 +97,9 @@
jit/jit_instrumentation.cc \
jni_internal.cc \
jobject_comparator.cc \
+ linear_alloc.cc \
mem_map.cc \
memory_region.cc \
- mirror/art_field.cc \
mirror/art_method.cc \
mirror/array.cc \
mirror/class.cc \
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index b3e9242..39a8aa5 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -21,12 +21,6 @@
// Define special registers.
-// Register holding suspend check count down.
-// 32-bit is enough for the suspend register.
-#define wSUSPEND w19
-// xSUSPEND is 64-bit view of wSUSPEND.
-// Used to save/restore the register scratched by managed code.
-#define xSUSPEND x19
// Register holding Thread::Current().
#define xSELF x18
// x18 is not preserved by aapcs64, save it on xETR(External Thread reg) for restore and later use.
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 989ecc6..998f567 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -20,7 +20,7 @@
#include "asm_support.h"
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 96
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 112
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 224
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 3b3e2c9..e59ff58 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -66,13 +66,12 @@
return fix_cortex_a53_843419_;
}
- // TODO: Tune this on a per CPU basis. For now, we pessimistically assume
- // that all ARM64 CPUs prefer explicit memory barriers over acquire-release.
- //
- // NOTE: This should not be the case! However we want to exercise the
- // explicit memory barriers code paths in the Optimizing Compiler.
+ // NOTE: This flag can be tuned on a per-CPU basis. In general, all ARMv8 CPUs
+ // should prefer the acquire-release semantics over explicit DMBs when
+ // handling volatile loads/stores. For a concrete use case, see the ARM64
+ // Optimizing backend.
bool PreferAcquireRelease() const {
- return false;
+ return true;
}
virtual ~Arm64InstructionSetFeatures() {}
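
The flag is a codegen policy switch: when true, an ARM64 backend lowers volatile accesses with acquire/release instructions instead of plain accesses bracketed by explicit barriers. A schematic of the decision (the commented instructions show one common lowering, not ART's exact sequence):

// Schematic only: how a backend might consult the flag for a volatile load.
bool PreferAcquireRelease();  // provided by Arm64InstructionSetFeatures

void EmitVolatileLoad(bool prefer_acq_rel) {
  if (prefer_acq_rel) {
    // ldar w0, [x1]          -- load-acquire; ordering comes from the instruction
  } else {
    // ldr w0, [x1]
    // dmb ish                -- explicit barrier after the plain load
  }
}
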
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 753107b..599f24e 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -31,7 +31,7 @@
EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
EXPECT_EQ(arm64_features->AsBitmap(), 3U);
// See the comments in instruction_set_features_arm64.h.
- EXPECT_FALSE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
+ EXPECT_TRUE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
}
} // namespace art
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index b4de879..4079436 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -48,8 +48,8 @@
stp d12, d13, [sp, #40]
stp d14, d15, [sp, #56]
- // Reserved registers
- stp xSELF, xSUSPEND, [sp, #72]
+ // Thread register and x19 (callee-save)
+ stp xSELF, x19, [sp, #72]
.cfi_rel_offset x18, 72
.cfi_rel_offset x19, 80
@@ -99,38 +99,39 @@
THIS_LOAD_REQUIRES_READ_BARRIER
ldr wIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
- sub sp, sp, #96
- .cfi_adjust_cfa_offset 96
+ sub sp, sp, #112
+ .cfi_adjust_cfa_offset 112
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 112)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
// Callee-saves
- stp x20, x21, [sp, #8]
- .cfi_rel_offset x20, 8
- .cfi_rel_offset x21, 16
+ stp x19, x20, [sp, #16]
+ .cfi_rel_offset x19, 16
+ .cfi_rel_offset x20, 24
- stp x22, x23, [sp, #24]
- .cfi_rel_offset x22, 24
- .cfi_rel_offset x23, 32
+ stp x21, x22, [sp, #32]
+ .cfi_rel_offset x21, 32
+ .cfi_rel_offset x22, 40
- stp x24, x25, [sp, #40]
- .cfi_rel_offset x24, 40
- .cfi_rel_offset x25, 48
+ stp x23, x24, [sp, #48]
+ .cfi_rel_offset x23, 48
+ .cfi_rel_offset x24, 56
- stp x26, x27, [sp, #56]
- .cfi_rel_offset x26, 56
- .cfi_rel_offset x27, 64
+ stp x25, x26, [sp, #64]
+ .cfi_rel_offset x25, 64
+ .cfi_rel_offset x26, 72
- stp x28, x29, [sp, #72]
- .cfi_rel_offset x28, 72
- .cfi_rel_offset x29, 80
+ stp x27, x28, [sp, #80]
+ .cfi_rel_offset x27, 80
+ .cfi_rel_offset x28, 88
- // LR
- str xLR, [sp, #88]
- .cfi_rel_offset x30, 88
+ // x29(callee-save) and LR
+ stp x29, xLR, [sp, #96]
+ .cfi_rel_offset x29, 96
+ .cfi_rel_offset x30, 104
// Save xSELF to xETR.
mov xETR, xSELF
@@ -148,32 +149,33 @@
mov xSELF, xETR
// Callee-saves
- ldp x20, x21, [sp, #8]
+ ldp x19, x20, [sp, #16]
+ .cfi_restore x19
.cfi_restore x20
+
+ ldp x21, x22, [sp, #32]
.cfi_restore x21
-
- ldp x22, x23, [sp, #24]
.cfi_restore x22
+
+ ldp x23, x24, [sp, #48]
.cfi_restore x23
-
- ldp x24, x25, [sp, #40]
.cfi_restore x24
+
+ ldp x25, x26, [sp, #64]
.cfi_restore x25
-
- ldp x26, x27, [sp, #56]
.cfi_restore x26
+
+ ldp x27, x28, [sp, #80]
.cfi_restore x27
-
- ldp x28, x29, [sp, #72]
.cfi_restore x28
- .cfi_restore x29
- // LR
- ldr xLR, [sp, #88]
+ // x29(callee-save) and LR
+ ldp x29, xLR, [sp, #96]
+ .cfi_restore x29
.cfi_restore x30
- add sp, sp, #96
- .cfi_adjust_cfa_offset -96
+ add sp, sp, #112
+ .cfi_adjust_cfa_offset -112
.endm
.macro POP_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -183,8 +185,8 @@
ldr xETR, [sp, #16]
.cfi_restore x21
- add sp, sp, #96
- .cfi_adjust_cfa_offset -96
+ add sp, sp, #112
+ .cfi_adjust_cfa_offset -112
.endm
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
@@ -202,30 +204,33 @@
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
- // FP args
- stp d0, d1, [sp, #16]
- stp d2, d3, [sp, #32]
- stp d4, d5, [sp, #48]
- stp d6, d7, [sp, #64]
+ // FP args.
+ stp d0, d1, [sp, #8]
+ stp d2, d3, [sp, #24]
+ stp d4, d5, [sp, #40]
+ stp d6, d7, [sp, #56]
- // args and x20(callee-save)
- stp x1, x2, [sp, #80]
- .cfi_rel_offset x1, 80
- .cfi_rel_offset x2, 88
+ // Core args.
+ str x1, [sp, #72]
+ .cfi_rel_offset x1, 72
- stp x3, x4, [sp, #96]
- .cfi_rel_offset x3, 96
- .cfi_rel_offset x4, 104
+ stp x2, x3, [sp, #80]
+ .cfi_rel_offset x2, 80
+ .cfi_rel_offset x3, 88
- stp x5, x6, [sp, #112]
- .cfi_rel_offset x5, 112
- .cfi_rel_offset x6, 120
+ stp x4, x5, [sp, #96]
+ .cfi_rel_offset x4, 96
+ .cfi_rel_offset x5, 104
- stp x7, x20, [sp, #128]
- .cfi_rel_offset x7, 128
- .cfi_rel_offset x20, 136
+ stp x6, x7, [sp, #112]
+ .cfi_rel_offset x6, 112
+ .cfi_rel_offset x7, 120
// Callee-saves.
+ stp x19, x20, [sp, #128]
+ .cfi_rel_offset x19, 128
+ .cfi_rel_offset x20, 136
+
stp x21, x22, [sp, #144]
.cfi_rel_offset x21, 144
.cfi_rel_offset x22, 152
@@ -289,30 +294,33 @@
// Restore xSELF.
mov xSELF, xETR
- // FP args
- ldp d0, d1, [sp, #16]
- ldp d2, d3, [sp, #32]
- ldp d4, d5, [sp, #48]
- ldp d6, d7, [sp, #64]
+ // FP args.
+ ldp d0, d1, [sp, #8]
+ ldp d2, d3, [sp, #24]
+ ldp d4, d5, [sp, #40]
+ ldp d6, d7, [sp, #56]
- // args and x20(callee-save)
- ldp x1, x2, [sp, #80]
+ // Core args.
+ ldr x1, [sp, #72]
.cfi_restore x1
+
+ ldp x2, x3, [sp, #80]
.cfi_restore x2
-
- ldp x3, x4, [sp, #96]
.cfi_restore x3
+
+ ldp x4, x5, [sp, #96]
.cfi_restore x4
-
- ldp x5, x6, [sp, #112]
.cfi_restore x5
- .cfi_restore x6
- ldp x7, x20, [sp, #128]
+ ldp x6, x7, [sp, #112]
+ .cfi_restore x6
.cfi_restore x7
- .cfi_restore x20
// Callee-saves.
+ ldp x19, x20, [sp, #128]
+ .cfi_restore x19
+ .cfi_restore x20
+
ldp x21, x22, [sp, #144]
.cfi_restore x21
.cfi_restore x22
@@ -499,7 +507,7 @@
.macro INVOKE_STUB_CREATE_FRAME
-SAVE_SIZE=15*8 // x4, x5, x20, x21, x22, x23, x24, x25, x26, x27, x28, xSUSPEND, SP, LR, FP saved.
+SAVE_SIZE=15*8 // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
@@ -534,7 +542,7 @@
.cfi_rel_offset x20, 48
.cfi_rel_offset x21, 56
- stp x9, xSUSPEND, [x10, #32] // Save old stack pointer and xSUSPEND
+ stp x9, x19, [x10, #32] // Save old stack pointer and x19.
.cfi_rel_offset sp, 32
.cfi_rel_offset x19, 40
@@ -549,7 +557,6 @@
mov xFP, x10 // Use xFP now, as it's callee-saved.
.cfi_def_cfa_register x29
mov xSELF, x3 // Move thread pointer into SELF register.
- mov wSUSPEND, #SUSPEND_CHECK_INTERVAL // reset wSUSPEND to suspend check interval
// Copy arguments into stack frame.
// Use simple copy routine for now.
@@ -634,7 +641,7 @@
str x0, [x4]
.Lexit_art_quick_invoke_stub\@:
- ldp x2, xSUSPEND, [xFP, #32] // Restore stack pointer and xSUSPEND.
+ ldp x2, x19, [xFP, #32] // Restore stack pointer and x19.
.cfi_restore x19
mov sp, x2
.cfi_restore sp
@@ -662,7 +669,9 @@
* | FP'' | <- SP'
* +----------------------+
* +----------------------+
- * | x19 | <- Used as wSUSPEND, won't be restored by managed code.
+ * | x28 | <- TODO: Remove callee-saves.
+ * | : |
+ * | x19 |
* | SP' |
* | X5 |
* | X4 | Saved registers
@@ -680,7 +689,6 @@
* x1-x7 - integer parameters.
* d0-d7 - Floating point parameters.
* xSELF = self
- * wSUSPEND = suspend count
* SP = & of ArtMethod*
* x1 = "this" pointer.
*
@@ -1388,12 +1396,11 @@
GENERATE_ALL_ALLOC_ENTRYPOINTS
/*
- * Called by managed code when the value in wSUSPEND has been decremented to 0.
+ * Called by managed code when the thread has been asked to suspend.
*/
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
ldrh w0, [xSELF, #THREAD_FLAGS_OFFSET] // get xSELF->state_and_flags.as_struct.flags
- mov wSUSPEND, #SUSPEND_CHECK_INTERVAL // reset wSUSPEND to SUSPEND_CHECK_INTERVAL
cbnz w0, .Lneed_suspend // check flags == 0
ret // return if flags == 0
.Lneed_suspend:
@@ -1477,6 +1484,7 @@
* | X22 | callee save
* | X21 | callee save
* | X20 | callee save
+ * | X19 | callee save
* | X7 | arg7
* | X6 | arg6
* | X5 | arg5
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index 0e1e32b..61b4dff 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -33,10 +33,10 @@
(1 << art::arm64::LR);
// Callee saved registers
static constexpr uint32_t kArm64CalleeSaveRefSpills =
- (1 << art::arm64::X20) | (1 << art::arm64::X21) | (1 << art::arm64::X22) |
- (1 << art::arm64::X23) | (1 << art::arm64::X24) | (1 << art::arm64::X25) |
- (1 << art::arm64::X26) | (1 << art::arm64::X27) | (1 << art::arm64::X28) |
- (1 << art::arm64::X29);
+ (1 << art::arm64::X19) | (1 << art::arm64::X20) | (1 << art::arm64::X21) |
+ (1 << art::arm64::X22) | (1 << art::arm64::X23) | (1 << art::arm64::X24) |
+ (1 << art::arm64::X25) | (1 << art::arm64::X26) | (1 << art::arm64::X27) |
+ (1 << art::arm64::X28) | (1 << art::arm64::X29);
// X0 is the method pointer. Not saved.
static constexpr uint32_t kArm64CalleeSaveArgSpills =
(1 << art::arm64::X1) | (1 << art::arm64::X2) | (1 << art::arm64::X3) |
@@ -44,9 +44,7 @@
(1 << art::arm64::X7);
static constexpr uint32_t kArm64CalleeSaveAllSpills =
// Thread register.
- (1 << art::arm64::X18) |
- // Suspend register.
- 1 << art::arm64::X19;
+ (1 << art::arm64::X18);
static constexpr uint32_t kArm64CalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kArm64CalleeSaveFpRefSpills = 0;
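
Moving X19 out of the all-spills set and into the ref-spills set is what grows the refs-only frame from 96 to 112 bytes: the core spill mask is now X19-X29 plus LR, twelve registers in total. A worked check, assuming ART's usual frame-size formula (spill area plus the 4-byte StackReference<ArtMethod>, rounded up to 16-byte stack alignment):

// Worked check of FRAME_SIZE_REFS_ONLY_CALLEE_SAVE after this change.
#include <cstddef>

constexpr size_t kCoreSpills = 12;   // X19..X29 + LR
constexpr size_t kFpSpills = 0;
constexpr size_t kMethodRef = 4;     // StackReference<ArtMethod>
constexpr size_t kStackAlign = 16;

constexpr size_t RoundUpTo(size_t x, size_t n) { return (x + n - 1) / n * n; }

constexpr size_t kFrame =
    RoundUpTo((kCoreSpills + kFpSpills) * 8 + kMethodRef, kStackAlign);
static_assert(kFrame == 112, "matches FRAME_SIZE_REFS_ONLY_CALLEE_SAVE");
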
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 0769687..9cccf7c 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -16,9 +16,10 @@
#include <cstdio>
+#include "art_field-inl.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h"
@@ -1305,7 +1306,7 @@
}
-static void GetSetBooleanStatic(Handle<mirror::ArtField>* f, Thread* self,
+static void GetSetBooleanStatic(ArtField* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
@@ -1313,14 +1314,14 @@
uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };
for (size_t i = 0; i < num_values; ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
StubTest::GetEntrypoint(self, kQuickSet8Static),
self,
referrer);
- size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
self,
@@ -1335,21 +1336,21 @@
std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetByteStatic(Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetByteStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
StubTest::GetEntrypoint(self, kQuickSet8Static),
self,
referrer);
- size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
StubTest::GetEntrypoint(self, kQuickGetByteStatic),
self,
@@ -1365,26 +1366,26 @@
}
-static void GetSetBooleanInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
StubTest::GetEntrypoint(self, kQuickSet8Instance),
self,
referrer);
- uint8_t res = f->Get()->GetBoolean(obj->Get());
+ uint8_t res = f->GetBoolean(obj->Get());
EXPECT_EQ(values[i], res) << "Iteration " << i;
- f->Get()->SetBoolean<false>(obj->Get(), res);
+ f->SetBoolean<false>(obj->Get(), res);
- size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
@@ -1399,25 +1400,25 @@
std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetByteInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
StubTest::GetEntrypoint(self, kQuickSet8Instance),
self,
referrer);
- int8_t res = f->Get()->GetByte(obj->Get());
+ int8_t res = f->GetByte(obj->Get());
EXPECT_EQ(res, values[i]) << "Iteration " << i;
- f->Get()->SetByte<false>(obj->Get(), ++res);
+ f->SetByte<false>(obj->Get(), ++res);
- size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
StubTest::GetEntrypoint(self, kQuickGetByteInstance),
@@ -1433,21 +1434,21 @@
#endif
}
-static void GetSetCharStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSetCharStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
StubTest::GetEntrypoint(self, kQuickSet16Static),
self,
referrer);
- size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
StubTest::GetEntrypoint(self, kQuickGetCharStatic),
self,
@@ -1462,21 +1463,21 @@
std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetShortStatic(Handle<mirror::ArtField>* f, Thread* self,
+static void GetSetShortStatic(ArtField* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
StubTest::GetEntrypoint(self, kQuickSet16Static),
self,
referrer);
- size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
StubTest::GetEntrypoint(self, kQuickGetShortStatic),
self,
@@ -1492,25 +1493,25 @@
#endif
}
-static void GetSetCharInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
+ Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
StubTest::GetEntrypoint(self, kQuickSet16Instance),
self,
referrer);
- uint16_t res = f->Get()->GetChar(obj->Get());
+ uint16_t res = f->GetChar(obj->Get());
EXPECT_EQ(res, values[i]) << "Iteration " << i;
- f->Get()->SetChar<false>(obj->Get(), ++res);
+ f->SetChar<false>(obj->Get(), ++res);
- size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
StubTest::GetEntrypoint(self, kQuickGetCharInstance),
@@ -1525,25 +1526,25 @@
std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetShortInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
StubTest::GetEntrypoint(self, kQuickSet16Instance),
self,
referrer);
- int16_t res = f->Get()->GetShort(obj->Get());
+ int16_t res = f->GetShort(obj->Get());
EXPECT_EQ(res, values[i]) << "Iteration " << i;
- f->Get()->SetShort<false>(obj->Get(), ++res);
+ f->SetShort<false>(obj->Get(), ++res);
- size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
StubTest::GetEntrypoint(self, kQuickGetShortInstance),
@@ -1559,21 +1560,21 @@
#endif
}
-static void GetSet32Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSet32Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
StubTest::GetEntrypoint(self, kQuickSet32Static),
self,
referrer);
- size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
StubTest::GetEntrypoint(self, kQuickGet32Static),
self,
@@ -1590,27 +1591,27 @@
}
-static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
StubTest::GetEntrypoint(self, kQuickSet32Instance),
self,
referrer);
- int32_t res = f->Get()->GetInt(obj->Get());
+ int32_t res = f->GetInt(obj->Get());
EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;
res++;
- f->Get()->SetInt<false>(obj->Get(), res);
+ f->SetInt<false>(obj->Get(), res);
- size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
StubTest::GetEntrypoint(self, kQuickGet32Instance),
@@ -1649,17 +1650,17 @@
}
#endif
-static void GetSetObjStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSetObjStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
+ set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
// Allocate a string object for simplicity.
mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
- set_and_check_static((*f)->GetDexFieldIndex(), str, self, referrer, test);
+ set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);
- set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
+ set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
@@ -1670,18 +1671,18 @@
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object* trg,
+static void set_and_check_instance(ArtField* f, mirror::Object* trg,
mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
reinterpret_cast<size_t>(val),
StubTest::GetEntrypoint(self, kQuickSetObjInstance),
self,
referrer);
- size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
0U,
StubTest::GetEntrypoint(self, kQuickGetObjInstance),
@@ -1690,11 +1691,11 @@
EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
- EXPECT_EQ(val, f->Get()->GetObj(trg));
+ EXPECT_EQ(val, f->GetObj(trg));
}
#endif
-static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
@@ -1716,20 +1717,20 @@
// TODO: Complete these tests for 32b architectures.
-static void GetSet64Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+static void GetSet64Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3UWithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
values[i],
StubTest::GetEntrypoint(self, kQuickSet64Static),
self,
referrer);
- size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
StubTest::GetEntrypoint(self, kQuickGet64Static),
self,
@@ -1746,27 +1747,27 @@
}
-static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
- test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
StubTest::GetEntrypoint(self, kQuickSet64Instance),
self,
referrer);
- int64_t res = f->Get()->GetLong(obj->Get());
+ int64_t res = f->GetLong(obj->Get());
EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;
res++;
- f->Get()->SetLong<false>(obj->Get(), res);
+ f->SetLong<false>(obj->Get(), res);
- size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+ size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
StubTest::GetEntrypoint(self, kQuickGet64Instance),
@@ -1792,7 +1793,7 @@
CHECK(o != NULL);
ScopedObjectAccess soa(self);
- StackHandleScope<5> hs(self);
+ StackHandleScope<4> hs(self);
Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
// Need a method as a referrer
@@ -1801,112 +1802,80 @@
// Play with it...
// Static fields.
- {
- Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetSFields()));
- int32_t num_fields = fields->GetLength();
- for (int32_t i = 0; i < num_fields; ++i) {
- StackHandleScope<1> hs2(self);
- Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i)));
-
- Primitive::Type type = f->GetTypeAsPrimitiveType();
- switch (type) {
- case Primitive::Type::kPrimBoolean:
- if (test_type == type) {
- GetSetBooleanStatic(&f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimByte:
- if (test_type == type) {
- GetSetByteStatic(&f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimChar:
- if (test_type == type) {
- GetSetCharStatic(&f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimShort:
- if (test_type == type) {
- GetSetShortStatic(&f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimInt:
- if (test_type == type) {
- GetSet32Static(&f, self, m.Get(), test);
- }
- break;
-
- case Primitive::Type::kPrimLong:
- if (test_type == type) {
- GetSet64Static(&f, self, m.Get(), test);
- }
- break;
-
- case Primitive::Type::kPrimNot:
- // Don't try array.
- if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(&f, self, m.Get(), test);
- }
- break;
-
- default:
- break; // Skip.
- }
+ ArtField* fields = c->GetSFields();
+ size_t num_fields = c->NumStaticFields();
+ for (size_t i = 0; i < num_fields; ++i) {
+ ArtField* f = &fields[i];
+ Primitive::Type type = f->GetTypeAsPrimitiveType();
+ if (test_type != type) {
+ continue;
+ }
+ switch (type) {
+ case Primitive::Type::kPrimBoolean:
+ GetSetBooleanStatic(f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimByte:
+ GetSetByteStatic(f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimChar:
+ GetSetCharStatic(f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimShort:
+ GetSetShortStatic(f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimInt:
+ GetSet32Static(f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimLong:
+ GetSet64Static(f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimNot:
+ // Don't try array.
+ if (f->GetTypeDescriptor()[0] != '[') {
+ GetSetObjStatic(f, self, m.Get(), test);
+ }
+ break;
+ default:
+ break; // Skip.
}
}
// Instance fields.
- {
- Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetIFields()));
- int32_t num_fields = fields->GetLength();
- for (int32_t i = 0; i < num_fields; ++i) {
- StackHandleScope<1> hs2(self);
- Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i)));
-
- Primitive::Type type = f->GetTypeAsPrimitiveType();
- switch (type) {
- case Primitive::Type::kPrimBoolean:
- if (test_type == type) {
- GetSetBooleanInstance(&obj, &f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimByte:
- if (test_type == type) {
- GetSetByteInstance(&obj, &f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimChar:
- if (test_type == type) {
- GetSetCharInstance(&obj, &f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimShort:
- if (test_type == type) {
- GetSetShortInstance(&obj, &f, self, m.Get(), test);
- }
- break;
- case Primitive::Type::kPrimInt:
- if (test_type == type) {
- GetSet32Instance(&obj, &f, self, m.Get(), test);
- }
- break;
-
- case Primitive::Type::kPrimLong:
- if (test_type == type) {
- GetSet64Instance(&obj, &f, self, m.Get(), test);
- }
- break;
-
- case Primitive::Type::kPrimNot:
- // Don't try array.
- if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
- GetSetObjInstance(&obj, &f, self, m.Get(), test);
- }
- break;
-
- default:
- break; // Skip.
- }
+ fields = c->GetIFields();
+ num_fields = c->NumInstanceFields();
+ for (size_t i = 0; i < num_fields; ++i) {
+ ArtField* f = &fields[i];
+ Primitive::Type type = f->GetTypeAsPrimitiveType();
+ if (test_type != type) {
+ continue;
+ }
+ switch (type) {
+ case Primitive::Type::kPrimBoolean:
+ GetSetBooleanInstance(&obj, f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimByte:
+ GetSetByteInstance(&obj, f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimChar:
+ GetSetCharInstance(&obj, f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimShort:
+ GetSetShortInstance(&obj, f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimInt:
+ GetSet32Instance(&obj, f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimLong:
+ GetSet64Instance(&obj, f, self, m.Get(), test);
+ break;
+ case Primitive::Type::kPrimNot:
+ // Don't try array.
+ if (f->GetTypeDescriptor()[0] != '[') {
+ GetSetObjInstance(&obj, f, self, m.Get(), test);
+ }
+ break;
+ default:
+ break; // Skip.
}
}
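
The rewritten loops above show the heart of this change: ArtField is no longer a mirror object, so the test walks each class's native field array directly instead of materializing Handle<mirror::ArtField> wrappers. A minimal sketch of the new access pattern, assuming klass is a mirror::Class* held live under the mutator lock:

    // Fields live in a contiguous native array owned by the class, so no
    // handle scope is needed for the fields themselves.
    ArtField* fields = klass->GetSFields();
    for (size_t i = 0, n = klass->NumStaticFields(); i < n; ++i) {
      ArtField* f = &fields[i];
      if (f->GetTypeAsPrimitiveType() == Primitive::kPrimInt) {
        // ... exercise the field through the quick entrypoints ...
      }
    }
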
diff --git a/runtime/mirror/art_field-inl.h b/runtime/art_field-inl.h
similarity index 75%
rename from runtime/mirror/art_field-inl.h
rename to runtime/art_field-inl.h
index 986852f..a2625e2 100644
--- a/runtime/mirror/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -14,57 +14,52 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_MIRROR_ART_FIELD_INL_H_
-#define ART_RUNTIME_MIRROR_ART_FIELD_INL_H_
+#ifndef ART_RUNTIME_ART_FIELD_INL_H_
+#define ART_RUNTIME_ART_FIELD_INL_H_
#include "art_field.h"
#include "base/logging.h"
#include "class_linker.h"
-#include "dex_cache.h"
+#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "jvalue.h"
-#include "object-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object-inl.h"
#include "primitive.h"
#include "thread-inl.h"
#include "scoped_thread_state_change.h"
#include "well_known_classes.h"
namespace art {
-namespace mirror {
-inline uint32_t ArtField::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength;
- return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
-}
-
-inline Class* ArtField::GetDeclaringClass() {
- Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_));
- DCHECK(result != NULL);
+inline mirror::Class* ArtField::GetDeclaringClass() {
+ mirror::Class* result = declaring_class_.Read();
+ DCHECK(result != nullptr);
DCHECK(result->IsLoaded() || result->IsErroneous());
return result;
}
-inline void ArtField::SetDeclaringClass(Class *new_declaring_class) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_), new_declaring_class);
+inline void ArtField::SetDeclaringClass(mirror::Class* new_declaring_class) {
+ declaring_class_ = GcRoot<mirror::Class>(new_declaring_class);
}
inline uint32_t ArtField::GetAccessFlags() {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_));
+ return access_flags_;
}
inline MemberOffset ArtField::GetOffset() {
DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
- return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_)));
+ return MemberOffset(offset_);
}
inline MemberOffset ArtField::GetOffsetDuringLinking() {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_)));
+ return MemberOffset(offset_);
}
-inline uint32_t ArtField::Get32(Object* object) {
+inline uint32_t ArtField::Get32(mirror::Object* object) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -74,7 +69,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::Set32(Object* object, uint32_t new_value) {
+inline void ArtField::Set32(mirror::Object* object, uint32_t new_value) {
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -84,7 +79,7 @@
}
}
-inline uint64_t ArtField::Get64(Object* object) {
+inline uint64_t ArtField::Get64(mirror::Object* object) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -94,7 +89,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::Set64(Object* object, uint64_t new_value) {
+inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -104,17 +99,17 @@
}
}
-inline Object* ArtField::GetObj(Object* object) {
+inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
- return object->GetFieldObjectVolatile<Object>(GetOffset());
+ return object->GetFieldObjectVolatile<mirror::Object>(GetOffset());
}
- return object->GetFieldObject<Object>(GetOffset());
+ return object->GetFieldObject<mirror::Object>(GetOffset());
}
template<bool kTransactionActive>
-inline void ArtField::SetObj(Object* object, Object* new_value) {
+inline void ArtField::SetObj(mirror::Object* object, mirror::Object* new_value) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
@@ -143,46 +138,46 @@
object->SetField ## type<kTransactionActive>(GetOffset(), value); \
}
-inline uint8_t ArtField::GetBoolean(Object* object) {
+inline uint8_t ArtField::GetBoolean(mirror::Object* object) {
FIELD_GET(object, Boolean);
}
template<bool kTransactionActive>
-inline void ArtField::SetBoolean(Object* object, uint8_t z) {
+inline void ArtField::SetBoolean(mirror::Object* object, uint8_t z) {
FIELD_SET(object, Boolean, z);
}
-inline int8_t ArtField::GetByte(Object* object) {
+inline int8_t ArtField::GetByte(mirror::Object* object) {
FIELD_GET(object, Byte);
}
template<bool kTransactionActive>
-inline void ArtField::SetByte(Object* object, int8_t b) {
+inline void ArtField::SetByte(mirror::Object* object, int8_t b) {
FIELD_SET(object, Byte, b);
}
-inline uint16_t ArtField::GetChar(Object* object) {
+inline uint16_t ArtField::GetChar(mirror::Object* object) {
FIELD_GET(object, Char);
}
template<bool kTransactionActive>
-inline void ArtField::SetChar(Object* object, uint16_t c) {
+inline void ArtField::SetChar(mirror::Object* object, uint16_t c) {
FIELD_SET(object, Char, c);
}
-inline int16_t ArtField::GetShort(Object* object) {
+inline int16_t ArtField::GetShort(mirror::Object* object) {
FIELD_GET(object, Short);
}
template<bool kTransactionActive>
-inline void ArtField::SetShort(Object* object, int16_t s) {
+inline void ArtField::SetShort(mirror::Object* object, int16_t s) {
FIELD_SET(object, Short, s);
}
#undef FIELD_GET
#undef FIELD_SET
-inline int32_t ArtField::GetInt(Object* object) {
+inline int32_t ArtField::GetInt(mirror::Object* object) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this);
@@ -191,7 +186,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetInt(Object* object, int32_t i) {
+inline void ArtField::SetInt(mirror::Object* object, int32_t i) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimInt || type == Primitive::kPrimFloat) << PrettyField(this);
@@ -199,7 +194,7 @@
Set32<kTransactionActive>(object, i);
}
-inline int64_t ArtField::GetLong(Object* object) {
+inline int64_t ArtField::GetLong(mirror::Object* object) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this);
@@ -208,7 +203,7 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetLong(Object* object, int64_t j) {
+inline void ArtField::SetLong(mirror::Object* object, int64_t j) {
if (kIsDebugBuild) {
Primitive::Type type = GetTypeAsPrimitiveType();
CHECK(type == Primitive::kPrimLong || type == Primitive::kPrimDouble) << PrettyField(this);
@@ -216,7 +211,7 @@
Set64<kTransactionActive>(object, j);
}
-inline float ArtField::GetFloat(Object* object) {
+inline float ArtField::GetFloat(mirror::Object* object) {
DCHECK_EQ(Primitive::kPrimFloat, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetI(Get32(object));
@@ -224,14 +219,14 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetFloat(Object* object, float f) {
+inline void ArtField::SetFloat(mirror::Object* object, float f) {
DCHECK_EQ(Primitive::kPrimFloat, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetF(f);
Set32<kTransactionActive>(object, bits.GetI());
}
-inline double ArtField::GetDouble(Object* object) {
+inline double ArtField::GetDouble(mirror::Object* object) {
DCHECK_EQ(Primitive::kPrimDouble, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetJ(Get64(object));
@@ -239,20 +234,20 @@
}
template<bool kTransactionActive>
-inline void ArtField::SetDouble(Object* object, double d) {
+inline void ArtField::SetDouble(mirror::Object* object, double d) {
DCHECK_EQ(Primitive::kPrimDouble, GetTypeAsPrimitiveType()) << PrettyField(this);
JValue bits;
bits.SetD(d);
Set64<kTransactionActive>(object, bits.GetJ());
}
-inline Object* ArtField::GetObject(Object* object) {
+inline mirror::Object* ArtField::GetObject(mirror::Object* object) {
DCHECK_EQ(Primitive::kPrimNot, GetTypeAsPrimitiveType()) << PrettyField(this);
return GetObj(object);
}
template<bool kTransactionActive>
-inline void ArtField::SetObject(Object* object, Object* l) {
+inline void ArtField::SetObject(mirror::Object* object, mirror::Object* l) {
DCHECK_EQ(Primitive::kPrimNot, GetTypeAsPrimitiveType()) << PrettyField(this);
SetObj<kTransactionActive>(object, l);
}
@@ -291,19 +286,18 @@
}
template <bool kResolve>
-inline Class* ArtField::GetType() {
+inline mirror::Class* ArtField::GetType() {
const uint32_t field_index = GetDexFieldIndex();
auto* declaring_class = GetDeclaringClass();
if (UNLIKELY(declaring_class->IsProxyClass())) {
- return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
- GetTypeDescriptor());
+ return ProxyFindSystemClass(GetTypeDescriptor());
}
auto* dex_cache = declaring_class->GetDexCache();
const DexFile* const dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
mirror::Class* type = dex_cache->GetResolvedType(field_id.type_idx_);
if (kResolve && UNLIKELY(type == nullptr)) {
- type = Runtime::Current()->GetClassLinker()->ResolveType(field_id.type_idx_, this);
+ type = ResolveGetType(field_id.type_idx_);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
}
return type;
@@ -321,7 +315,7 @@
return GetDexCache()->GetDexFile();
}
-inline String* ArtField::GetStringName(Thread* self, bool resolve) {
+inline mirror::String* ArtField::GetStringName(Thread* self, bool resolve) {
auto dex_field_index = GetDexFieldIndex();
CHECK_NE(dex_field_index, DexFile::kDexNoIndex);
auto* dex_cache = GetDexCache();
@@ -329,14 +323,11 @@
const auto& field_id = dex_file->GetFieldId(dex_field_index);
auto* name = dex_cache->GetResolvedString(field_id.name_idx_);
if (resolve && name == nullptr) {
- StackHandleScope<1> hs(self);
- name = Runtime::Current()->GetClassLinker()->ResolveString(
- *dex_file, field_id.name_idx_, hs.NewHandle(dex_cache));
+ name = ResolveGetStringName(self, *dex_file, field_id.name_idx_, dex_cache);
}
return name;
}
-} // namespace mirror
} // namespace art
-#endif // ART_RUNTIME_MIRROR_ART_FIELD_INL_H_
+#endif // ART_RUNTIME_ART_FIELD_INL_H_
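
With the accessors out of mirror::, reads and writes no longer bounce through Object::GetField32/SetField32 on the field object; access_flags_, field_dex_idx_ and offset_ are plain members, and the only managed reference is the declaring class held in a GcRoot. A sketch of caller code under the new API (FindDeclaredInstanceField is an assumed lookup on mirror::Class; any source of an ArtField* behaves the same):

    ArtField* f = klass->FindDeclaredInstanceField("count", "I");  // assumed helper
    int32_t v = f->GetInt(obj);    // debug builds CHECK the primitive type
    f->SetInt<false>(obj, v + 1);  // kTransactionActive = false
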
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
new file mode 100644
index 0000000..2aed440
--- /dev/null
+++ b/runtime/art_field.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_field.h"
+
+#include "art_field-inl.h"
+#include "class_linker-inl.h"
+#include "gc/accounting/card_table-inl.h"
+#include "handle_scope.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "utils.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+ArtField::ArtField() : access_flags_(0), field_dex_idx_(0), offset_(0) {
+ declaring_class_ = GcRoot<mirror::Class>(nullptr);
+}
+
+void ArtField::SetOffset(MemberOffset num_bytes) {
+ DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
+ if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
+ Runtime::Current()->IsCompilingBootImage()) {
+ Primitive::Type type = GetTypeAsPrimitiveType();
+ if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) {
+ DCHECK_ALIGNED(num_bytes.Uint32Value(), 8);
+ }
+ }
+ // Not called within a transaction.
+ offset_ = num_bytes.Uint32Value();
+}
+
+void ArtField::VisitRoots(RootVisitor* visitor) {
+ declaring_class_.VisitRoot(visitor, RootInfo(kRootStickyClass));
+}
+
+ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) {
+ DCHECK(klass != nullptr);
+ auto* instance_fields = klass->GetIFields();
+ for (size_t i = 0, count = klass->NumInstanceFields(); i < count; ++i) {
+ if (instance_fields[i].GetOffset().Uint32Value() == field_offset) {
+ return &instance_fields[i];
+ }
+ }
+  // The field was not found in this class: look in the superclass.
+ return (klass->GetSuperClass() != nullptr) ?
+ FindInstanceFieldWithOffset(klass->GetSuperClass(), field_offset) : nullptr;
+}
+
+mirror::Class* ArtField::ProxyFindSystemClass(const char* descriptor) {
+ DCHECK(GetDeclaringClass()->IsProxyClass());
+ return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor);
+}
+
+mirror::Class* ArtField::ResolveGetType(uint32_t type_idx) {
+ return Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this);
+}
+
+mirror::String* ArtField::ResolveGetStringName(Thread* self, const DexFile& dex_file,
+ uint32_t string_idx, mirror::DexCache* dex_cache) {
+ StackHandleScope<1> hs(self);
+ return Runtime::Current()->GetClassLinker()->ResolveString(
+ dex_file, string_idx, hs.NewHandle(dex_cache));
+}
+
+} // namespace art
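
FindInstanceFieldWithOffset scans the instance-field array and recurses into superclasses, which is exactly what diagnostics want when all that is known is a raw member offset. A hedged usage sketch, with klass and fault_offset as assumed inputs:

    ArtField* f = ArtField::FindInstanceFieldWithOffset(klass, fault_offset);
    if (f != nullptr) {
      LOG(INFO) << "Offset " << fault_offset << " belongs to " << PrettyField(f);
    }
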
diff --git a/runtime/art_field.h b/runtime/art_field.h
new file mode 100644
index 0000000..16c46f0
--- /dev/null
+++ b/runtime/art_field.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ART_FIELD_H_
+#define ART_RUNTIME_ART_FIELD_H_
+
+#include <jni.h>
+
+#include "gc_root.h"
+#include "modifiers.h"
+#include "object_callbacks.h"
+#include "offsets.h"
+#include "primitive.h"
+#include "read_barrier_option.h"
+
+namespace art {
+
+class DexFile;
+class ScopedObjectAccessAlreadyRunnable;
+
+namespace mirror {
+class Class;
+class DexCache;
+class Object;
+class String;
+} // namespace mirror
+
+class ArtField {
+ public:
+ ArtField();
+
+ mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetDeclaringClass(mirror::Class *new_declaring_class)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Not called within a transaction.
+ access_flags_ = new_access_flags;
+ }
+
+ bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccPublic) != 0;
+ }
+
+ bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccStatic) != 0;
+ }
+
+ bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccFinal) != 0;
+ }
+
+ uint32_t GetDexFieldIndex() {
+ return field_dex_idx_;
+ }
+
+ void SetDexFieldIndex(uint32_t new_idx) {
+ // Not called within a transaction.
+ field_dex_idx_ = new_idx;
+ }
+
+ // Offset to field within an Object.
+ MemberOffset GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static MemberOffset OffsetOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
+ }
+
+ MemberOffset GetOffsetDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Field access; pass a null object for static fields.
+ uint8_t GetBoolean(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetBoolean(mirror::Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ int8_t GetByte(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetByte(mirror::Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ uint16_t GetChar(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetChar(mirror::Object* object, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ int16_t GetShort(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetShort(mirror::Object* object, int16_t s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ int32_t GetInt(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetInt(mirror::Object* object, int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ int64_t GetLong(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetLong(mirror::Object* object, int64_t j) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ float GetFloat(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetFloat(mirror::Object* object, float f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ double GetDouble(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetDouble(mirror::Object* object, double d) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Object* GetObject(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetObject(mirror::Object* object, mirror::Object* l)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Raw field accesses.
+ uint32_t Get32(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void Set32(mirror::Object* object, uint32_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ uint64_t Get64(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void Set64(mirror::Object* object, uint64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Object* GetObj(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive>
+ void SetObj(mirror::Object* object, mirror::Object* new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void VisitRoots(RootVisitor* visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccVolatile) != 0;
+ }
+
+ // Returns an instance field with this offset in the given class or nullptr if not found.
+ static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Returns the name from the dex cache, resolving it first when 'resolve' is set.
+ mirror::String* GetStringName(Thread* self, bool resolve)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Primitive::Type GetTypeAsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <bool kResolve>
+ mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ GcRoot<mirror::Class>& DeclaringClassRoot() {
+ return declaring_class_;
+ }
+
+ private:
+ mirror::Class* ProxyFindSystemClass(const char* descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx,
+ mirror::DexCache* dex_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ GcRoot<mirror::Class> declaring_class_;
+
+ uint32_t access_flags_;
+
+  // Index of the field id in the dex file (also its dex cache index).
+ uint32_t field_dex_idx_;
+
+  // Offset of the field within an instance, or within the Class's static fields.
+ uint32_t offset_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ART_FIELD_H_
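
Nothing in this header depends on managed-object layout anymore: an ArtField is one GcRoot (a 32-bit compressed reference) plus three uint32_t members. Assuming that layout, its size is a fixed 16 bytes on both 32- and 64-bit targets, which a build-time check could pin down:

    // Assumes GcRoot<mirror::Class> is a single 32-bit compressed reference.
    static_assert(sizeof(art::ArtField) == 16,
                  "ArtField expected to be GcRoot + 3 * uint32_t");
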
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index dba4af8..8057dd1 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -28,8 +28,8 @@
#include "read_barrier_c.h"
-#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
-// In quick code for ARM, ARM64 and MIPS we make poor use of registers and perform frequent suspend
+#if defined(__arm__) || defined(__mips__)
+// In quick code for ARM and MIPS we make poor use of registers and perform frequent suspend
// checks in the event of loop back edges. The SUSPEND_CHECK_INTERVAL constant is loaded into a
// register at the point of an up-call or after handling a suspend check. It reduces the number of
// loads of the TLS suspend check value by the given amount (turning it into a decrement and compare
@@ -135,13 +135,13 @@
#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
art::mirror::Class::ComponentTypeOffset().Int32Value())
-#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (52 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (44 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
art::mirror::Class::AccessFlagsOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (80 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (92 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
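
The constants move because dropping the ArtField mirror arrays and embedding native field storage shifts members of mirror::Class. ADD_TEST_EQ is what keeps the hand-written assembly constants honest; when this header is included from C++ it boils down to a check of roughly this shape (a sketch, not the exact expansion):

    CHECK_EQ(MIRROR_CLASS_STATUS_OFFSET,
             art::mirror::Class::StatusOffset().Int32Value());
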
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index dd29404..b53fa84 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -20,6 +20,7 @@
#include "arena_allocator.h"
#include "logging.h"
+#include "mem_map.h"
#include "mutex.h"
#include "thread-inl.h"
#include <memcheck/memcheck.h>
@@ -132,16 +133,19 @@
free(reinterpret_cast<void*>(memory_));
}
-MemMapArena::MemMapArena(size_t size) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb) {
std::string error_msg;
- map_.reset(
- MemMap::MapAnonymous("dalvik-LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, false,
- false, &error_msg));
+ map_.reset(MemMap::MapAnonymous(
+ "LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
CHECK(map_.get() != nullptr) << error_msg;
memory_ = map_->Begin();
size_ = map_->Size();
}
+MemMapArena::~MemMapArena() {
+ // Destroys MemMap via std::unique_ptr<>.
+}
+
void MemMapArena::Release() {
if (bytes_allocated_ > 0) {
map_->MadviseDontNeedAndZero();
@@ -156,8 +160,12 @@
}
}
-ArenaPool::ArenaPool(bool use_malloc)
- : use_malloc_(use_malloc), lock_("Arena pool lock"), free_arenas_(nullptr) {
+ArenaPool::ArenaPool(bool use_malloc, bool low_4gb)
+ : use_malloc_(use_malloc), lock_("Arena pool lock", kArenaPoolLock), free_arenas_(nullptr),
+ low_4gb_(low_4gb) {
+ if (low_4gb) {
+ CHECK(!use_malloc) << "low4gb must use map implementation";
+ }
if (!use_malloc) {
MemMap::Init();
}
@@ -182,7 +190,8 @@
}
}
if (ret == nullptr) {
- ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) : new MemMapArena(size);
+ ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) :
+ new MemMapArena(size, low_4gb_);
}
ret->Reset();
return ret;
@@ -229,6 +238,17 @@
return ArenaAllocatorStats::BytesAllocated();
}
+size_t ArenaAllocator::BytesUsed() const {
+ size_t total = ptr_ - begin_;
+ if (arena_head_ != nullptr) {
+ for (Arena* cur_arena = arena_head_->next_; cur_arena != nullptr;
+ cur_arena = cur_arena->next_) {
+ total += cur_arena->GetBytesAllocated();
+ }
+ }
+ return total;
+}
+
ArenaAllocator::ArenaAllocator(ArenaPool* pool)
: pool_(pool),
begin_(nullptr),
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index cc7b856..ab5968c 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -22,7 +22,6 @@
#include "debug_stack.h"
#include "macros.h"
-#include "mem_map.h"
#include "mutex.h"
#include "utils.h"
@@ -33,6 +32,7 @@
class ArenaAllocator;
class ArenaStack;
class ScopedArenaAllocator;
+class MemMap;
class MemStats;
template <typename T>
@@ -165,8 +165,8 @@
class MemMapArena FINAL : public Arena {
public:
- explicit MemMapArena(size_t size = Arena::kDefaultSize);
- virtual ~MemMapArena() { }
+ explicit MemMapArena(size_t size, bool low_4gb);
+ virtual ~MemMapArena();
void Release() OVERRIDE;
private:
@@ -175,7 +175,7 @@
class ArenaPool {
public:
- explicit ArenaPool(bool use_malloc = true);
+ explicit ArenaPool(bool use_malloc = true, bool low_4gb = false);
~ArenaPool();
Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
@@ -188,6 +188,7 @@
const bool use_malloc_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
+ const bool low_4gb_;
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
@@ -227,6 +228,9 @@
void ObtainNewArenaForAllocation(size_t allocation_size);
size_t BytesAllocated() const;
MemStats GetMemStats() const;
+  // BytesUsed sums the bytes in use in the current arena plus the bytes allocated in all arenas chained after it.
+ // TODO: Change BytesAllocated to this behavior?
+ size_t BytesUsed() const;
private:
static constexpr size_t kAlignment = 8;
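
The new low_4gb flag lets a pool guarantee arena memory below 4 GiB so it stays addressable through 32-bit pointers on 64-bit targets; the renamed "LinearAlloc" mapping suggests the linear allocator that now backs native metadata is the intended user. A usage sketch:

    // low_4gb requires the MemMap-backed arenas; the constructor CHECKs
    // that use_malloc is false in that case.
    ArenaPool pool(/* use_malloc= */ false, /* low_4gb= */ true);
    Arena* arena = pool.AllocArena(Arena::kDefaultSize);
    // ... bump-allocate from the arena ...
    pool.FreeArenaChain(arena);
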
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index 812ed86..0f969b9 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -165,6 +165,23 @@
}
template <class Value>
+inline void Histogram<Value>::DumpBins(std::ostream& os) const {
+ DCHECK_GT(sample_size_, 0ull);
+ bool dumped_one = false;
+ for (size_t bin_idx = 0; bin_idx < frequency_.size(); ++bin_idx) {
+ if (frequency_[bin_idx] != 0U) {
+ if (dumped_one) {
+ // Prepend a comma if not the first bin.
+ os << ",";
+ } else {
+ dumped_one = true;
+ }
+ os << GetRange(bin_idx) << ":" << frequency_[bin_idx];
+ }
+ }
+}
+
+template <class Value>
inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double interval,
const CumulativeData& data) const {
static constexpr size_t kFractionalDigits = 3;
@@ -249,4 +266,3 @@
} // namespace art
#endif // ART_RUNTIME_BASE_HISTOGRAM_INL_H_
-
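
DumpBins emits one range:count pair per non-empty bin, comma-separated and without a trailing newline, so it suits single-line logging. A sketch of the expected shape, assuming the usual (name, initial bucket width) constructor; exact ranges depend on bucketing:

    Histogram<uint64_t> h("pause-times", /* initial_bucket_width= */ 10);
    h.AddValue(5);
    h.AddValue(7);
    h.AddValue(105);
    std::ostringstream oss;
    h.DumpBins(oss);  // e.g. "0:2,100:1"
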
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index 78f6e1c..c312fb2 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -61,6 +61,7 @@
void PrintConfidenceIntervals(std::ostream& os, double interval,
const CumulativeData& data) const;
void PrintBins(std::ostream& os, const CumulativeData& data) const;
+ void DumpBins(std::ostream& os) const;
Value GetRange(size_t bucket_idx) const;
size_t GetBucketCount() const;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index af00834..6e4b96c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -73,6 +73,7 @@
kRosAllocBulkFreeLock,
kAllocSpaceLock,
kBumpPointerSpaceBlockLock,
+ kArenaPoolLock,
kDexFileMethodInlinerLock,
kDexFileToMethodInlinerMapLock,
kMarkSweepMarkStackLock,
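
Adding kArenaPoolLock puts the pool's mutex into the lock hierarchy instead of leaving it at the default level. Levels encode acquisition order: a thread holding a lock at some level may only acquire locks at strictly lower levels, and debug builds verify this on every acquisition. Sketch:

    Mutex arena_pool_lock("Arena pool lock", kArenaPoolLock);
    MutexLock mu(Thread::Current(), arena_pool_lock);  // ordering checked in debug builds
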
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 2b0167d..f94ebea 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -19,6 +19,7 @@
#include <sys/mman.h>
#include <zlib.h>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/to_str.h"
#include "class_linker.h"
@@ -27,7 +28,6 @@
#include "gc/space/space.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -169,7 +169,7 @@
return false;
}
- mirror::ArtField* f = CheckFieldID(soa, fid);
+ ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
return false;
}
@@ -248,7 +248,7 @@
bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
- mirror::ArtField* f = CheckFieldID(soa, fid);
+ ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
return false;
}
@@ -565,7 +565,7 @@
if (!is_static && !CheckInstanceFieldID(soa, obj, fid)) {
return false;
}
- mirror::ArtField* field = soa.DecodeField(fid);
+ ArtField* field = soa.DecodeField(fid);
DCHECK(field != nullptr); // Already checked by Check.
if (is_static != field->IsStatic()) {
AbortF("attempt to access %s field %s: %p",
@@ -817,7 +817,7 @@
}
case 'f': { // jfieldID
jfieldID fid = arg.f;
- mirror::ArtField* f = soa.DecodeField(fid);
+ ArtField* f = soa.DecodeField(fid);
*msg += PrettyField(f);
if (!entry) {
StringAppendF(msg, " (%p)", fid);
@@ -986,14 +986,15 @@
return true;
}
- mirror::ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
+ ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (fid == nullptr) {
AbortF("jfieldID was NULL");
return nullptr;
}
- mirror::ArtField* f = soa.DecodeField(fid);
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
+ ArtField* f = soa.DecodeField(fid);
+ // TODO: Better check here.
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
AbortF("invalid jfieldID: %p", fid);
return nullptr;
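
Since a jfieldID now wraps a native pointer, the old IsValidObjectAddress(f) + IsArtField() probe cannot apply to the field itself; the checked decode instead sanity-checks the declaring class, which is still a heap object. Roughly what the decode amounts to after this change (a sketch):

    ArtField* f = reinterpret_cast<ArtField*>(fid);  // jfieldID is just the pointer
    bool plausible = Runtime::Current()->GetHeap()->IsValidObjectAddress(
        f->GetDeclaringClass());
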
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 5198769..87d1c4c 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -17,10 +17,10 @@
#ifndef ART_RUNTIME_CLASS_LINKER_INL_H_
#define ART_RUNTIME_CLASS_LINKER_INL_H_
+#include "art_field.h"
#include "class_linker.h"
#include "gc_root-inl.h"
#include "gc/heap-inl.h"
-#include "mirror/art_field.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/iftable.h"
@@ -88,7 +88,7 @@
return resolved_type;
}
-inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, mirror::ArtField* referrer) {
+inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtField* referrer) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
@@ -133,15 +133,19 @@
return resolved_method;
}
-inline mirror::ArtField* ClassLinker::GetResolvedField(uint32_t field_idx,
- mirror::Class* field_declaring_class) {
- return field_declaring_class->GetDexCache()->GetResolvedField(field_idx);
+inline ArtField* ClassLinker::GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache) {
+ return dex_cache->GetResolvedField(field_idx, image_pointer_size_);
}
-inline mirror::ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer,
+inline ArtField* ClassLinker::GetResolvedField(
+ uint32_t field_idx, mirror::Class* field_declaring_class) {
+ return GetResolvedField(field_idx, field_declaring_class->GetDexCache());
+}
+
+inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer,
bool is_static) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- mirror::ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
+ ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
if (UNLIKELY(resolved_field == NULL)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
@@ -187,17 +191,6 @@
ifcount * mirror::IfTable::kMax));
}
-inline mirror::ObjectArray<mirror::ArtField>* ClassLinker::AllocArtFieldArray(Thread* self,
- size_t length) {
- gc::Heap* const heap = Runtime::Current()->GetHeap();
- // Can't have movable field arrays for mark compact since we need these arrays to always be valid
- // so that we can do Object::VisitReferences in the case where the fields don't fit in the
- // reference offsets word.
- return mirror::ObjectArray<mirror::ArtField>::Alloc(
- self, GetClassRoot(kJavaLangReflectArtFieldArrayClass), length,
- kMoveFieldArrays ? heap->GetCurrentAllocator() : heap->GetCurrentNonMovingAllocator());
-}
-
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!class_roots_.IsNull());
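
Resolution gains an overload keyed directly on the dex cache, and reads go through image_pointer_size_ so a 32-bit compiler can still decode field pointers recorded in a 64-bit image. The fast/slow split a caller sees, sketched:

    ArtField* f = class_linker->GetResolvedField(field_idx, dex_cache);
    if (f == nullptr) {  // Not cached: resolve through the referrer's class loader.
      f = class_linker->ResolveField(field_idx, referrer, /* is_static= */ true);
    }
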
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 12fa546..4e59217 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -25,6 +25,7 @@
#include <utility>
#include <vector>
+#include "art_field-inl.h"
#include "base/casts.h"
#include "base/logging.h"
#include "base/scoped_flock.h"
@@ -46,11 +47,11 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "leb128.h"
+#include "linear_alloc.h"
#include "oat.h"
#include "oat_file.h"
#include "oat_file_assistant.h"
#include "object_lock.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
@@ -77,6 +78,8 @@
namespace art {
+static constexpr bool kSanityCheckObjects = kIsDebugBuild;
+
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -186,7 +189,7 @@
template<int n>
static void ShuffleForward(size_t* current_field_idx,
MemberOffset* field_offset,
- std::deque<mirror::ArtField*>* grouped_and_sorted_fields,
+ std::deque<ArtField*>* grouped_and_sorted_fields,
FieldGaps* gaps)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(current_field_idx != nullptr);
@@ -196,7 +199,7 @@
DCHECK(IsPowerOfTwo(n));
while (!grouped_and_sorted_fields->empty()) {
- mirror::ArtField* field = grouped_and_sorted_fields->front();
+ ArtField* field = grouped_and_sorted_fields->front();
Primitive::Type type = field->GetTypeAsPrimitiveType();
if (Primitive::ComponentSize(type) < n) {
break;
@@ -357,6 +360,13 @@
mirror::IntArray::SetArrayClass(int_array_class.Get());
SetClassRoot(kIntArrayClass, int_array_class.Get());
+ // Create long array type for AllocDexCache (done in AppendToBootClassPath).
+ Handle<mirror::Class> long_array_class(hs.NewHandle(
+ AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize())));
+ long_array_class->SetComponentType(GetClassRoot(kPrimitiveLong));
+ mirror::LongArray::SetArrayClass(long_array_class.Get());
+ SetClassRoot(kLongArrayClass, long_array_class.Get());
+
// now that these are registered, we can use AllocClass() and AllocObjectArray
// Set up DexCache. This cannot be done later since AppendToBootClassPath calls AllocDexCache.
@@ -366,15 +376,8 @@
java_lang_DexCache->SetObjectSize(mirror::DexCache::InstanceSize());
mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusResolved, self);
- // Constructor, Field, Method, and AbstractMethod are necessary so
+ // Constructor, Method, and AbstractMethod are necessary so
// that FindClass can link members.
- Handle<mirror::Class> java_lang_reflect_ArtField(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::ArtField::ClassSize())));
- CHECK(java_lang_reflect_ArtField.Get() != nullptr);
- java_lang_reflect_ArtField->SetObjectSize(mirror::ArtField::InstanceSize());
- SetClassRoot(kJavaLangReflectArtField, java_lang_reflect_ArtField.Get());
- mirror::Class::SetStatus(java_lang_reflect_ArtField, mirror::Class::kStatusResolved, self);
- mirror::ArtField::SetClass(java_lang_reflect_ArtField.Get());
Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize())));
@@ -398,12 +401,6 @@
object_array_art_method->SetComponentType(java_lang_reflect_ArtMethod.Get());
SetClassRoot(kJavaLangReflectArtMethodArrayClass, object_array_art_method.Get());
- Handle<mirror::Class> object_array_art_field(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(),
- mirror::ObjectArray<mirror::ArtField>::ClassSize())));
- object_array_art_field->SetComponentType(java_lang_reflect_ArtField.Get());
- SetClassRoot(kJavaLangReflectArtFieldArrayClass, object_array_art_field.Get());
-
// Setup boot_class_path_ and register class_path now that we can use AllocObjectArray to create
// DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
// these roots.
@@ -471,8 +468,8 @@
mirror::Class* found_int_array_class = FindSystemClass(self, "[I");
CHECK_EQ(int_array_class.Get(), found_int_array_class);
- SetClassRoot(kLongArrayClass, FindSystemClass(self, "[J"));
- mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
+ mirror::Class* found_long_array_class = FindSystemClass(self, "[J");
+ CHECK_EQ(long_array_class.Get(), found_long_array_class);
SetClassRoot(kFloatArrayClass, FindSystemClass(self, "[F"));
mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
@@ -513,10 +510,6 @@
mirror::Class* Art_method_class = FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;");
CHECK_EQ(java_lang_reflect_ArtMethod.Get(), Art_method_class);
- mirror::Class::SetStatus(java_lang_reflect_ArtField, mirror::Class::kStatusNotReady, self);
- mirror::Class* Art_field_class = FindSystemClass(self, "Ljava/lang/reflect/ArtField;");
- CHECK_EQ(java_lang_reflect_ArtField.Get(), Art_field_class);
-
mirror::Class* String_array_class =
FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass));
CHECK_EQ(object_array_string.Get(), String_array_class);
@@ -525,10 +518,6 @@
FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass));
CHECK_EQ(object_array_art_method.Get(), Art_method_array_class);
- mirror::Class* Art_field_array_class =
- FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtFieldArrayClass));
- CHECK_EQ(object_array_art_field.Get(), Art_field_array_class);
-
// End of special init trickery, subsequent classes may be loaded via FindSystemClass.
// Create java.lang.reflect.Proxy root.
@@ -624,23 +613,23 @@
mirror::Class* java_lang_ref_FinalizerReference =
FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
- mirror::ArtField* pendingNext = java_lang_ref_Reference->GetInstanceField(0);
+ ArtField* pendingNext = java_lang_ref_Reference->GetInstanceField(0);
CHECK_STREQ(pendingNext->GetName(), "pendingNext");
CHECK_STREQ(pendingNext->GetTypeDescriptor(), "Ljava/lang/ref/Reference;");
- mirror::ArtField* queue = java_lang_ref_Reference->GetInstanceField(1);
+ ArtField* queue = java_lang_ref_Reference->GetInstanceField(1);
CHECK_STREQ(queue->GetName(), "queue");
CHECK_STREQ(queue->GetTypeDescriptor(), "Ljava/lang/ref/ReferenceQueue;");
- mirror::ArtField* queueNext = java_lang_ref_Reference->GetInstanceField(2);
+ ArtField* queueNext = java_lang_ref_Reference->GetInstanceField(2);
CHECK_STREQ(queueNext->GetName(), "queueNext");
CHECK_STREQ(queueNext->GetTypeDescriptor(), "Ljava/lang/ref/Reference;");
- mirror::ArtField* referent = java_lang_ref_Reference->GetInstanceField(3);
+ ArtField* referent = java_lang_ref_Reference->GetInstanceField(3);
CHECK_STREQ(referent->GetName(), "referent");
CHECK_STREQ(referent->GetTypeDescriptor(), "Ljava/lang/Object;");
- mirror::ArtField* zombie = java_lang_ref_FinalizerReference->GetInstanceField(2);
+ ArtField* zombie = java_lang_ref_FinalizerReference->GetInstanceField(2);
CHECK_STREQ(zombie->GetName(), "zombie");
CHECK_STREQ(zombie->GetTypeDescriptor(), "Ljava/lang/Object;");
@@ -802,6 +791,23 @@
}
}
+void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ CHECK(obj->GetClass() != nullptr) << "Null class " << obj;
+ CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
+ if (obj->IsClass()) {
+ auto klass = obj->AsClass();
+ ArtField* fields[2] = { klass->GetSFields(), klass->GetIFields() };
+ size_t num_fields[2] = { klass->NumStaticFields(), klass->NumInstanceFields() };
+ for (size_t i = 0; i < 2; ++i) {
+ for (size_t j = 0; j < num_fields[i]; ++j) {
+ CHECK_EQ(fields[i][j].GetDeclaringClass(), klass);
+ }
+ }
+ }
+}
+
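
For readers outside ART: the callback above encodes the central invariant of this change. Fields are no longer mirror objects on the GC heap; they live in plain C++ arrays owned by their declaring class. A minimal standalone sketch of the same check, with FakeField/FakeClass as hypothetical stand-ins for the real types:

    #include <cstddef>

    struct FakeField { const void* declaring_class; };
    struct FakeClass {
      FakeField* sfields; size_t num_sfields;   // static fields, owned by the class
      FakeField* ifields; size_t num_ifields;   // instance fields, owned by the class
    };

    // Every field slot must point back at the class whose array contains it.
    bool FieldsConsistent(const FakeClass* klass) {
      const FakeField* arrays[2] = { klass->sfields, klass->ifields };
      const size_t counts[2] = { klass->num_sfields, klass->num_ifields };
      for (size_t i = 0; i < 2; ++i) {
        for (size_t j = 0; j < counts[i]; ++j) {
          if (arrays[i][j].declaring_class != klass) {
            return false;  // stale declaring-class pointer, e.g. after a missed fixup
          }
        }
      }
      return true;
    }
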
void ClassLinker::InitFromImage() {
VLOG(startup) << "ClassLinker::InitFromImage entering";
CHECK(!init_done_);
@@ -882,6 +888,18 @@
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
heap->VisitObjects(InitFromImageInterpretOnlyCallback, this);
}
+ if (kSanityCheckObjects) {
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
+ auto* dex_cache = dex_caches->Get(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedFields(); ++j) {
+ auto* field = dex_cache->GetResolvedField(j, image_pointer_size_);
+ if (field != nullptr) {
+ CHECK(field->GetDeclaringClass()->GetClass() != nullptr);
+ }
+ }
+ }
+ heap->VisitObjects(SanityCheckObjectsCallback, nullptr);
+ }
// reinit class_roots_
mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
@@ -894,7 +912,6 @@
mirror::Field::SetClass(GetClassRoot(kJavaLangReflectField));
mirror::Field::SetArrayClass(GetClassRoot(kJavaLangReflectFieldArrayClass));
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
- mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
mirror::CharArray::SetArrayClass(GetClassRoot(kCharArrayClass));
@@ -913,18 +930,22 @@
void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
+ visitor, RootInfo(kRootStickyClass));
if ((flags & kVisitRootFlagAllRoots) != 0) {
- BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
- visitor, RootInfo(kRootStickyClass));
for (GcRoot<mirror::Class>& root : class_table_) {
buffered_visitor.VisitRoot(root);
+ root.Read()->VisitFieldRoots(buffered_visitor);
}
+ // PreZygote classes can't move so we won't need to update fields' declaring classes.
for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
buffered_visitor.VisitRoot(root);
+ root.Read()->VisitFieldRoots(buffered_visitor);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
+ old_ref->VisitFieldRoots(buffered_visitor);
root.VisitRoot(visitor, RootInfo(kRootStickyClass));
mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
if (UNLIKELY(new_ref != old_ref)) {
@@ -937,6 +958,7 @@
}
}
}
+ buffered_visitor.Flush(); // Flush before clearing new_class_roots_.
if ((flags & kVisitRootFlagClearRootLog) != 0) {
new_class_roots_.clear();
}
@@ -1077,7 +1099,6 @@
mirror::Class::ResetClass();
mirror::String::ResetClass();
mirror::Reference::ResetClass();
- mirror::ArtField::ResetClass();
mirror::ArtMethod::ResetClass();
mirror::Field::ResetClass();
mirror::Field::ResetArrayClass();
@@ -1095,7 +1116,7 @@
}
mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
- gc::Heap* heap = Runtime::Current()->GetHeap();
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
StackHandleScope<16> hs(self);
Handle<mirror::Class> dex_cache_class(hs.NewHandle(GetClassRoot(kJavaLangDexCache)));
Handle<mirror::DexCache> dex_cache(
@@ -1125,8 +1146,12 @@
if (methods.Get() == nullptr) {
return nullptr;
}
- Handle<mirror::ObjectArray<mirror::ArtField>>
- fields(hs.NewHandle(AllocArtFieldArray(self, dex_file.NumFieldIds())));
+ Handle<mirror::Array> fields;
+ if (image_pointer_size_ == 8) {
+ fields = hs.NewHandle<mirror::Array>(mirror::LongArray::Alloc(self, dex_file.NumFieldIds()));
+ } else {
+ fields = hs.NewHandle<mirror::Array>(mirror::IntArray::Alloc(self, dex_file.NumFieldIds()));
+ }
if (fields.Get() == nullptr) {
return nullptr;
}
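
Since ArtField is now a native object, the dex cache can no longer hold resolved fields in an ObjectArray; it stores them as raw pointer-sized integers, in a LongArray when the image pointer size is 8 and an IntArray otherwise. A sketch of the round-trip this storage scheme implies (names are illustrative, not the DexCache API):

    #include <cstdint>

    // 64-bit image pointer size: an ArtField* travels through an int64_t slot.
    inline int64_t EncodeField64(void* field) {
      return static_cast<int64_t>(reinterpret_cast<uintptr_t>(field));
    }
    inline void* DecodeField64(int64_t raw) {
      return reinterpret_cast<void*>(static_cast<uintptr_t>(raw));
    }

    // 32-bit image pointer size: the same round-trip through an int32_t slot.
    inline int32_t EncodeField32(void* field) {
      return static_cast<int32_t>(reinterpret_cast<uintptr_t>(field));
    }
    inline void* DecodeField32(int32_t raw) {
      return reinterpret_cast<void*>(static_cast<uintptr_t>(static_cast<uint32_t>(raw)));
    }
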
@@ -1154,11 +1179,6 @@
return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
}
-mirror::ArtField* ClassLinker::AllocArtField(Thread* self) {
- return down_cast<mirror::ArtField*>(
- GetClassRoot(kJavaLangReflectArtField)->AllocNonMovableObject(self));
-}
-
mirror::ArtMethod* ClassLinker::AllocArtMethod(Thread* self) {
return down_cast<mirror::ArtMethod*>(
GetClassRoot(kJavaLangReflectArtMethod)->AllocNonMovableObject(self));
@@ -1239,97 +1259,124 @@
return ClassPathEntry(nullptr, nullptr);
}
-mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self, const char* descriptor,
- size_t hash,
- Handle<mirror::ClassLoader> class_loader) {
- // Can we special case for a well understood PathClassLoader with the BootClassLoader as parent?
- if (class_loader->GetClass() !=
- soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader) ||
- class_loader->GetParent()->GetClass() !=
- soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)) {
- return nullptr;
- }
- ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
- // Check if this would be found in the parent boot class loader.
- if (pair.second != nullptr) {
- mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
- if (klass != nullptr) {
- // May return null if resolution on another thread fails.
- klass = EnsureResolved(self, descriptor, klass);
+static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+ mirror::ClassLoader* class_loader)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return class_loader == nullptr ||
+ class_loader->GetClass() ==
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader);
+}
+
+bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+ Thread* self, const char* descriptor,
+ size_t hash,
+ Handle<mirror::ClassLoader> class_loader,
+ mirror::Class** result) {
+ // Termination case: boot class-loader.
+ if (IsBootClassLoader(soa, class_loader.Get())) {
+ // The boot class loader, search the boot class path.
+ ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
+ if (pair.second != nullptr) {
+ mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
+ if (klass != nullptr) {
+ *result = EnsureResolved(self, descriptor, klass);
+ } else {
+ *result = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(),
+ *pair.first, *pair.second);
+ }
+ if (*result == nullptr) {
+ CHECK(self->IsExceptionPending()) << descriptor;
+ self->ClearException();
+ }
} else {
- // May OOME.
- klass = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
- *pair.second);
+ *result = nullptr;
}
- if (klass == nullptr) {
- CHECK(self->IsExceptionPending()) << descriptor;
- self->ClearException();
- }
- return klass;
- } else {
- // Handle as if this is the child PathClassLoader.
- // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
- StackHandleScope<3> hs(self);
- // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
- // We need to get the DexPathList and loop through it.
- Handle<mirror::ArtField> cookie_field =
- hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie));
- Handle<mirror::ArtField> dex_file_field =
- hs.NewHandle(
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile));
- mirror::Object* dex_path_list =
- soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
- GetObject(class_loader.Get());
- if (dex_path_list != nullptr && dex_file_field.Get() != nullptr &&
- cookie_field.Get() != nullptr) {
- // DexPathList has an array dexElements of Elements[] which each contain a dex file.
- mirror::Object* dex_elements_obj =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
- GetObject(dex_path_list);
- // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
- // at the mCookie which is a DexFile vector.
- if (dex_elements_obj != nullptr) {
- Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
- hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
- for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
- mirror::Object* element = dex_elements->GetWithoutChecks(i);
- if (element == nullptr) {
- // Should never happen, fall back to java code to throw a NPE.
+ return true;
+ }
+
+ // Unsupported class-loader?
+ if (class_loader->GetClass() !=
+ soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader)) {
+ *result = nullptr;
+ return false;
+ }
+
+ // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
+ StackHandleScope<4> hs(self);
+ Handle<mirror::ClassLoader> h_parent(hs.NewHandle(class_loader->GetParent()));
+ bool recursive_result = FindClassInPathClassLoader(soa, self, descriptor, hash, h_parent, result);
+
+ if (!recursive_result) {
+ // Something wrong up the chain.
+ return false;
+ }
+
+ if (*result != nullptr) {
+ // Found the class up the chain.
+ return true;
+ }
+
+ // Handle this step.
+ // Handle as if this is the child PathClassLoader.
+ // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
+ // We need to get the DexPathList and loop through it.
+ ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* const dex_file_field =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ mirror::Object* dex_path_list =
+ soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
+ GetObject(class_loader.Get());
+ if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
+ // DexPathList has an array dexElements of Elements[] which each contain a dex file.
+ mirror::Object* dex_elements_obj =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+ GetObject(dex_path_list);
+ // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
+ // at the mCookie which is a DexFile vector.
+ if (dex_elements_obj != nullptr) {
+ Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
+ hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
+ for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
+ mirror::Object* element = dex_elements->GetWithoutChecks(i);
+ if (element == nullptr) {
+ // Should never happen, fall back to Java code to throw an NPE.
+ break;
+ }
+ mirror::Object* dex_file = dex_file_field->GetObject(element);
+ if (dex_file != nullptr) {
+ mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
+ if (long_array == nullptr) {
+ // This should never happen so log a warning.
+ LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
break;
}
- mirror::Object* dex_file = dex_file_field->GetObject(element);
- if (dex_file != nullptr) {
- mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
- if (long_array == nullptr) {
- // This should never happen so log a warning.
- LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
- break;
- }
- int32_t long_array_size = long_array->GetLength();
- for (int32_t j = 0; j < long_array_size; ++j) {
- const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
- long_array->GetWithoutChecks(j)));
- const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
- if (dex_class_def != nullptr) {
- RegisterDexFile(*cp_dex_file);
- mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
- *cp_dex_file, *dex_class_def);
- if (klass == nullptr) {
- CHECK(self->IsExceptionPending()) << descriptor;
- self->ClearException();
- return nullptr;
- }
- return klass;
+ int32_t long_array_size = long_array->GetLength();
+ for (int32_t j = 0; j < long_array_size; ++j) {
+ const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+ long_array->GetWithoutChecks(j)));
+ const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
+ if (dex_class_def != nullptr) {
+ RegisterDexFile(*cp_dex_file);
+ mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
+ *cp_dex_file, *dex_class_def);
+ if (klass == nullptr) {
+ CHECK(self->IsExceptionPending()) << descriptor;
+ self->ClearException();
+ // TODO: Is it really right to return here, and not check the other dex files?
+ return true;
}
+ *result = klass;
+ return true;
}
}
}
}
}
self->AssertNoPendingException();
- return nullptr;
}
+
+ // Result is still null from the parent call, no need to set it again...
+ return true;
}
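
The rewritten function replaces the old two-case special-casing with a parent-first recursion that mirrors ClassLoader delegation. A condensed control-flow sketch, with Loader/Class and the lookup helpers as assumed stand-ins rather than ART APIs:

    struct Class;
    struct Loader { Loader* parent = nullptr; bool is_boot = false; bool is_path = true; };

    // Stand-ins for the real checks and lookups (assumptions, not ART APIs):
    static bool IsBoot(const Loader* l) { return l == nullptr || l->is_boot; }
    static bool IsPathClassLoader(const Loader* l) { return l->is_path; }
    static Class* LookupBootClassPath(const char*) { return nullptr; }
    static Class* LookupOwnDexFiles(const Loader*, const char*) { return nullptr; }

    static bool FindInChain(Loader* loader, const char* descriptor, Class** result) {
      if (IsBoot(loader)) {              // termination case: boot class loader
        *result = LookupBootClassPath(descriptor);  // may be nullptr: not found
        return true;
      }
      if (!IsPathClassLoader(loader)) {  // unsupported loader: chain not understood
        *result = nullptr;
        return false;
      }
      if (!FindInChain(loader->parent, descriptor, result)) {
        return false;                    // something unsupported up the chain
      }
      if (*result != nullptr) {
        return true;                     // a parent already found the class
      }
      *result = LookupOwnDexFiles(loader, descriptor);  // finally, this loader
      return true;
    }

Note the three-way outcome: false means the chain could not be walked, while true distinguishes "found" from "understood but not found" via *result.
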
mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
@@ -1367,10 +1414,18 @@
}
} else {
ScopedObjectAccessUnchecked soa(self);
- mirror::Class* cp_klass = FindClassInPathClassLoader(soa, self, descriptor, hash,
- class_loader);
- if (cp_klass != nullptr) {
- return cp_klass;
+ mirror::Class* cp_klass;
+ if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
+ // The chain was understood. So the value in cp_klass is either the class we were looking
+ // for, or not found.
+ if (cp_klass != nullptr) {
+ return cp_klass;
+ }
+ // TODO: We handle the boot classpath loader in FindClassInPathClassLoader. Try to unify this
+ // and the branch above. TODO: throw the right exception here.
+
+ // We'll let the Java-side rediscover all this and throw the exception with the right stack
+ // trace.
}
if (Runtime::Current()->IsAotCompiler()) {
@@ -1433,8 +1488,6 @@
klass.Assign(GetClassRoot(kJavaLangRefReference));
} else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
klass.Assign(GetClassRoot(kJavaLangDexCache));
- } else if (strcmp(descriptor, "Ljava/lang/reflect/ArtField;") == 0) {
- klass.Assign(GetClassRoot(kJavaLangReflectArtField));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtMethod;") == 0) {
klass.Assign(GetClassRoot(kJavaLangReflectArtMethod));
}
@@ -1452,16 +1505,10 @@
return nullptr;
}
klass->SetDexCache(FindDexCache(dex_file));
- LoadClass(self, dex_file, dex_class_def, klass, class_loader.Get());
+
+ SetupClass(dex_file, dex_class_def, klass, class_loader.Get());
+
ObjectLock<mirror::Class> lock(self, klass);
- if (self->IsExceptionPending()) {
- // An exception occured during load, set status to erroneous while holding klass' lock in case
- // notification is necessary.
- if (!klass->IsErroneous()) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
- }
- return nullptr;
- }
klass->SetClinitThreadId(self->GetTid());
// Add the newly loaded class to the loaded classes table.
@@ -1472,6 +1519,20 @@
return EnsureResolved(self, descriptor, existing);
}
+ // Load the fields and other things after we are inserted in the table. This is so that we don't
+ // end up allocating unfree-able linear alloc resources if we then lose the insertion race. The
+ // other reason is that the field roots are only visited from the class table, so the class must
+ // be inserted before we allocate / fill in these fields.
+ LoadClass(self, dex_file, dex_class_def, klass);
+ if (self->IsExceptionPending()) {
+ // An exception occurred during load, set status to erroneous while holding klass' lock in case
+ // notification is necessary.
+ if (!klass->IsErroneous()) {
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ }
+ return nullptr;
+ }
+
// Finish loading (if necessary) by finding parents
CHECK(!klass->IsLoaded());
if (!LoadSuperAndInterfaces(klass, dex_file)) {
@@ -1845,12 +1906,8 @@
}
}
-
-
-void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
- const DexFile::ClassDef& dex_class_def,
- Handle<mirror::Class> klass,
- mirror::ClassLoader* class_loader) {
+void ClassLinker::SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
+ Handle<mirror::Class> klass, mirror::ClassLoader* class_loader) {
CHECK(klass.Get() != nullptr);
CHECK(klass->GetDexCache() != nullptr);
CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus());
@@ -1868,13 +1925,15 @@
klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
klass->SetDexTypeIndex(dex_class_def.class_idx_);
CHECK(klass->GetDexCacheStrings() != nullptr);
+}
+void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
+ const DexFile::ClassDef& dex_class_def,
+ Handle<mirror::Class> klass) {
const uint8_t* class_data = dex_file.GetClassData(dex_class_def);
if (class_data == nullptr) {
return; // no fields or methods - for example a marker interface
}
-
-
bool has_oat_class = false;
if (Runtime::Current()->IsStarted() && !Runtime::Current()->IsAotCompiler()) {
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
@@ -1888,52 +1947,42 @@
}
}
+ArtField* ClassLinker::AllocArtFieldArray(Thread* self, size_t length) {
+ auto* const la = Runtime::Current()->GetLinearAlloc();
+ auto* ptr = reinterpret_cast<ArtField*>(la->AllocArray<ArtField>(self, length));
+ CHECK(ptr != nullptr);
+ std::uninitialized_fill_n(ptr, length, ArtField());
+ return ptr;
+}
+
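
Field storage now comes from the runtime's LinearAlloc rather than the GC heap, so allocation no longer fails with an OOME that needs per-field handling; the raw storage is default-constructed in place before LoadField fills it. A minimal analogue using malloc in place of LinearAlloc (which is runtime-owned):

    #include <cstdlib>
    #include <memory>

    template <typename T>
    T* AllocRawArray(size_t length) {
      void* storage = std::malloc(length * sizeof(T));
      if (storage == nullptr) {
        return nullptr;  // the real code CHECKs instead of returning
      }
      T* ptr = static_cast<T*>(storage);
      std::uninitialized_fill_n(ptr, length, T());  // placement-construct each slot
      return ptr;
    }
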
void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
const OatFile::OatClass* oat_class) {
- // Load fields.
+ // Load static fields.
ClassDataItemIterator it(dex_file, class_data);
- if (it.NumStaticFields() != 0) {
- mirror::ObjectArray<mirror::ArtField>* statics = AllocArtFieldArray(self, it.NumStaticFields());
- if (UNLIKELY(statics == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
- }
- klass->SetSFields(statics);
- }
- if (it.NumInstanceFields() != 0) {
- mirror::ObjectArray<mirror::ArtField>* fields =
- AllocArtFieldArray(self, it.NumInstanceFields());
- if (UNLIKELY(fields == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
- }
- klass->SetIFields(fields);
- }
+ const size_t num_sfields = it.NumStaticFields();
+ ArtField* sfields = num_sfields != 0 ? AllocArtFieldArray(self, num_sfields) : nullptr;
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
- self->AllowThreadSuspension();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> sfield(hs.NewHandle(AllocArtField(self)));
- if (UNLIKELY(sfield.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
- }
- klass->SetStaticField(i, sfield.Get());
- LoadField(dex_file, it, klass, sfield);
+ CHECK_LT(i, num_sfields);
+ LoadField(it, klass, &sfields[i]);
}
+ klass->SetSFields(sfields);
+ klass->SetNumStaticFields(num_sfields);
+ DCHECK_EQ(klass->NumStaticFields(), num_sfields);
+ // Load instance fields.
+ const size_t num_ifields = it.NumInstanceFields();
+ ArtField* ifields = num_ifields != 0 ? AllocArtFieldArray(self, num_ifields) : nullptr;
for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
- self->AllowThreadSuspension();
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> ifield(hs.NewHandle(AllocArtField(self)));
- if (UNLIKELY(ifield.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return;
- }
- klass->SetInstanceField(i, ifield.Get());
- LoadField(dex_file, it, klass, ifield);
+ CHECK_LT(i, num_ifields);
+ LoadField(it, klass, &ifields[i]);
}
-
+ klass->SetIFields(ifields);
+ klass->SetNumInstanceFields(num_ifields);
+ DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
+ // Note: We cannot have thread suspension until the field arrays are set up or else
+ // Class::VisitFieldRoots may miss some fields.
+ self->AllowThreadSuspension();
// Load methods.
if (it.NumDirectMethods() != 0) {
// TODO: append direct methods to class object
@@ -1995,10 +2044,9 @@
DCHECK(!it.HasNext());
}
-void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it,
- Handle<mirror::Class> klass,
- Handle<mirror::ArtField> dst) {
- uint32_t field_idx = it.GetMemberIndex();
+void ClassLinker::LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass,
+ ArtField* dst) {
+ const uint32_t field_idx = it.GetMemberIndex();
dst->SetDexFieldIndex(field_idx);
dst->SetDeclaringClass(klass.Get());
dst->SetAccessFlags(it.GetFieldAccessFlags());
@@ -2282,13 +2330,12 @@
} else if (strcmp(descriptor,
GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass)) == 0) {
new_class.Assign(GetClassRoot(kJavaLangReflectArtMethodArrayClass));
- } else if (strcmp(descriptor,
- GetClassRootDescriptor(kJavaLangReflectArtFieldArrayClass)) == 0) {
- new_class.Assign(GetClassRoot(kJavaLangReflectArtFieldArrayClass));
} else if (strcmp(descriptor, "[C") == 0) {
new_class.Assign(GetClassRoot(kCharArrayClass));
} else if (strcmp(descriptor, "[I") == 0) {
new_class.Assign(GetClassRoot(kIntArrayClass));
+ } else if (strcmp(descriptor, "[J") == 0) {
+ new_class.Assign(GetClassRoot(kLongArrayClass));
}
}
if (new_class.Get() == nullptr) {
@@ -2919,32 +2966,20 @@
mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
// Instance fields are inherited, but we add a couple of static fields...
- {
- mirror::ObjectArray<mirror::ArtField>* sfields = AllocArtFieldArray(self, 2);
- if (UNLIKELY(sfields == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- klass->SetSFields(sfields);
- }
+ const size_t num_fields = 2;
+ ArtField* sfields = AllocArtFieldArray(self, num_fields);
+ klass->SetSFields(sfields);
+ klass->SetNumStaticFields(num_fields);
+
// 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
// our proxy, so Class.getInterfaces doesn't return the flattened set.
- Handle<mirror::ArtField> interfaces_sfield(hs.NewHandle(AllocArtField(self)));
- if (UNLIKELY(interfaces_sfield.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- klass->SetStaticField(0, interfaces_sfield.Get());
+ ArtField* interfaces_sfield = &sfields[0];
interfaces_sfield->SetDexFieldIndex(0);
interfaces_sfield->SetDeclaringClass(klass.Get());
interfaces_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
+
// 2. Create a static field 'throws' that holds exceptions thrown by our methods.
- Handle<mirror::ArtField> throws_sfield(hs.NewHandle(AllocArtField(self)));
- if (UNLIKELY(throws_sfield.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return nullptr;
- }
- klass->SetStaticField(1, throws_sfield.Get());
+ ArtField* throws_sfield = &sfields[1];
throws_sfield->SetDexFieldIndex(1);
throws_sfield->SetDeclaringClass(klass.Get());
throws_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
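
With fields in a native array, the proxy class setup can index slots directly instead of allocating two mirror::ArtField objects under handles. A tiny illustration, assuming 0x19 = kAccStatic | kAccPublic | kAccFinal per the dex access-flag values:

    struct MiniField { unsigned dex_field_index; unsigned access_flags; };

    // Slots 0 and 1 of a single two-element allocation; no per-field OOME
    // checks or StackHandleScope bookkeeping are needed any more.
    void InitProxyStatics(MiniField* sfields /* length 2 */) {
      sfields[0] = {0u, 0x19u};  // 'interfaces'
      sfields[1] = {1u, 0x19u};  // 'throws'
    }
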
@@ -3332,11 +3367,11 @@
// Eagerly fill in static fields so that we don't have to do as many expensive
// Class::FindStaticField calls in ResolveField.
for (size_t i = 0; i < num_static_fields; ++i) {
- mirror::ArtField* field = klass->GetStaticField(i);
+ ArtField* field = klass->GetStaticField(i);
const uint32_t field_idx = field->GetDexFieldIndex();
- mirror::ArtField* resolved_field = dex_cache->GetResolvedField(field_idx);
+ ArtField* resolved_field = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
if (resolved_field == nullptr) {
- dex_cache->SetResolvedField(field_idx, field);
+ dex_cache->SetResolvedField(field_idx, field, image_pointer_size_);
} else {
DCHECK_EQ(field, resolved_field);
}
@@ -3350,9 +3385,8 @@
DCHECK(field_it.HasNextStaticField());
CHECK(can_init_statics);
for ( ; value_it.HasNext(); value_it.Next(), field_it.Next()) {
- StackHandleScope<1> hs2(self);
- Handle<mirror::ArtField> field(hs2.NewHandle(
- ResolveField(dex_file, field_it.GetMemberIndex(), dex_cache, class_loader, true)));
+ ArtField* field = ResolveField(
+ dex_file, field_it.GetMemberIndex(), dex_cache, class_loader, true);
if (Runtime::Current()->IsActiveTransaction()) {
value_it.ReadValueToField<true>(field);
} else {
@@ -3586,21 +3620,17 @@
}
void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) {
- mirror::ObjectArray<mirror::ArtField>* fields = new_class->GetIFields();
- if (fields != nullptr) {
- for (int index = 0; index < fields->GetLength(); index ++) {
- if (fields->Get(index)->GetDeclaringClass() == temp_class) {
- fields->Get(index)->SetDeclaringClass(new_class);
- }
+ ArtField* fields = new_class->GetIFields();
+ for (size_t i = 0, count = new_class->NumInstanceFields(); i < count; i++) {
+ if (fields[i].GetDeclaringClass() == temp_class) {
+ fields[i].SetDeclaringClass(new_class);
}
}
fields = new_class->GetSFields();
- if (fields != nullptr) {
- for (int index = 0; index < fields->GetLength(); index ++) {
- if (fields->Get(index)->GetDeclaringClass() == temp_class) {
- fields->Get(index)->SetDeclaringClass(new_class);
- }
+ for (size_t i = 0, count = new_class->NumStaticFields(); i < count; i++) {
+ if (fields[i].GetDeclaringClass() == temp_class) {
+ fields[i].SetDeclaringClass(new_class);
}
}
@@ -4567,7 +4597,7 @@
explicit LinkFieldsComparator() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
}
// No thread safety analysis as will be called from STL. Checked lock held in constructor.
- bool operator()(mirror::ArtField* field1, mirror::ArtField* field2)
+ bool operator()(ArtField* field1, ArtField* field2)
NO_THREAD_SAFETY_ANALYSIS {
// First come reference fields, then 64-bit, then 32-bit, and then 16-bit, then finally 8-bit.
Primitive::Type type1 = field1->GetTypeAsPrimitiveType();
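
The ordering rule stated in the comment above (references first, then primitives by decreasing size) can be sketched in isolation; Kind and FieldRank below are illustrative stand-ins, not ART types:

    enum class Kind { kReference, k64, k32, k16, k8 };

    // Lower rank sorts earlier: references, then 64-, 32-, 16-, 8-bit primitives.
    int FieldRank(Kind k) {
      switch (k) {
        case Kind::kReference: return 0;
        case Kind::k64: return 1;
        case Kind::k32: return 2;
        case Kind::k16: return 3;
        case Kind::k8:  return 4;
      }
      return 5;
    }

    bool FieldLess(Kind a, Kind b) { return FieldRank(a) < FieldRank(b); }
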
@@ -4600,11 +4630,8 @@
bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static,
size_t* class_size) {
self->AllowThreadSuspension();
- size_t num_fields =
- is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
-
- mirror::ObjectArray<mirror::ArtField>* fields =
- is_static ? klass->GetSFields() : klass->GetIFields();
+ const size_t num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
+ ArtField* const fields = is_static ? klass->GetSFields() : klass->GetIFields();
// Initialize field_offset
MemberOffset field_offset(0);
@@ -4623,13 +4650,11 @@
// we want a relatively stable order so that adding new fields
// minimizes disruption of C++ version such as Class and Method.
- std::deque<mirror::ArtField*> grouped_and_sorted_fields;
+ std::deque<ArtField*> grouped_and_sorted_fields;
const char* old_no_suspend_cause = self->StartAssertNoThreadSuspension(
"Naked ArtField references in deque");
for (size_t i = 0; i < num_fields; i++) {
- mirror::ArtField* f = fields->Get(i);
- CHECK(f != nullptr) << PrettyClass(klass.Get());
- grouped_and_sorted_fields.push_back(f);
+ grouped_and_sorted_fields.push_back(&fields[i]);
}
std::sort(grouped_and_sorted_fields.begin(), grouped_and_sorted_fields.end(),
LinkFieldsComparator());
@@ -4640,7 +4665,7 @@
FieldGaps gaps;
for (; current_field < num_fields; current_field++) {
- mirror::ArtField* field = grouped_and_sorted_fields.front();
+ ArtField* field = grouped_and_sorted_fields.front();
Primitive::Type type = field->GetTypeAsPrimitiveType();
bool isPrimitive = type != Primitive::kPrimNot;
if (isPrimitive) {
@@ -4674,7 +4699,7 @@
// We know there are no non-reference fields in the Reference classes, and we know
// that 'referent' is alphabetically last, so this is easy...
CHECK_EQ(num_reference_fields, num_fields) << PrettyClass(klass.Get());
- CHECK_STREQ(fields->Get(num_fields - 1)->GetName(), "referent") << PrettyClass(klass.Get());
+ CHECK_STREQ(fields[num_fields - 1].GetName(), "referent") << PrettyClass(klass.Get());
--num_reference_fields;
}
@@ -4713,16 +4738,12 @@
sizeof(mirror::HeapReference<mirror::Object>));
MemberOffset current_ref_offset = start_ref_offset;
for (size_t i = 0; i < num_fields; i++) {
- mirror::ArtField* field = fields->Get(i);
- if ((false)) { // enable to debug field layout
- LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
- << " class=" << PrettyClass(klass.Get())
- << " field=" << PrettyField(field)
- << " offset="
- << field->GetField32(mirror::ArtField::OffsetOffset());
- }
+ ArtField* field = &fields[i];
+ VLOG(class_linker) << "LinkFields: " << (is_static ? "static" : "instance")
+ << " class=" << PrettyClass(klass.Get()) << " field=" << PrettyField(field) << " offset="
+ << field->GetOffset();
if (i != 0) {
- mirror::ArtField* prev_field = fields->Get(i - 1u);
+ ArtField* const prev_field = &fields[i - 1];
// NOTE: The field names can be the same. This is not possible in the Java language
// but it's valid Java/dex bytecode and for example proguard can generate such bytecode.
CHECK_LE(strcmp(prev_field->GetName(), field->GetName()), 0);
@@ -4994,12 +5015,11 @@
}
}
-mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader,
- bool is_static) {
+ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader, bool is_static) {
DCHECK(dex_cache.Get() != nullptr);
- mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
+ ArtField* resolved = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
if (resolved != nullptr) {
return resolved;
}
@@ -5032,16 +5052,15 @@
return nullptr;
}
}
- dex_cache->SetResolvedField(field_idx, resolved);
+ dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
return resolved;
}
-mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
- uint32_t field_idx,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader) {
+ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader) {
DCHECK(dex_cache.Get() != nullptr);
- mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
+ ArtField* resolved = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
if (resolved != nullptr) {
return resolved;
}
@@ -5060,7 +5079,7 @@
dex_file.GetTypeId(field_id.type_idx_).descriptor_idx_));
resolved = mirror::Class::FindField(self, klass, name, type);
if (resolved != nullptr) {
- dex_cache->SetResolvedField(field_idx, resolved);
+ dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
} else {
ThrowNoSuchFieldError("", klass.Get(), type, name);
}
@@ -5190,12 +5209,10 @@
"Ljava/lang/String;",
"Ljava/lang/DexCache;",
"Ljava/lang/ref/Reference;",
- "Ljava/lang/reflect/ArtField;",
"Ljava/lang/reflect/ArtMethod;",
"Ljava/lang/reflect/Field;",
"Ljava/lang/reflect/Proxy;",
"[Ljava/lang/String;",
- "[Ljava/lang/reflect/ArtField;",
"[Ljava/lang/reflect/ArtMethod;",
"[Ljava/lang/reflect/Field;",
"Ljava/lang/ClassLoader;",
@@ -5310,12 +5327,12 @@
}
// For now, create a libcore-level DexFile for each ART DexFile. This "explodes" multidex.
- StackHandleScope<11> hs(self);
+ StackHandleScope<10> hs(self);
- Handle<mirror::ArtField> h_dex_elements_field =
- hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements));
+ ArtField* dex_elements_field =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
- mirror::Class* dex_elements_class = h_dex_elements_field->GetType<true>();
+ mirror::Class* dex_elements_class = dex_elements_field->GetType<true>();
DCHECK(dex_elements_class != nullptr);
DCHECK(dex_elements_class->IsArrayClass());
Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements(hs.NewHandle(
@@ -5323,14 +5340,12 @@
Handle<mirror::Class> h_dex_element_class =
hs.NewHandle(dex_elements_class->GetComponentType());
- Handle<mirror::ArtField> h_element_file_field =
- hs.NewHandle(
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile));
- DCHECK_EQ(h_dex_element_class.Get(), h_element_file_field->GetDeclaringClass());
+ ArtField* element_file_field =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ DCHECK_EQ(h_dex_element_class.Get(), element_file_field->GetDeclaringClass());
- Handle<mirror::ArtField> h_cookie_field =
- hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie));
- DCHECK_EQ(h_cookie_field->GetDeclaringClass(), h_element_file_field->GetType<false>());
+ ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ DCHECK_EQ(cookie_field->GetDeclaringClass(), element_file_field->GetType<false>());
// Fill the elements array.
int32_t index = 0;
@@ -5342,13 +5357,13 @@
h_long_array->Set(0, reinterpret_cast<intptr_t>(dex_file));
Handle<mirror::Object> h_dex_file = hs2.NewHandle(
- h_cookie_field->GetDeclaringClass()->AllocObject(self));
+ cookie_field->GetDeclaringClass()->AllocObject(self));
DCHECK(h_dex_file.Get() != nullptr);
- h_cookie_field->SetObject<false>(h_dex_file.Get(), h_long_array.Get());
+ cookie_field->SetObject<false>(h_dex_file.Get(), h_long_array.Get());
Handle<mirror::Object> h_element = hs2.NewHandle(h_dex_element_class->AllocObject(self));
DCHECK(h_element.Get() != nullptr);
- h_element_file_field->SetObject<false>(h_element.Get(), h_dex_file.Get());
+ element_file_field->SetObject<false>(h_element.Get(), h_dex_file.Get());
h_dex_elements->Set(index, h_element.Get());
index++;
@@ -5357,10 +5372,10 @@
// Create DexPathList.
Handle<mirror::Object> h_dex_path_list = hs.NewHandle(
- h_dex_elements_field->GetDeclaringClass()->AllocObject(self));
+ dex_elements_field->GetDeclaringClass()->AllocObject(self));
DCHECK(h_dex_path_list.Get() != nullptr);
// Set elements.
- h_dex_elements_field->SetObject<false>(h_dex_path_list.Get(), h_dex_elements.Get());
+ dex_elements_field->SetObject<false>(h_dex_path_list.Get(), h_dex_elements.Get());
// Create PathClassLoader.
Handle<mirror::Class> h_path_class_class = hs.NewHandle(
@@ -5369,20 +5384,20 @@
h_path_class_class->AllocObject(self));
DCHECK(h_path_class_loader.Get() != nullptr);
// Set DexPathList.
- Handle<mirror::ArtField> h_path_list_field = hs.NewHandle(
- soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList));
- DCHECK(h_path_list_field.Get() != nullptr);
- h_path_list_field->SetObject<false>(h_path_class_loader.Get(), h_dex_path_list.Get());
+ ArtField* path_list_field =
+ soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList);
+ DCHECK(path_list_field != nullptr);
+ path_list_field->SetObject<false>(h_path_class_loader.Get(), h_dex_path_list.Get());
// Make a pretend boot-classpath.
// TODO: Should we scan the image?
- Handle<mirror::ArtField> h_parent_field = hs.NewHandle(
+ ArtField* const parent_field =
mirror::Class::FindField(self, hs.NewHandle(h_path_class_loader->GetClass()), "parent",
- "Ljava/lang/ClassLoader;"));
- DCHECK(h_parent_field.Get() != nullptr);
+ "Ljava/lang/ClassLoader;");
+ DCHECK(parent_field != nullptr);
mirror::Object* boot_cl =
soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)->AllocObject(self);
- h_parent_field->SetObject<false>(h_path_class_loader.Get(), boot_cl);
+ parent_field->SetObject<false>(h_path_class_loader.Get(), boot_cl);
// Make it a global ref and return.
ScopedLocalRef<jobject> local_ref(
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 577fec2..68624b0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -70,12 +70,10 @@
kJavaLangString,
kJavaLangDexCache,
kJavaLangRefReference,
- kJavaLangReflectArtField,
kJavaLangReflectArtMethod,
kJavaLangReflectField,
kJavaLangReflectProxy,
kJavaLangStringArrayClass,
- kJavaLangReflectArtFieldArrayClass,
kJavaLangReflectArtMethodArrayClass,
kJavaLangReflectFieldArrayClass,
kJavaLangClassLoader,
@@ -119,11 +117,15 @@
Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Find a class in the path class loader, loading it if necessary without using JNI. Hash
- // function is supposed to be ComputeModifiedUtf8Hash(descriptor).
- mirror::Class* FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self, const char* descriptor, size_t hash,
- Handle<mirror::ClassLoader> class_loader)
+ // Finds a class in the path class loader, loading it if necessary without using JNI. Hash
+ // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
+ // class-loader chain could be handled, false otherwise, i.e., an unsupported class-loader
+ // was encountered while walking the parent chain (currently only BootClassLoader and
+ // PathClassLoader are supported).
+ bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+ Thread* self, const char* descriptor, size_t hash,
+ Handle<mirror::ClassLoader> class_loader,
+ mirror::Class** result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds a class by its descriptor using the "system" class loader, ie by searching the
@@ -201,7 +203,7 @@
mirror::Class* ResolveType(uint16_t type_idx, mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* ResolveType(uint16_t type_idx, mirror::ArtField* referrer)
+ mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a type with the given ID from the DexFile, storing the
@@ -232,10 +234,11 @@
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
+ ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtField* ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer,
- bool is_static)
+ ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* ResolveField(uint32_t field_idx, mirror::ArtMethod* referrer, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a field with a given ID from the DexFile, storing the
@@ -243,7 +246,7 @@
// in ResolveType. What is unique is the is_static argument which is
// used to determine if we are resolving a static or non-static
// field.
- mirror::ArtField* ResolveField(const DexFile& dex_file,
+ ArtField* ResolveField(const DexFile& dex_file,
uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
@@ -254,7 +257,7 @@
// result in DexCache. The ClassLinker and ClassLoader are used as
// in ResolveType. No is_static argument is provided so that Java
// field resolution semantics are followed.
- mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
+ ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -354,7 +357,7 @@
mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::ArtField>* AllocArtFieldArray(Thread* self, size_t length)
+ ArtField* AllocArtFieldArray(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self,
@@ -485,7 +488,6 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtField* AllocArtField(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -507,15 +509,21 @@
uint32_t SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def);
+ // Set up the class loader, class def index, and type idx so that we can insert this class in
+ // the class table.
+ void SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
+ Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
- Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
+ Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
Handle<mirror::Class> klass, const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
- Handle<mirror::Class> klass, Handle<mirror::ArtField> dst)
+ void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass,
+ ArtField* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file,
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 3f6c5a0..a31a785 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -19,14 +19,13 @@
#include <memory>
#include <string>
+#include "art_field-inl.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex_file.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
#include "mirror/accessible_object.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -173,10 +172,9 @@
method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes()));
}
- void AssertField(mirror::Class* klass, mirror::ArtField* field)
+ void AssertField(mirror::Class* klass, ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
EXPECT_TRUE(field != nullptr);
- EXPECT_TRUE(field->GetClass() != nullptr);
EXPECT_EQ(klass, field->GetDeclaringClass());
EXPECT_TRUE(field->GetName() != nullptr);
EXPECT_TRUE(field->GetType<true>() != nullptr);
@@ -262,30 +260,27 @@
}
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
- mirror::ArtField* field = klass->GetInstanceField(i);
+ ArtField* field = klass->GetInstanceField(i);
AssertField(klass.Get(), field);
EXPECT_FALSE(field->IsStatic());
}
for (size_t i = 0; i < klass->NumStaticFields(); i++) {
- mirror::ArtField* field = klass->GetStaticField(i);
+ ArtField* field = klass->GetStaticField(i);
AssertField(klass.Get(), field);
EXPECT_TRUE(field->IsStatic());
}
// Confirm that all instances field offsets are packed together at the start.
EXPECT_GE(klass->NumInstanceFields(), klass->NumReferenceInstanceFields());
- StackHandleScope<1> hs(Thread::Current());
- MutableHandle<mirror::ArtField> fhandle = hs.NewHandle<mirror::ArtField>(nullptr);
MemberOffset start_ref_offset = klass->GetFirstReferenceInstanceFieldOffset();
MemberOffset end_ref_offset(start_ref_offset.Uint32Value() +
klass->NumReferenceInstanceFields() *
sizeof(mirror::HeapReference<mirror::Object>));
MemberOffset current_ref_offset = start_ref_offset;
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
- mirror::ArtField* field = klass->GetInstanceField(i);
- fhandle.Assign(field);
- mirror::Class* field_type = fhandle->GetType<true>();
+ ArtField* field = klass->GetInstanceField(i);
+ mirror::Class* field_type = field->GetType<true>();
ASSERT_TRUE(field_type != nullptr);
if (!field->IsPrimitiveType()) {
ASSERT_TRUE(!field_type->IsPrimitive());
@@ -293,7 +288,7 @@
if (current_ref_offset.Uint32Value() == end_ref_offset.Uint32Value()) {
// While Reference.referent is not primitive, the ClassLinker
// treats it as such so that the garbage collector won't scan it.
- EXPECT_EQ(PrettyField(fhandle.Get()),
+ EXPECT_EQ(PrettyField(field),
"java.lang.Object java.lang.ref.Reference.referent");
} else {
current_ref_offset = MemberOffset(current_ref_offset.Uint32Value() +
@@ -425,7 +420,7 @@
}
for (size_t i = 0; i < offsets.size(); i++) {
- mirror::ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
+ ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
StringPiece field_name(field->GetName());
if (field_name != offsets[i].java_name) {
error = true;
@@ -434,7 +429,7 @@
if (error) {
for (size_t i = 0; i < offsets.size(); i++) {
CheckOffset& offset = offsets[i];
- mirror::ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
+ ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
StringPiece field_name(field->GetName());
if (field_name != offsets[i].java_name) {
LOG(ERROR) << "JAVA FIELD ORDER MISMATCH NEXT LINE:";
@@ -448,7 +443,7 @@
for (size_t i = 0; i < offsets.size(); i++) {
CheckOffset& offset = offsets[i];
- mirror::ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
+ ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
if (field->GetOffset().Uint32Value() != offset.cpp_offset) {
error = true;
}
@@ -456,7 +451,7 @@
if (error) {
for (size_t i = 0; i < offsets.size(); i++) {
CheckOffset& offset = offsets[i];
- mirror::ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
+ ArtField* field = is_static ? klass->GetStaticField(i) : klass->GetInstanceField(i);
if (field->GetOffset().Uint32Value() != offset.cpp_offset) {
LOG(ERROR) << "OFFSET MISMATCH NEXT LINE:";
}
@@ -486,15 +481,6 @@
};
};
-struct ArtFieldOffsets : public CheckOffsets<mirror::ArtField> {
- ArtFieldOffsets() : CheckOffsets<mirror::ArtField>(false, "Ljava/lang/reflect/ArtField;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, field_dex_idx_), "fieldDexIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, offset_), "offset"));
- };
-};
-
struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") {
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
@@ -522,8 +508,10 @@
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_), "numReferenceInstanceFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_), "numReferenceStaticFields"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), "referenceInstanceOffsets"));
@@ -629,7 +617,6 @@
TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
ScopedObjectAccess soa(Thread::Current());
EXPECT_TRUE(ObjectOffsets().Check());
- EXPECT_TRUE(ArtFieldOffsets().Check());
EXPECT_TRUE(ArtMethodOffsets().Check());
EXPECT_TRUE(ClassOffsets().Check());
EXPECT_TRUE(StringOffsets().Check());
@@ -844,21 +831,21 @@
NullHandle<mirror::ClassLoader> class_loader;
mirror::Class* c;
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Boolean;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Byte;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Character;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Double;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Float;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Integer;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Long;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Short;", class_loader);
- EXPECT_STREQ("value", c->GetIFields()->Get(0)->GetName());
+ EXPECT_STREQ("value", c->GetIFields()[0].GetName());
}
TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) {
@@ -892,49 +879,47 @@
EXPECT_EQ(9U, statics->NumStaticFields());
- mirror::ArtField* s0 = mirror::Class::FindStaticField(soa.Self(), statics, "s0", "Z");
- std::string temp;
- EXPECT_STREQ(s0->GetClass()->GetDescriptor(&temp), "Ljava/lang/reflect/ArtField;");
+ ArtField* s0 = mirror::Class::FindStaticField(soa.Self(), statics, "s0", "Z");
EXPECT_EQ(s0->GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
EXPECT_EQ(true, s0->GetBoolean(statics.Get()));
s0->SetBoolean<false>(statics.Get(), false);
- mirror::ArtField* s1 = mirror::Class::FindStaticField(soa.Self(), statics, "s1", "B");
+ ArtField* s1 = mirror::Class::FindStaticField(soa.Self(), statics, "s1", "B");
EXPECT_EQ(s1->GetTypeAsPrimitiveType(), Primitive::kPrimByte);
EXPECT_EQ(5, s1->GetByte(statics.Get()));
s1->SetByte<false>(statics.Get(), 6);
- mirror::ArtField* s2 = mirror::Class::FindStaticField(soa.Self(), statics, "s2", "C");
+ ArtField* s2 = mirror::Class::FindStaticField(soa.Self(), statics, "s2", "C");
EXPECT_EQ(s2->GetTypeAsPrimitiveType(), Primitive::kPrimChar);
EXPECT_EQ('a', s2->GetChar(statics.Get()));
s2->SetChar<false>(statics.Get(), 'b');
- mirror::ArtField* s3 = mirror::Class::FindStaticField(soa.Self(), statics, "s3", "S");
+ ArtField* s3 = mirror::Class::FindStaticField(soa.Self(), statics, "s3", "S");
EXPECT_EQ(s3->GetTypeAsPrimitiveType(), Primitive::kPrimShort);
EXPECT_EQ(-536, s3->GetShort(statics.Get()));
s3->SetShort<false>(statics.Get(), -535);
- mirror::ArtField* s4 = mirror::Class::FindStaticField(soa.Self(), statics, "s4", "I");
+ ArtField* s4 = mirror::Class::FindStaticField(soa.Self(), statics, "s4", "I");
EXPECT_EQ(s4->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
EXPECT_EQ(2000000000, s4->GetInt(statics.Get()));
s4->SetInt<false>(statics.Get(), 2000000001);
- mirror::ArtField* s5 = mirror::Class::FindStaticField(soa.Self(), statics, "s5", "J");
+ ArtField* s5 = mirror::Class::FindStaticField(soa.Self(), statics, "s5", "J");
EXPECT_EQ(s5->GetTypeAsPrimitiveType(), Primitive::kPrimLong);
EXPECT_EQ(0x1234567890abcdefLL, s5->GetLong(statics.Get()));
s5->SetLong<false>(statics.Get(), INT64_C(0x34567890abcdef12));
- mirror::ArtField* s6 = mirror::Class::FindStaticField(soa.Self(), statics, "s6", "F");
+ ArtField* s6 = mirror::Class::FindStaticField(soa.Self(), statics, "s6", "F");
EXPECT_EQ(s6->GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
EXPECT_DOUBLE_EQ(0.5, s6->GetFloat(statics.Get()));
s6->SetFloat<false>(statics.Get(), 0.75);
- mirror::ArtField* s7 = mirror::Class::FindStaticField(soa.Self(), statics, "s7", "D");
+ ArtField* s7 = mirror::Class::FindStaticField(soa.Self(), statics, "s7", "D");
EXPECT_EQ(s7->GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
EXPECT_DOUBLE_EQ(16777217.0, s7->GetDouble(statics.Get()));
s7->SetDouble<false>(statics.Get(), 16777219);
- mirror::ArtField* s8 = mirror::Class::FindStaticField(soa.Self(), statics, "s8",
+ ArtField* s8 = mirror::Class::FindStaticField(soa.Self(), statics, "s8",
"Ljava/lang/String;");
EXPECT_EQ(s8->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
EXPECT_TRUE(s8->GetObject(statics.Get())->AsString()->Equals("android"));
@@ -1006,13 +991,13 @@
EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1));
EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2));
- mirror::ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo",
+ ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo",
"Ljava/lang/String;");
- mirror::ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo",
+ ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo",
"Ljava/lang/String;");
- mirror::ArtField* Jfoo = mirror::Class::FindStaticField(soa.Self(), J, "foo",
+ ArtField* Jfoo = mirror::Class::FindStaticField(soa.Self(), J, "foo",
"Ljava/lang/String;");
- mirror::ArtField* Kfoo = mirror::Class::FindStaticField(soa.Self(), K, "foo",
+ ArtField* Kfoo = mirror::Class::FindStaticField(soa.Self(), K, "foo",
"Ljava/lang/String;");
ASSERT_TRUE(Afoo != nullptr);
EXPECT_EQ(Afoo, Bfoo);
@@ -1110,9 +1095,6 @@
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/DexCache;", class_loader);
EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize());
- c = class_linker_->FindClass(soa.Self(), "Ljava/lang/reflect/ArtField;", class_loader);
- EXPECT_EQ(c->GetClassSize(), mirror::ArtField::ClassSize());
-
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/reflect/ArtMethod;", class_loader);
EXPECT_EQ(c->GetClassSize(), mirror::ArtMethod::ClassSize());
}
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index d400010..60b7fa2 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -398,7 +398,7 @@
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<4> hs(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader = hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(jclass_loader));
@@ -409,16 +409,13 @@
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
- Handle<mirror::ArtField> cookie_field =
- hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie));
- Handle<mirror::ArtField> dex_file_field =
- hs.NewHandle(
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile));
+ ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* dex_file_field =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
mirror::Object* dex_path_list =
soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
GetObject(class_loader.Get());
- if (dex_path_list != nullptr && dex_file_field.Get() != nullptr &&
- cookie_field.Get() != nullptr) {
+ if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
mirror::Object* dex_elements_obj =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 36de221..407746f 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -18,6 +18,7 @@
#include <sstream>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -160,7 +161,7 @@
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::ArtField* accessed) {
+void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) {
std::ostringstream msg;
msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
@@ -168,7 +169,7 @@
}
void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
- mirror::ArtField* accessed) {
+ ArtField* accessed) {
std::ostringstream msg;
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
@@ -226,7 +227,7 @@
msg.str().c_str());
}
-void ThrowIncompatibleClassChangeErrorField(mirror::ArtField* resolved_field, bool is_static,
+void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
mirror::ArtMethod* referrer) {
std::ostringstream msg;
msg << "Expected '" << PrettyField(resolved_field) << "' to be a "
@@ -314,7 +315,7 @@
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(mirror::ArtField* field, bool is_read) {
+void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) {
std::ostringstream msg;
msg << "Attempt to " << (is_read ? "read from" : "write to")
<< " field '" << PrettyField(field, true) << "' on a null object reference";
@@ -394,7 +395,7 @@
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
- mirror::ArtField* field =
+ ArtField* field =
Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(), method, false);
ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
break;
@@ -408,7 +409,7 @@
case Instruction::IGET_OBJECT_QUICK: {
// Since we replaced the field index, we ask the verifier to tell us which
// field is accessed at this location.
- mirror::ArtField* field =
+ ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
if (field != NULL) {
// NPE with precise message.
@@ -426,7 +427,7 @@
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
- mirror::ArtField* field =
+ ArtField* field =
Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(), method, false);
ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
break;
@@ -440,7 +441,7 @@
case Instruction::IPUT_OBJECT_QUICK: {
// Since we replaced the field index, we ask the verifier to tell us which
// field is accessed at this location.
- mirror::ArtField* field =
+ ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
if (field != NULL) {
// NPE with precise message.
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 9e749e3..df95cf9 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -22,11 +22,11 @@
namespace art {
namespace mirror {
- class ArtField;
class ArtMethod;
class Class;
class Object;
} // namespace mirror
+class ArtField;
class Signature;
class StringPiece;
@@ -81,10 +81,10 @@
void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::ArtField* accessed)
+void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer, mirror::ArtField* accessed)
+void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer, ArtField* accessed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
@@ -112,7 +112,7 @@
mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIncompatibleClassChangeErrorField(mirror::ArtField* resolved_field, bool is_static,
+void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
@@ -160,7 +160,7 @@
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(mirror::ArtField* field,
+void ThrowNullPointerExceptionForFieldAccess(ArtField* field,
bool is_read)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
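
Throughout these headers the ArtField forward declaration moves out of namespace mirror into art::, matching the type's new definition as a native (non-managed) class. A declaration-only sketch of why the scope matters:

    namespace art {
    namespace mirror {
    class ArtMethod;  // still a managed-heap (mirror) type
    }  // namespace mirror
    class ArtField;   // now declared at art:: scope, like its definition

    // A signature must now say ArtField, not mirror::ArtField; the latter
    // would name a different, unrelated class.
    void ThrowIllegalAccessErrorField(ArtField* accessed);
    }  // namespace art
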
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a909a1a..c074b54 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -21,6 +21,7 @@
#include <set>
#include "arch/context.h"
+#include "art_field-inl.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -30,7 +31,6 @@
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
@@ -266,14 +266,14 @@
}
void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, mirror::ArtField* field)
+ uint32_t dex_pc, ArtField* field)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(thread);
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
+ mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
@@ -415,9 +415,8 @@
static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
JDWP::JdwpError* error)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) {
mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
if (thread_peer == nullptr) {
// This isn't even an object.
@@ -432,6 +431,7 @@
return nullptr;
}
+ MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, thread_peer);
// If thread is null then this is a java.lang.Thread without a Thread*. Must be an unstarted or a
// zombie.
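
DecodeThread's contract flips here: instead of requiring callers to hold thread_list_lock_, it now excludes the lock and takes it internally, scoped to the FromManagedThread lookup. That is why the explicit MutexLock blocks disappear from every call site in the hunks below. The shape of the refactor, as a standalone sketch with std::mutex standing in for the ART lock:

    #include <mutex>

    std::mutex thread_list_lock;  // stand-in for Locks::thread_list_lock_

    int* LookUpThread() {
      // The callee now scopes the lock to the only region that needs it.
      std::lock_guard<std::mutex> guard(thread_list_lock);
      static int the_thread = 0;
      return &the_thread;  // placeholder for the thread-list walk
    }

    int* Caller() {
      // Call sites shrink from `{ MutexLock mu(...); LookUpThread(); }`
      // to a plain call.
      return LookUpThread();
    }
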
@@ -856,17 +856,13 @@
};
ScopedObjectAccessUnchecked soa(Thread::Current());
- Thread* thread;
- {
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- JDWP::JdwpError error;
- thread = DecodeThread(soa, thread_id, &error);
- if (thread == nullptr) {
- return error;
- }
- if (!IsSuspendedForDebugger(soa, thread)) {
- return JDWP::ERR_THREAD_NOT_SUSPENDED;
- }
+ JDWP::JdwpError error;
+ Thread* thread = DecodeThread(soa, thread_id, &error);
+ if (thread == nullptr) {
+ return error;
+ }
+ if (!IsSuspendedForDebugger(soa, thread)) {
+ return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
std::unique_ptr<Context> context(Context::Create());
OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
@@ -876,21 +872,17 @@
JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
JDWP::ObjectId* contended_monitor) {
- mirror::Object* contended_monitor_obj;
ScopedObjectAccessUnchecked soa(Thread::Current());
*contended_monitor = 0;
- {
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- JDWP::JdwpError error;
- Thread* thread = DecodeThread(soa, thread_id, &error);
- if (thread == nullptr) {
- return error;
- }
- if (!IsSuspendedForDebugger(soa, thread)) {
- return JDWP::ERR_THREAD_NOT_SUSPENDED;
- }
- contended_monitor_obj = Monitor::GetContendedMonitor(thread);
+ JDWP::JdwpError error;
+ Thread* thread = DecodeThread(soa, thread_id, &error);
+ if (thread == nullptr) {
+ return error;
}
+ if (!IsSuspendedForDebugger(soa, thread)) {
+ return JDWP::ERR_THREAD_NOT_SUSPENDED;
+ }
+ mirror::Object* contended_monitor_obj = Monitor::GetContendedMonitor(thread);
// Add() requires the thread_list_lock_ not held to avoid the lock
// level violation.
*contended_monitor = gRegistry->Add(contended_monitor_obj);
@@ -1336,8 +1328,7 @@
return JDWP::ERR_NONE;
}
-JDWP::FieldId Dbg::ToFieldId(const mirror::ArtField* f) {
- CHECK(!kMovingFields);
+JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}
@@ -1347,10 +1338,9 @@
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}
-static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
+static ArtField* FromFieldId(JDWP::FieldId fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingFields);
- return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
+ return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}
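
With ArtField now a native object that the GC never moves, a JDWP FieldId can simply be the pointer value, which is why the kMovingFields checks are dropped on both sides of the mapping. The round trip, as a standalone sketch:

    #include <cassert>
    #include <cstdint>

    struct Field {};           // stand-in for the non-moving ArtField
    using FieldId = uint64_t;  // stand-in for JDWP::FieldId

    FieldId ToId(const Field* f) {
      return static_cast<FieldId>(reinterpret_cast<uintptr_t>(f));
    }

    Field* FromId(FieldId id) {
      return reinterpret_cast<Field*>(static_cast<uintptr_t>(id));
    }

    int main() {
      Field f;
      // The identity holds only because f is never relocated.
      assert(FromId(ToId(&f)) == &f);
    }
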
static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
@@ -1387,8 +1377,8 @@
}
bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
- mirror::ArtField* event_field) {
- mirror::ArtField* expected_field = FromFieldId(expected_field_id);
+ ArtField* event_field) {
+ ArtField* expected_field = FromFieldId(expected_field_id);
if (expected_field != event_field) {
return false;
}
@@ -1402,7 +1392,9 @@
}
void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_) {
if (m == nullptr) {
memset(location, 0, sizeof(*location));
} else {
@@ -1423,7 +1415,7 @@
}
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
- mirror::ArtField* f = FromFieldId(field_id);
+ ArtField* f = FromFieldId(field_id);
if (f == nullptr) {
return "NULL";
}
@@ -1510,7 +1502,7 @@
expandBufAdd4BE(pReply, instance_field_count + static_field_count);
for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
- mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
+ ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
expandBufAddFieldId(pReply, ToFieldId(f));
expandBufAddUtf8String(pReply, f->GetName());
expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
@@ -1680,7 +1672,7 @@
void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
JDWP::ExpandBuf* pReply) {
- mirror::ArtField* f = FromFieldId(field_id);
+ ArtField* f = FromFieldId(field_id);
JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
OutputJValue(tag, field_value, pReply);
}
@@ -1723,7 +1715,7 @@
if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
}
- mirror::ArtField* f = FromFieldId(field_id);
+ ArtField* f = FromFieldId(field_id);
mirror::Class* receiver_class = c;
if (receiver_class == nullptr && o != nullptr) {
@@ -1785,7 +1777,7 @@
if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
}
- mirror::ArtField* f = FromFieldId(field_id);
+ ArtField* f = FromFieldId(field_id);
// The RI only enforces the static/non-static mismatch in one direction.
// TODO: should we change the tests and check both?
@@ -1822,11 +1814,10 @@
if (v != nullptr) {
mirror::Class* field_type;
{
- StackHandleScope<3> hs(Thread::Current());
+ StackHandleScope<2> hs(Thread::Current());
HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
- HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
- field_type = h_f->GetType<true>();
+ field_type = f->GetType<true>();
}
if (!field_type->IsAssignableFrom(v->GetClass())) {
return JDWP::ERR_INVALID_OBJECT;
@@ -1893,7 +1884,6 @@
JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
UNUSED(thread);
@@ -1904,7 +1894,7 @@
// We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
CHECK(thread_object != nullptr) << error;
- mirror::ArtField* java_lang_Thread_name_field =
+ ArtField* java_lang_Thread_name_field =
soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
mirror::String* s =
reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
@@ -1923,11 +1913,8 @@
}
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
// Okay, so it's an object, but is it actually a thread?
- {
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
- }
+ Thread* thread = DecodeThread(soa, thread_id, &error);
+ UNUSED(thread);
if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
// Zombie threads are in the null group.
expandBufAddObjectId(pReply, JDWP::ObjectId(0));
@@ -1935,7 +1922,7 @@
} else if (error == JDWP::ERR_NONE) {
mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
CHECK(c != nullptr);
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
CHECK(f != nullptr);
mirror::Object* group = f->GetObject(thread_object);
CHECK(group != nullptr);
@@ -1976,7 +1963,7 @@
return error;
}
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
CHECK(f != nullptr);
mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
@@ -1995,7 +1982,7 @@
mirror::Object* parent;
{
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
CHECK(f != nullptr);
parent = f->GetObject(thread_group);
}
@@ -2010,7 +1997,7 @@
CHECK(thread_group != nullptr);
// Get the ArrayList<ThreadGroup> "groups" out of this thread group...
- mirror::ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
+ ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
{
// The "groups" field is declared as a java.util.List: check it really is
@@ -2022,8 +2009,8 @@
}
// Get the array and size out of the ArrayList<ThreadGroup>...
- mirror::ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
- mirror::ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
+ ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
+ ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
mirror::ObjectArray<mirror::Object>* groups_array =
array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
const int32_t size = size_field->GetInt(groups_array_list);
@@ -2069,7 +2056,7 @@
JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
ScopedObjectAccessUnchecked soa(Thread::Current());
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
mirror::Object* group = f->GetObject(f->GetDeclaringClass());
return gRegistry->Add(group);
}
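
A recurring pattern in these hunks: a static field is read by passing the field's own declaring class as the receiver, since static storage lives on the class object. A minimal standalone sketch (types illustrative, not the ART ones):

    #include <cassert>

    struct Klass;

    struct Field {
      Klass* declaring_class;
      Klass* GetDeclaringClass() const { return declaring_class; }
    };

    struct Klass {
      int static_storage = 7;  // statics live on the class object itself
    };

    int GetStaticInt(const Field& f) {
      // Mirrors f->GetObject(f->GetDeclaringClass()) in the hunk above.
      return f.GetDeclaringClass()->static_storage;
    }

    int main() {
      Klass k;
      Field f{&k};
      assert(GetStaticInt(f) == 7);
    }
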
@@ -2115,7 +2102,6 @@
*pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE) {
@@ -2136,7 +2122,6 @@
JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
ScopedObjectAccess soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE) {
@@ -2149,7 +2134,6 @@
JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
ScopedObjectAccess soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE) {
@@ -2166,7 +2150,7 @@
if (desired_thread_group == nullptr) {
return true;
}
- mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
+ ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
DCHECK(thread_group_field != nullptr);
mirror::Object* group = thread_group_field->GetObject(peer);
return (group == desired_thread_group);
@@ -2228,7 +2212,6 @@
JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
ScopedObjectAccess soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
*result = 0;
Thread* thread = DecodeThread(soa, thread_id, &error);
@@ -2254,9 +2237,7 @@
expandBufAdd4BE(buf_, frame_count_);
}
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2283,7 +2264,6 @@
};
ScopedObjectAccessUnchecked soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE) {
@@ -2390,17 +2370,13 @@
JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
JDWP::ObjectId* result) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- Thread* thread;
- {
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- JDWP::JdwpError error;
- thread = DecodeThread(soa, thread_id, &error);
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- if (!IsSuspendedForDebugger(soa, thread)) {
- return JDWP::ERR_THREAD_NOT_SUSPENDED;
- }
+ JDWP::JdwpError error;
+ Thread* thread = DecodeThread(soa, thread_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ if (!IsSuspendedForDebugger(soa, thread)) {
+ return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
std::unique_ptr<Context> context(Context::Create());
GetThisVisitor visitor(thread, context.get(), frame_id);
@@ -2447,17 +2423,13 @@
JDWP::FrameId frame_id = request->ReadFrameId();
ScopedObjectAccessUnchecked soa(Thread::Current());
- Thread* thread;
- {
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- JDWP::JdwpError error;
- thread = DecodeThread(soa, thread_id, &error);
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- if (!IsSuspendedForDebugger(soa, thread)) {
- return JDWP::ERR_THREAD_NOT_SUSPENDED;
- }
+ JDWP::JdwpError error;
+ Thread* thread = DecodeThread(soa, thread_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ if (!IsSuspendedForDebugger(soa, thread)) {
+ return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
// Find the frame with the given frame_id.
std::unique_ptr<Context> context(Context::Create());
@@ -2478,7 +2450,7 @@
size_t width = Dbg::GetTagWidth(reqSigByte);
uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
- JDWP::JdwpError error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
+ error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
if (error != JDWP::ERR_NONE) {
return error;
}
@@ -2622,17 +2594,13 @@
JDWP::FrameId frame_id = request->ReadFrameId();
ScopedObjectAccessUnchecked soa(Thread::Current());
- Thread* thread;
- {
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- JDWP::JdwpError error;
- thread = DecodeThread(soa, thread_id, &error);
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- if (!IsSuspendedForDebugger(soa, thread)) {
- return JDWP::ERR_THREAD_NOT_SUSPENDED;
- }
+ JDWP::JdwpError error;
+ Thread* thread = DecodeThread(soa, thread_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ if (!IsSuspendedForDebugger(soa, thread)) {
+ return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
// Find the frame with the given frame_id.
std::unique_ptr<Context> context(Context::Create());
@@ -2651,7 +2619,7 @@
uint64_t value = request->ReadValue(width);
VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
- JDWP::JdwpError error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width);
+ error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width);
if (error != JDWP::ERR_NONE) {
return error;
}
@@ -2771,7 +2739,7 @@
}
void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
- mirror::Object* this_object, mirror::ArtField* f) {
+ mirror::Object* this_object, ArtField* f) {
if (!IsDebuggerActive()) {
return;
}
@@ -2784,7 +2752,7 @@
}
void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
- mirror::Object* this_object, mirror::ArtField* f,
+ mirror::Object* this_object, ArtField* f,
const JValue* field_value) {
if (!IsDebuggerActive()) {
return;
@@ -3498,10 +3466,7 @@
self_suspend_(false),
other_suspend_(false) {
ScopedObjectAccessUnchecked soa(self);
- {
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
- thread_ = DecodeThread(soa, thread_id, &error_);
- }
+ thread_ = DecodeThread(soa, thread_id, &error_);
if (error_ == JDWP::ERR_NONE) {
if (thread_ == soa.Self()) {
self_suspend_ = true;
@@ -3671,7 +3636,6 @@
void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
if (error == JDWP::ERR_NONE) {
@@ -3721,7 +3685,6 @@
Thread* self = Thread::Current();
{
ScopedObjectAccessUnchecked soa(self);
- MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
JDWP::JdwpError error;
targetThread = DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE) {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index dd7f9c5..c287121 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -37,13 +37,13 @@
namespace art {
namespace mirror {
-class ArtField;
class ArtMethod;
class Class;
class Object;
class Throwable;
} // namespace mirror
class AllocRecord;
+class ArtField;
class ObjectRegistry;
class ScopedObjectAccessUnchecked;
class StackVisitor;
@@ -340,7 +340,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
- mirror::ArtField* event_field)
+ ArtField* event_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
@@ -525,10 +525,10 @@
kMethodExit = 0x08,
};
static void PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
- mirror::ArtField* f)
+ ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
- mirror::Object* this_object, mirror::ArtField* f,
+ mirror::Object* this_object, ArtField* f,
const JValue* field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostException(mirror::Throwable* exception)
@@ -706,7 +706,7 @@
static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
+ static JDWP::FieldId ToFieldId(const ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 8685d8e..03a47a3 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -27,6 +27,7 @@
#include <memory>
#include <sstream>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "class_linker.h"
@@ -34,7 +35,6 @@
#include "dex_file_verifier.h"
#include "globals.h"
#include "leb128.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/string.h"
#include "os.h"
@@ -1210,7 +1210,7 @@
}
template<bool kTransactionActive>
-void EncodedStaticFieldValueIterator::ReadValueToField(Handle<mirror::ArtField> field) const {
+void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const {
switch (type_) {
case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); break;
case kByte: field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break;
@@ -1222,13 +1222,11 @@
case kDouble: field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break;
case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), NULL); break;
case kString: {
- CHECK(!kMovingFields);
mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_);
field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved);
break;
}
case kType: {
- CHECK(!kMovingFields);
mirror::Class* resolved = linker_->ResolveType(dex_file_, jval_.i, *dex_cache_,
*class_loader_);
field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved);
@@ -1237,8 +1235,8 @@
default: UNIMPLEMENTED(FATAL) << ": type " << type_;
}
}
-template void EncodedStaticFieldValueIterator::ReadValueToField<true>(Handle<mirror::ArtField> field) const;
-template void EncodedStaticFieldValueIterator::ReadValueToField<false>(Handle<mirror::ArtField> field) const;
+template void EncodedStaticFieldValueIterator::ReadValueToField<true>(ArtField* field) const;
+template void EncodedStaticFieldValueIterator::ReadValueToField<false>(ArtField* field) const;
CatchHandlerIterator::CatchHandlerIterator(const DexFile::CodeItem& code_item, uint32_t address) {
handler_.address_ = -1;
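
ReadValueToField keeps its definition in the .cc file; that works because the only two variants, transactional and not, are instantiated explicitly right after it. The pattern in isolation, as a compilable sketch:

    #include <iostream>

    template <bool kTransactionActive>
    void SetFieldValue(int value) {
      if (kTransactionActive) {
        // A transactional write would record the old value for rollback here.
      }
      std::cout << "wrote " << value << '\n';
    }

    // Explicit instantiation lets the definition stay out of the header:
    // callers in other translation units link against these two symbols.
    template void SetFieldValue<true>(int);
    template void SetFieldValue<false>(int);
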
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 8e2d6c2..5bdd9b6 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -37,11 +37,11 @@
// TODO: remove dependencies on mirror classes, primarily by moving
// EncodedStaticFieldValueIterator to its own file.
namespace mirror {
- class ArtField;
class ArtMethod;
class ClassLoader;
class DexCache;
} // namespace mirror
+class ArtField;
class ClassLinker;
class MemMap;
class OatDexFile;
@@ -1298,7 +1298,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void ReadValueToField(Handle<mirror::ArtField> field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ReadValueToField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasNext() const { return pos_ < array_size_; }
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 411ec43..f2b013f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -25,7 +25,6 @@
#include "base/stringprintf.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
-#include "dwarf.h"
#include "elf_file_impl.h"
#include "elf_utils.h"
#include "leb128.h"
@@ -1587,487 +1586,6 @@
return nullptr;
}
-struct PACKED(1) FDE32 {
- uint32_t raw_length_;
- uint32_t GetLength() {
- return raw_length_ + sizeof(raw_length_);
- }
- uint32_t CIE_pointer;
- uint32_t initial_location;
- uint32_t address_range;
- uint8_t instructions[0];
-};
-
-static FDE32* NextFDE(FDE32* frame) {
- uint8_t* fde_bytes = reinterpret_cast<uint8_t*>(frame);
- fde_bytes += frame->GetLength();
- return reinterpret_cast<FDE32*>(fde_bytes);
-}
-
-static bool IsFDE(FDE32* frame) {
- return frame->CIE_pointer != 0;
-}
-
-struct PACKED(1) FDE64 {
- uint32_t raw_length_;
- uint64_t extended_length_;
- uint64_t GetLength() {
- return extended_length_ + sizeof(raw_length_) + sizeof(extended_length_);
- }
- uint64_t CIE_pointer;
- uint64_t initial_location;
- uint64_t address_range;
- uint8_t instructions[0];
-};
-
-static FDE64* NextFDE(FDE64* frame) {
- uint8_t* fde_bytes = reinterpret_cast<uint8_t*>(frame);
- fde_bytes += frame->GetLength();
- return reinterpret_cast<FDE64*>(fde_bytes);
-}
-
-static bool IsFDE(FDE64* frame) {
- return frame->CIE_pointer != 0;
-}
-
-template <typename Elf_SOff>
-static bool FixupEHFrame(Elf_SOff base_address_delta, uint8_t* eh_frame, size_t eh_frame_size) {
- // TODO: Check the spec whether this is really data-dependent, or whether it's clear from the
- // ELF file whether we should expect 32-bit or 64-bit.
- if (*(reinterpret_cast<uint32_t*>(eh_frame)) == 0xffffffff) {
- FDE64* last_frame = reinterpret_cast<FDE64*>(eh_frame + eh_frame_size);
- FDE64* frame = NextFDE(reinterpret_cast<FDE64*>(eh_frame));
- for (; frame < last_frame; frame = NextFDE(frame)) {
- if (!IsFDE(frame)) {
- return false;
- }
- frame->initial_location += base_address_delta;
- }
- return true;
- } else {
- CHECK(IsInt<32>(base_address_delta));
- FDE32* last_frame = reinterpret_cast<FDE32*>(eh_frame + eh_frame_size);
- FDE32* frame = NextFDE(reinterpret_cast<FDE32*>(eh_frame));
- for (; frame < last_frame; frame = NextFDE(frame)) {
- if (!IsFDE(frame)) {
- return false;
- }
- frame->initial_location += base_address_delta;
- }
- return true;
- }
-}
-
-static uint8_t* NextLeb128(uint8_t* current) {
- DecodeUnsignedLeb128(const_cast<const uint8_t**>(&current));
- return current;
-}
-
-struct PACKED(1) DebugLineHeader {
- uint32_t unit_length_; // TODO 32-bit specific size
- uint16_t version_;
- uint32_t header_length_; // TODO 32-bit specific size
- uint8_t minimum_instruction_lenght_;
- uint8_t default_is_stmt_;
- int8_t line_base_;
- uint8_t line_range_;
- uint8_t opcode_base_;
- uint8_t remaining_[0];
-
- bool IsStandardOpcode(const uint8_t* op) const {
- return *op != 0 && *op < opcode_base_;
- }
-
- bool IsExtendedOpcode(const uint8_t* op) const {
- return *op == 0;
- }
-
- const uint8_t* GetStandardOpcodeLengths() const {
- return remaining_;
- }
-
- uint8_t* GetNextOpcode(uint8_t* op) const {
- if (IsExtendedOpcode(op)) {
- uint8_t* length_field = op + 1;
- uint32_t length = DecodeUnsignedLeb128(const_cast<const uint8_t**>(&length_field));
- return length_field + length;
- } else if (!IsStandardOpcode(op)) {
- return op + 1;
- } else if (*op == dwarf::DW_LNS_fixed_advance_pc) {
- return op + 1 + sizeof(uint16_t);
- } else {
- uint8_t num_args = GetStandardOpcodeLengths()[*op - 1];
- op += 1;
- for (int i = 0; i < num_args; i++) {
- op = NextLeb128(op);
- }
- return op;
- }
- }
-
- uint8_t* GetDebugLineData() const {
- const uint8_t* hdr_start =
- reinterpret_cast<const uint8_t*>(&header_length_) + sizeof(header_length_);
- return const_cast<uint8_t*>(hdr_start + header_length_);
- }
-};
-
-class DebugLineInstructionIterator FINAL {
- public:
- static DebugLineInstructionIterator* Create(DebugLineHeader* header, size_t section_size) {
- std::unique_ptr<DebugLineInstructionIterator> line_iter(
- new DebugLineInstructionIterator(header, section_size));
- if (line_iter.get() == nullptr) {
- return nullptr;
- } else {
- return line_iter.release();
- }
- }
-
- ~DebugLineInstructionIterator() {}
-
- bool Next() {
- if (current_instruction_ == nullptr) {
- return false;
- }
- current_instruction_ = header_->GetNextOpcode(current_instruction_);
- if (current_instruction_ >= last_instruction_) {
- current_instruction_ = nullptr;
- return false;
- } else {
- return true;
- }
- }
-
- uint8_t* GetInstruction() const {
- return current_instruction_;
- }
-
- bool IsExtendedOpcode() const {
- return header_->IsExtendedOpcode(current_instruction_);
- }
-
- uint8_t GetOpcode() {
- if (!IsExtendedOpcode()) {
- return *current_instruction_;
- } else {
- uint8_t* len_ptr = current_instruction_ + 1;
- return *NextLeb128(len_ptr);
- }
- }
-
- uint8_t* GetArguments() {
- if (!IsExtendedOpcode()) {
- return current_instruction_ + 1;
- } else {
- uint8_t* len_ptr = current_instruction_ + 1;
- return NextLeb128(len_ptr) + 1;
- }
- }
-
- private:
- DebugLineInstructionIterator(DebugLineHeader* header, size_t size)
- : header_(header), last_instruction_(reinterpret_cast<uint8_t*>(header) + size),
- current_instruction_(header->GetDebugLineData()) {}
-
- DebugLineHeader* const header_;
- uint8_t* const last_instruction_;
- uint8_t* current_instruction_;
-};
-
-template <typename Elf_SOff>
-static bool FixupDebugLine(Elf_SOff base_offset_delta, DebugLineInstructionIterator* iter) {
- CHECK(IsInt<32>(base_offset_delta));
- for (; iter->GetInstruction(); iter->Next()) {
- if (iter->IsExtendedOpcode() && iter->GetOpcode() == dwarf::DW_LNE_set_address) {
- *reinterpret_cast<uint32_t*>(iter->GetArguments()) += base_offset_delta;
- }
- }
- return true;
-}
-
-struct PACKED(1) DebugInfoHeader {
- uint32_t unit_length; // TODO 32-bit specific size
- uint16_t version;
- uint32_t debug_abbrev_offset; // TODO 32-bit specific size
- uint8_t address_size;
-};
-
-// Returns -1 if it is variable length, which we will just disallow for now.
-static int32_t FormLength(uint32_t att) {
- switch (att) {
- case dwarf::DW_FORM_data1:
- case dwarf::DW_FORM_flag:
- case dwarf::DW_FORM_flag_present:
- case dwarf::DW_FORM_ref1:
- return 1;
-
- case dwarf::DW_FORM_data2:
- case dwarf::DW_FORM_ref2:
- return 2;
-
- case dwarf::DW_FORM_addr: // TODO 32-bit only
- case dwarf::DW_FORM_ref_addr: // TODO 32-bit only
- case dwarf::DW_FORM_sec_offset: // TODO 32-bit only
- case dwarf::DW_FORM_strp: // TODO 32-bit only
- case dwarf::DW_FORM_data4:
- case dwarf::DW_FORM_ref4:
- return 4;
-
- case dwarf::DW_FORM_data8:
- case dwarf::DW_FORM_ref8:
- case dwarf::DW_FORM_ref_sig8:
- return 8;
-
- case dwarf::DW_FORM_block:
- case dwarf::DW_FORM_block1:
- case dwarf::DW_FORM_block2:
- case dwarf::DW_FORM_block4:
- case dwarf::DW_FORM_exprloc:
- case dwarf::DW_FORM_indirect:
- case dwarf::DW_FORM_ref_udata:
- case dwarf::DW_FORM_sdata:
- case dwarf::DW_FORM_string:
- case dwarf::DW_FORM_udata:
- default:
- return -1;
- }
-}
-
-class DebugTag FINAL {
- public:
- ~DebugTag() {}
- // Creates a new tag and moves data pointer up to the start of the next one.
- // nullptr means error.
- static DebugTag* Create(const uint8_t** data_pointer) {
- const uint8_t* data = *data_pointer;
- uint32_t index = DecodeUnsignedLeb128(&data);
- std::unique_ptr<DebugTag> tag(new DebugTag(index));
- tag->size_ = static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(data) - reinterpret_cast<uintptr_t>(*data_pointer));
- // skip the abbrev
- tag->tag_ = DecodeUnsignedLeb128(&data);
- tag->has_child_ = (*data == 0);
- data++;
- while (true) {
- uint32_t attr = DecodeUnsignedLeb128(&data);
- uint32_t form = DecodeUnsignedLeb128(&data);
- if (attr == 0 && form == 0) {
- break;
- } else if (attr == 0 || form == 0) {
- // Bad abbrev.
- return nullptr;
- }
- int32_t size = FormLength(form);
- if (size == -1) {
- return nullptr;
- }
- tag->AddAttribute(attr, static_cast<uint32_t>(size));
- }
- *data_pointer = data;
- return tag.release();
- }
-
- uint32_t GetSize() const {
- return size_;
- }
-
- bool HasChild() const {
- return has_child_;
- }
-
- uint32_t GetTagNumber() const {
- return tag_;
- }
-
- uint32_t GetIndex() const {
- return index_;
- }
-
- // Gets the offset of a particular attribute in this tag structure.
- // Interpretation of the data is left to the consumer. 0 is returned if the
- // tag does not contain the attribute.
- uint32_t GetOffsetOf(uint32_t dwarf_attribute) const {
- auto it = off_map_.find(dwarf_attribute);
- if (it == off_map_.end()) {
- return 0;
- } else {
- return it->second;
- }
- }
-
- // Gets the size of attribute
- uint32_t GetAttrSize(uint32_t dwarf_attribute) const {
- auto it = size_map_.find(dwarf_attribute);
- if (it == size_map_.end()) {
- return 0;
- } else {
- return it->second;
- }
- }
-
- private:
- explicit DebugTag(uint32_t index) : index_(index), size_(0), tag_(0), has_child_(false) {}
- void AddAttribute(uint32_t type, uint32_t attr_size) {
- off_map_.insert(std::pair<uint32_t, uint32_t>(type, size_));
- size_map_.insert(std::pair<uint32_t, uint32_t>(type, attr_size));
- size_ += attr_size;
- }
-
- const uint32_t index_;
- std::map<uint32_t, uint32_t> off_map_;
- std::map<uint32_t, uint32_t> size_map_;
- uint32_t size_;
- uint32_t tag_;
- bool has_child_;
-};
-
-class DebugAbbrev {
- public:
- ~DebugAbbrev() {}
- static DebugAbbrev* Create(const uint8_t* dbg_abbrev, size_t dbg_abbrev_size) {
- std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev(dbg_abbrev, dbg_abbrev + dbg_abbrev_size));
- if (!abbrev->ReadAtOffset(0)) {
- return nullptr;
- }
- return abbrev.release();
- }
-
- bool ReadAtOffset(uint32_t abbrev_offset) {
- tags_.clear();
- tag_list_.clear();
- const uint8_t* dbg_abbrev = begin_ + abbrev_offset;
- while (dbg_abbrev < end_ && *dbg_abbrev != 0) {
- std::unique_ptr<DebugTag> tag(DebugTag::Create(&dbg_abbrev));
- if (tag.get() == nullptr) {
- return false;
- } else {
- tags_.insert(std::pair<uint32_t, uint32_t>(tag->GetIndex(), tag_list_.size()));
- tag_list_.push_back(std::move(tag));
- }
- }
- return true;
- }
-
- DebugTag* ReadTag(const uint8_t* entry) {
- uint32_t tag_num = DecodeUnsignedLeb128(&entry);
- auto it = tags_.find(tag_num);
- if (it == tags_.end()) {
- return nullptr;
- } else {
- CHECK_GT(tag_list_.size(), it->second);
- return tag_list_.at(it->second).get();
- }
- }
-
- private:
- DebugAbbrev(const uint8_t* begin, const uint8_t* end) : begin_(begin), end_(end) {}
- const uint8_t* const begin_;
- const uint8_t* const end_;
- std::map<uint32_t, uint32_t> tags_;
- std::vector<std::unique_ptr<DebugTag>> tag_list_;
-};
-
-class DebugInfoIterator {
- public:
- static DebugInfoIterator* Create(DebugInfoHeader* header, size_t frame_size,
- DebugAbbrev* abbrev) {
- std::unique_ptr<DebugInfoIterator> iter(new DebugInfoIterator(header, frame_size, abbrev));
- if (iter->GetCurrentTag() == nullptr) {
- return nullptr;
- } else {
- return iter.release();
- }
- }
- ~DebugInfoIterator() {}
-
- // Moves to the next DIE. Returns false if at last entry.
- // TODO Handle variable length attributes.
- bool next() {
- if (current_entry_ == nullptr || current_tag_ == nullptr) {
- return false;
- }
- bool reread_abbrev = false;
- current_entry_ += current_tag_->GetSize();
- if (reinterpret_cast<DebugInfoHeader*>(current_entry_) >= next_cu_) {
- current_cu_ = next_cu_;
- next_cu_ = GetNextCu(current_cu_);
- current_entry_ = reinterpret_cast<uint8_t*>(current_cu_) + sizeof(DebugInfoHeader);
- reread_abbrev = true;
- }
- if (current_entry_ >= last_entry_) {
- current_entry_ = nullptr;
- return false;
- }
- if (reread_abbrev) {
- abbrev_->ReadAtOffset(current_cu_->debug_abbrev_offset);
- }
- current_tag_ = abbrev_->ReadTag(current_entry_);
- if (current_tag_ == nullptr) {
- current_entry_ = nullptr;
- return false;
- } else {
- return true;
- }
- }
-
- const DebugTag* GetCurrentTag() {
- return const_cast<DebugTag*>(current_tag_);
- }
- uint8_t* GetPointerToField(uint8_t dwarf_field) {
- if (current_tag_ == nullptr || current_entry_ == nullptr || current_entry_ >= last_entry_) {
- return nullptr;
- }
- uint32_t off = current_tag_->GetOffsetOf(dwarf_field);
- if (off == 0) {
- // tag does not have that field.
- return nullptr;
- } else {
- DCHECK_LT(off, current_tag_->GetSize());
- return current_entry_ + off;
- }
- }
-
- private:
- static DebugInfoHeader* GetNextCu(DebugInfoHeader* hdr) {
- uint8_t* hdr_byte = reinterpret_cast<uint8_t*>(hdr);
- return reinterpret_cast<DebugInfoHeader*>(hdr_byte + sizeof(uint32_t) + hdr->unit_length);
- }
-
- DebugInfoIterator(DebugInfoHeader* header, size_t frame_size, DebugAbbrev* abbrev)
- : abbrev_(abbrev),
- current_cu_(header),
- next_cu_(GetNextCu(header)),
- last_entry_(reinterpret_cast<uint8_t*>(header) + frame_size),
- current_entry_(reinterpret_cast<uint8_t*>(header) + sizeof(DebugInfoHeader)),
- current_tag_(abbrev_->ReadTag(current_entry_)) {}
- DebugAbbrev* const abbrev_;
- DebugInfoHeader* current_cu_;
- DebugInfoHeader* next_cu_;
- uint8_t* const last_entry_;
- uint8_t* current_entry_;
- DebugTag* current_tag_;
-};
-
-template <typename Elf_SOff>
-static bool FixupDebugInfo(Elf_SOff base_address_delta, DebugInfoIterator* iter) {
- CHECK(IsInt<32>(base_address_delta));
- do {
- if (iter->GetCurrentTag()->GetAttrSize(dwarf::DW_AT_low_pc) != sizeof(int32_t) ||
- iter->GetCurrentTag()->GetAttrSize(dwarf::DW_AT_high_pc) != sizeof(int32_t)) {
- LOG(ERROR) << "DWARF information with 64 bit pointers is not supported yet.";
- return false;
- }
- uint32_t* PC_low = reinterpret_cast<uint32_t*>(iter->GetPointerToField(dwarf::DW_AT_low_pc));
- uint32_t* PC_high = reinterpret_cast<uint32_t*>(iter->GetPointerToField(dwarf::DW_AT_high_pc));
- if (PC_low != nullptr && PC_high != nullptr) {
- *PC_low += base_address_delta;
- *PC_high += base_address_delta;
- }
- } while (iter->next());
- return true;
-}
-
template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
@@ -2076,9 +1594,7 @@
::FixupDebugSections(typename std::make_signed<Elf_Off>::type base_address_delta) {
const Elf_Shdr* debug_info = FindSectionByName(".debug_info");
const Elf_Shdr* debug_abbrev = FindSectionByName(".debug_abbrev");
- const Elf_Shdr* eh_frame = FindSectionByName(".eh_frame");
const Elf_Shdr* debug_str = FindSectionByName(".debug_str");
- const Elf_Shdr* debug_line = FindSectionByName(".debug_line");
const Elf_Shdr* strtab_sec = FindSectionByName(".strtab");
const Elf_Shdr* symtab_sec = FindSectionByName(".symtab");
@@ -2090,37 +1606,82 @@
if (base_address_delta == 0) {
return true;
}
- if (eh_frame != nullptr &&
- !FixupEHFrame(base_address_delta, Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
+ if (!ApplyOatPatchesTo(".eh_frame", base_address_delta)) {
return false;
}
+ if (!ApplyOatPatchesTo(".debug_info", base_address_delta)) {
+ return false;
+ }
+ if (!ApplyOatPatchesTo(".debug_line", base_address_delta)) {
+ return false;
+ }
+ return true;
+}
- std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(Begin() + debug_abbrev->sh_offset,
- debug_abbrev->sh_size));
- if (abbrev.get() == nullptr) {
+template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
+ typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
+ typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
+bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+ Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
+ ::ApplyOatPatchesTo(const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type delta) {
+ auto patches_section = FindSectionByName(".oat_patches");
+ if (patches_section == nullptr) {
+ LOG(ERROR) << ".oat_patches section not found.";
return false;
}
- DebugInfoHeader* info_header =
- reinterpret_cast<DebugInfoHeader*>(Begin() + debug_info->sh_offset);
- std::unique_ptr<DebugInfoIterator> info_iter(DebugInfoIterator::Create(info_header,
- debug_info->sh_size,
- abbrev.get()));
- if (info_iter.get() == nullptr) {
+ if (patches_section->sh_type != SHT_OAT_PATCH) {
+ LOG(ERROR) << "Unexpected type of .oat_patches.";
return false;
}
- if (debug_line != nullptr) {
- DebugLineHeader* line_header =
- reinterpret_cast<DebugLineHeader*>(Begin() + debug_line->sh_offset);
- std::unique_ptr<DebugLineInstructionIterator> line_iter(
- DebugLineInstructionIterator::Create(line_header, debug_line->sh_size));
- if (line_iter.get() == nullptr) {
- return false;
- }
- if (!FixupDebugLine(base_address_delta, line_iter.get())) {
- return false;
- }
+ auto target_section = FindSectionByName(target_section_name);
+ if (target_section == nullptr) {
+ LOG(ERROR) << target_section_name << " section not found.";
+ return false;
}
- return FixupDebugInfo(base_address_delta, info_iter.get());
+ if (!ApplyOatPatches(
+ Begin() + patches_section->sh_offset,
+ Begin() + patches_section->sh_offset + patches_section->sh_size,
+ target_section_name, delta,
+ Begin() + target_section->sh_offset,
+ Begin() + target_section->sh_offset + target_section->sh_size)) {
+ LOG(ERROR) << target_section_name << " section not found in .oat_patches.";
+ }
+ return true;
+}
+
+// Apply .oat_patches to given section.
+template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
+ typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
+ typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
+bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
+ Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
+ ::ApplyOatPatches(const uint8_t* patches, const uint8_t* patches_end,
+ const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type delta,
+ uint8_t* to_patch, const uint8_t* to_patch_end) {
+ // Read null-terminated section name.
+ const char* section_name;
+ while ((section_name = reinterpret_cast<const char*>(patches))[0] != '\0') {
+ patches += strlen(section_name) + 1;
+ uint32_t length = DecodeUnsignedLeb128(&patches);
+ const uint8_t* next_section = patches + length;
+ // Is it the section we want to patch?
+ if (strcmp(section_name, target_section_name) == 0) {
+ // Read LEB128 encoded list of advances.
+ while (patches < next_section) {
+ DCHECK_LT(patches, patches_end) << "Unexpected end of .oat_patches.";
+ to_patch += DecodeUnsignedLeb128(&patches);
+ DCHECK_LT(to_patch, to_patch_end) << "Patch past the end of " << section_name;
+ // TODO: 32-bit vs 64-bit. What is the right type to use here?
+ auto* patch_loc = reinterpret_cast<typename std::make_signed<Elf_Off>::type*>(to_patch);
+ *patch_loc += delta;
+ }
+ return true;
+ }
+ patches = next_section;
+ }
+ return false;
}
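
The .oat_patches stream that ApplyOatPatches consumes is, per section: a NUL-terminated section name, a ULEB128 byte length, then a run of ULEB128 offset advances. Each advance moves a cursor through the target section, and the word at the cursor gets the load-bias delta added. A standalone sketch of a decoder for that layout (helper names are illustrative, and a 32-bit patch word is assumed):

    #include <cstdint>
    #include <cstring>

    // Illustrative stand-in for art's DecodeUnsignedLeb128.
    static uint32_t DecodeUleb128(const uint8_t** data) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*data)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }

    static bool ApplyPatchStream(const uint8_t* patches,
                                 const uint8_t* patches_end,
                                 const char* wanted, int32_t delta,
                                 uint8_t* target, const uint8_t* target_end) {
      while (patches < patches_end && *patches != '\0') {
        const char* name = reinterpret_cast<const char*>(patches);
        patches += strlen(name) + 1;                // NUL-terminated name
        uint32_t length = DecodeUleb128(&patches);  // byte length of entry
        const uint8_t* next_section = patches + length;
        if (strcmp(name, wanted) == 0) {
          while (patches < next_section) {
            target += DecodeUleb128(&patches);      // advance patch cursor
            if (target + sizeof(int32_t) > target_end) {
              return false;                         // patch past section end
            }
            int32_t word;
            memcpy(&word, target, sizeof(word));
            word += delta;                          // relocate by load bias
            memcpy(target, &word, sizeof(word));
          }
          return true;
        }
        patches = next_section;                     // not ours; skip it
      }
      return false;  // section not present in the patch stream
    }
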
template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 16d3857..383dc41 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -110,6 +110,12 @@
bool FixupSymbols(Elf_Addr base_address, bool dynamic);
bool FixupRelocations(Elf_Addr base_address);
bool FixupDebugSections(typename std::make_signed<Elf_Off>::type base_address_delta);
+ bool ApplyOatPatchesTo(const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type base_address_delta);
+ static bool ApplyOatPatches(const uint8_t* patches, const uint8_t* patches_end,
+ const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type delta,
+ uint8_t* to_patch, const uint8_t* to_patch_end);
bool Strip(std::string* error_msg);
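
The header threads the delta through as std::make_signed<Elf_Off>::type because a relocation can move the image down as well as up, while ELF offset types are unsigned. The mapping in isolation:

    #include <cstdint>
    #include <type_traits>

    using Elf32_Off = uint32_t;  // illustrative 32-bit ELF offset type
    using Delta32 = std::make_signed<Elf32_Off>::type;

    static_assert(std::is_same<Delta32, int32_t>::value,
                  "the signed counterpart carries negative load biases");
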
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 8a13d34..cbfba12 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -252,7 +252,7 @@
}
template<FindFieldType type, bool access_check>
-inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+inline ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
Thread* self, size_t expected_size) {
bool is_primitive;
bool is_set;
@@ -269,7 +269,7 @@
default: is_primitive = true; is_set = true; is_static = true; break;
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtField* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static);
+ ArtField* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static);
if (UNLIKELY(resolved_field == nullptr)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
@@ -324,7 +324,7 @@
// Explicit template declarations of FindFieldFromCode for all field access types.
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
-mirror::ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
+ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
mirror::ArtMethod* referrer, \
Thread* self, size_t expected_size) \
@@ -469,11 +469,11 @@
#undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL
// Fast path field resolution that can't initialize classes or throw exceptions.
-inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
+inline ArtField* FindFieldFast(uint32_t field_idx,
mirror::ArtMethod* referrer,
FindFieldType type, size_t expected_size) {
- mirror::ArtField* resolved_field =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
+ ArtField* resolved_field =
+ referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx, sizeof(void*));
if (UNLIKELY(resolved_field == nullptr)) {
return nullptr;
}
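
FindFieldFast is the no-throw, no-initialization cache probe; every quick entrypoint in the next file tries it first and only falls back to FindFieldFromCode, which may resolve, initialize, and throw. The two-tier shape, as a standalone sketch (the map stands in for the DexCache's resolved-field array):

    #include <cassert>
    #include <unordered_map>

    struct Field { int id; };

    std::unordered_map<unsigned, Field*> resolved_fields;  // DexCache stand-in

    // Fast path: a pure lookup that can neither throw nor initialize classes.
    Field* FindFieldFast(unsigned idx) {
      auto it = resolved_fields.find(idx);
      return it == resolved_fields.end() ? nullptr : it->second;
    }

    // Slow path: pretend to resolve, then populate the cache.
    Field* FindFieldFromCode(unsigned idx) {
      static Field storage[16];
      Field* f = &storage[idx % 16];
      f->id = static_cast<int>(idx);
      resolved_fields[idx] = f;
      return f;
    }

    int main() {
      assert(FindFieldFast(3) == nullptr);  // cold: fast path misses
      Field* f = FindFieldFromCode(3);      // slow path resolves and caches
      assert(FindFieldFast(3) == f);        // warm: fast path now hits
    }
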
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 70e2851..1d8df68 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -16,11 +16,11 @@
#include "entrypoints/entrypoint_utils.h"
+#include "art_field-inl.h"
#include "base/mutex.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 77eec46..8d419f8 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -30,14 +30,14 @@
namespace art {
namespace mirror {
- class Class;
class Array;
- class ArtField;
class ArtMethod;
+ class Class;
class Object;
class String;
} // namespace mirror
+class ArtField;
class ScopedObjectAccessAlreadyRunnable;
class Thread;
@@ -132,7 +132,7 @@
};
template<FindFieldType type, bool access_check>
-inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+inline ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
Thread* self, size_t expected_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -143,7 +143,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
-inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
+inline ArtField* FindFieldFast(uint32_t field_idx,
mirror::ArtMethod* referrer,
FindFieldType type, size_t expected_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 22bf939..b5a7c09 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -14,10 +14,10 @@
* limitations under the License.
*/
+#include "art_field-inl.h"
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -29,8 +29,7 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
- sizeof(int8_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
return field->GetByte(field->GetDeclaringClass());
}
@@ -45,8 +44,7 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
- sizeof(int8_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
return field->GetBoolean(field->GetDeclaringClass());
}
@@ -61,8 +59,7 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
- sizeof(int16_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
return field->GetShort(field->GetDeclaringClass());
}
@@ -78,8 +75,7 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
- sizeof(int16_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
return field->GetChar(field->GetDeclaringClass());
}
@@ -95,8 +91,7 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
- sizeof(int32_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
return field->Get32(field->GetDeclaringClass());
}
@@ -112,8 +107,7 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
- sizeof(int64_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
return field->Get64(field->GetDeclaringClass());
}
@@ -129,8 +123,8 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
return field->GetObj(field->GetDeclaringClass());
}
@@ -146,8 +140,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
- sizeof(int8_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetByte(obj);
}
@@ -167,8 +160,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
- sizeof(int8_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetBoolean(obj);
}
@@ -187,8 +179,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
- sizeof(int16_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetShort(obj);
}
@@ -208,8 +199,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
- sizeof(int16_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetChar(obj);
}
@@ -229,8 +219,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
- sizeof(int32_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
return field->Get32(obj);
}
@@ -250,8 +239,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
- sizeof(int64_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
return field->Get64(obj);
}
@@ -272,13 +260,13 @@
Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetObj(obj);
}
- field = FindFieldFromCode<InstanceObjectRead, true>(field_idx, referrer, self,
- sizeof(mirror::HeapReference<mirror::Object>));
+ field = FindFieldFromCode<InstanceObjectRead, true>(
+ field_idx, referrer, self, sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionForFieldAccess(field, true);
@@ -293,8 +281,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
- sizeof(int8_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -325,8 +312,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
- sizeof(int16_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -357,8 +343,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
- sizeof(int32_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
// Compiled code can't use transactional mode.
field->Set32<false>(field->GetDeclaringClass(), new_value);
@@ -377,8 +362,7 @@
uint64_t new_value, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
- sizeof(int64_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
// Compiled code can't use transactional mode.
field->Set64<false>(field->GetDeclaringClass(), new_value);
@@ -397,8 +381,8 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
if (LIKELY(!field->IsPrimitiveType())) {
// Compiled code can't use transactional mode.
@@ -420,8 +404,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int8_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -460,8 +443,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int16_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -501,8 +483,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int32_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
// Compiled code can't use transactional mode.
field->Set32<false>(obj, new_value);
@@ -530,8 +511,7 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
- sizeof(int64_t));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
// Compiled code can't use transactional mode.
field->Set64<false>(obj, new_value);
@@ -556,8 +536,8 @@
mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
+ ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
+ sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr && obj != nullptr)) {
// Compiled code can't use transactional mode.
field->SetObj<false>(obj, new_value);
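Note: all of the quick field entrypoints above share the same two-tier shape: a lock-free FindFieldFast lookup that only succeeds for already-resolved fields, followed by a FindFieldFromCode slow path that may resolve classes and throw. A minimal sketch of that shape using the names from this file (GenericGetByteInstance itself is hypothetical, not part of the change):

    extern "C" int8_t GenericGetByteInstance(uint32_t field_idx, mirror::Object* obj,
                                             mirror::ArtMethod* referrer, Thread* self)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      ScopedQuickEntrypointChecks sqec(self);
      // Fast path: field already resolved in the dex cache with the right access kind and size.
      ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
      if (LIKELY(field != nullptr && obj != nullptr)) {
        return field->GetByte(obj);
      }
      // Slow path: resolve the field; this may run class initialization and throw.
      field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
                                                             sizeof(int8_t));
      if (LIKELY(field != nullptr)) {
        if (UNLIKELY(obj == nullptr)) {
          ThrowNullPointerExceptionForFieldAccess(field, true);  // true: this was a read.
        } else {
          return field->GetByte(obj);
        }
      }
      return 0;  // An exception is pending.
    }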
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index c1276b5..e478d2a 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -68,7 +68,6 @@
PopLocalReferences(saved_local_ref_cookie, self);
}
-
extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked,
Thread* self) {
GoToRunnable(self);
@@ -76,38 +75,34 @@
PopLocalReferences(saved_local_ref_cookie, self);
}
-extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
- Thread* self) {
- GoToRunnable(self);
- mirror::Object* o = self->DecodeJObject(result); // Must decode before pop.
+// Common result handling for EndWithReference.
+static mirror::Object* JniMethodEndWithReferenceHandleResult(jobject result,
+ uint32_t saved_local_ref_cookie,
+ Thread* self)
+ NO_THREAD_SAFETY_ANALYSIS {
+ // Must decode before pop. The 'result' may not be valid in case of an exception, though.
+ mirror::Object* o = self->IsExceptionPending() ? nullptr : self->DecodeJObject(result);
PopLocalReferences(saved_local_ref_cookie, self);
// Process result.
if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
CheckReferenceResult(o, self);
}
VerifyObject(o);
return o;
}
+extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
+ Thread* self) {
+ GoToRunnable(self);
+ return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self);
+}
+
extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
uint32_t saved_local_ref_cookie,
jobject locked, Thread* self) {
GoToRunnable(self);
- UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- mirror::Object* o = self->DecodeJObject(result);
- PopLocalReferences(saved_local_ref_cookie, self);
- // Process result.
- if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- CheckReferenceResult(o, self);
- }
- VerifyObject(o);
- return o;
+ UnlockJniSynchronizedMethod(locked, self);
+ return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self);
}
} // namespace art
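The ordering constraint preserved by JniMethodEndWithReferenceHandleResult is the subtle part of this refactoring: the returned local reference must be decoded while its frame is still live, but only when no exception is pending, since a pending exception can leave 'result' invalid. In sketch form (names as in the helper above):

    mirror::Object* o = self->IsExceptionPending()
        ? nullptr                       // 'result' may be invalid once an exception is pending.
        : self->DecodeJObject(result);  // Decode while the local reference frame is still live...
    PopLocalReferences(saved_local_ref_cookie, self);  // ...then pop the frame that owned it.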
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 8351e22..2e813c8 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -88,7 +88,7 @@
// | LR |
// | X29 |
// | : |
- // | X20 |
+ // | X19 |
// | X7 |
// | : |
// | X1 |
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 20984fd..13fcdb3 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -35,6 +35,10 @@
CHECK_NE(bitmap_size, 0U);
}
+Bitmap::~Bitmap() {
+ // Destroys MemMap via std::unique_ptr<>.
+}
+
MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
const size_t bitmap_size = RoundUp(
RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index cf2c293..b294d49 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -103,6 +103,7 @@
static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
Bitmap(MemMap* mem_map, size_t bitmap_size);
+ ~Bitmap();
// Allocate the mem-map for a bitmap based on how many bits are required.
static MemMap* AllocateMemMap(const std::string& name, size_t num_bits);
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 83ad33e..b936d93 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -20,6 +20,7 @@
#include "atomic.h"
#include "base/logging.h"
#include "card_table.h"
+#include "mem_map.h"
#include "space_bitmap.h"
#include "utils.h"
@@ -223,6 +224,12 @@
return card_addr;
}
+inline bool CardTable::IsValidCard(const uint8_t* card_addr) const {
+ uint8_t* begin = mem_map_->Begin() + offset_;
+ uint8_t* end = mem_map_->End();
+ return card_addr >= begin && card_addr < end;
+}
+
inline void CardTable::CheckCardValid(uint8_t* card) const {
DCHECK(IsValidCard(card))
<< " card_addr: " << reinterpret_cast<const void*>(card)
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index ad1f192..7879632 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -21,6 +21,7 @@
#include "gc/heap.h"
#include "gc/space/space.h"
#include "heap_bitmap.h"
+#include "mem_map.h"
#include "runtime.h"
#include "utils.h"
@@ -90,6 +91,10 @@
: mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
}
+CardTable::~CardTable() {
+ // Destroys MemMap via std::unique_ptr<>.
+}
+
void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
// TODO: clear just the range of the table that has been modified
uint8_t* card_start = CardFromAddr(space->Begin());
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 3ea7651..896cce5 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -21,10 +21,11 @@
#include "base/mutex.h"
#include "globals.h"
-#include "mem_map.h"
namespace art {
+class MemMap;
+
namespace mirror {
class Object;
} // namespace mirror
@@ -52,6 +53,7 @@
static constexpr uint8_t kCardDirty = 0x70;
static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
+ ~CardTable();
// Set the card associated with the given address to GC_CARD_DIRTY.
ALWAYS_INLINE void MarkCard(const void *addr) {
@@ -130,11 +132,7 @@
CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
// Returns true iff the card table address is within the bounds of the card table.
- bool IsValidCard(const uint8_t* card_addr) const {
- uint8_t* begin = mem_map_->Begin() + offset_;
- uint8_t* end = mem_map_->End();
- return card_addr >= begin && card_addr < end;
- }
+ bool IsValidCard(const uint8_t* card_addr) const ALWAYS_INLINE;
void CheckCardValid(uint8_t* card) const ALWAYS_INLINE;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index a3fac58..cd3f910 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -28,7 +28,6 @@
#include "gc/heap.h"
#include "gc/space/space.h"
#include "gc/space/image_space.h"
-#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 94bb3f5..043b558 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -16,6 +16,7 @@
#include "mod_union_table-inl.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "gc/space/space-inl.h"
#include "mirror/array-inl.h"
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index b16a146..eeb385e 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -26,7 +26,6 @@
#include "gc/collector/semi_space.h"
#include "gc/heap.h"
#include "gc/space/space.h"
-#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index ad8d988..2da8325 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -16,12 +16,12 @@
#include "space_bitmap-inl.h"
+#include "art_field-inl.h"
#include "base/stringprintf.h"
#include "dex_file-inl.h"
#include "mem_map.h"
#include "mirror/object-inl.h"
#include "mirror/class.h"
-#include "mirror/art_field.h"
#include "mirror/object_array.h"
namespace art {
@@ -190,15 +190,13 @@
WalkInstanceFields(visited, callback, obj, super, arg);
}
// Walk instance fields
- mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
- if (fields != NULL) {
- for (int32_t i = 0; i < fields->GetLength(); i++) {
- mirror::ArtField* field = fields->Get(i);
- if (!field->IsPrimitiveType()) {
- mirror::Object* value = field->GetObj(obj);
- if (value != NULL) {
- WalkFieldsInOrder(visited, callback, value, arg);
- }
+ auto* fields = klass->GetIFields();
+ for (size_t i = 0, count = klass->NumInstanceFields(); i < count; ++i) {
+ ArtField* field = &fields[i];
+ if (!field->IsPrimitiveType()) {
+ mirror::Object* value = field->GetObj(obj);
+ if (value != nullptr) {
+ WalkFieldsInOrder(visited, callback, value, arg);
}
}
}
@@ -219,15 +217,13 @@
WalkInstanceFields(visited, callback, obj, klass, arg);
// Walk static fields of a Class
if (obj->IsClass()) {
- mirror::ObjectArray<mirror::ArtField>* fields = klass->GetSFields();
- if (fields != NULL) {
- for (int32_t i = 0; i < fields->GetLength(); i++) {
- mirror::ArtField* field = fields->Get(i);
- if (!field->IsPrimitiveType()) {
- mirror::Object* value = field->GetObj(NULL);
- if (value != NULL) {
- WalkFieldsInOrder(visited, callback, value, arg);
- }
+ auto* sfields = klass->GetSFields();
+ for (size_t i = 0, count = klass->NumStaticFields(); i < count; ++i) {
+ ArtField* field = &sfields[i];
+ if (!field->IsPrimitiveType()) {
+ mirror::Object* value = field->GetObj(nullptr);
+ if (value != nullptr) {
+ WalkFieldsInOrder(visited, callback, value, arg);
}
}
}
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f64a4ff..515f124 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -18,6 +18,7 @@
#include "base/mutex-inl.h"
#include "gc/space/valgrind_settings.h"
+#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index d1e7ad9..a54edcc 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -30,11 +30,13 @@
#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
-#include "mem_map.h"
#include "thread.h"
#include "utils.h"
namespace art {
+
+class MemMap;
+
namespace gc {
namespace allocator {
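bitmap.h, card_table.h, and rosalloc.h all apply the same decoupling here: forward-declare MemMap, move the mem_map.h include into the .cc/-inl.h, and add an out-of-line destructor so std::unique_ptr<MemMap> is destroyed where MemMap is a complete type. A self-contained sketch of the idiom (Holder is a hypothetical stand-in):

    // holder.h -- no mem_map.h include required.
    #include <memory>
    namespace art {
    class MemMap;  // A forward declaration suffices for smart-pointer members.
    class Holder {
     public:
      explicit Holder(MemMap* mem_map);
      ~Holder();  // Must be out-of-line: ~unique_ptr<MemMap> needs the complete type.
     private:
      std::unique_ptr<MemMap> mem_map_;
    };
    }  // namespace art

    // holder.cc
    #include "mem_map.h"  // The full definition is available here.
    namespace art {
    Holder::Holder(MemMap* mem_map) : mem_map_(mem_map) {}
    Holder::~Holder() {
      // Destroys MemMap via std::unique_ptr<>, mirroring Bitmap::~Bitmap() above.
    }
    }  // namespace art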
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 6a68880..eabb1c2 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -16,12 +16,12 @@
#include "concurrent_copying.h"
+#include "art_field-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
-#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 8902df8..3c247cd 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -35,8 +35,6 @@
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
-#include "mirror/art_field.h"
-#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 104ed36..4e3845e 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -17,10 +17,9 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_INL_H_
-#include "gc/collector/mark_sweep.h"
+#include "mark_sweep.h"
#include "gc/heap.h"
-#include "mirror/art_field.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference.h"
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 79d1034..bb8d876 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -39,7 +39,6 @@
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
-#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -491,29 +490,21 @@
class VerifyRootVisitor : public SingleRootVisitor {
public:
- explicit VerifyRootVisitor(MarkSweep* collector) : collector_(collector) { }
-
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- collector_->VerifyRoot(root, info);
- }
-
- private:
- MarkSweep* const collector_;
-};
-
-void MarkSweep::VerifyRoot(const Object* root, const RootInfo& root_info) {
- // See if the root is on any space bitmap.
- if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- if (large_object_space != nullptr && !large_object_space->Contains(root)) {
- LOG(ERROR) << "Found invalid root: " << root << " " << root_info;
+ // See if the root is on any space bitmap.
+ auto* heap = Runtime::Current()->GetHeap();
+ if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
+ space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
+ if (large_object_space != nullptr && !large_object_space->Contains(root)) {
+ LOG(ERROR) << "Found invalid root: " << root << " " << info;
+ }
}
}
-}
+};
void MarkSweep::VerifyRoots() {
- VerifyRootVisitor visitor(this);
+ VerifyRootVisitor visitor;
Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 31cea17..fad3403 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -248,9 +248,6 @@
// whether or not we care about pauses.
size_t GetThreadCount(bool paused) const;
- void VerifyRoot(const mirror::Object* root, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
// Push a single reference on a mark stack.
void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b9153c1..beaf067 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -23,6 +23,7 @@
#include <memory>
#include <vector>
+#include "art_field-inl.h"
#include "base/allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
@@ -58,7 +59,6 @@
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
-#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
@@ -195,7 +195,17 @@
last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
pending_collector_transition_(nullptr),
pending_heap_trim_(nullptr),
- use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
+ use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
+ running_collection_is_blocking_(false),
+ blocking_gc_count_(0U),
+ blocking_gc_time_(0U),
+ last_update_time_gc_count_rate_histograms_( // Round down by the window duration.
+ (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
+ gc_count_last_window_(0U),
+ blocking_gc_count_last_window_(0U),
+ gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
+ blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
+ kGcCountRateMaxBucketCount) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -233,7 +243,7 @@
CHECK_GT(oat_file_end_addr, image_space->End());
requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
} else {
-      LOG(WARNING) << "Could not create image space with image file '" << image_file_name << "'. "
-                   << "Attempting to fall back to imageless running. Error was: " << error_msg;
+      LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
+                 << "Attempting to fall back to imageless running. Error was: " << error_msg;
}
}
@@ -482,7 +492,7 @@
non_moving_space_->GetMemMap());
if (!no_gap) {
MemMap::DumpMaps(LOG(ERROR));
- LOG(FATAL) << "There's a gap between the image space and the main space";
+ LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
if (running_on_valgrind_) {
@@ -926,7 +936,6 @@
total_duration += collector->GetCumulativeTimings().GetTotalNs();
total_paused_time += collector->GetTotalPausedTimeNs();
collector->DumpPerformanceInfo(os);
- collector->ResetMeasurements();
}
uint64_t allocation_time =
static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
@@ -940,8 +949,8 @@
}
uint64_t total_objects_allocated = GetObjectsAllocatedEver();
os << "Total number of allocations " << total_objects_allocated << "\n";
- uint64_t total_bytes_allocated = GetBytesAllocatedEver();
- os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
+ os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
+ os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
@@ -956,10 +965,68 @@
os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
}
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
- os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_);
+ os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
+ os << "Total GC count: " << GetGcCount() << "\n";
+ os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
+ os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
+ os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
+
+ {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (gc_count_rate_histogram_.SampleSize() > 0U) {
+ os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+ gc_count_rate_histogram_.DumpBins(os);
+ os << "\n";
+ }
+ if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+ os << "Histogram of blocking GC count per "
+ << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+ blocking_gc_count_rate_histogram_.DumpBins(os);
+ os << "\n";
+ }
+ }
+
BaseMutex::DumpAll(os);
}
+uint64_t Heap::GetGcCount() const {
+ uint64_t gc_count = 0U;
+ for (auto& collector : garbage_collectors_) {
+ gc_count += collector->GetCumulativeTimings().GetIterations();
+ }
+ return gc_count;
+}
+
+uint64_t Heap::GetGcTime() const {
+ uint64_t gc_time = 0U;
+ for (auto& collector : garbage_collectors_) {
+ gc_time += collector->GetCumulativeTimings().GetTotalNs();
+ }
+ return gc_time;
+}
+
+uint64_t Heap::GetBlockingGcCount() const {
+ return blocking_gc_count_;
+}
+
+uint64_t Heap::GetBlockingGcTime() const {
+ return blocking_gc_time_;
+}
+
+void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (gc_count_rate_histogram_.SampleSize() > 0U) {
+ gc_count_rate_histogram_.DumpBins(os);
+ }
+}
+
+void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+ blocking_gc_count_rate_histogram_.DumpBins(os);
+ }
+}
+
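Per the heap.h comment later in this diff, these accessors exist to back android.os.Debug.getRuntimeStat(). A hypothetical consumer (the real JNI glue is not part of this change):

    gc::Heap* heap = Runtime::Current()->GetHeap();
    std::ostringstream oss;
    oss << "GC count: " << heap->GetGcCount()
        << " (blocking: " << heap->GetBlockingGcCount() << "), "
        << "GC time: " << PrettyDuration(heap->GetGcTime())
        << " (blocking: " << PrettyDuration(heap->GetBlockingGcTime()) << ")\n";
    heap->DumpGcCountRateHistogram(oss);          // Bin counts per 10s window.
    heap->DumpBlockingGcCountRateHistogram(oss);  // Likewise, blocking GCs only.
    LOG(INFO) << oss.str();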
Heap::~Heap() {
VLOG(heap) << "Starting ~Heap()";
STLDeleteElements(&garbage_collectors_);
@@ -2274,7 +2341,6 @@
}
collector_type_running_ = collector_type_;
}
-
if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
++runtime->GetStats()->gc_for_alloc_count;
++self->GetStats()->gc_for_alloc_count;
@@ -2389,11 +2455,55 @@
collector_type_running_ = kCollectorTypeNone;
if (gc_type != collector::kGcTypeNone) {
last_gc_type_ = gc_type;
+
+ // Update stats.
+ ++gc_count_last_window_;
+ if (running_collection_is_blocking_) {
+ // If the currently running collection was a blocking one,
+ // increment the counters and reset the flag.
+ ++blocking_gc_count_;
+ blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
+ ++blocking_gc_count_last_window_;
+ }
+ // Update the gc count rate histograms if due.
+ UpdateGcCountRateHistograms();
}
+ // Reset.
+ running_collection_is_blocking_ = false;
// Wake anyone who may have been waiting for the GC to complete.
gc_complete_cond_->Broadcast(self);
}
+void Heap::UpdateGcCountRateHistograms() {
+  // Invariant: if the time since the last update spans more than one
+  // window, all the GC runs (if > 0) must have happened in the first
+  // window, because otherwise the update would already have taken place
+  // at an earlier GC run. So we report the non-first windows with zero
+  // counts to the histograms.
+ DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+ uint64_t now = NanoTime();
+ DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
+ uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
+ uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
+ if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
+ // Record the first window.
+ gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
+ blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
+ blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
+ // Record the other windows (with zero counts).
+ for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
+ gc_count_rate_histogram_.AddValue(0);
+ blocking_gc_count_rate_histogram_.AddValue(0);
+ }
+ // Update the last update time and reset the counters.
+ last_update_time_gc_count_rate_histograms_ =
+ (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
+ gc_count_last_window_ = 1; // Include the current run.
+ blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
+ }
+ DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+}
+
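A worked trace of the windowing logic above, with hypothetical timestamps and the 10s window:

    //   last_update_time = 0s; two GCs finish inside [0s, 10s); a third finishes at 27s.
    //   When the third run reaches this function, gc_count_last_window_ == 3 (it was
    //   already incremented for the current run by the caller):
    //     time_since_last_update = 27s  =>  num_of_windows = 2
    //     gc_count_rate_histogram_.AddValue(3 - 1);  // window [0s, 10s): the two earlier runs
    //     gc_count_rate_histogram_.AddValue(0);      // window [10s, 20s): num_of_windows - 1 zeros
    //     last_update_time = (27s / 10s) * 10s = 20s
    //     gc_count_last_window_ = 1;                 // the current run counts toward [20s, 30s)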
class RootMatchesObjectVisitor : public SingleRootVisitor {
public:
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
@@ -2708,12 +2818,12 @@
// Print which field of the object is dead.
if (!obj->IsObjectArray()) {
mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
- CHECK(klass != NULL);
- mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
- : klass->GetIFields();
- CHECK(fields != NULL);
- for (int32_t i = 0; i < fields->GetLength(); ++i) {
- mirror::ArtField* cur = fields->Get(i);
+ CHECK(klass != nullptr);
+ auto* fields = is_static ? klass->GetSFields() : klass->GetIFields();
+ auto num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
+ CHECK_EQ(fields == nullptr, num_fields == 0u);
+ for (size_t i = 0; i < num_fields; ++i) {
+ ArtField* cur = &fields[i];
if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
<< PrettyField(cur);
@@ -3003,6 +3113,14 @@
collector::GcType last_gc_type = collector::kGcTypeNone;
uint64_t wait_start = NanoTime();
while (collector_type_running_ != kCollectorTypeNone) {
+ if (self != task_processor_->GetRunningThread()) {
+ // The current thread is about to wait for a currently running
+ // collection to finish. If the waiting thread is not the heap
+ // task daemon thread, the currently running collection is
+      // considered a blocking GC.
+ running_collection_is_blocking_ = true;
+ VLOG(gc) << "Waiting for a blocking GC " << cause;
+ }
ATRACE_BEGIN("GC: Wait For Completion");
// We must wait, change thread state then sleep on gc_complete_cond_;
gc_complete_cond_->Wait(self);
@@ -3015,6 +3133,13 @@
LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
<< " for cause " << cause;
}
+ if (self != task_processor_->GetRunningThread()) {
+ // The current thread is about to run a collection. If the thread
+      // is not the heap task daemon thread, it is considered a
+      // blocking GC (i.e., it blocks itself).
+ running_collection_is_blocking_ = true;
+ VLOG(gc) << "Starting a blocking GC " << cause;
+ }
return last_gc_type;
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 603cbfd..2f62798 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -672,6 +672,14 @@
min_interval_homogeneous_space_compaction_by_oom_ = interval;
}
+ // Helpers for android.os.Debug.getRuntimeStat().
+ uint64_t GetGcCount() const;
+ uint64_t GetGcTime() const;
+ uint64_t GetBlockingGcCount() const;
+ uint64_t GetBlockingGcTime() const;
+ void DumpGcCountRateHistogram(std::ostream& os) const;
+ void DumpBlockingGcCountRateHistogram(std::ostream& os) const;
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -873,6 +881,8 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
@@ -1156,6 +1166,28 @@
// Whether or not we use homogeneous space compaction to avoid OOM errors.
bool use_homogeneous_space_compaction_for_oom_;
+ // True if the currently running collection has made some thread wait.
+ bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
+ // The number of blocking GC runs.
+ uint64_t blocking_gc_count_;
+ // The total duration of blocking GC runs.
+ uint64_t blocking_gc_time_;
+ // The duration of the window for the GC count rate histograms.
+ static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000); // 10s.
+  // The last time the GC count rate histograms were updated.
+  // This is rounded down to a multiple of kGcCountRateHistogramWindowDuration (10s).
+ uint64_t last_update_time_gc_count_rate_histograms_;
+ // The running count of GC runs in the last window.
+ uint64_t gc_count_last_window_;
+ // The running count of blocking GC runs in the last window.
+ uint64_t blocking_gc_count_last_window_;
+ // The maximum number of buckets in the GC count rate histograms.
+ static constexpr size_t kGcCountRateMaxBucketCount = 200;
+ // The histogram of the number of GC invocations per window duration.
+ Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+ // The histogram of the number of blocking GC invocations per window duration.
+ Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::MarkCompact;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 14d78d8..a3cefd9 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 1fb3252..e28e8d7 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -43,8 +43,9 @@
Atomic<uint32_t> ImageSpace::bitmap_index_(0);
ImageSpace::ImageSpace(const std::string& image_filename, const char* image_location,
- MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap)
- : MemMapSpace(image_filename, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap,
+ uint8_t* end)
+ : MemMapSpace(image_filename, mem_map, mem_map->Begin(), end, end,
kGcRetentionPolicyNeverCollect),
image_location_(image_location) {
DCHECK(live_bitmap != nullptr);
@@ -642,10 +643,10 @@
void ImageSpace::VerifyImageAllocations() {
uint8_t* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
while (current < End()) {
- DCHECK_ALIGNED(current, kObjectAlignment);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(current);
- CHECK(live_bitmap_->Test(obj));
+ CHECK_ALIGNED(current, kObjectAlignment);
+ auto* obj = reinterpret_cast<mirror::Object*>(current);
CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
+ CHECK(live_bitmap_->Test(obj)) << PrettyTypeOf(obj);
if (kUseBakerOrBrooksReadBarrier) {
obj->AssertReadBarrierPointer();
}
@@ -675,7 +676,6 @@
*error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
return nullptr;
}
-
// Check that the file is large enough.
uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
if (image_header.GetImageSize() > image_file_size) {
@@ -683,23 +683,18 @@
image_file_size, image_header.GetImageSize());
return nullptr;
}
- if (image_header.GetBitmapOffset() + image_header.GetImageBitmapSize() != image_file_size) {
- *error_msg = StringPrintf("Image file too small for image bitmap: %" PRIu64 " vs. %zu.",
- image_file_size,
- image_header.GetBitmapOffset() + image_header.GetImageBitmapSize());
+ auto end_of_bitmap = image_header.GetImageBitmapOffset() + image_header.GetImageBitmapSize();
+ if (end_of_bitmap != image_file_size) {
+ *error_msg = StringPrintf(
+ "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.", image_file_size,
+ end_of_bitmap);
return nullptr;
}
// Note: The image header is part of the image due to mmap page alignment required of offset.
- std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
- image_header.GetImageSize(),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- false,
- image_filename,
- error_msg));
+ std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
+ image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(),
+ PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
if (map.get() == NULL) {
DCHECK(!error_msg->empty());
return nullptr;
@@ -710,7 +705,7 @@
std::unique_ptr<MemMap> image_map(
MemMap::MapFileAtAddress(nullptr, image_header.GetImageBitmapSize(),
PROT_READ, MAP_PRIVATE,
- file->Fd(), image_header.GetBitmapOffset(),
+ file->Fd(), image_header.GetImageBitmapOffset(),
false,
image_filename,
error_msg));
@@ -730,8 +725,9 @@
return nullptr;
}
+ uint8_t* const image_end = map->Begin() + image_header.GetImageSize();
std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename, image_location,
- map.release(), bitmap.release()));
+ map.release(), bitmap.release(), image_end));
// VerifyImageAllocations() will be called later in Runtime::Init()
// as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
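Taken together with the enlarged mapping above (image size plus ArtField size) and the new end parameter, the change implies this image file layout (a sketch, not authoritative):

    //   [ ImageHeader | mirror objects )   <- image_size_; becomes the space's End()
    //   [ ArtField array )                 <- art_fields_offset_ / art_fields_size_
    //   [ live bitmap )                    <- image_bitmap_offset_ / image_bitmap_size_
    // The mmap covers image_size_ + art_fields_size_, while End() stops at image_size_,
    // so heap walks such as VerifyImageAllocations() never treat ArtFields as objects.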
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index d7f8057..9ae2af4 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -145,7 +145,7 @@
std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
ImageSpace(const std::string& name, const char* image_location,
- MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap);
+ MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap, uint8_t* end);
// The OatFile associated with the image during early startup to
// reserve space contiguous to the image. It is later released to
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 5c8e4b9..a4a9d80 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -37,6 +37,15 @@
explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
}
+ ~ValgrindLargeObjectMapSpace() OVERRIDE {
+    // Keep valgrind happy if there are any large objects, such as dex cache arrays, which aren't
+    // freed since they are held live by the class linker.
+ MutexLock mu(Thread::Current(), lock_);
+ for (auto& m : mem_maps_) {
+ delete m.second;
+ }
+ }
+
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE {
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index 2ca4b3f..ef34c68 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -22,7 +22,8 @@
namespace gc {
TaskProcessor::TaskProcessor()
- : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false) {
+ : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false),
+ running_thread_(nullptr) {
// Piggyback off the reference processor lock level.
cond_.reset(new ConditionVariable("Task processor condition", *lock_));
}
@@ -96,15 +97,22 @@
return is_running_;
}
+Thread* TaskProcessor::GetRunningThread() const {
+ MutexLock mu(Thread::Current(), *lock_);
+ return running_thread_;
+}
+
void TaskProcessor::Stop(Thread* self) {
MutexLock mu(self, *lock_);
is_running_ = false;
+ running_thread_ = nullptr;
cond_->Broadcast(self);
}
void TaskProcessor::Start(Thread* self) {
MutexLock mu(self, *lock_);
is_running_ = true;
+ running_thread_ = self;
}
void TaskProcessor::RunAllTasks(Thread* self) {
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 765f035..67e3a54 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -63,6 +63,7 @@
bool IsRunning() const LOCKS_EXCLUDED(lock_);
void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time)
LOCKS_EXCLUDED(lock_);
+ Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_);
private:
class CompareByTargetRunTime {
@@ -76,6 +77,7 @@
bool is_running_ GUARDED_BY(lock_);
std::unique_ptr<ConditionVariable> cond_ GUARDED_BY(lock_);
std::multiset<HeapTask*, CompareByTargetRunTime> tasks_ GUARDED_BY(lock_);
+ Thread* running_thread_ GUARDED_BY(lock_);
};
} // namespace gc
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0d3c93b..b67e9c2 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -50,7 +50,7 @@
};
std::ostream& operator<<(std::ostream& os, const RootType& root_type);
-// Only used by hprof. tid and root_type are only used by hprof.
+// Only used by hprof. thread_id_ and type_ are only used by hprof.
class RootInfo {
public:
// Thread id 0 is for non thread roots.
@@ -85,12 +85,13 @@
public:
virtual ~RootVisitor() { }
- // Single root versions, not overridable.
+ // Single root version, not overridable.
ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
VisitRoots(&roots, 1, info);
}
+ // Single root version, not overridable.
ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (*roots != nullptr) {
@@ -161,6 +162,9 @@
ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+  // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
+ // CompressedReference<mirror::Object> here since it violates strict aliasing requirements to
+ // cast CompressedReference<MirrorType>* to CompressedReference<mirror::Object>*.
mutable mirror::CompressedReference<mirror::Object> root_;
template <size_t kBufferSize> friend class BufferedRootVisitor;
diff --git a/runtime/globals.h b/runtime/globals.h
index ac8751c..4d7fd2e 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -71,8 +71,6 @@
static constexpr bool kMoveFieldArrays = !kMarkCompactSupport;
// True if we allow moving classes.
static constexpr bool kMovingClasses = !kMarkCompactSupport;
-// True if we allow moving fields.
-static constexpr bool kMovingFields = false;
// True if we allow moving methods.
static constexpr bool kMovingMethods = false;
diff --git a/runtime/handle.h b/runtime/handle.h
index 3ebb2d5..d94d875 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -70,8 +70,8 @@
return reinterpret_cast<jobject>(reference_);
}
- StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
+ ALWAYS_INLINE StackReference<mirror::Object>* GetReference()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reference_;
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index cdb3e2a..23af25d 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -37,6 +37,7 @@
#include <set>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "base/unix_file/fd_file.h"
@@ -51,7 +52,6 @@
#include "globals.h"
#include "jdwp/jdwp.h"
#include "jdwp/jdwp_priv.h"
-#include "mirror/art_field-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -999,7 +999,7 @@
__ AddClassStaticsId(klass);
for (size_t i = 0; i < sFieldCount; ++i) {
- mirror::ArtField* f = klass->GetStaticField(i);
+ ArtField* f = klass->GetStaticField(i);
size_t size;
HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
@@ -1038,7 +1038,7 @@
int iFieldCount = klass->IsObjectClass() ? 0 : klass->NumInstanceFields();
__ AddU2((uint16_t)iFieldCount);
for (int i = 0; i < iFieldCount; ++i) {
- mirror::ArtField* f = klass->GetInstanceField(i);
+ ArtField* f = klass->GetInstanceField(i);
__ AddStringId(LookupStringId(f->GetName()));
HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), nullptr);
__ AddU1(t);
@@ -1102,7 +1102,7 @@
while (!klass->IsObjectClass()) {
int ifieldCount = klass->NumInstanceFields();
for (int i = 0; i < ifieldCount; ++i) {
- mirror::ArtField* f = klass->GetInstanceField(i);
+ ArtField* f = klass->GetInstanceField(i);
size_t size;
auto t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
switch (t) {
diff --git a/runtime/image.cc b/runtime/image.cc
index 3cb2580..2d8c1c4 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,10 +24,12 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '4', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '5', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
+ uint32_t art_fields_offset,
+ uint32_t art_fields_size,
uint32_t image_bitmap_offset,
uint32_t image_bitmap_size,
uint32_t image_roots,
@@ -39,6 +41,8 @@
bool compile_pic)
: image_begin_(image_begin),
image_size_(image_size),
+ art_fields_offset_(art_fields_offset),
+ art_fields_size_(art_fields_size),
image_bitmap_offset_(image_bitmap_offset),
image_bitmap_size_(image_bitmap_size),
oat_checksum_(oat_checksum),
diff --git a/runtime/image.h b/runtime/image.h
index 3c527b8..613414a 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -32,6 +32,8 @@
ImageHeader(uint32_t image_begin,
uint32_t image_size_,
+ uint32_t art_fields_offset,
+ uint32_t art_fields_size,
uint32_t image_bitmap_offset,
uint32_t image_bitmap_size,
uint32_t image_roots,
@@ -53,6 +55,14 @@
return static_cast<uint32_t>(image_size_);
}
+ size_t GetArtFieldsOffset() const {
+ return art_fields_offset_;
+ }
+
+ size_t GetArtFieldsSize() const {
+ return art_fields_size_;
+ }
+
size_t GetImageBitmapOffset() const {
return image_bitmap_offset_;
}
@@ -89,10 +99,6 @@
return patch_delta_;
}
- size_t GetBitmapOffset() const {
- return RoundUp(image_size_, kPageSize);
- }
-
static std::string GetOatLocationFromImageLocation(const std::string& image) {
std::string oat_filename = image;
if (oat_filename.length() <= 3) {
@@ -140,6 +146,12 @@
// Image size, not page aligned.
uint32_t image_size_;
+ // ArtField array offset.
+ uint32_t art_fields_offset_;
+
+ // ArtField size in bytes.
+ uint32_t art_fields_size_;
+
// Image bitmap offset in the file.
uint32_t image_bitmap_offset_;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 5012965..d6f9682 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -258,7 +258,10 @@
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
for (auto ref : *this) {
- root_visitor.VisitRootIfNonNull(*ref);
+ if (!ref->IsNull()) {
+ root_visitor.VisitRoot(*ref);
+ DCHECK(!ref->IsNull());
+ }
}
}
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index 1156cf5..fe1b8f0 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -16,6 +16,7 @@
#include "indirect_reference_table-inl.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 680b563..51600f7 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -50,11 +50,6 @@
const bool kVerboseInstrumentation = false;
-// Do we want to deoptimize for method entry and exit listeners or just try to intercept
-// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
-// application's performance.
-static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;
-
static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
@@ -846,8 +841,7 @@
ConfigureStubs(false, false);
}
-void Instrumentation::EnableMethodTracing() {
- bool require_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners;
+void Instrumentation::EnableMethodTracing(bool require_interpreter) {
ConfigureStubs(!require_interpreter, require_interpreter);
}
@@ -929,7 +923,7 @@
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
- mirror::ArtField* field) const {
+ ArtField* field) const {
if (HasFieldReadListeners()) {
std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_);
for (InstrumentationListener* listener : *original.get()) {
@@ -940,7 +934,7 @@
void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
- mirror::ArtField* field, const JValue& field_value) const {
+ ArtField* field, const JValue& field_value) const {
if (HasFieldWriteListeners()) {
std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_);
for (InstrumentationListener* listener : *original.get()) {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 77314c60..8b7fcca 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -30,12 +30,12 @@
namespace art {
namespace mirror {
- class ArtField;
class ArtMethod;
class Class;
class Object;
class Throwable;
} // namespace mirror
+class ArtField;
union JValue;
class Thread;
@@ -49,6 +49,11 @@
kNumHandlerTables
};
+// Do we want to deoptimize for method entry and exit listeners or just try to intercept
+// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
+// application's performance.
+static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;
+
// Instrumentation event listener API. Registered listeners will get the appropriate call back for
// the events they are listening for. The call backs supply the thread, method and dex_pc the event
// occurred upon. The thread may or may not be Thread::Current().
@@ -82,11 +87,11 @@
// Call-back for when we read from a field.
virtual void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, mirror::ArtField* field) = 0;
+ uint32_t dex_pc, ArtField* field) = 0;
// Call-back for when we write into a field.
virtual void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value) = 0;
+ uint32_t dex_pc, ArtField* field, const JValue& field_value) = 0;
// Call-back when an exception is caught.
virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
@@ -170,7 +175,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs.
- void EnableMethodTracing()
+ void EnableMethodTracing(
+ bool require_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
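With the defaulted parameter, existing callers keep the deoptimizing behavior while new callers can opt out. A sketch of both call forms (assuming an Instrumentation* is at hand):

    Instrumentation* instr = Runtime::Current()->GetInstrumentation();
    instr->EnableMethodTracing();       // Default: deoptimize for interpreter-accurate events.
    instr->EnableMethodTracing(false);  // Instrumentation stubs only: faster, intercept-based.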
@@ -301,7 +307,7 @@
// Inform listeners that we read a field (only supported by the interpreter).
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
- mirror::ArtField* field) const
+ ArtField* field) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldReadListeners())) {
FieldReadEventImpl(thread, this_object, method, dex_pc, field);
@@ -311,7 +317,7 @@
// Inform listeners that we write a field (only supported by the interpreter).
void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
- mirror::ArtField* field, const JValue& field_value) const
+ ArtField* field, const JValue& field_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldWriteListeners())) {
FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
@@ -377,11 +383,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
- mirror::ArtField* field) const
+ ArtField* field) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
- mirror::ArtField* field, const JValue& field_value) const
+ ArtField* field, const JValue& field_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Read barrier-aware utility functions for accessing deoptimized_methods_
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 375d644..3ae611b 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -281,11 +281,10 @@
// object in the destructor.
Class* field_class;
{
- StackHandleScope<3> hs(self);
- HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
+ StackHandleScope<2> hs(self);
HandleWrapper<mirror::Object> h_reg(hs.NewHandleWrapper(®));
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
- field_class = h_f->GetType<true>();
+ field_class = f->GetType<true>();
}
if (!reg->VerifierInstanceOf(field_class)) {
// This should never happen.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 2f8bf55..0e0d56a 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -24,6 +24,7 @@
#include <iostream>
#include <sstream>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker-inl.h"
@@ -32,7 +33,6 @@
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -41,7 +41,6 @@
#include "thread.h"
#include "well_known_classes.h"
-using ::art::mirror::ArtField;
using ::art::mirror::ArtMethod;
using ::art::mirror::Array;
using ::art::mirror::BooleanArray;
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 9af8102..61def35 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -39,6 +39,7 @@
#include "thread.h"
#include "transaction.h"
#include "well_known_classes.h"
+#include "zip_archive.h"
namespace art {
namespace interpreter {
@@ -109,10 +110,23 @@
}
}
+static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
+ if (param == nullptr) {
+ AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
+ return nullptr;
+ }
+ return param->AsString();
+}
+
static void UnstartedClassForName(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
+ if (class_name == nullptr) {
+ return;
+ }
StackHandleScope<1> hs(self);
Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result,
@@ -123,7 +137,10 @@
static void UnstartedClassForNameLong(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
+ if (class_name == nullptr) {
+ return;
+ }
bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
mirror::ClassLoader* class_loader =
down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
@@ -138,7 +155,10 @@
static void UnstartedClassClassForName(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
+ if (class_name == nullptr) {
+ return;
+ }
bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
mirror::ClassLoader* class_loader =
down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
@@ -154,7 +174,12 @@
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
StackHandleScope<3> hs(self); // Class, constructor, object.
- mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
+ if (param == nullptr) {
+ AbortTransactionOrFail(self, "Null-pointer in Class.newInstance.");
+ return;
+ }
+ mirror::Class* klass = param->AsClass();
Handle<mirror::Class> h_klass(hs.NewHandle(klass));
// Check that it's not null.
@@ -208,20 +233,22 @@
// going the reflective Dex way.
mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
mirror::String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
- mirror::ArtField* found = nullptr;
- mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
- for (int32_t i = 0; i < fields->GetLength() && found == nullptr; ++i) {
- mirror::ArtField* f = fields->Get(i);
+ ArtField* found = nullptr;
+ ArtField* fields = klass->GetIFields();
+ for (int32_t i = 0, count = klass->NumInstanceFields(); i < count; ++i) {
+ ArtField* f = &fields[i];
if (name2->Equals(f->GetName())) {
found = f;
+ break;
}
}
if (found == nullptr) {
fields = klass->GetSFields();
- for (int32_t i = 0; i < fields->GetLength() && found == nullptr; ++i) {
- mirror::ArtField* f = fields->Get(i);
+ for (int32_t i = 0, count = klass->NumStaticFields(); i < count; ++i) {
+ ArtField* f = &fields[i];
if (name2->Equals(f->GetName())) {
found = f;
+ break;
}
}
}
@@ -634,6 +661,100 @@
}
}
+// This allows reading security.properties in an unstarted runtime and initializing Security.
+static void UnstartedSecurityGetSecurityPropertiesReader(
+ Thread* self,
+ ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
+ JValue* result,
+ size_t arg_offset ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ const std::vector<const DexFile*>& path = runtime->GetClassLinker()->GetBootClassPath();
+ std::string canonical(DexFile::GetDexCanonicalLocation(path[0]->GetLocation().c_str()));
+ mirror::String* string_data;
+
+ // Use a block to enclose the I/O and MemMap code so buffers are released early.
+ {
+ std::string error_msg;
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(canonical.c_str(), &error_msg));
+ if (zip_archive.get() == nullptr) {
+ AbortTransactionOrFail(self, "Could not open zip file %s: %s", canonical.c_str(),
+ error_msg.c_str());
+ return;
+ }
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find("java/security/security.properties",
+ &error_msg));
+ if (zip_entry.get() == nullptr) {
+ AbortTransactionOrFail(self, "Could not find security.properties file in %s: %s",
+ canonical.c_str(), error_msg.c_str());
+ return;
+ }
+ std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(canonical.c_str(),
+ "java/security/security.properties",
+ &error_msg));
+ if (map.get() == nullptr) {
+ AbortTransactionOrFail(self, "Could not unzip security.properties file in %s: %s",
+ canonical.c_str(), error_msg.c_str());
+ return;
+ }
+
+ uint32_t length = zip_entry->GetUncompressedLength();
+ std::unique_ptr<char[]> tmp(new char[length + 1]);
+ memcpy(tmp.get(), map->Begin(), length);
+ tmp.get()[length] = 0; // null terminator
+
+ string_data = mirror::String::AllocFromModifiedUtf8(self, tmp.get());
+ }
+
+ if (string_data == nullptr) {
+ AbortTransactionOrFail(self, "Could not create string from file content of %s",
+ canonical.c_str());
+ return;
+ }
+
+ // Create a StringReader.
+ StackHandleScope<3> hs(self);
+ Handle<mirror::String> h_string(hs.NewHandle(string_data));
+
+ Handle<mirror::Class> h_class(hs.NewHandle(
+ runtime->GetClassLinker()->FindClass(self,
+ "Ljava/io/StringReader;",
+ NullHandle<mirror::ClassLoader>())));
+ if (h_class.Get() == nullptr) {
+ AbortTransactionOrFail(self, "Could not find StringReader class");
+ return;
+ }
+
+ if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+ AbortTransactionOrFail(self, "Could not initialize StringReader class");
+ return;
+ }
+
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_class->AllocObject(self)));
+ if (h_obj.Get() == nullptr) {
+ AbortTransactionOrFail(self, "Could not allocate StringReader object");
+ return;
+ }
+
+ mirror::ArtMethod* constructor = h_class->FindDeclaredDirectMethod("<init>",
+ "(Ljava/lang/String;)V");
+ if (constructor == nullptr) {
+ AbortTransactionOrFail(self, "Could not find StringReader constructor");
+ return;
+ }
+
+ uint32_t args[1];
+ args[0] = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_string.Get()));
+ EnterInterpreterFromInvoke(self, constructor, h_obj.Get(), args, nullptr);
+
+ if (self->IsExceptionPending()) {
+ AbortTransactionOrFail(self, "Could not run StringReader constructor");
+ return;
+ }
+
+ result->SetL(h_obj.Get());
+}
+
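The copy-and-terminate step above exists because AllocFromModifiedUtf8 expects a C string while the mapped zip entry is not NUL-terminated. A minimal standalone sketch of the same pattern (plain C++, not ART code):

#include <cstddef>
#include <cstring>
#include <memory>
#include <string>

// Copy a non-NUL-terminated buffer into a scratch buffer one byte larger,
// then terminate it, before handing it to an API that expects a C string.
std::string ToCString(const unsigned char* data, std::size_t length) {
  std::unique_ptr<char[]> tmp(new char[length + 1]);
  std::memcpy(tmp.get(), data, length);
  tmp[length] = '\0';
  return std::string(tmp.get());
}
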
static void UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
@@ -779,6 +900,31 @@
result->SetL(mirror::Array::CreateMultiArray(self, h_class, h_dimensions));
}
+static void UnstartedJNIArrayCreateObjectArray(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t length = static_cast<int32_t>(args[1]);
+ if (length < 0) {
+ ThrowNegativeArraySizeException(length);
+ return;
+ }
+ mirror::Class* element_class = reinterpret_cast<mirror::Class*>(args[0])->AsClass();
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ mirror::Class* array_class = class_linker->FindArrayClass(self, &element_class);
+  if (UNLIKELY(array_class == nullptr)) {
+ CHECK(self->IsExceptionPending());
+ return;
+ }
+ DCHECK(array_class->IsObjectArrayClass());
+  mirror::Array* new_array = mirror::ObjectArray<mirror::Object>::Alloc(
+ self, array_class, length, runtime->GetHeap()->GetCurrentAllocator());
+ result->SetL(new_array);
+}
+
static void UnstartedJNIThrowableNativeFillInStackTrace(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
@@ -931,6 +1077,8 @@
&UnstartedMemoryPeekEntry },
{ "void libcore.io.Memory.peekByteArray(long, byte[], int, int)",
&UnstartedMemoryPeekArrayEntry },
+ { "java.io.Reader java.security.Security.getSecurityPropertiesReader()",
+ &UnstartedSecurityGetSecurityPropertiesReader },
};
for (auto& def : defs) {
@@ -973,6 +1121,8 @@
&UnstartedJNIStringFastIndexOf },
{ "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])",
&UnstartedJNIArrayCreateMultiArray },
+ { "java.lang.Object java.lang.reflect.Array.createObjectArray(java.lang.Class, int)",
+ &UnstartedJNIArrayCreateObjectArray },
{ "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()",
&UnstartedJNIThrowableNativeFillInStackTrace },
{ "int java.lang.System.identityHashCode(java.lang.Object)",
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 31c9a0b..8dffee6 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -33,11 +33,11 @@
namespace art {
+class ArtField;
union JValue;
class Thread;
namespace mirror {
- class ArtField;
class ArtMethod;
class Class;
class Object;
@@ -51,22 +51,24 @@
* Fundamental types.
*
* ObjectId and RefTypeId must be the same size.
+ * It is OK to change the MethodId and FieldId sizes as long as each is <= 8 bytes.
+ * Note that ArtField pointers are 64 bits on 64-bit targets, so FieldId must remain 8 bytes.
*/
-typedef uint32_t FieldId; /* static or instance field */
-typedef uint32_t MethodId; /* any kind of method, including constructors */
+typedef uint64_t FieldId; /* static or instance field */
+typedef uint64_t MethodId; /* any kind of method, including constructors */
typedef uint64_t ObjectId; /* any object (threadID, stringID, arrayID, etc) */
typedef uint64_t RefTypeId; /* like ObjectID, but unique for Class objects */
typedef uint64_t FrameId; /* short-lived stack frame ID */
ObjectId ReadObjectId(const uint8_t** pBuf);
-static inline void SetFieldId(uint8_t* buf, FieldId val) { return Set4BE(buf, val); }
-static inline void SetMethodId(uint8_t* buf, MethodId val) { return Set4BE(buf, val); }
+static inline void SetFieldId(uint8_t* buf, FieldId val) { return Set8BE(buf, val); }
+static inline void SetMethodId(uint8_t* buf, MethodId val) { return Set8BE(buf, val); }
static inline void SetObjectId(uint8_t* buf, ObjectId val) { return Set8BE(buf, val); }
static inline void SetRefTypeId(uint8_t* buf, RefTypeId val) { return Set8BE(buf, val); }
static inline void SetFrameId(uint8_t* buf, FrameId val) { return Set8BE(buf, val); }
-static inline void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) { expandBufAdd4BE(pReply, id); }
-static inline void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) { expandBufAdd4BE(pReply, id); }
+static inline void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) { expandBufAdd8BE(pReply, id); }
+static inline void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) { expandBufAdd8BE(pReply, id); }
static inline void expandBufAddObjectId(ExpandBuf* pReply, ObjectId id) { expandBufAdd8BE(pReply, id); }
static inline void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) { expandBufAdd8BE(pReply, id); }
static inline void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) { expandBufAdd8BE(pReply, id); }
@@ -207,7 +209,7 @@
* "fieldValue" is non-null for field modification events only.
* "is_modification" is true for field modification, false for field access.
*/
- void PostFieldEvent(const EventLocation* pLoc, mirror::ArtField* field, mirror::Object* thisPtr,
+ void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
const JValue* fieldValue, bool is_modification)
LOCKS_EXCLUDED(event_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
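Widening FieldId/MethodId to 64 bits swaps every 4-byte helper for its 8-byte counterpart. For reference, an 8-byte big-endian store and load with the semantics these helpers assume (a sketch, not ART's actual Set8BE/Read8BE):

#include <cstdint>

// Store val most-significant byte first (JDWP wire order).
static inline void StoreBE64(uint8_t* buf, uint64_t val) {
  for (int i = 7; i >= 0; --i) {
    buf[i] = static_cast<uint8_t>(val & 0xff);
    val >>= 8;
  }
}

// Load an 8-byte big-endian value back out of buf.
static inline uint64_t LoadBE64(const uint8_t* buf) {
  uint64_t val = 0;
  for (int i = 0; i < 8; ++i) {
    val = (val << 8) | buf[i];
  }
  return val;
}
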
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index c9a4483..1ec800f 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -21,6 +21,7 @@
#include <string.h>
#include <unistd.h>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "debugger.h"
@@ -28,7 +29,6 @@
#include "jdwp/jdwp_expand_buf.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
-#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -119,7 +119,7 @@
mirror::Class* locationClass; /* ClassOnly */
mirror::Class* exceptionClass; /* ExceptionOnly */
bool caught; /* ExceptionOnly */
- mirror::ArtField* field; /* FieldOnly */
+ ArtField* field; /* FieldOnly */
mirror::Object* thisPtr; /* InstanceOnly */
/* nothing for StepOnly -- handled differently */
};
@@ -914,7 +914,7 @@
SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
}
-void JdwpState::PostFieldEvent(const EventLocation* pLoc, mirror::ArtField* field,
+void JdwpState::PostFieldEvent(const EventLocation* pLoc, ArtField* field,
mirror::Object* this_object, const JValue* fieldValue,
bool is_modification) {
DCHECK(pLoc != nullptr);
@@ -957,7 +957,7 @@
VLOG(jdwp) << StringPrintf(" this=%#" PRIx64, instance_id);
VLOG(jdwp) << StringPrintf(" type=%#" PRIx64, field_type_id) << " "
<< Dbg::GetClassName(field_id);
- VLOG(jdwp) << StringPrintf(" field=%#" PRIx32, field_id) << " "
+ VLOG(jdwp) << StringPrintf(" field=%#" PRIx64, field_id) << " "
<< Dbg::GetFieldName(field_id);
VLOG(jdwp) << " suspend_policy=" << suspend_policy;
}
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index d0ca214..2457f14 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -38,11 +38,11 @@
namespace JDWP {
std::string DescribeField(const FieldId& field_id) {
- return StringPrintf("%#x (%s)", field_id, Dbg::GetFieldName(field_id).c_str());
+ return StringPrintf("%#" PRIx64 " (%s)", field_id, Dbg::GetFieldName(field_id).c_str());
}
std::string DescribeMethod(const MethodId& method_id) {
- return StringPrintf("%#x (%s)", method_id, Dbg::GetMethodName(method_id).c_str());
+ return StringPrintf("%#" PRIx64 " (%s)", method_id, Dbg::GetMethodName(method_id).c_str());
}
std::string DescribeRefTypeId(const RefTypeId& ref_type_id) {
@@ -101,8 +101,8 @@
VLOG(jdwp) << StringPrintf(" --> thread_id=%#" PRIx64 " object_id=%#" PRIx64,
thread_id, object_id);
- VLOG(jdwp) << StringPrintf(" class_id=%#" PRIx64 " method_id=%x %s.%s", class_id,
- method_id, Dbg::GetClassName(class_id).c_str(),
+ VLOG(jdwp) << StringPrintf(" class_id=%#" PRIx64 " method_id=%#" PRIx64 " %s.%s",
+ class_id, method_id, Dbg::GetClassName(class_id).c_str(),
Dbg::GetMethodName(method_id).c_str());
VLOG(jdwp) << StringPrintf(" %d args:", arg_count);
@@ -256,8 +256,6 @@
/*
* Respond with the sizes of the basic debugger types.
- *
- * All IDs are 8 bytes.
*/
static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/jdwp/jdwp_request.cc b/runtime/jdwp/jdwp_request.cc
index 7b15d6d..18f40a1 100644
--- a/runtime/jdwp/jdwp_request.cc
+++ b/runtime/jdwp/jdwp_request.cc
@@ -87,13 +87,13 @@
}
FieldId Request::ReadFieldId() {
- FieldId id = Read4BE();
+ FieldId id = Read8BE();
VLOG(jdwp) << " field id " << DescribeField(id);
return id;
}
MethodId Request::ReadMethodId() {
- MethodId id = Read4BE();
+ MethodId id = Read8BE();
VLOG(jdwp) << " method id " << DescribeMethod(id);
return id;
}
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 99a005d..a42a58f 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -50,6 +50,10 @@
Thread* const self = Thread::Current();
self->AssertNoPendingException();
+  // Object::IdentityHashCode may cause these locks to be held, so check that we do not
+  // already hold them.
+ Locks::thread_list_lock_->AssertNotHeld(self);
+ Locks::thread_suspend_count_lock_->AssertNotHeld(self);
StackHandleScope<1> hs(self);
Handle<mirror::Object> obj_h(hs.NewHandle(o));
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 0693f33..27a4e55 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -62,9 +62,13 @@
ObjectRegistry();
JDWP::ObjectId Add(mirror::Object* o)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
JDWP::RefTypeId AddRefType(mirror::Class* c)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -96,7 +100,9 @@
private:
JDWP::ObjectId InternalAdd(mirror::Object* o)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(lock_, Locks::thread_list_lock_);
+ LOCKS_EXCLUDED(lock_,
+ Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 425d2d3..9d5d74f 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -31,12 +31,12 @@
namespace art {
namespace mirror {
- class ArtField;
class ArtMethod;
class Class;
class Object;
class Throwable;
} // namespace mirror
+class ArtField;
union JValue;
class Thread;
@@ -77,10 +77,10 @@
mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- mirror::ArtField* /*field*/) OVERRIDE { }
+ ArtField* /*field*/) OVERRIDE { }
virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
- mirror::ArtField* /*field*/, const JValue& /*field_value*/)
+ ArtField* /*field*/, const JValue& /*field_value*/)
OVERRIDE { }
virtual void ExceptionCaught(Thread* /*thread*/,
mirror::Throwable* /*exception_object*/) OVERRIDE { }
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 9ec64d4..8a5461b 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -23,6 +23,7 @@
#include <utility>
#include <vector>
+#include "art_field-inl.h"
#include "atomic.h"
#include "base/allocator.h"
#include "base/logging.h"
@@ -37,7 +38,6 @@
#include "interpreter/interpreter.h"
#include "jni_env_ext.h"
#include "java_vm_ext.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -184,7 +184,7 @@
if (c.Get() == nullptr) {
return nullptr;
}
- mirror::ArtField* field = nullptr;
+ ArtField* field = nullptr;
mirror::Class* field_type;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (sig[1] != '\0') {
@@ -379,7 +379,7 @@
static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) {
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
- mirror::ArtField* f = soa.DecodeField(fid);
+ ArtField* f = soa.DecodeField(fid);
return soa.AddLocalReference<jobject>(mirror::Field::CreateFromArtField(soa.Self(), f, true));
}
@@ -1203,14 +1203,14 @@
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(obj);
- mirror::ArtField* f = soa.DecodeField(fid);
+ ArtField* f = soa.DecodeField(fid);
return soa.AddLocalReference<jobject>(f->GetObject(o));
}
static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) {
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
- mirror::ArtField* f = soa.DecodeField(fid);
+ ArtField* f = soa.DecodeField(fid);
return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass()));
}
@@ -1220,7 +1220,7 @@
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
mirror::Object* v = soa.Decode<mirror::Object*>(java_value);
- mirror::ArtField* f = soa.DecodeField(fid);
+ ArtField* f = soa.DecodeField(fid);
f->SetObject<false>(o, v);
}
@@ -1228,7 +1228,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
ScopedObjectAccess soa(env);
mirror::Object* v = soa.Decode<mirror::Object*>(java_value);
- mirror::ArtField* f = soa.DecodeField(fid);
+ ArtField* f = soa.DecodeField(fid);
f->SetObject<false>(f->GetDeclaringClass(), v);
}
@@ -1237,13 +1237,13 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
mirror::Object* o = soa.Decode<mirror::Object*>(instance); \
- mirror::ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = soa.DecodeField(fid); \
return f->Get ##fn (o)
#define GET_STATIC_PRIMITIVE_FIELD(fn) \
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
- mirror::ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = soa.DecodeField(fid); \
return f->Get ##fn (f->GetDeclaringClass())
#define SET_PRIMITIVE_FIELD(fn, instance, value) \
@@ -1251,13 +1251,13 @@
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
mirror::Object* o = soa.Decode<mirror::Object*>(instance); \
- mirror::ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = soa.DecodeField(fid); \
f->Set ##fn <false>(o, value)
#define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
- mirror::ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = soa.DecodeField(fid); \
f->Set ##fn <false>(f->GetDeclaringClass(), value)
static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) {
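For illustration, expanding GET_PRIMITIVE_FIELD(Boolean, obj) inside GetBooleanField by hand (reconstructed mechanically from the macro above; this uses ART types and is not standalone code):

static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) {
  CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid);
  ScopedObjectAccess soa(env);
  mirror::Object* o = soa.Decode<mirror::Object*>(obj);
  ArtField* f = soa.DecodeField(fid);
  return f->GetBoolean(o);  // "f->Get ## fn (o)" with fn = Boolean
}
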
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
new file mode 100644
index 0000000..fe6bee6
--- /dev/null
+++ b/runtime/linear_alloc.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linear_alloc.h"
+
+#include "thread-inl.h"
+
+namespace art {
+
+LinearAlloc::LinearAlloc(ArenaPool* pool) : lock_("linear alloc"), allocator_(pool) {
+}
+
+void* LinearAlloc::Alloc(Thread* self, size_t size) {
+ MutexLock mu(self, lock_);
+ return allocator_.Alloc(size);
+}
+
+size_t LinearAlloc::GetUsedMemory() const {
+ MutexLock mu(Thread::Current(), lock_);
+ return allocator_.BytesUsed();
+}
+
+} // namespace art
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
new file mode 100644
index 0000000..6d8eda6
--- /dev/null
+++ b/runtime/linear_alloc.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_LINEAR_ALLOC_H_
+#define ART_RUNTIME_LINEAR_ALLOC_H_
+
+#include "base/arena_allocator.h"
+
+namespace art {
+
+class ArenaPool;
+
+// TODO: Support freeing if we add poor man's class unloading.
+class LinearAlloc {
+ public:
+ explicit LinearAlloc(ArenaPool* pool);
+
+ void* Alloc(Thread* self, size_t size);
+
+  // Allocate an array of structs of type T; no constructors are run.
+ template<class T>
+ T* AllocArray(Thread* self, size_t elements) {
+ return reinterpret_cast<T*>(Alloc(self, elements * sizeof(T)));
+ }
+
+ // Return the number of bytes used in the allocator.
+ size_t GetUsedMemory() const;
+
+ private:
+ mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ ArenaAllocator allocator_ GUARDED_BY(lock_);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_LINEAR_ALLOC_H_
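A hypothetical usage sketch for the new allocator (Entry and MakeEntries are made up; AllocArray hands back raw bump-allocated memory with no constructors run, so only trivially-constructible types belong here):

#include <cstddef>
#include <cstdint>

#include "linear_alloc.h"  // the header added above

struct Entry {
  uint32_t dex_idx;
  uint32_t flags;
};

Entry* MakeEntries(art::LinearAlloc* alloc, art::Thread* self, std::size_t n) {
  Entry* entries = alloc->AllocArray<Entry>(self, n);
  for (std::size_t i = 0; i < n; ++i) {
    entries[i].dex_idx = static_cast<uint32_t>(i);
    entries[i].flags = 0u;
  }
  return entries;
}
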
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 588615f..edd2888 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -622,7 +622,7 @@
}
void MemMap::DumpMapsLocked(std::ostream& os) {
- os << maps_;
+ os << *maps_;
}
bool MemMap::HasMemMap(MemMap* map) {
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 6452f31..8b3418d 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -203,7 +203,7 @@
template<typename T>
inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
Array* raw_array = Array::Alloc<true>(self, GetArrayClass(), length,
- ComponentSizeShiftWidth<sizeof(T)>(),
+ ComponentSizeShiftWidth(sizeof(T)),
Runtime::Current()->GetHeap()->GetCurrentAllocator());
return down_cast<PrimitiveArray<T>*>(raw_array);
}
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 115fcf2..832ad68 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -54,7 +54,7 @@
}
void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK_GE(length, 0);
+ DCHECK_GE(length, 0);
// We use non transactional version since we can't undo this write. We also disable checking
// since it would fail during a transaction.
SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length);
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
deleted file mode 100644
index 83602d4..0000000
--- a/runtime/mirror/art_field.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_field.h"
-
-#include "art_field-inl.h"
-#include "gc/accounting/card_table-inl.h"
-#include "object-inl.h"
-#include "object_array-inl.h"
-#include "runtime.h"
-#include "scoped_thread_state_change.h"
-#include "utils.h"
-#include "well_known_classes.h"
-
-namespace art {
-namespace mirror {
-
-// TODO: Get global references for these
-GcRoot<Class> ArtField::java_lang_reflect_ArtField_;
-
-void ArtField::SetClass(Class* java_lang_reflect_ArtField) {
- CHECK(java_lang_reflect_ArtField_.IsNull());
- CHECK(java_lang_reflect_ArtField != NULL);
- java_lang_reflect_ArtField_ = GcRoot<Class>(java_lang_reflect_ArtField);
-}
-
-void ArtField::ResetClass() {
- CHECK(!java_lang_reflect_ArtField_.IsNull());
- java_lang_reflect_ArtField_ = GcRoot<Class>(nullptr);
-}
-
-void ArtField::SetOffset(MemberOffset num_bytes) {
- DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
- Runtime::Current()->IsCompilingBootImage()) {
- Primitive::Type type = GetTypeAsPrimitiveType();
- if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) {
- DCHECK_ALIGNED(num_bytes.Uint32Value(), 8);
- }
- }
- // Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), num_bytes.Uint32Value());
-}
-
-void ArtField::VisitRoots(RootVisitor* visitor) {
- java_lang_reflect_ArtField_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
-}
-
-// TODO: we could speed up the search if fields are ordered by offsets.
-ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) {
- DCHECK(klass != nullptr);
- ObjectArray<ArtField>* instance_fields = klass->GetIFields();
- if (instance_fields != nullptr) {
- for (int32_t i = 0, e = instance_fields->GetLength(); i < e; ++i) {
- mirror::ArtField* field = instance_fields->GetWithoutChecks(i);
- if (field->GetOffset().Uint32Value() == field_offset) {
- return field;
- }
- }
- }
- // We did not find field in the class: look into superclass.
- if (klass->GetSuperClass() != NULL) {
- return FindInstanceFieldWithOffset(klass->GetSuperClass(), field_offset);
- } else {
- return nullptr;
- }
-}
-
-} // namespace mirror
-} // namespace art
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
deleted file mode 100644
index 9d95cb9..0000000
--- a/runtime/mirror/art_field.h
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_MIRROR_ART_FIELD_H_
-#define ART_RUNTIME_MIRROR_ART_FIELD_H_
-
-#include <jni.h>
-
-#include "gc_root.h"
-#include "modifiers.h"
-#include "object.h"
-#include "object_callbacks.h"
-#include "primitive.h"
-#include "read_barrier_option.h"
-
-namespace art {
-
-struct ArtFieldOffsets;
-class DexFile;
-class ScopedObjectAccessAlreadyRunnable;
-
-namespace mirror {
-
-class DexCache;
-
-// C++ mirror of java.lang.reflect.ArtField
-class MANAGED ArtField FINAL : public Object {
- public:
- // Size of java.lang.reflect.ArtField.class.
- static uint32_t ClassSize();
-
- // Size of an instance of java.lang.reflect.ArtField not including its value array.
- static constexpr uint32_t InstanceSize() {
- return sizeof(ArtField);
- }
-
- Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_), new_access_flags);
- }
-
- bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccPublic) != 0;
- }
-
- bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccStatic) != 0;
- }
-
- bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccFinal) != 0;
- }
-
- uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_));
- }
-
- void SetDexFieldIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_), new_idx);
- }
-
- // Offset to field within an Object.
- MemberOffset GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static MemberOffset OffsetOffset() {
- return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
- }
-
- MemberOffset GetOffsetDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // field access, null object for static fields
- uint8_t GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetBoolean(Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int8_t GetByte(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetByte(Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint16_t GetChar(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetChar(Object* object, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int16_t GetShort(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetShort(Object* object, int16_t s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int32_t GetInt(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetInt(Object* object, int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int64_t GetLong(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetLong(Object* object, int64_t j) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- float GetFloat(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetFloat(Object* object, float f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- double GetDouble(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetDouble(Object* object, double d) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Object* GetObject(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetObject(Object* object, Object* l) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Raw field accesses.
- uint32_t Get32(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void Set32(Object* object, uint32_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint64_t Get64(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void Set64(Object* object, uint64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Object* GetObj(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive>
- void SetObj(Object* object, Object* new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetJavaLangReflectArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(!java_lang_reflect_ArtField_.IsNull());
- return java_lang_reflect_ArtField_.Read<kReadBarrierOption>();
- }
-
- static void SetClass(Class* java_lang_reflect_ArtField);
- static void ResetClass();
- static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccVolatile) != 0;
- }
-
- // Returns an instance field with this offset in the given class or nullptr if not found.
- static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Resolves / returns the name from the dex cache.
- String* GetStringName(Thread* self, bool resolve) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- Primitive::Type GetTypeAsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- template <bool kResolve>
- Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
- // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
- // The class we are a part of
- HeapReference<Class> declaring_class_;
-
- uint32_t access_flags_;
-
- // Dex cache index of field id
- uint32_t field_dex_idx_;
-
- // Offset of field within an instance or in the Class' static fields
- uint32_t offset_;
-
- static GcRoot<Class> java_lang_reflect_ArtField_;
-
- friend struct art::ArtFieldOffsets; // for verifying offset information
- DISALLOW_IMPLICIT_CONSTRUCTORS(ArtField);
-};
-
-} // namespace mirror
-} // namespace art
-
-#endif // ART_RUNTIME_MIRROR_ART_FIELD_H_
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 0ccf5db..fb427dc 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -36,7 +36,7 @@
namespace mirror {
inline uint32_t ArtMethod::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength + 8;
+ uint32_t vtable_entries = Object::kVTableLength + 7;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index edbbb4a..92aea1f 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -53,7 +53,7 @@
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
- mirror::ArtField* f =
+ ArtField* f =
soa.DecodeField(WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
mirror::ArtMethod* method = f->GetObject(soa.Decode<mirror::Object*>(jlr_method))->AsArtMethod();
DCHECK(method != nullptr);
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 22481ce..55b8068 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -183,6 +183,10 @@
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), new_method_index);
}
+ static MemberOffset DexMethodIndexOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_);
+ }
+
static MemberOffset MethodIndexOffset() {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
}
@@ -214,6 +218,8 @@
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
}
+ ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
@@ -434,10 +440,6 @@
EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
}
- static MemberOffset GetMethodIndexOffset() {
- return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
- }
-
// Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
// conventions for a method of managed code. Returns false for Proxy methods.
bool IsRuntimeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -604,9 +606,6 @@
private:
ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index c368dc6..aaa66f9 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -21,7 +21,6 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
-#include "class_linker-inl.h"
#include "class_loader.h"
#include "common_throws.h"
#include "dex_cache.h"
@@ -39,12 +38,8 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline uint32_t Class::GetObjectSize() {
- if (kIsDebugBuild) {
- // Use a local variable as (D)CHECK can't handle the space between
- // the two template params.
- bool is_variable_size = IsVariableSize<kVerifyFlags, kReadBarrierOption>();
- CHECK(!is_variable_size) << " class=" << PrettyTypeOf(this);
- }
+ // Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
+ DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << " class=" << PrettyTypeOf(this);
return GetField32(ObjectSizeOffset());
}
@@ -397,9 +392,9 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable);
}
-inline ObjectArray<ArtField>* Class::GetIFields() {
+inline ArtField* Class::GetIFields() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
+ return GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
}
inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
@@ -432,55 +427,46 @@
return MemberOffset(base);
}
-inline void Class::SetIFields(ObjectArray<ArtField>* new_ifields)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(NULL == GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)));
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields);
+inline void Class::SetIFields(ArtField* new_ifields) {
+ DCHECK(GetIFieldsUnchecked() == nullptr);
+ return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields);
}
-inline ObjectArray<ArtField>* Class::GetSFields() {
+inline void Class::SetIFieldsUnchecked(ArtField* new_ifields) {
+ SetFieldPtr<false, true, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields);
+}
+
+inline ArtField* Class::GetSFieldsUnchecked() {
+ return GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
+}
+
+inline ArtField* Class::GetIFieldsUnchecked() {
+ return GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
+}
+
+inline ArtField* Class::GetSFields() {
DCHECK(IsLoaded() || IsErroneous()) << GetStatus();
- return GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
+ return GetSFieldsUnchecked();
}
-inline void Class::SetSFields(ObjectArray<ArtField>* new_sfields)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+inline void Class::SetSFields(ArtField* new_sfields) {
DCHECK((IsRetired() && new_sfields == nullptr) ||
- (NULL == GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_))));
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields);
+ GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_)) == nullptr);
+ SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields);
}
-inline uint32_t Class::NumStaticFields() {
- return (GetSFields() != NULL) ? GetSFields()->GetLength() : 0;
+inline void Class::SetSFieldsUnchecked(ArtField* new_sfields) {
+ SetFieldPtr<false, true, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields);
}
-
-inline ArtField* Class::GetStaticField(uint32_t i) // TODO: uint16_t
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetSFields()->GetWithoutChecks(i);
+inline ArtField* Class::GetStaticField(uint32_t i) {
+ DCHECK_LT(i, NumStaticFields());
+ return &GetSFields()[i];
}
-inline void Class::SetStaticField(uint32_t i, ArtField* f) // TODO: uint16_t
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectArray<ArtField>* sfields= GetFieldObject<ObjectArray<ArtField>>(
- OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
- sfields->Set<false>(i, f);
-}
-
-inline uint32_t Class::NumInstanceFields() {
- return (GetIFields() != NULL) ? GetIFields()->GetLength() : 0;
-}
-
-inline ArtField* Class::GetInstanceField(uint32_t i) { // TODO: uint16_t
- DCHECK_NE(NumInstanceFields(), 0U);
- return GetIFields()->GetWithoutChecks(i);
-}
-
-inline void Class::SetInstanceField(uint32_t i, ArtField* f) // TODO: uint16_t
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectArray<ArtField>* ifields= GetFieldObject<ObjectArray<ArtField>>(
- OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
- ifields->Set<false>(i, f);
+inline ArtField* Class::GetInstanceField(uint32_t i) {
+ DCHECK_LT(i, NumInstanceFields());
+ return &GetIFields()[i];
}
template<VerifyObjectFlags kVerifyFlags>
@@ -513,14 +499,12 @@
DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
this == String::GetJavaLangString() ||
- this == ArtField::GetJavaLangReflectArtField() ||
this == ArtMethod::GetJavaLangReflectArtMethod())
<< "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
<< " IsRetired=" << IsRetired<kVerifyFlags>()
<< " IsErroneous=" <<
IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
<< " IsString=" << (this == String::GetJavaLangString())
- << " IsArtField=" << (this == ArtField::GetJavaLangReflectArtField())
<< " IsArtMethod=" << (this == ArtMethod::GetJavaLangReflectArtMethod())
<< " descriptor=" << PrettyDescriptor(this);
return GetField32<kVerifyFlags>(AccessFlagsOffset());
@@ -691,11 +675,6 @@
}
template<ReadBarrierOption kReadBarrierOption>
-inline bool Class::IsArtFieldClass() const {
- return this == ArtField::GetJavaLangReflectArtField<kReadBarrierOption>();
-}
-
-template<ReadBarrierOption kReadBarrierOption>
inline bool Class::IsArtMethodClass() const {
return this == ArtMethod::GetJavaLangReflectArtMethod<kReadBarrierOption>();
}
@@ -722,7 +701,7 @@
} else if (IsPrimitive()) {
return strcmp(Primitive::Descriptor(GetPrimitiveType()), match) == 0;
} else if (IsProxyClass()) {
- return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match;
+ return ProxyDescriptorEquals(match);
} else {
const DexFile& dex_file = GetDexFile();
const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
@@ -741,35 +720,35 @@
inline ObjectArray<Class>* Class::GetInterfaces() {
CHECK(IsProxyClass());
// First static field.
- DCHECK(GetSFields()->Get(0)->IsArtField());
- DCHECK_STREQ(GetSFields()->Get(0)->GetName(), "interfaces");
- MemberOffset field_offset = GetSFields()->Get(0)->GetOffset();
+ auto* field = GetStaticField(0);
+ DCHECK_STREQ(field->GetName(), "interfaces");
+ MemberOffset field_offset = field->GetOffset();
return GetFieldObject<ObjectArray<Class>>(field_offset);
}
inline ObjectArray<ObjectArray<Class>>* Class::GetThrows() {
CHECK(IsProxyClass());
// Second static field.
- DCHECK(GetSFields()->Get(1)->IsArtField());
- DCHECK_STREQ(GetSFields()->Get(1)->GetName(), "throws");
- MemberOffset field_offset = GetSFields()->Get(1)->GetOffset();
+ auto* field = GetStaticField(1);
+ DCHECK_STREQ(field->GetName(), "throws");
+ MemberOffset field_offset = field->GetOffset();
return GetFieldObject<ObjectArray<ObjectArray<Class>>>(field_offset);
}
inline MemberOffset Class::GetDisableIntrinsicFlagOffset() {
CHECK(IsReferenceClass());
// First static field
- DCHECK(GetSFields()->Get(0)->IsArtField());
- DCHECK_STREQ(GetSFields()->Get(0)->GetName(), "disableIntrinsic");
- return GetSFields()->Get(0)->GetOffset();
+ auto* field = GetStaticField(0);
+ DCHECK_STREQ(field->GetName(), "disableIntrinsic");
+ return field->GetOffset();
}
inline MemberOffset Class::GetSlowPathFlagOffset() {
CHECK(IsReferenceClass());
// Second static field
- DCHECK(GetSFields()->Get(1)->IsArtField());
- DCHECK_STREQ(GetSFields()->Get(1)->GetName(), "slowPathEnabled");
- return GetSFields()->Get(1)->GetOffset();
+ auto* field = GetStaticField(1);
+ DCHECK_STREQ(field->GetName(), "slowPathEnabled");
+ return field->GetOffset();
}
inline bool Class::GetSlowPathEnabled() {
@@ -827,6 +806,30 @@
return GetFieldObject<ObjectArray<String>>(DexCacheStringsOffset());
}
+template<class Visitor>
+void mirror::Class::VisitFieldRoots(Visitor& visitor) {
+ ArtField* const sfields = GetSFieldsUnchecked();
+ // Since we visit class roots while we may be writing these fields, check against null.
+ // TODO: Is this safe for concurrent compaction?
+ if (sfields != nullptr) {
+ for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
+ if (kIsDebugBuild && IsResolved()) {
+ CHECK_EQ(sfields[i].GetDeclaringClass(), this) << GetStatus();
+ }
+ visitor.VisitRoot(sfields[i].DeclaringClassRoot().AddressWithoutBarrier());
+ }
+ }
+ ArtField* const ifields = GetIFieldsUnchecked();
+ if (ifields != nullptr) {
+ for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
+ if (kIsDebugBuild && IsResolved()) {
+ CHECK_EQ(ifields[i].GetDeclaringClass(), this) << GetStatus();
+ }
+ visitor.VisitRoot(ifields[i].DeclaringClassRoot().AddressWithoutBarrier());
+ }
+ }
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 8fb8147..2afb4af 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -18,7 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "class_loader.h"
#include "class-inl.h"
#include "dex_cache.h"
@@ -871,5 +871,10 @@
return new_class->AsClass();
}
+bool Class::ProxyDescriptorEquals(const char* match) {
+ DCHECK(IsProxyClass());
+ return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match;
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index b82a58f..20f2387 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -35,6 +35,7 @@
namespace art {
+class ArtField;
struct ClassOffsets;
template<class T> class Handle;
template<class T> class Handle;
@@ -44,7 +45,6 @@
namespace mirror {
-class ArtField;
class ArtMethod;
class ClassLoader;
class DexCache;
@@ -420,9 +420,6 @@
bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArtFieldClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsArtMethodClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -823,17 +820,22 @@
ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
- ObjectArray<ArtField>* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetIFields(ObjectArray<ArtField>* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetIFields(ArtField* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Unchecked edition has no verification flags.
+  void SetIFieldsUnchecked(ArtField* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtField* GetInstanceField(uint32_t i) // TODO: uint16_t
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_));
+ }
- void SetInstanceField(uint32_t i, ArtField* f) // TODO: uint16_t
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetNumInstanceFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_), num);
+ }
+
+ ArtField* GetInstanceField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the number of instance fields containing reference types.
uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -884,18 +886,24 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Gets the static fields of the class.
- ObjectArray<ArtField>* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetSFields(ObjectArray<ArtField>* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetSFields(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Unchecked edition has no verification flags.
+ void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_));
+ }
+
+ void SetNumStaticFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_), num);
+ }
// TODO: uint16_t
ArtField* GetStaticField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // TODO: uint16_t
- void SetStaticField(uint32_t i, ArtField* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Find a static or instance field using the JLS resolution order
static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
@@ -974,6 +982,10 @@
static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Visit field roots.
+  template<class Visitor>
+  void VisitFieldRoots(Visitor& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// When class is verified, set the kAccPreverified flag on each method.
void SetPreverifiedFlagOnAllMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1079,6 +1091,12 @@
void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Unchecked editions are for root visiting.
+ ArtField* GetSFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetIFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// defining class loader, or NULL for the "bootstrap" system loader
HeapReference<ClassLoader> class_loader_;
@@ -1096,18 +1114,6 @@
// static, private, and <init> methods
HeapReference<ObjectArray<ArtMethod>> direct_methods_;
- // instance fields
- //
- // These describe the layout of the contents of an Object.
- // Note that only the fields directly declared by this class are
- // listed in ifields; fields declared by a superclass are listed in
- // the superclass's Class.ifields.
- //
- // All instance fields that refer to objects are guaranteed to be at
- // the beginning of the field list. num_reference_instance_fields_
- // specifies the number of reference fields.
- HeapReference<ObjectArray<ArtField>> ifields_;
-
// The interface table (iftable_) contains pairs of a interface class and an array of the
// interface methods. There is one pair per interface supported by this class. That means one
// pair for each interface we support directly, indirectly via superclass, or indirectly via a
@@ -1124,9 +1130,6 @@
// Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
HeapReference<String> name_;
- // Static fields
- HeapReference<ObjectArray<ArtField>> sfields_;
-
// The superclass, or NULL if this is java.lang.Object, an interface or primitive type.
HeapReference<Class> super_class_;
@@ -1143,8 +1146,22 @@
HeapReference<ObjectArray<ArtMethod>> vtable_;
// Access flags; low 16 bits are defined by VM spec.
+ // Note: Shuffled back.
uint32_t access_flags_;
+ // instance fields
+ //
+ // These describe the layout of the contents of an Object.
+ // Note that only the fields directly declared by this class are
+ // listed in ifields; fields declared by a superclass are listed in
+ // the superclass's Class.ifields.
+ //
+  // ArtField arrays are allocated as an array of fields, not an array of field pointers.
+ uint64_t ifields_;
+
+ // Static fields
+ uint64_t sfields_;
+
// Total size of the Class instance; used when allocating storage on gc heap.
// See also object_size_.
uint32_t class_size_;
@@ -1160,12 +1177,18 @@
// TODO: really 16bits
int32_t dex_type_idx_;
+  // Number of instance fields.
+ uint32_t num_instance_fields_;
+
// Number of instance fields that are object refs.
uint32_t num_reference_instance_fields_;
// Number of static fields that are object refs,
uint32_t num_reference_static_fields_;
+ // Number of static fields.
+ uint32_t num_static_fields_;
+
// Total object size; used when allocating storage on gc heap.
// (For interfaces and abstract classes this will be zero.)
// See also class_size_.
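With fields now stored as a raw ArtField array plus an explicit count rather than an ObjectArray<ArtField>, iteration is plain pointer arithmetic. A self-contained toy model of the count-plus-array shape:

#include <cstdint>
#include <cstdio>

struct Field {
  const char* name;
  uint32_t offset;
};

// Mirrors the new layout: GetIFields() returns the array base and
// NumInstanceFields() the element count.
struct ClassFields {
  Field* ifields;
  uint32_t num_instance_fields;
};

void DumpInstanceFields(const ClassFields& c) {
  for (uint32_t i = 0; i < c.num_instance_fields; ++i) {
    const Field& f = c.ifields[i];  // &ifields[i], as in GetInstanceField(i)
    std::printf("%s @ %u\n", f.name, f.offset);
  }
}
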
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 288e88e..1cb437e 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -19,6 +19,7 @@
#include "dex_cache.h"
+#include "art_field-inl.h"
#include "base/logging.h"
#include "mirror/class.h"
#include "runtime.h"
@@ -27,7 +28,7 @@
namespace mirror {
inline uint32_t DexCache::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength + 1;
+ uint32_t vtable_entries = Object::kVTableLength + 5;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
}
@@ -35,12 +36,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ArtMethod* method = GetResolvedMethods()->Get(method_idx);
// Hide resolution trampoline methods from the caller
- if (method != NULL && method->IsRuntimeMethod()) {
- DCHECK(method == Runtime::Current()->GetResolutionMethod());
- return NULL;
- } else {
- return method;
+ if (method != nullptr && method->IsRuntimeMethod()) {
+ DCHECK_EQ(method, Runtime::Current()->GetResolutionMethod());
+ return nullptr;
}
+ return method;
}
inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
@@ -49,6 +49,34 @@
GetResolvedTypes()->Set(type_idx, resolved);
}
+inline ArtField* DexCache::GetResolvedField(uint32_t idx, size_t ptr_size) {
+ ArtField* field = nullptr;
+ if (ptr_size == 8) {
+ field = reinterpret_cast<ArtField*>(
+ static_cast<uintptr_t>(GetResolvedFields()->AsLongArray()->GetWithoutChecks(idx)));
+ } else {
+ DCHECK_EQ(ptr_size, 4u);
+ field = reinterpret_cast<ArtField*>(
+ static_cast<uintptr_t>(GetResolvedFields()->AsIntArray()->GetWithoutChecks(idx)));
+ }
+ if (field == nullptr || field->GetDeclaringClass()->IsErroneous()) {
+ return nullptr;
+ }
+ return field;
+}
+
+inline void DexCache::SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size) {
+ if (ptr_size == 8) {
+ GetResolvedFields()->AsLongArray()->Set(
+ idx, static_cast<uint64_t>(reinterpret_cast<uintptr_t>(field)));
+ } else {
+ DCHECK_EQ(ptr_size, 4u);
+ CHECK_LE(reinterpret_cast<uintptr_t>(field), 0xFFFFFFFF);
+ GetResolvedFields()->AsIntArray()->Set(
+ idx, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(field)));
+ }
+}
+
} // namespace mirror
} // namespace art
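GetResolvedField/SetResolvedField above select a 32-bit or 64-bit backing array from the target pointer size; the encoding itself is a checked narrowing. A standalone sketch:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Encode a native pointer for storage in a 32- or 64-bit slot.
static uint64_t EncodePointer(const void* ptr, std::size_t ptr_size) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
  if (ptr_size == 4u) {
    assert(raw <= UINT32_MAX);  // must fit the 32-bit slot
    return static_cast<uint32_t>(raw);
  }
  return static_cast<uint64_t>(raw);
}

// Decode it back to a typed pointer.
template <typename T>
static T* DecodePointer(uint64_t stored) {
  return reinterpret_cast<T*>(static_cast<uintptr_t>(stored));
}
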
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index d6c11e8..ade8bd2 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -36,7 +36,7 @@
ObjectArray<String>* strings,
ObjectArray<Class>* resolved_types,
ObjectArray<ArtMethod>* resolved_methods,
- ObjectArray<ArtField>* resolved_fields) {
+ Array* resolved_fields) {
CHECK(dex_file != nullptr);
CHECK(location != nullptr);
CHECK(strings != nullptr);
@@ -44,19 +44,18 @@
CHECK(resolved_methods != nullptr);
CHECK(resolved_fields != nullptr);
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
+ SetDexFile(dex_file);
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location);
SetFieldObject<false>(StringsOffset(), strings);
+ SetFieldObject<false>(ResolvedFieldsOffset(), resolved_fields);
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types);
SetFieldObject<false>(ResolvedMethodsOffset(), resolved_methods);
- SetFieldObject<false>(ResolvedFieldsOffset(), resolved_fields);
Runtime* runtime = Runtime::Current();
if (runtime->HasResolutionMethod()) {
// Initialize the resolve methods array to contain trampolines for resolution.
ArtMethod* trampoline = runtime->GetResolutionMethod();
- size_t length = resolved_methods->GetLength();
- for (size_t i = 0; i < length; i++) {
+ for (size_t i = 0, length = resolved_methods->GetLength(); i < length; i++) {
resolved_methods->SetWithoutChecks<false>(i, trampoline);
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index c548c03..7e30b89 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -50,7 +50,7 @@
ObjectArray<String>* strings,
ObjectArray<Class>* types,
ObjectArray<ArtMethod>* methods,
- ObjectArray<ArtField>* fields)
+ Array* fields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Fixup(ArtMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -116,23 +116,16 @@
GetResolvedMethods()->Set(method_idx, resolved);
}
- ArtField* GetResolvedField(uint32_t field_idx) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ArtField* field = GetResolvedFields()->Get(field_idx);
- if (UNLIKELY(field == nullptr || field->GetDeclaringClass()->IsErroneous())) {
- return nullptr;
- } else {
- return field;
- }
- }
+ // Pointer sized variant, used for patching.
+ ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetResolvedField(uint32_t field_idx, ArtField* resolved) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- GetResolvedFields()->Set(field_idx, resolved);
- }
+ // Pointer sized variant, used for patching.
+ void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject< ObjectArray<String>>(StringsOffset());
+ return GetFieldObject<ObjectArray<String>>(StringsOffset());
}
ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -145,9 +138,8 @@
return GetFieldObject< ObjectArray<ArtMethod>>(ResolvedMethodsOffset());
}
- ObjectArray<ArtField>* GetResolvedFields() ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<ObjectArray<ArtField>>(ResolvedFieldsOffset());
+ Array* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<Array>(ResolvedFieldsOffset());
}
const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -162,7 +154,8 @@
private:
HeapReference<Object> dex_;
HeapReference<String> location_;
- HeapReference<ObjectArray<ArtField>> resolved_fields_;
+ // Either an int array (32 bit) or a long array (64 bit).
+ HeapReference<Object> resolved_fields_;
HeapReference<ObjectArray<ArtMethod>> resolved_methods_;
HeapReference<ObjectArray<Class>> resolved_types_;
HeapReference<ObjectArray<String>> strings_;
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 53e5534..1d6846b 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -47,7 +47,7 @@
EXPECT_LE(0, dex_cache->GetStrings()->GetLength());
EXPECT_LE(0, dex_cache->GetResolvedTypes()->GetLength());
EXPECT_LE(0, dex_cache->GetResolvedMethods()->GetLength());
- EXPECT_LE(0, dex_cache->GetResolvedFields()->GetLength());
+ EXPECT_LE(0u, dex_cache->NumResolvedFields());
EXPECT_EQ(java_lang_dex_file_->NumStringIds(),
static_cast<uint32_t>(dex_cache->GetStrings()->GetLength()));
@@ -55,8 +55,7 @@
static_cast<uint32_t>(dex_cache->GetResolvedTypes()->GetLength()));
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(),
static_cast<uint32_t>(dex_cache->GetResolvedMethods()->GetLength()));
- EXPECT_EQ(java_lang_dex_file_->NumFieldIds(),
- static_cast<uint32_t>(dex_cache->GetResolvedFields()->GetLength()));
+ EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
}
} // namespace mirror
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 24ebc48..9820db7 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -20,6 +20,7 @@
#include "field.h"
#include "art_field-inl.h"
+#include "mirror/dex_cache-inl.h"
#include "runtime-inl.h"
namespace art {
@@ -27,9 +28,8 @@
namespace mirror {
template <bool kTransactionActive>
-inline mirror::Field* Field::CreateFromArtField(Thread* self, mirror::ArtField* field,
+inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve) {
- CHECK(!kMovingFields);
// Try to resolve type before allocating since this is a thread suspension point.
mirror::Class* type = field->GetType<true>();
@@ -57,13 +57,13 @@
return nullptr;
}
auto dex_field_index = field->GetDexFieldIndex();
- auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index);
+ auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, sizeof(void*));
if (resolved_field != nullptr) {
DCHECK_EQ(resolved_field, field);
} else {
// We rely on the field being resolved so that we can get back to the ArtField
// (i.e. FromReflectedMethod).
- field->GetDexCache()->SetResolvedField(dex_field_index, field);
+ field->GetDexCache()->SetResolvedField(dex_field_index, field, sizeof(void*));
}
ret->SetType<kTransactionActive>(type);
ret->SetDeclaringClass<kTransactionActive>(field->GetDeclaringClass());
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index 82cc26e..70311bb 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -55,7 +55,7 @@
ArtField* Field::GetArtField() {
mirror::DexCache* const dex_cache = GetDeclaringClass()->GetDexCache();
- mirror::ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex());
+ ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), sizeof(void*));
CHECK(art_field != nullptr);
return art_field;
}
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index cea06f5..9988f84 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -25,11 +25,11 @@
namespace art {
+class ArtField;
struct FieldOffsets;
namespace mirror {
-class ArtField;
class Class;
class String;
@@ -93,10 +93,10 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
- mirror::ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kTransactionActive = false>
- static mirror::Field* CreateFromArtField(Thread* self, mirror::ArtField* field,
+ static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index d690163..af0e856 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -225,18 +225,6 @@
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline bool Object::IsArtField() {
- return GetClass<kVerifyFlags, kReadBarrierOption>()->
- template IsArtFieldClass<kReadBarrierOption>();
-}
-
-template<VerifyObjectFlags kVerifyFlags>
-inline ArtField* Object::AsArtField() {
- DCHECK(IsArtField<kVerifyFlags>());
- return down_cast<ArtField*>(this);
-}
-
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsArtMethod() {
return GetClass<kVerifyFlags, kReadBarrierOption>()->
template IsArtMethodClass<kReadBarrierOption>();
@@ -318,8 +306,8 @@
template<VerifyObjectFlags kVerifyFlags>
inline IntArray* Object::AsIntArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveInt() ||
+ CHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ CHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveInt() ||
GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
return down_cast<IntArray*>(this);
}
@@ -327,8 +315,8 @@
template<VerifyObjectFlags kVerifyFlags>
inline LongArray* Object::AsLongArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
- DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveLong() ||
+ CHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ CHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveLong() ||
GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
return down_cast<LongArray*>(this);
}
@@ -403,7 +391,6 @@
}
DCHECK_GE(result, sizeof(Object))
<< " class=" << PrettyTypeOf(GetClass<kNewFlags, kReadBarrierOption>());
- DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
return result;
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 57ac46f..04d0cd8 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -204,22 +204,19 @@
return;
}
for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
- ObjectArray<ArtField>* fields = cur->GetIFields();
- if (fields != NULL) {
- size_t num_ifields = fields->GetLength();
- for (size_t i = 0; i < num_ifields; ++i) {
- StackHandleScope<1> hs(Thread::Current());
- Handle<Object> h_object(hs.NewHandle(new_value));
- ArtField* field = fields->Get(i);
- if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
- CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
- // TODO: resolve the field type for moving GC.
- mirror::Class* field_type = field->GetType<!kMovingCollector>();
- if (field_type != nullptr) {
- CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
- }
- return;
+ ArtField* fields = cur->GetIFields();
+ for (size_t i = 0, count = cur->NumInstanceFields(); i < count; ++i) {
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<Object> h_object(hs.NewHandle(new_value));
+ ArtField* field = &fields[i];
+ if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
+ CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
+ // TODO: resolve the field type for moving GC.
+ mirror::Class* field_type = field->GetType<!kMovingCollector>();
+ if (field_type != nullptr) {
+ CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
+ return;
}
}
}
@@ -228,20 +225,17 @@
return;
}
if (IsClass()) {
- ObjectArray<ArtField>* fields = AsClass()->GetSFields();
- if (fields != NULL) {
- size_t num_sfields = fields->GetLength();
- for (size_t i = 0; i < num_sfields; ++i) {
- ArtField* field = fields->Get(i);
- if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
- CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
- // TODO: resolve the field type for moving GC.
- mirror::Class* field_type = field->GetType<!kMovingCollector>();
- if (field_type != nullptr) {
- CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
- }
- return;
+ ArtField* fields = AsClass()->GetSFields();
+ for (size_t i = 0, count = AsClass()->NumStaticFields(); i < count; ++i) {
+ ArtField* field = &fields[i];
+ if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
+ CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
+ // TODO: resolve the field type for moving GC.
+ mirror::Class* field_type = field->GetType<!kMovingCollector>();
+ if (field_type != nullptr) {
+ CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
}
+ return;
}
}
}
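With instance and static fields now held in contiguous native ArtField arrays, the null-array guard above disappears and iteration reduces to index arithmetic over a count. A minimal sketch of the new shape, with Field and Klass as hypothetical stand-ins:

// Sketch of the contiguous-field-array iteration introduced above.
#include <cstddef>

struct Field { int offset; };

struct Klass {
  Field* ifields;       // Contiguous storage; unused when num_ifields is 0.
  size_t num_ifields;
};

Field* FindInstanceFieldAtOffset(Klass* k, int field_offset) {
  Field* fields = k->ifields;
  for (size_t i = 0, count = k->num_ifields; i < count; ++i) {
    Field* field = &fields[i];  // Pointer arithmetic replaces Get(i).
    if (field->offset == field_offset) {
      return field;
    }
  }
  return nullptr;
}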
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index cfc8549..343c9bc 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -24,6 +24,7 @@
namespace art {
+class ArtField;
class ImageWriter;
class LockWord;
class Monitor;
@@ -33,7 +34,6 @@
namespace mirror {
-class ArtField;
class ArtMethod;
class Array;
class Class;
@@ -111,7 +111,10 @@
Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int32_t IdentityHashCode() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t IdentityHashCode() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
@@ -191,12 +194,6 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* AsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ArtField* AsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 80d5135..30bc1cd 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -22,7 +22,6 @@
#include "array-inl.h"
#include "base/stringprintf.h"
#include "gc/heap.h"
-#include "mirror/art_field.h"
#include "mirror/class.h"
#include "runtime.h"
#include "handle_scope-inl.h"
@@ -36,13 +35,13 @@
inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
int32_t length, gc::AllocatorType allocator_type) {
Array* array = Array::Alloc<true>(self, object_array_class, length,
- ComponentSizeShiftWidth<sizeof(HeapReference<Object>)>(),
+ ComponentSizeShiftWidth(sizeof(HeapReference<Object>)),
allocator_type);
if (UNLIKELY(array == nullptr)) {
return nullptr;
} else {
DCHECK_EQ(array->GetClass()->GetComponentSizeShift(),
- ComponentSizeShiftWidth<sizeof(HeapReference<Object>)>());
+ ComponentSizeShiftWidth(sizeof(HeapReference<Object>)));
return array->AsObjectArray<T>();
}
}
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 5edda8b..055be85 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -91,7 +91,7 @@
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
-// Standard compressed reference used in the runtime. Used for StackRefernce and GC roots.
+// Standard compressed reference used in the runtime. Used for StackReference and GC roots.
template<class MirrorType>
class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
public:
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 1ce298d..747a008 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -722,14 +722,12 @@
"Ljava/util/Comparator;") == NULL);
// Right name and type.
- Handle<ArtField> f1(hs.NewHandle(
- c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;")));
- Handle<ArtField> f2(hs.NewHandle(
- mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER",
- "Ljava/util/Comparator;")));
- EXPECT_TRUE(f1.Get() != NULL);
- EXPECT_TRUE(f2.Get() != NULL);
- EXPECT_EQ(f1.Get(), f2.Get());
+ ArtField* f1 = c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
+ ArtField* f2 = mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER",
+ "Ljava/util/Comparator;");
+ EXPECT_TRUE(f1 != NULL);
+ EXPECT_TRUE(f2 != NULL);
+ EXPECT_EQ(f1, f2);
// TODO: test static fields via superclasses.
// TODO: test static fields via interfaces.
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 6d1e721..2351463 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -20,6 +20,7 @@
#include <string>
#include "atomic.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "handle_scope-inl.h"
#include "mirror/class-inl.h"
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 2724d91..876e29a 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -19,6 +19,9 @@
#include <string.h>
#include <unistd.h>
+#include <sstream>
+
+#include "base/histogram-inl.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
@@ -329,6 +332,123 @@
env->ReleasePrimitiveArrayCritical(data, arr, 0);
}
+// The runtime stat names for VMDebug.getRuntimeStat().
+enum class VMDebugRuntimeStatId {
+ kArtGcGcCount = 0,
+ kArtGcGcTime,
+ kArtGcBytesAllocated,
+ kArtGcBytesFreed,
+ kArtGcBlockingGcCount,
+ kArtGcBlockingGcTime,
+ kArtGcGcCountRateHistogram,
+ kArtGcBlockingGcCountRateHistogram,
+ kNumRuntimeStats,
+};
+
+static jobject VMDebug_getRuntimeStatInternal(JNIEnv* env, jclass, jint statId) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ switch (static_cast<VMDebugRuntimeStatId>(statId)) {
+ case VMDebugRuntimeStatId::kArtGcGcCount: {
+ std::string output = std::to_string(heap->GetGcCount());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcGcTime: {
+ std::string output = std::to_string(NsToMs(heap->GetGcTime()));
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBytesAllocated: {
+ std::string output = std::to_string(heap->GetBytesAllocatedEver());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBytesFreed: {
+ std::string output = std::to_string(heap->GetBytesFreedEver());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcCount: {
+ std::string output = std::to_string(heap->GetBlockingGcCount());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcTime: {
+ std::string output = std::to_string(NsToMs(heap->GetBlockingGcTime()));
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcGcCountRateHistogram: {
+ std::ostringstream output;
+ heap->DumpGcCountRateHistogram(output);
+ return env->NewStringUTF(output.str().c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram: {
+ std::ostringstream output;
+ heap->DumpBlockingGcCountRateHistogram(output);
+ return env->NewStringUTF(output.str().c_str());
+ }
+ default:
+ return nullptr;
+ }
+}
+
+static bool SetRuntimeStatValue(JNIEnv* env, jobjectArray result, VMDebugRuntimeStatId id,
+ std::string value) {
+ ScopedLocalRef<jstring> jvalue(env, env->NewStringUTF(value.c_str()));
+ if (jvalue.get() == nullptr) {
+ return false;
+ }
+ env->SetObjectArrayElement(result, static_cast<jint>(id), jvalue.get());
+ return true;
+}
+
+static jobjectArray VMDebug_getRuntimeStatsInternal(JNIEnv* env, jclass) {
+ jobjectArray result = env->NewObjectArray(
+ static_cast<jint>(VMDebugRuntimeStatId::kNumRuntimeStats),
+ WellKnownClasses::java_lang_String,
+ nullptr);
+ if (result == nullptr) {
+ return nullptr;
+ }
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCount,
+ std::to_string(heap->GetGcCount()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcTime,
+ std::to_string(NsToMs(heap->GetGcTime())))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesAllocated,
+ std::to_string(heap->GetBytesAllocatedEver()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesFreed,
+ std::to_string(heap->GetBytesFreedEver()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCount,
+ std::to_string(heap->GetBlockingGcCount()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcTime,
+ std::to_string(NsToMs(heap->GetBlockingGcTime())))) {
+ return nullptr;
+ }
+ {
+ std::ostringstream output;
+ heap->DumpGcCountRateHistogram(output);
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCountRateHistogram,
+ output.str())) {
+ return nullptr;
+ }
+ }
+ {
+ std::ostringstream output;
+ heap->DumpBlockingGcCountRateHistogram(output);
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram,
+ output.str())) {
+ return nullptr;
+ }
+ }
+ return result;
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
@@ -359,6 +479,8 @@
NATIVE_METHOD(VMDebug, stopInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, stopMethodTracing, "()V"),
NATIVE_METHOD(VMDebug, threadCpuTimeNanos, "!()J"),
+ NATIVE_METHOD(VMDebug, getRuntimeStatInternal, "(I)Ljava/lang/String;"),
+ NATIVE_METHOD(VMDebug, getRuntimeStatsInternal, "()[Ljava/lang/String;")
};
void register_dalvik_system_VMDebug(JNIEnv* env) {
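Both new VMDebug natives reduce to mapping a stat id onto a string producer, returning null for unknown ids. A JNI-free sketch of that dispatch follows; the ids and constants are placeholders, not the full list above.

// Hedged sketch of the stat-id dispatch in VMDebug_getRuntimeStatInternal.
#include <cstdint>
#include <optional>
#include <string>

enum class StatId : int32_t { kGcCount = 0, kGcTime, kNumStats };

std::optional<std::string> GetRuntimeStat(StatId id) {
  switch (id) {
    case StatId::kGcCount:
      return std::to_string(42);  // Placeholder for heap->GetGcCount().
    case StatId::kGcTime:
      return std::to_string(7);   // Placeholder for NsToMs(heap->GetGcTime()).
    default:
      return std::nullopt;        // Unknown id; mirrors the nullptr return.
  }
}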
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 760038a..196a231 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -250,8 +250,7 @@
class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
public:
- explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) {
- }
+ explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -313,8 +312,8 @@
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = dex_cache->GetResolvedField(field_idx);
- if (field != NULL) {
+ ArtField* field = dex_cache->GetResolvedField(field_idx, sizeof(void*));
+ if (field != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -334,7 +333,7 @@
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved field " << PrettyField(field);
- dex_cache->SetResolvedField(field_idx, field);
+ dex_cache->SetResolvedField(field_idx, field, sizeof(void*));
}
// Based on ClassLinker::ResolveMethod.
@@ -437,7 +436,7 @@
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
- mirror::ArtField* field = dex_cache->GetResolvedField(j);
+ ArtField* field = linker->GetResolvedField(j, dex_cache);
if (field != NULL) {
filled->num_fields++;
}
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 022c56f..af01a02 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -18,14 +18,18 @@
#include <stdlib.h>
+#include <cutils/process_name.h>
+
#include "arch/instruction_set.h"
#include "debugger.h"
#include "java_vm_ext.h"
#include "jit/jit.h"
#include "jni_internal.h"
#include "JNIHelp.h"
+#include "scoped_thread_state_change.h"
#include "ScopedUtfChars.h"
#include "thread-inl.h"
+#include "trace.h"
#if defined(__linux__)
#include <sys/prctl.h>
@@ -121,6 +125,11 @@
runtime->PreZygoteFork();
+ if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
+ // Tracing active, pause it.
+ Trace::Pause();
+ }
+
// Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
return reinterpret_cast<jlong>(ThreadForEnv(env));
}
@@ -132,6 +141,49 @@
thread->InitAfterFork();
EnableDebugFeatures(debug_flags);
+ // Update tracing.
+ if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
+ Trace::TraceOutputMode output_mode = Trace::GetOutputMode();
+ Trace::TraceMode trace_mode = Trace::GetMode();
+
+ // Just drop it.
+ Trace::Abort();
+
+ // Only restart if it was streaming mode.
+ // TODO: Expose buffer size, so we can also do file mode.
+ if (output_mode == Trace::TraceOutputMode::kStreaming) {
+ const char* proc_name_cutils = get_process_name();
+ std::string proc_name;
+ if (proc_name_cutils != nullptr) {
+ proc_name = proc_name_cutils;
+ }
+ if (proc_name_cutils == nullptr || proc_name == "zygote" || proc_name == "zygote64") {
+ // Either no process name, or the name hasn't been changed yet. Just use the pid.
+ pid_t pid = getpid();
+ proc_name = StringPrintf("%u", static_cast<uint32_t>(pid));
+ }
+
+ std::string profiles_dir(GetDalvikCache("profiles", false /* create_if_absent */));
+ if (!profiles_dir.empty()) {
+ std::string trace_file = StringPrintf("%s/%s.trace.bin", profiles_dir.c_str(),
+ proc_name.c_str());
+ Trace::Start(trace_file.c_str(),
+ -1,
+ -1, // TODO: Expose buffer size.
+ 0, // TODO: Expose flags.
+ output_mode,
+ trace_mode,
+ 0); // TODO: Expose interval.
+ if (thread->IsExceptionPending()) {
+ ScopedObjectAccess soa(env);
+ thread->ClearException();
+ }
+ } else {
+ LOG(ERROR) << "Profiles dir is empty.";
+ }
+ }
+ }
+
if (instruction_set != nullptr) {
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
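The post-fork restart names the streaming trace file after the process, falling back to the pid while the name is still unset or the zygote's. A standalone sketch of just that naming rule; TraceFileName is a hypothetical helper, not ART API.

// Sketch of the trace-file naming logic in ZygoteHooks above.
#include <cstdio>
#include <string>
#include <unistd.h>

std::string TraceFileName(const char* proc_name_cutils,
                          const std::string& profiles_dir) {
  std::string proc_name;
  if (proc_name_cutils != nullptr) {
    proc_name = proc_name_cutils;
  }
  if (proc_name.empty() || proc_name == "zygote" || proc_name == "zygote64") {
    // No process name yet, or it is still the zygote's; use the pid.
    char buf[16];
    std::snprintf(buf, sizeof(buf), "%u", static_cast<unsigned>(getpid()));
    proc_name = buf;
  }
  return profiles_dir + "/" + proc_name + ".trace.bin";
}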
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index c893f0a..5ad18f8 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -16,12 +16,12 @@
#include "java_lang_Class.h"
+#include "art_field-inl.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
-#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
@@ -91,6 +91,18 @@
return soa.AddLocalReference<jclass>(c.Get());
}
+static jobject Class_findOverriddenMethodIfProxy(JNIEnv* env, jclass, jobject art_method) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::ArtMethod* method = soa.Decode<mirror::ArtMethod*>(art_method);
+ mirror::Class* declaring_klass = method->GetDeclaringClass();
+ if (!declaring_klass->IsProxyClass()) {
+ return art_method;
+ }
+ uint32_t dex_method_index = method->GetDexMethodIndex();
+ mirror::ArtMethod* overridden_method = method->GetDexCacheResolvedMethods()->Get(dex_method_index);
+ return soa.AddLocalReference<jobject>(overridden_method);
+}
+
static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
@@ -107,33 +119,33 @@
static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
Thread* self, mirror::Class* klass, bool public_only, bool force_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackHandleScope<3> hs(self);
- auto h_ifields = hs.NewHandle(klass->GetIFields());
- auto h_sfields = hs.NewHandle(klass->GetSFields());
- const int32_t num_ifields = h_ifields.Get() != nullptr ? h_ifields->GetLength() : 0;
- const int32_t num_sfields = h_sfields.Get() != nullptr ? h_sfields->GetLength() : 0;
- int32_t array_size = num_ifields + num_sfields;
+ StackHandleScope<1> hs(self);
+ auto* ifields = klass->GetIFields();
+ auto* sfields = klass->GetSFields();
+ const auto num_ifields = klass->NumInstanceFields();
+ const auto num_sfields = klass->NumStaticFields();
+ size_t array_size = num_ifields + num_sfields;
if (public_only) {
// Let's subtract all the non-public fields.
- for (int32_t i = 0; i < num_ifields; ++i) {
- if (!h_ifields->GetWithoutChecks(i)->IsPublic()) {
+ for (size_t i = 0; i < num_ifields; ++i) {
+ if (!ifields[i].IsPublic()) {
--array_size;
}
}
- for (int32_t i = 0; i < num_sfields; ++i) {
- if (!h_sfields->GetWithoutChecks(i)->IsPublic()) {
+ for (size_t i = 0; i < num_sfields; ++i) {
+ if (!sfields[i].IsPublic()) {
--array_size;
}
}
}
- int32_t array_idx = 0;
+ size_t array_idx = 0;
auto object_array = hs.NewHandle(mirror::ObjectArray<mirror::Field>::Alloc(
self, mirror::Field::ArrayClass(), array_size));
if (object_array.Get() == nullptr) {
return nullptr;
}
- for (int32_t i = 0; i < num_ifields; ++i) {
- auto* art_field = h_ifields->GetWithoutChecks(i);
+ for (size_t i = 0; i < num_ifields; ++i) {
+ auto* art_field = &ifields[i];
if (!public_only || art_field->IsPublic()) {
auto* field = mirror::Field::CreateFromArtField(self, art_field, force_resolve);
if (field == nullptr) {
@@ -146,8 +158,8 @@
object_array->SetWithoutChecks<false>(array_idx++, field);
}
}
- for (int32_t i = 0; i < num_sfields; ++i) {
- auto* art_field = h_sfields->GetWithoutChecks(i);
+ for (size_t i = 0; i < num_sfields; ++i) {
+ auto* art_field = &sfields[i];
if (!public_only || art_field->IsPublic()) {
auto* field = mirror::Field::CreateFromArtField(self, art_field, force_resolve);
if (field == nullptr) {
@@ -185,17 +197,16 @@
// Performs a binary search through an array of fields. TODO: Is this fast enough if we don't use
// the dex cache for lookups? I think CompareModifiedUtf8ToUtf16AsCodePointValues should be fairly
// fast.
-ALWAYS_INLINE static inline mirror::ArtField* FindFieldByName(
- Thread* self ATTRIBUTE_UNUSED, mirror::String* name,
- mirror::ObjectArray<mirror::ArtField>* fields)
+ALWAYS_INLINE static inline ArtField* FindFieldByName(
+ Thread* self ATTRIBUTE_UNUSED, mirror::String* name, ArtField* fields, size_t num_fields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t low = 0;
- uint32_t high = fields->GetLength();
+ size_t low = 0;
+ size_t high = num_fields;
const uint16_t* const data = name->GetCharArray()->GetData() + name->GetOffset();
const size_t length = name->GetLength();
while (low < high) {
auto mid = (low + high) / 2;
- mirror::ArtField* const field = fields->GetWithoutChecks(mid);
+ ArtField* const field = &fields[mid];
int result = CompareModifiedUtf8ToUtf16AsCodePointValues(field->GetName(), data, length);
// Alternate approach, only a few % faster at the cost of more allocations.
// int result = field->GetStringName(self, true)->CompareTo(name);
@@ -208,8 +219,8 @@
}
}
if (kIsDebugBuild) {
- for (int32_t i = 0; i < fields->GetLength(); ++i) {
- CHECK_NE(fields->GetWithoutChecks(i)->GetName(), name->ToModifiedUtf8());
+ for (size_t i = 0; i < num_fields; ++i) {
+ CHECK_NE(fields[i].GetName(), name->ToModifiedUtf8());
}
}
return nullptr;
@@ -219,18 +230,14 @@
Thread* self, mirror::Class* c, mirror::String* name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
auto* instance_fields = c->GetIFields();
- if (instance_fields != nullptr) {
- auto* art_field = FindFieldByName(self, name, instance_fields);
- if (art_field != nullptr) {
- return mirror::Field::CreateFromArtField(self, art_field, true);
- }
+ auto* art_field = FindFieldByName(self, name, instance_fields, c->NumInstanceFields());
+ if (art_field != nullptr) {
+ return mirror::Field::CreateFromArtField(self, art_field, true);
}
auto* static_fields = c->GetSFields();
- if (static_fields != nullptr) {
- auto* art_field = FindFieldByName(self, name, static_fields);
- if (art_field != nullptr) {
- return mirror::Field::CreateFromArtField(self, art_field, true);
- }
+ art_field = FindFieldByName(self, name, static_fields, c->NumStaticFields());
+ if (art_field != nullptr) {
+ return mirror::Field::CreateFromArtField(self, art_field, true);
}
return nullptr;
}
@@ -264,6 +271,8 @@
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Class, classForName, "!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, findOverriddenMethodIfProxy,
+ "!(Ljava/lang/reflect/ArtMethod;)Ljava/lang/reflect/ArtMethod;"),
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
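FindFieldByName above binary-searches a contiguous, name-sorted ArtField array in place of the old ObjectArray lookup. A self-contained sketch of the same search, assuming the array is sorted ascending by name and substituting plain strcmp for CompareModifiedUtf8ToUtf16AsCodePointValues:

// Standalone sketch of the binary search in java_lang_Class.cc above.
#include <cstddef>
#include <cstring>

struct Field {
  const char* name;  // Stand-in for ArtField::GetName().
};

Field* FindFieldByName(const char* name, Field* fields, size_t num_fields) {
  size_t low = 0;
  size_t high = num_fields;
  while (low < high) {
    const size_t mid = (low + high) / 2;
    const int result = std::strcmp(fields[mid].name, name);
    if (result < 0) {
      low = mid + 1;  // Target sorts after fields[mid].
    } else if (result > 0) {
      high = mid;     // Target sorts before fields[mid].
    } else {
      return &fields[mid];
    }
  }
  return nullptr;     // Not declared on this class.
}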
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 27eae46..1198c2e 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -18,7 +18,7 @@
#include "dex_file.h"
#include "jni_internal.h"
-#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
#include "well_known_classes.h"
@@ -48,8 +48,38 @@
args);
}
+static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache);
+ return soa.AddLocalReference<jobject>(dex_cache->GetResolvedType(type_index));
+}
+
+static jobject DexCache_getResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache);
+ return soa.AddLocalReference<jobject>(dex_cache->GetResolvedString(string_index));
+}
+
+static void DexCache_setResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index,
+ jobject type) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache);
+ dex_cache->SetResolvedType(type_index, soa.Decode<mirror::Class*>(type));
+}
+
+static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index,
+ jobject string) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache);
+ dex_cache->SetResolvedString(string_index, soa.Decode<mirror::String*>(string));
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexCache, getDexNative, "!()Lcom/android/dex/Dex;"),
+ NATIVE_METHOD(DexCache, getResolvedType, "!(I)Ljava/lang/Class;"),
+ NATIVE_METHOD(DexCache, getResolvedString, "!(I)Ljava/lang/String;"),
+ NATIVE_METHOD(DexCache, setResolvedType, "!(ILjava/lang/Class;)V"),
+ NATIVE_METHOD(DexCache, setResolvedString, "!(ILjava/lang/String;)V"),
};
void register_java_lang_DexCache(JNIEnv* env) {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 35932e0..0c39f2b 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -44,8 +44,8 @@
if (loader != nullptr) {
// Try the common case.
StackHandleScope<1> hs(soa.Self());
- c = cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
- hs.NewHandle(loader));
+ cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
+ hs.NewHandle(loader), &c);
if (c != nullptr) {
return soa.AddLocalReference<jclass>(c);
}
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index 1ffcbdf..eddd7de 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -61,10 +61,8 @@
return NULL;
}
DCHECK(array_class->IsObjectArrayClass());
- mirror::Array* new_array = mirror::Array::Alloc<true>(
- soa.Self(), array_class, length,
- ComponentSizeShiftWidth<sizeof(mirror::HeapReference<mirror::Object>)>(),
- runtime->GetHeap()->GetCurrentAllocator());
+ mirror::Array* new_array = mirror::ObjectArray<mirror::Object*>::Alloc(
+ soa.Self(), array_class, length, runtime->GetHeap()->GetCurrentAllocator());
return soa.AddLocalReference<jobject>(new_array);
}
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 765f548..5e1a4c5 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -57,8 +57,6 @@
bool movable = true;
if (!kMovingMethods && c->IsArtMethodClass()) {
movable = false;
- } else if (!kMovingFields && c->IsArtFieldClass()) {
- movable = false;
} else if (!kMovingClasses && c->IsClassClass()) {
movable = false;
}
diff --git a/runtime/oat.h b/runtime/oat.h
index de95fef..a31e09a 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -38,6 +38,7 @@
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDex2OatHostKey = "dex2oat-host";
static constexpr const char* kPicKey = "pic";
+ static constexpr const char* kClassPathKey = "classpath";
static OatHeader* Create(InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 81703b1..d3c4b49 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -20,6 +20,7 @@
#include <string.h>
#include <unistd.h>
+#include <cstdlib>
#include <sstream>
#include "base/bit_vector.h"
@@ -592,4 +593,90 @@
// TODO: Check against oat_patches. b/18144996
}
+static constexpr char kDexClassPathEncodingSeparator = '*';
+
+std::string OatFile::EncodeDexFileDependencies(const std::vector<const DexFile*>& dex_files) {
+ std::ostringstream out;
+
+ for (const DexFile* dex_file : dex_files) {
+ out << dex_file->GetLocation().c_str();
+ out << kDexClassPathEncodingSeparator;
+ out << dex_file->GetLocationChecksum();
+ out << kDexClassPathEncodingSeparator;
+ }
+
+ return out.str();
+}
+
+bool OatFile::CheckStaticDexFileDependencies(const char* dex_dependencies, std::string* msg) {
+ if (dex_dependencies == nullptr || dex_dependencies[0] == 0) {
+ // No dependencies.
+ return true;
+ }
+
+ // Assumption: this is not performance-critical. So it's OK to do this with a std::string and
+ // Split() instead of manual parsing of the combined char*.
+ std::vector<std::string> split;
+ Split(dex_dependencies, kDexClassPathEncodingSeparator, &split);
+ if (split.size() % 2 != 0) {
+ // Expected pairs of location and checksum.
+ *msg = StringPrintf("Odd number of elements in dependency list %s", dex_dependencies);
+ return false;
+ }
+
+ for (auto it = split.begin(), end = split.end(); it != end; it += 2) {
+ std::string& location = *it;
+ std::string& checksum = *(it + 1);
+ int64_t converted = strtoll(checksum.c_str(), nullptr, 10);
+ if (converted == 0) {
+ // Conversion error.
+ *msg = StringPrintf("Conversion error for %s", checksum.c_str());
+ return false;
+ }
+
+ uint32_t dex_checksum;
+ std::string error_msg;
+ if (DexFile::GetChecksum(DexFile::GetDexCanonicalLocation(location.c_str()).c_str(),
+ &dex_checksum,
+ &error_msg)) {
+ if (converted != dex_checksum) {
+ *msg = StringPrintf("Checksums don't match for %s: %" PRId64 " vs %u",
+ location.c_str(), converted, dex_checksum);
+ return false;
+ }
+ } else {
+ // Problem retrieving checksum.
+ // TODO: odex files?
+ *msg = StringPrintf("Could not retrieve checksum for %s: %s", location.c_str(),
+ error_msg.c_str());
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool OatFile::GetDexLocationsFromDependencies(const char* dex_dependencies,
+ std::vector<std::string>* locations) {
+ DCHECK(locations != nullptr);
+ if (dex_dependencies == nullptr || dex_dependencies[0] == 0) {
+ return true;
+ }
+
+ // Assumption: this is not performance-critical. So it's OK to do this with a std::string and
+ // Split() instead of manual parsing of the combined char*.
+ std::vector<std::string> split;
+ Split(dex_dependencies, kDexClassPathEncodingSeparator, &split);
+ if (split.size() % 2 != 0) {
+ // Expected pairs of location and checksum.
+ return false;
+ }
+
+ for (auto it = split.begin(), end = split.end(); it != end; it += 2) {
+ locations->push_back(*it);
+ }
+
+ return true;
+}
+
} // namespace art
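EncodeDexFileDependencies emits alternating location and checksum tokens, each terminated by '*', which is why both checkers above reject any split with an odd element count. A hedged standalone sketch of the same round trip using only the standard library; DexInfo is a hypothetical stand-in for the DexFile accessors.

// Sketch of the "location*checksum*" dependency encoding above.
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

struct DexInfo {
  std::string location;
  uint32_t checksum;
};

std::string Encode(const std::vector<DexInfo>& files) {
  std::ostringstream out;
  for (const DexInfo& f : files) {
    out << f.location << '*' << f.checksum << '*';
  }
  return out.str();
}

// Splitting on '*' yields alternating location/checksum tokens, so any
// well-formed encoding splits into an even number of elements. Checksum
// validation (strtoll plus a file-checksum comparison) is a separate step.
std::vector<std::string> SplitDependencies(const std::string& encoded) {
  std::vector<std::string> parts;
  std::string token;
  std::istringstream in(encoded);
  while (std::getline(in, token, '*')) {
    parts.push_back(token);
  }
  return parts;
}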
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 73a8c8e..a5d5ae8 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -248,6 +248,18 @@
static std::string ResolveRelativeEncodedDexLocation(
const char* abs_dex_location, const std::string& rel_dex_location);
+ // Create a dependency list (dex locations and checksums) for the given dex files.
+ static std::string EncodeDexFileDependencies(const std::vector<const DexFile*>& dex_files);
+
+ // Check the given dependency list against the corresponding dex files - hence the name
+ // "Static": this does not check the class-loader environment, only whether files changed.
+ static bool CheckStaticDexFileDependencies(const char* dex_dependencies, std::string* msg);
+
+ // Get the dex locations of a dependency list. Note: this is *not* cleaned for synthetic
+ // locations of multidex files.
+ static bool GetDexLocationsFromDependencies(const char* dex_dependencies,
+ std::vector<std::string>* locations);
+
private:
static void CheckLocation(const std::string& location);
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index d2362a2..0c942d2 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -25,11 +25,11 @@
#include <backtrace/BacktraceMap.h>
#include <gtest/gtest.h>
-#include "class_linker.h"
+#include "art_field-inl.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
#include "mem_map.h"
-#include "mirror/art_field-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -959,25 +959,25 @@
ASSERT_FALSE(dexfile.Get() == nullptr);
linker->EnsureInitialized(soa.Self(), dexfile, true, true);
- mirror::ArtField* no_dexopt_needed = mirror::Class::FindStaticField(
+ ArtField* no_dexopt_needed = mirror::Class::FindStaticField(
soa.Self(), dexfile, "NO_DEXOPT_NEEDED", "I");
ASSERT_FALSE(no_dexopt_needed == nullptr);
EXPECT_EQ(no_dexopt_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, no_dexopt_needed->GetInt(dexfile.Get()));
- mirror::ArtField* dex2oat_needed = mirror::Class::FindStaticField(
+ ArtField* dex2oat_needed = mirror::Class::FindStaticField(
soa.Self(), dexfile, "DEX2OAT_NEEDED", "I");
ASSERT_FALSE(dex2oat_needed == nullptr);
EXPECT_EQ(dex2oat_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
EXPECT_EQ(OatFileAssistant::kDex2OatNeeded, dex2oat_needed->GetInt(dexfile.Get()));
- mirror::ArtField* patchoat_needed = mirror::Class::FindStaticField(
+ ArtField* patchoat_needed = mirror::Class::FindStaticField(
soa.Self(), dexfile, "PATCHOAT_NEEDED", "I");
ASSERT_FALSE(patchoat_needed == nullptr);
EXPECT_EQ(patchoat_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
EXPECT_EQ(OatFileAssistant::kPatchOatNeeded, patchoat_needed->GetInt(dexfile.Get()));
- mirror::ArtField* self_patchoat_needed = mirror::Class::FindStaticField(
+ ArtField* self_patchoat_needed = mirror::Class::FindStaticField(
soa.Self(), dexfile, "SELF_PATCHOAT_NEEDED", "I");
ASSERT_FALSE(self_patchoat_needed == nullptr);
EXPECT_EQ(self_patchoat_needed->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index f2213e9..a88553c 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -20,9 +20,15 @@
#include <gtest/gtest.h>
+#include "common_runtime_test.h"
+#include "scoped_thread_state_change.h"
+
namespace art {
-TEST(OatFileTest, ResolveRelativeEncodedDexLocation) {
+class OatFileTest : public CommonRuntimeTest {
+};
+
+TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation) {
EXPECT_EQ(std::string("/data/app/foo/base.apk"),
OatFile::ResolveRelativeEncodedDexLocation(
nullptr, "/data/app/foo/base.apk"));
@@ -56,4 +62,54 @@
"/data/app/foo/base.apk", "o/base.apk"));
}
+static std::vector<const DexFile*> ToConstDexFiles(
+ const std::vector<std::unique_ptr<const DexFile>>& in) {
+ std::vector<const DexFile*> ret;
+ for (auto& d : in) {
+ ret.push_back(d.get());
+ }
+ return ret;
+}
+
+TEST_F(OatFileTest, DexFileDependencies) {
+ std::string error_msg;
+
+ // No dependencies.
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(nullptr, &error_msg)) << error_msg;
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies("", &error_msg)) << error_msg;
+
+ // Ill-formed dependencies.
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc", &error_msg));
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*def", &error_msg));
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*def*", &error_msg));
+
+ // Unsatisfiable dependency.
+ EXPECT_FALSE(OatFile::CheckStaticDexFileDependencies("abc*123*", &error_msg));
+
+ // Load some dex files to be able to do a real test.
+ ScopedObjectAccess soa(Thread::Current());
+
+ std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Main");
+ std::vector<const DexFile*> dex_files_const1 = ToConstDexFiles(dex_files1);
+ std::string encoding1 = OatFile::EncodeDexFileDependencies(dex_files_const1);
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding1.c_str(), &error_msg))
+ << error_msg << " " << encoding1;
+ std::vector<std::string> split1;
+ EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding1.c_str(), &split1));
+ ASSERT_EQ(split1.size(), 1U);
+ EXPECT_EQ(split1[0], dex_files_const1[0]->GetLocation());
+
+ std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+ EXPECT_GT(dex_files2.size(), 1U);
+ std::vector<const DexFile*> dex_files_const2 = ToConstDexFiles(dex_files2);
+ std::string encoding2 = OatFile::EncodeDexFileDependencies(dex_files_const2);
+ EXPECT_TRUE(OatFile::CheckStaticDexFileDependencies(encoding2.c_str(), &error_msg))
+ << error_msg << " " << encoding2;
+ std::vector<std::string> split2;
+ EXPECT_TRUE(OatFile::GetDexLocationsFromDependencies(encoding2.c_str(), &split2));
+ ASSERT_EQ(split2.size(), 2U);
+ EXPECT_EQ(split2[0], dex_files_const2[0]->GetLocation());
+ EXPECT_EQ(split2[1], dex_files_const2[1]->GetLocation());
+}
+
} // namespace art
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index c23f744..0758b27 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -216,6 +216,8 @@
.Define("-Xmethod-trace-file-size:_")
.WithType<unsigned int>()
.IntoKey(M::MethodTraceFileSize)
+ .Define("-Xmethod-trace-stream")
+ .IntoKey(M::MethodTraceStreaming)
.Define("-Xprofile:_")
.WithType<TraceClockSource>()
.WithValueMap({{"threadcpuclock", TraceClockSource::kThreadCpu},
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 32bfdaf..3818487 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -27,21 +27,11 @@
static constexpr size_t kObjectReferenceSize = 4;
-template<size_t kComponentSize>
-size_t ComponentSizeShiftWidth() {
- switch (kComponentSize) {
- case 1:
- return 0U;
- case 2:
- return 1U;
- case 4:
- return 2U;
- case 8:
- return 3U;
- default:
- LOG(FATAL) << "Unexpected component size : " << kComponentSize;
- return 0U;
- }
+constexpr size_t ComponentSizeShiftWidth(size_t component_size) {
+ return component_size == 1u ? 0u :
+ component_size == 2u ? 1u :
+ component_size == 4u ? 2u :
+ component_size == 8u ? 3u : 0u;
}
class Primitive {
@@ -95,7 +85,7 @@
case kPrimFloat: return 2;
case kPrimLong:
case kPrimDouble: return 3;
- case kPrimNot: return ComponentSizeShiftWidth<kObjectReferenceSize>();
+ case kPrimNot: return ComponentSizeShiftWidth(kObjectReferenceSize);
default:
LOG(FATAL) << "Invalid type " << static_cast<int>(type);
return 0;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index cb97049..6061f73 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -17,8 +17,9 @@
#include <jni.h>
#include <vector>
+#include "art_field-inl.h"
+#include "class_linker-inl.h"
#include "common_compiler_test.h"
-#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -165,14 +166,12 @@
ASSERT_TRUE(proxyClass->IsProxyClass());
ASSERT_TRUE(proxyClass->IsInitialized());
- Handle<mirror::ObjectArray<mirror::ArtField>> instance_fields(
- hs.NewHandle(proxyClass->GetIFields()));
- EXPECT_TRUE(instance_fields.Get() == nullptr);
+ ArtField* instance_fields = proxyClass->GetIFields();
+ EXPECT_TRUE(instance_fields == nullptr);
- Handle<mirror::ObjectArray<mirror::ArtField>> static_fields(
- hs.NewHandle(proxyClass->GetSFields()));
- ASSERT_TRUE(static_fields.Get() != nullptr);
- ASSERT_EQ(2, static_fields->GetLength());
+ ArtField* static_fields = proxyClass->GetSFields();
+ ASSERT_TRUE(static_fields != nullptr);
+ ASSERT_EQ(2u, proxyClass->NumStaticFields());
Handle<mirror::Class> interfacesFieldClass(
hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Class;")));
@@ -182,21 +181,21 @@
ASSERT_TRUE(throwsFieldClass.Get() != nullptr);
// Test "Class[] interfaces" field.
- MutableHandle<mirror::ArtField> fhandle = hs.NewHandle(static_fields->Get(0));
- EXPECT_EQ("interfaces", std::string(fhandle->GetName()));
- EXPECT_EQ("[Ljava/lang/Class;", std::string(fhandle->GetTypeDescriptor()));
- EXPECT_EQ(interfacesFieldClass.Get(), fhandle->GetType<true>());
+ ArtField* field = &static_fields[0];
+ EXPECT_STREQ("interfaces", field->GetName());
+ EXPECT_STREQ("[Ljava/lang/Class;", field->GetTypeDescriptor());
+ EXPECT_EQ(interfacesFieldClass.Get(), field->GetType<true>());
std::string temp;
- EXPECT_EQ("L$Proxy1234;", std::string(fhandle->GetDeclaringClass()->GetDescriptor(&temp)));
- EXPECT_FALSE(fhandle->IsPrimitiveType());
+ EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp));
+ EXPECT_FALSE(field->IsPrimitiveType());
// Test "Class[][] throws" field.
- fhandle.Assign(static_fields->Get(1));
- EXPECT_EQ("throws", std::string(fhandle->GetName()));
- EXPECT_EQ("[[Ljava/lang/Class;", std::string(fhandle->GetTypeDescriptor()));
- EXPECT_EQ(throwsFieldClass.Get(), fhandle->GetType<true>());
- EXPECT_EQ("L$Proxy1234;", std::string(fhandle->GetDeclaringClass()->GetDescriptor(&temp)));
- EXPECT_FALSE(fhandle->IsPrimitiveType());
+ field = &static_fields[1];
+ EXPECT_STREQ("throws", field->GetName());
+ EXPECT_STREQ("[[Ljava/lang/Class;", field->GetTypeDescriptor());
+ EXPECT_EQ(throwsFieldClass.Get(), field->GetType<true>());
+ EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp));
+ EXPECT_FALSE(field->IsPrimitiveType());
}
} // namespace art
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 44e2844..9cf4b16 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -15,18 +15,15 @@
*/
#include "inline_method_analyser.h"
+
+#include "art_field-inl.h"
+#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
-#include "mirror/art_field.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
-#include "mirror/class.h"
#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
#include "mirror/dex_cache-inl.h"
-#include "verifier/method_verifier.h"
#include "verifier/method_verifier-inl.h"
/*
@@ -331,7 +328,7 @@
mirror::DexCache* dex_cache = verifier->GetDexCache();
uint32_t method_idx = verifier->GetMethodReference().dex_method_index;
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
- mirror::ArtField* field = dex_cache->GetResolvedField(field_idx);
+ ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(field_idx, dex_cache);
if (method == nullptr || field == nullptr || field->IsStatic()) {
return false;
}
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 471b37c..52d83a2 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -30,7 +30,6 @@
namespace art {
namespace mirror {
- class ArtField;
class ArtMethod;
class Object;
template<typename MirrorType> class HeapReference;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 4e94de4..3e1315c 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -16,16 +16,15 @@
#include "reflection-inl.h"
+#include "art_field-inl.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "jni_internal.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
-#include "mirror/object_array.h"
#include "nth_caller_visitor.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
@@ -238,13 +237,13 @@
#define DO_FIRST_ARG(match_descriptor, get_fn, append) { \
if (LIKELY(arg != nullptr && arg->GetClass<>()->DescriptorEquals(match_descriptor))) { \
- mirror::ArtField* primitive_field = arg->GetClass()->GetIFields()->Get(0); \
+ ArtField* primitive_field = arg->GetClass()->GetInstanceField(0); \
append(primitive_field-> get_fn(arg));
#define DO_ARG(match_descriptor, get_fn, append) \
} else if (LIKELY(arg != nullptr && \
arg->GetClass<>()->DescriptorEquals(match_descriptor))) { \
- mirror::ArtField* primitive_field = arg->GetClass()->GetIFields()->Get(0); \
+ ArtField* primitive_field = arg->GetClass()->GetInstanceField(0); \
append(primitive_field-> get_fn(arg));
#define DO_FAIL(expected) \
@@ -692,7 +691,7 @@
return result.GetL();
}
-static std::string UnboxingFailureKind(mirror::ArtField* f)
+static std::string UnboxingFailureKind(ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (f != nullptr) {
return "field " + PrettyField(f, false);
@@ -701,7 +700,7 @@
}
static bool UnboxPrimitive(mirror::Object* o,
- mirror::Class* dst_class, mirror::ArtField* f,
+ mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
bool unbox_for_result = (f == nullptr);
@@ -742,8 +741,8 @@
JValue boxed_value;
mirror::Class* klass = o->GetClass();
mirror::Class* src_class = nullptr;
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtField* primitive_field = o->GetClass()->GetIFields()->Get(0);
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* primitive_field = &klass->GetIFields()[0];
if (klass->DescriptorEquals("Ljava/lang/Boolean;")) {
src_class = class_linker->FindPrimitiveClass('Z');
boxed_value.SetZ(primitive_field->GetBoolean(o));
@@ -782,7 +781,7 @@
boxed_value, unboxed_value);
}
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value) {
DCHECK(f != nullptr);
return UnboxPrimitive(o, dst_class, f, unboxed_value);
diff --git a/runtime/reflection.h b/runtime/reflection.h
index ff970e5..c2d406a 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -23,18 +23,18 @@
namespace art {
namespace mirror {
- class ArtField;
class ArtMethod;
class Class;
class Object;
} // namespace mirror
+class ArtField;
union JValue;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value)
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1cd0a96..7bebb96 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -48,12 +48,13 @@
#include "arch/x86/registers_x86.h"
#include "arch/x86_64/quick_method_frame_info_x86_64.h"
#include "arch/x86_64/registers_x86_64.h"
+#include "art_field-inl.h"
#include "asm_support.h"
#include "atomic.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/unix_file/fd_file.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "compiler_callbacks.h"
#include "debugger.h"
#include "elf_file.h"
@@ -70,8 +71,8 @@
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jni_internal.h"
+#include "linear_alloc.h"
#include "mirror/array.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -257,7 +258,9 @@
VLOG(jit) << "Deleting jit";
jit_.reset(nullptr);
}
+ linear_alloc_.reset();
arena_pool_.reset();
+ low_4gb_arena_pool_.reset();
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
@@ -441,7 +444,7 @@
hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));
- mirror::ArtField* contextClassLoader =
+ ArtField* contextClassLoader =
thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
CHECK(contextClassLoader != NULL);
@@ -549,6 +552,17 @@
StartProfiler(profile_output_filename_.c_str());
}
+ if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
+ ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
+ Trace::Start(trace_config_->trace_file.c_str(),
+ -1,
+ static_cast<int>(trace_config_->trace_file_size),
+ 0,
+ trace_config_->trace_output_mode,
+ trace_config_->trace_mode,
+ 0);
+ }
+
return true;
}
@@ -862,7 +876,14 @@
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
const bool use_malloc = !use_jit;
- arena_pool_.reset(new ArenaPool(use_malloc));
+ arena_pool_.reset(new ArenaPool(use_malloc, false));
+ if (IsCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
+ // 4gb, no malloc. Explanation in header.
+ low_4gb_arena_pool_.reset(new ArenaPool(false, true));
+ linear_alloc_.reset(new LinearAlloc(low_4gb_arena_pool_.get()));
+ } else {
+ linear_alloc_.reset(new LinearAlloc(arena_pool_.get()));
+ }
BlockSignals();
InitPlatformSignalHandlers();
@@ -990,7 +1011,9 @@
trace_config_->trace_file = runtime_options.ReleaseOrDefault(Opt::MethodTraceFile);
trace_config_->trace_file_size = runtime_options.ReleaseOrDefault(Opt::MethodTraceFileSize);
trace_config_->trace_mode = Trace::TraceMode::kMethodTracing;
- trace_config_->trace_output_mode = Trace::TraceOutputMode::kFile;
+ trace_config_->trace_output_mode = runtime_options.Exists(Opt::MethodTraceStreaming) ?
+ Trace::TraceOutputMode::kStreaming :
+ Trace::TraceOutputMode::kFile;
}
{
@@ -1016,17 +1039,6 @@
// TODO: move this to just be an Trace::Start argument
Trace::SetDefaultClockSource(runtime_options.GetOrDefault(Opt::ProfileClock));
- if (trace_config_.get() != nullptr) {
- ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
- Trace::Start(trace_config_->trace_file.c_str(),
- -1,
- static_cast<int>(trace_config_->trace_file_size),
- 0,
- trace_config_->trace_output_mode,
- trace_config_->trace_mode,
- 0);
- }
-
// Pre-allocate an OutOfMemoryError for the double-OOME case.
self->ThrowNewException("Ljava/lang/OutOfMemoryError;",
"OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
@@ -1294,7 +1306,6 @@
void Runtime::VisitConstantRoots(RootVisitor* visitor) {
// Visit the classes held as static in mirror classes, these can be visited concurrently and only
// need to be visited once per GC since they never change.
- mirror::ArtField::VisitRoots(visitor);
mirror::ArtMethod::VisitRoots(visitor);
mirror::Class::VisitRoots(visitor);
mirror::Reference::VisitRoots(visitor);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index baa4d18..d95640d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -42,6 +42,7 @@
class ArenaPool;
class CompilerCallbacks;
+class LinearAlloc;
namespace gc {
class Heap;
@@ -549,6 +550,9 @@
const ArenaPool* GetArenaPool() const {
return arena_pool_.get();
}
+ LinearAlloc* GetLinearAlloc() {
+ return linear_alloc_.get();
+ }
jit::JitOptions* GetJITOptions() {
return jit_options_.get();
@@ -618,6 +622,13 @@
gc::Heap* heap_;
std::unique_ptr<ArenaPool> arena_pool_;
+  // Special low 4gb pool for the compiler's linear alloc. ArtFields must be in the low 4gb when
+  // a 64 bit compiler uses a 32 bit image: fields resolved against the image are stored in its
+  // int (32 bit) arrays, so their addresses must fit in 32 bits.
+ std::unique_ptr<ArenaPool> low_4gb_arena_pool_;
+
+ // Shared linear alloc for now.
+ std::unique_ptr<LinearAlloc> linear_alloc_;
// The number of spins that are done before thread suspension is used to forcibly inflate.
size_t max_spins_before_thin_lock_inflation_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 339f925..eff787a 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -95,6 +95,7 @@
RUNTIME_OPTIONS_KEY (Unit, MethodTrace)
RUNTIME_OPTIONS_KEY (std::string, MethodTraceFile, "/data/method-trace-file.bin")
RUNTIME_OPTIONS_KEY (unsigned int, MethodTraceFileSize, 10 * MB)
+RUNTIME_OPTIONS_KEY (Unit, MethodTraceStreaming)
RUNTIME_OPTIONS_KEY (TraceClockSource, ProfileClock, kDefaultTraceClockSource) // -Xprofile:
RUNTIME_OPTIONS_KEY (TestProfilerOptions, ProfilerOpts) // -Xenable-profiler, -Xprofile-*
RUNTIME_OPTIONS_KEY (std::string, Compiler)
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index adf3480..11b7df6 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -20,7 +20,7 @@
#include "base/casts.h"
#include "java_vm_ext.h"
#include "jni_env_ext-inl.h"
-#include "mirror/art_field.h"
+#include "art_field.h"
#include "read_barrier.h"
#include "thread-inl.h"
#include "verify_object.h"
@@ -148,20 +148,16 @@
return down_cast<T>(Self()->DecodeJObject(obj));
}
- mirror::ArtField* DecodeField(jfieldID fid) const
+ ArtField* DecodeField(jfieldID fid) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- CHECK(!kMovingFields);
- mirror::ArtField* field = reinterpret_cast<mirror::ArtField*>(fid);
- return ReadBarrier::BarrierForRoot<mirror::ArtField, kWithReadBarrier>(&field);
+ return reinterpret_cast<ArtField*>(fid);
}
- jfieldID EncodeField(mirror::ArtField* field) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jfieldID EncodeField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- CHECK(!kMovingFields);
return reinterpret_cast<jfieldID>(field);
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index af11f73..5ca51fb 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,11 +32,11 @@
#include <sstream>
#include "arch/context.h"
+#include "art_field-inl.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
#include "class_linker-inl.h"
-#include "class_linker.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
@@ -47,10 +47,8 @@
#include "gc/heap.h"
#include "gc/space/space.h"
#include "handle_scope-inl.h"
-#include "handle_scope.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/class-inl.h"
@@ -173,7 +171,7 @@
self->tlsPtr_.jpeer = nullptr;
self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
- mirror::ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
+ ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
Dbg::PostThreadStart(self);
@@ -190,7 +188,7 @@
Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* thread_peer) {
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
// Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
// to stop it from going away.
@@ -589,7 +587,7 @@
}
mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+ ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
}
@@ -794,7 +792,7 @@
soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
if (thread_group != nullptr) {
- mirror::ArtField* group_name_field =
+ ArtField* group_name_field =
soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
mirror::String* group_name_string =
reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
@@ -1072,12 +1070,7 @@
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- // b/20040863. Temporary workaround for x86 libunwind issue.
-#if defined(__i386__) && defined(HAVE_ANDROID_OS)
- os << "Cannot dump native stack. b/20040863.\n";
-#else
DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr, !dump_for_abort));
-#endif
}
DumpJavaStack(os);
} else {
@@ -1393,6 +1386,8 @@
visitor, RootInfo(kRootNativeStack, thread_id));
for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
+ // GetReference returns a pointer to the stack reference within the handle scope. If this
+ // needs to be updated, it will be done by the root visitor.
buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
}
}
@@ -2319,6 +2314,7 @@
ReleaseLongJumpContext(context);
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
+ DCHECK(frame.method_ != nullptr);
visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_),
RootInfo(kRootVMInternal, thread_id));
}
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 5375dc0..5322f9f 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -31,7 +31,7 @@
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
@@ -85,9 +85,12 @@
kTraceMethodActionMask = 0x03, // two bits
};
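+// Opcodes for the metadata records emitted in streaming mode for newly seen methods and threads.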
+static constexpr uint8_t kOpNewMethod = 1U;
+static constexpr uint8_t kOpNewThread = 2U;
+
class BuildStackTraceVisitor : public StackVisitor {
public:
- explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL),
+ explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, nullptr),
method_trace_(Trace::AllocStackTrace()) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -118,7 +121,7 @@
TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;
-Trace* volatile Trace::the_trace_ = NULL;
+Trace* volatile Trace::the_trace_ = nullptr;
pthread_t Trace::sampling_pthread_ = 0U;
std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_;
@@ -138,7 +141,7 @@
}
std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() {
- if (temp_stack_trace_.get() != NULL) {
+ if (temp_stack_trace_.get() != nullptr) {
return temp_stack_trace_.release();
} else {
return new std::vector<mirror::ArtMethod*>();
@@ -246,7 +249,7 @@
static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
thread->SetTraceClockBase(0);
std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
- thread->SetStackTraceSample(NULL);
+ thread->SetStackTraceSample(nullptr);
delete stack_trace;
}
@@ -260,7 +263,7 @@
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
- if (old_stack_trace == NULL) {
+ if (old_stack_trace == nullptr) {
// If there's no previous stack trace sample for this thread, log an entry event for all
// methods in the trace.
for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
@@ -308,7 +311,7 @@
{
MutexLock mu(self, *Locks::trace_lock_);
the_trace = the_trace_;
- if (the_trace == NULL) {
+ if (the_trace == nullptr) {
break;
}
}
@@ -323,7 +326,7 @@
}
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
@@ -331,7 +334,7 @@
Thread* self = Thread::Current();
{
MutexLock mu(self, *Locks::trace_lock_);
- if (the_trace_ != NULL) {
+ if (the_trace_ != nullptr) {
LOG(ERROR) << "Trace already in progress, ignoring this request";
return;
}
@@ -354,7 +357,7 @@
trace_file.reset(new File(trace_fd, "tracefile"));
trace_file->DisableAutoClose();
}
- if (trace_file.get() == NULL) {
+ if (trace_file.get() == nullptr) {
PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'";
ScopedObjectAccess soa(self);
ThrowRuntimeException("Unable to open trace file '%s'", trace_filename);
@@ -372,20 +375,23 @@
// Create Trace object.
{
MutexLock mu(self, *Locks::trace_lock_);
- if (the_trace_ != NULL) {
+ if (the_trace_ != nullptr) {
LOG(ERROR) << "Trace already in progress, ignoring this request";
} else {
enable_stats = (flags & kTraceCountAllocs) != 0;
- the_trace_ = new Trace(trace_file.release(), buffer_size, flags, trace_mode);
+ the_trace_ = new Trace(trace_file.release(), trace_filename, buffer_size, flags, output_mode,
+ trace_mode);
if (trace_mode == TraceMode::kSampling) {
- CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
+ CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
reinterpret_cast<void*>(interval_us)),
"Sampling profiler thread");
+ the_trace_->interval_us_ = interval_us;
} else {
runtime->GetInstrumentation()->AddListener(the_trace_,
instrumentation::Instrumentation::kMethodEntered |
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind);
+ // TODO: In full-PIC mode, we don't need to fully deopt.
runtime->GetInstrumentation()->EnableMethodTracing();
}
}
@@ -399,18 +405,18 @@
}
}
-void Trace::Stop() {
+void Trace::StopTracing(bool finish_tracing, bool flush_file) {
bool stop_alloc_counting = false;
Runtime* const runtime = Runtime::Current();
Trace* the_trace = nullptr;
pthread_t sampling_pthread = 0U;
{
MutexLock mu(Thread::Current(), *Locks::trace_lock_);
- if (the_trace_ == NULL) {
+ if (the_trace_ == nullptr) {
LOG(ERROR) << "Trace stop requested, but no trace currently running";
} else {
the_trace = the_trace_;
- the_trace_ = NULL;
+ the_trace_ = nullptr;
sampling_pthread = sampling_pthread_;
}
}
@@ -418,13 +424,16 @@
// the sampling thread access a stale pointer. This finishes since the sampling thread exits when
// the_trace_ is null.
if (sampling_pthread != 0U) {
- CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
sampling_pthread_ = 0U;
}
runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+
if (the_trace != nullptr) {
- stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0;
- the_trace->FinishTracing();
+ stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
+ if (finish_tracing) {
+ the_trace->FinishTracing();
+ }
if (the_trace->trace_mode_ == TraceMode::kSampling) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
@@ -438,8 +447,12 @@
}
if (the_trace->trace_file_.get() != nullptr) {
// Do not try to erase, so flush and close explicitly.
- if (the_trace->trace_file_->Flush() != 0) {
- PLOG(ERROR) << "Could not flush trace file.";
+ if (flush_file) {
+ if (the_trace->trace_file_->Flush() != 0) {
+ PLOG(ERROR) << "Could not flush trace file.";
+ }
+ } else {
+ the_trace->trace_file_->MarkUnchecked(); // Do not trigger guard.
}
if (the_trace->trace_file_->Close() != 0) {
PLOG(ERROR) << "Could not close trace file.";
@@ -454,15 +467,118 @@
}
}
+void Trace::Abort() {
+ // Do not write anything anymore.
+ StopTracing(false, false);
+}
+
+void Trace::Stop() {
+ // Finish writing.
+ StopTracing(true, true);
+}
+
void Trace::Shutdown() {
if (GetMethodTracingMode() != kTracingInactive) {
Stop();
}
}
+void Trace::Pause() {
+ bool stop_alloc_counting = false;
+ Runtime* runtime = Runtime::Current();
+ Trace* the_trace = nullptr;
+
+ pthread_t sampling_pthread = 0U;
+ {
+ MutexLock mu(Thread::Current(), *Locks::trace_lock_);
+ if (the_trace_ == nullptr) {
+ LOG(ERROR) << "Trace pause requested, but no trace currently running";
+ return;
+ } else {
+ the_trace = the_trace_;
+ sampling_pthread = sampling_pthread_;
+ }
+ }
+
+ if (sampling_pthread != 0U) {
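+    // Temporarily clear the_trace_ so the sampling thread observes nullptr and exits; restore it
+    // after joining the thread.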
+ {
+ MutexLock mu(Thread::Current(), *Locks::trace_lock_);
+ the_trace_ = nullptr;
+ }
+ CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
+ sampling_pthread_ = 0U;
+ {
+ MutexLock mu(Thread::Current(), *Locks::trace_lock_);
+ the_trace_ = the_trace;
+ }
+ }
+
+ if (the_trace != nullptr) {
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+ stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
+
+ if (the_trace->trace_mode_ == TraceMode::kSampling) {
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
+ } else {
+ runtime->GetInstrumentation()->DisableMethodTracing();
+ runtime->GetInstrumentation()->RemoveListener(the_trace,
+ instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kMethodExited |
+ instrumentation::Instrumentation::kMethodUnwind);
+ }
+ runtime->GetThreadList()->ResumeAll();
+ }
+
+ if (stop_alloc_counting) {
+ // Can be racy since SetStatsEnabled is not guarded by any locks.
+ Runtime::Current()->SetStatsEnabled(false);
+ }
+}
+
+void Trace::Resume() {
+ Thread* self = Thread::Current();
+ Trace* the_trace;
+ {
+ MutexLock mu(self, *Locks::trace_lock_);
+ if (the_trace_ == nullptr) {
+ LOG(ERROR) << "No trace to resume (or sampling mode), ignoring this request";
+ return;
+ }
+ the_trace = the_trace_;
+ }
+
+ Runtime* runtime = Runtime::Current();
+
+ // Enable count of allocs if specified in the flags.
+  bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0;
+
+ runtime->GetThreadList()->SuspendAll(__FUNCTION__);
+
+ // Reenable.
+ if (the_trace->trace_mode_ == TraceMode::kSampling) {
+ CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
+ reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread");
+ } else {
+ runtime->GetInstrumentation()->AddListener(the_trace,
+ instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kMethodExited |
+ instrumentation::Instrumentation::kMethodUnwind);
+ // TODO: In full-PIC mode, we don't need to fully deopt.
+ runtime->GetInstrumentation()->EnableMethodTracing();
+ }
+
+ runtime->GetThreadList()->ResumeAll();
+
+ // Can't call this when holding the mutator lock.
+ if (enable_stats) {
+ runtime->SetStatsEnabled(true);
+ }
+}
+
TracingMode Trace::GetMethodTracingMode() {
MutexLock mu(Thread::Current(), *Locks::trace_lock_);
- if (the_trace_ == NULL) {
+ if (the_trace_ == nullptr) {
return kTracingInactive;
} else {
switch (the_trace_->trace_mode_) {
@@ -476,13 +592,26 @@
}
}
-Trace::Trace(File* trace_file, int buffer_size, int flags, TraceMode trace_mode)
- : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
- trace_mode_(trace_mode), clock_source_(default_clock_source_),
- buffer_size_(buffer_size), start_time_(MicroTime()),
- clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
- // Set up the beginning of the trace.
+static constexpr size_t kStreamingBufferSize = 16 * KB;
+
+Trace::Trace(File* trace_file, const char* trace_name, int buffer_size, int flags,
+ TraceOutputMode output_mode, TraceMode trace_mode)
+ : trace_file_(trace_file),
+ buf_(new uint8_t[output_mode == TraceOutputMode::kStreaming ?
+ kStreamingBufferSize :
+ buffer_size]()),
+ flags_(flags), trace_output_mode_(output_mode), trace_mode_(trace_mode),
+ clock_source_(default_clock_source_),
+ buffer_size_(output_mode == TraceOutputMode::kStreaming ?
+ kStreamingBufferSize :
+ buffer_size),
+ start_time_(MicroTime()), clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0),
+ overflow_(false), interval_us_(0), streaming_lock_(nullptr) {
uint16_t trace_version = GetTraceVersion(clock_source_);
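+  // Streaming output is flagged in the high nibble of the trace version.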
+ if (output_mode == TraceOutputMode::kStreaming) {
+ trace_version |= 0xF0U;
+ }
+ // Set up the beginning of the trace.
memset(buf_.get(), 0, kTraceHeaderLength);
Append4LE(buf_.get(), kTraceMagicValue);
Append2LE(buf_.get() + 4, trace_version);
@@ -495,6 +624,16 @@
// Update current offset.
cur_offset_.StoreRelaxed(kTraceHeaderLength);
+
+ if (output_mode == TraceOutputMode::kStreaming) {
+ streaming_file_name_ = trace_name;
+ streaming_lock_ = new Mutex("tracing lock");
+ seen_threads_.reset(new ThreadIDBitSet());
+ }
+}
+
+Trace::~Trace() {
+ delete streaming_lock_;
}
static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
@@ -511,14 +650,38 @@
}
}
-void Trace::FinishTracing() {
- // Compute elapsed time.
- uint64_t elapsed = MicroTime() - start_time_;
+static void GetVisitedMethodsFromBitSets(
+ const std::map<mirror::DexCache*, DexIndexBitSet*>& seen_methods,
+ std::set<mirror::ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (auto& e : seen_methods) {
+ DexIndexBitSet* bit_set = e.second;
+ for (uint32_t i = 0; i < bit_set->size(); ++i) {
+ if ((*bit_set)[i]) {
+ visited_methods->insert(e.first->GetResolvedMethod(i));
+ }
+ }
+ }
+}
- size_t final_offset = cur_offset_.LoadRelaxed();
+void Trace::FinishTracing() {
+ size_t final_offset = 0;
std::set<mirror::ArtMethod*> visited_methods;
- GetVisitedMethods(final_offset, &visited_methods);
+ if (trace_output_mode_ == TraceOutputMode::kStreaming) {
+ // Write the secondary file with all the method names.
+ GetVisitedMethodsFromBitSets(seen_methods_, &visited_methods);
+
+ // Clean up.
+ for (auto& e : seen_methods_) {
+ delete e.second;
+ }
+ } else {
+ final_offset = cur_offset_.LoadRelaxed();
+ GetVisitedMethods(final_offset, &visited_methods);
+ }
+
+ // Compute elapsed time.
+ uint64_t elapsed = MicroTime() - start_time_;
std::ostringstream os;
@@ -535,8 +698,10 @@
os << StringPrintf("clock=wall\n");
}
os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
- size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
- os << StringPrintf("num-method-calls=%zd\n", num_records);
+ if (trace_output_mode_ != TraceOutputMode::kStreaming) {
+ size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
+ os << StringPrintf("num-method-calls=%zd\n", num_records);
+ }
os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
os << StringPrintf("pid=%d\n", getpid());
@@ -550,27 +715,44 @@
os << StringPrintf("%cmethods\n", kTraceTokenChar);
DumpMethodList(os, visited_methods);
os << StringPrintf("%cend\n", kTraceTokenChar);
-
std::string header(os.str());
- if (trace_file_.get() == NULL) {
- iovec iov[2];
- iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str()));
- iov[0].iov_len = header.length();
- iov[1].iov_base = buf_.get();
- iov[1].iov_len = final_offset;
- Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
- const bool kDumpTraceInfo = false;
- if (kDumpTraceInfo) {
- LOG(INFO) << "Trace sent:\n" << header;
- DumpBuf(buf_.get(), final_offset, clock_source_);
+
+ if (trace_output_mode_ == TraceOutputMode::kStreaming) {
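+    // In streaming mode the event data has already been written to the trace file; emit the
+    // header with the method and thread lists into a secondary ".sec" file.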
+ File file;
+ if (!file.Open(streaming_file_name_ + ".sec", O_CREAT | O_WRONLY)) {
+ LOG(WARNING) << "Could not open secondary trace file!";
+ return;
}
- } else {
- if (!trace_file_->WriteFully(header.c_str(), header.length()) ||
- !trace_file_->WriteFully(buf_.get(), final_offset)) {
+ if (!file.WriteFully(header.c_str(), header.length())) {
+ file.Erase();
std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
PLOG(ERROR) << detail;
ThrowRuntimeException("%s", detail.c_str());
}
+ if (file.FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Could not write secondary file";
+ }
+ } else {
+ if (trace_file_.get() == nullptr) {
+ iovec iov[2];
+ iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str()));
+ iov[0].iov_len = header.length();
+ iov[1].iov_base = buf_.get();
+ iov[1].iov_len = final_offset;
+ Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
+ const bool kDumpTraceInfo = false;
+ if (kDumpTraceInfo) {
+ LOG(INFO) << "Trace sent:\n" << header;
+ DumpBuf(buf_.get(), final_offset, clock_source_);
+ }
+ } else {
+ if (!trace_file_->WriteFully(header.c_str(), header.length()) ||
+ !trace_file_->WriteFully(buf_.get(), final_offset)) {
+ std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
+ PLOG(ERROR) << detail;
+ ThrowRuntimeException("%s", detail.c_str());
+ }
+ }
}
}
@@ -582,7 +764,7 @@
}
void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field)
+ mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(thread, this_object, method, dex_pc, field);
// We're not recorded to listen to this kind of event, so complain.
@@ -590,7 +772,7 @@
}
void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
+ mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(thread, this_object, method, dex_pc, field, field_value);
@@ -654,20 +836,76 @@
}
}
+bool Trace::RegisterMethod(mirror::ArtMethod* method) {
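+  // Make the method resolvable through its dex cache: FinishTracing later recovers the visited
+  // methods from the per-dex-cache bit sets via GetResolvedMethod.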
+ mirror::DexCache* dex_cache = method->GetDexCache();
+ if (dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) != method) {
+ DCHECK(dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr);
+ dex_cache->SetResolvedMethod(method->GetDexMethodIndex(), method);
+ }
+ if (seen_methods_.find(dex_cache) == seen_methods_.end()) {
+ seen_methods_.insert(std::make_pair(dex_cache, new DexIndexBitSet()));
+ }
+ DexIndexBitSet* bit_set = seen_methods_.find(dex_cache)->second;
+ if (!(*bit_set)[method->GetDexMethodIndex()]) {
+ bit_set->set(method->GetDexMethodIndex());
+ return true;
+ }
+ return false;
+}
+
+bool Trace::RegisterThread(Thread* thread) {
+ pid_t tid = thread->GetTid();
+ CHECK_LT(0U, static_cast<uint32_t>(tid));
+ CHECK_LT(static_cast<uint32_t>(tid), 65536U);
+
+ if (!(*seen_threads_)[tid]) {
+ seen_threads_->set(tid);
+ return true;
+ }
+ return false;
+}
+
+static std::string GetMethodLine(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
+ PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
+ method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
+}
+
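+// Appends src to the streaming buffer, flushing the buffer to the trace file first when the data
+// would not fit. Callers must hold streaming_lock_.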
+void Trace::WriteToBuf(const uint8_t* src, size_t src_size) {
+ int32_t old_offset = cur_offset_.LoadRelaxed();
+ int32_t new_offset = old_offset + static_cast<int32_t>(src_size);
+ if (new_offset > buffer_size_) {
+ // Flush buffer.
+ if (!trace_file_->WriteFully(buf_.get(), old_offset)) {
+ PLOG(WARNING) << "Failed streaming a tracing event.";
+ }
+ old_offset = 0;
+ new_offset = static_cast<int32_t>(src_size);
+ }
+ cur_offset_.StoreRelease(new_offset);
+ // Fill in data.
+ memcpy(buf_.get() + old_offset, src, src_size);
+}
+
void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
// Advance cur_offset_ atomically.
int32_t new_offset;
- int32_t old_offset;
- do {
- old_offset = cur_offset_.LoadRelaxed();
- new_offset = old_offset + GetRecordSize(clock_source_);
- if (new_offset > buffer_size_) {
- overflow_ = true;
- return;
- }
- } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset));
+ int32_t old_offset = 0;
+
+  // In non-streaming mode, do a CAS busy loop to claim the next record slot in the buffer.
+ if (trace_output_mode_ != TraceOutputMode::kStreaming) {
+ do {
+ old_offset = cur_offset_.LoadRelaxed();
+ new_offset = old_offset + GetRecordSize(clock_source_);
+ if (new_offset > buffer_size_) {
+ overflow_ = true;
+ return;
+ }
+ } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset));
+ }
TraceAction action = kTraceMethodEnter;
switch (event) {
@@ -687,7 +925,15 @@
uint32_t method_value = EncodeTraceMethodAndAction(method, action);
// Write data
- uint8_t* ptr = buf_.get() + old_offset;
+ uint8_t* ptr;
+ static constexpr size_t kPacketSize = 14U; // The maximum size of data in a packet.
+ uint8_t stack_buf[kPacketSize]; // Space to store a packet when in streaming mode.
+ if (trace_output_mode_ == TraceOutputMode::kStreaming) {
+ ptr = stack_buf;
+ } else {
+ ptr = buf_.get() + old_offset;
+ }
+
Append2LE(ptr, thread->GetTid());
Append4LE(ptr + 2, method_value);
ptr += 6;
@@ -699,6 +945,34 @@
if (UseWallClock()) {
Append4LE(ptr, wall_clock_diff);
}
+ static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");
+
+ if (trace_output_mode_ == TraceOutputMode::kStreaming) {
+ MutexLock mu(Thread::Current(), *streaming_lock_); // To serialize writing.
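+    // Metadata records begin with a zero thread id followed by a one-byte opcode; real method
+    // events always start with a nonzero thread id.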
+ if (RegisterMethod(method)) {
+ // Write a special block with the name.
+ std::string method_line(GetMethodLine(method));
+ uint8_t buf2[5];
+ Append2LE(buf2, 0);
+ buf2[2] = kOpNewMethod;
+ Append2LE(buf2 + 3, static_cast<uint16_t>(method_line.length()));
+ WriteToBuf(buf2, sizeof(buf2));
+ WriteToBuf(reinterpret_cast<const uint8_t*>(method_line.c_str()), method_line.length());
+ }
+ if (RegisterThread(thread)) {
+ // It might be better to postpone this. Threads might not have received names...
+ std::string thread_name;
+ thread->GetThreadName(thread_name);
+ uint8_t buf2[7];
+ Append2LE(buf2, 0);
+ buf2[2] = kOpNewThread;
+ Append2LE(buf2 + 3, static_cast<uint16_t>(thread->GetTid()));
+ Append2LE(buf2 + 5, static_cast<uint16_t>(thread_name.length()));
+ WriteToBuf(buf2, sizeof(buf2));
+ WriteToBuf(reinterpret_cast<const uint8_t*>(thread_name.c_str()), thread_name.length());
+ }
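+    // Finally, append the event record assembled in stack_buf.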
+ WriteToBuf(stack_buf, sizeof(stack_buf));
+ }
}
void Trace::GetVisitedMethods(size_t buf_size,
@@ -716,9 +990,7 @@
void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) {
for (const auto& method : visited_methods) {
- os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
- PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
- method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
+ os << GetMethodLine(method);
}
}
@@ -750,4 +1022,16 @@
}
}
+Trace::TraceOutputMode Trace::GetOutputMode() {
+ MutexLock mu(Thread::Current(), *Locks::trace_lock_);
+ CHECK(the_trace_ != nullptr) << "Trace output mode requested, but no trace currently running";
+ return the_trace_->trace_output_mode_;
+}
+
+Trace::TraceMode Trace::GetMode() {
+ MutexLock mu(Thread::Current(), *Locks::trace_lock_);
+ CHECK(the_trace_ != nullptr) << "Trace mode requested, but no trace currently running";
+ return the_trace_->trace_mode_;
+}
+
} // namespace art
diff --git a/runtime/trace.h b/runtime/trace.h
index 80f926f..b8329ff 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_TRACE_H_
#define ART_RUNTIME_TRACE_H_
+#include <bitset>
+#include <map>
#include <memory>
#include <ostream>
#include <set>
@@ -33,12 +35,16 @@
namespace art {
namespace mirror {
- class ArtField;
class ArtMethod;
+ class DexCache;
} // namespace mirror
+class ArtField;
class Thread;
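+// The trace format encodes dex method indices and thread ids in 16 bits, so 65536-entry bit sets
+// cover the full range.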
+using DexIndexBitSet = std::bitset<65536>;
+using ThreadIDBitSet = std::bitset<65536>;
+
enum TracingMode {
kTracingInactive,
kMethodTracingActive,
@@ -53,7 +59,8 @@
enum class TraceOutputMode {
kFile,
- kDDMS
+ kDDMS,
+ kStreaming
};
enum class TraceMode {
@@ -61,6 +68,8 @@
kSampling
};
+ ~Trace();
+
static void SetDefaultClockSource(TraceClockSource clock_source);
static void Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
@@ -69,7 +78,16 @@
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_,
Locks::trace_lock_);
+ static void Pause() LOCKS_EXCLUDED(Locks::trace_lock_, Locks::thread_list_lock_);
+ static void Resume() LOCKS_EXCLUDED(Locks::trace_lock_);
+
+ // Stop tracing. This will finish the trace and write it to file/send it via DDMS.
static void Stop()
+ LOCKS_EXCLUDED(Locks::mutator_lock_,
+ Locks::thread_list_lock_,
+ Locks::trace_lock_);
+ // Abort tracing. This will just stop tracing and *not* write/send the collected data.
+ static void Abort()
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::trace_lock_);
@@ -99,10 +117,10 @@
mirror::ArtMethod* method, uint32_t new_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void FieldRead(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field)
+ mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void FieldWritten(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
+ mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
@@ -116,19 +134,25 @@
// Save id and name of a thread before it exits.
static void StoreExitingThreadInfo(Thread* thread);
+ static TraceOutputMode GetOutputMode() LOCKS_EXCLUDED(Locks::trace_lock_);
+ static TraceMode GetMode() LOCKS_EXCLUDED(Locks::trace_lock_);
+
private:
- explicit Trace(File* trace_file, int buffer_size, int flags, TraceMode trace_mode);
+ Trace(File* trace_file, const char* trace_name, int buffer_size, int flags,
+ TraceOutputMode output_mode, TraceMode trace_mode);
// The sampling interval in microseconds is passed as an argument.
static void* RunSamplingThread(void* arg) LOCKS_EXCLUDED(Locks::trace_lock_);
+ static void StopTracing(bool finish_tracing, bool flush_file);
void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
void LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
- uint32_t thread_clock_diff, uint32_t wall_clock_diff);
+ uint32_t thread_clock_diff, uint32_t wall_clock_diff)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Methods to output traced methods and threads.
void GetVisitedMethods(size_t end_offset, std::set<mirror::ArtMethod*>* visited_methods);
@@ -136,6 +160,18 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_);
+  // Methods to register seen entities in streaming mode. The methods return true if the entity
+ // is newly discovered.
+ bool RegisterMethod(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+ bool RegisterThread(Thread* thread)
+ EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+
+ // Copy a temporary buffer to the main buffer. Used for streaming. Exposed here for lock
+ // annotation.
+ void WriteToBuf(const uint8_t* src, size_t src_size)
+ EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+
// Singleton instance of the Trace or NULL when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
@@ -157,7 +193,10 @@
// Flags enabling extra tracing of things such as alloc counts.
const int flags_;
- // True if traceview should sample instead of instrumenting method entry/exit.
+ // The kind of output for this tracing.
+ const TraceOutputMode trace_output_mode_;
+
+  // The tracing mode: instrumented method tracing or sampling.
const TraceMode trace_mode_;
const TraceClockSource clock_source_;
@@ -180,6 +219,15 @@
// Map of thread ids and names that have already exited.
SafeMap<pid_t, std::string> exited_threads_;
+ // Sampling profiler sampling interval.
+ int interval_us_;
+
+ // Streaming mode data.
+ std::string streaming_file_name_;
+ Mutex* streaming_lock_;
+ std::map<mirror::DexCache*, DexIndexBitSet*> seen_methods_;
+ std::unique_ptr<ThreadIDBitSet> seen_threads_;
+
DISALLOW_COPY_AND_ASSIGN(Trace);
};
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 24ecf6b..aee2c54 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -16,9 +16,10 @@
#include "transaction.h"
+#include "art_field-inl.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "mirror/array-inl.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "scoped_thread_state_change.h"
@@ -181,47 +182,47 @@
ASSERT_FALSE(soa.Self()->IsExceptionPending());
// Lookup fields.
- mirror::ArtField* booleanField = h_klass->FindDeclaredStaticField("booleanField", "Z");
+ ArtField* booleanField = h_klass->FindDeclaredStaticField("booleanField", "Z");
ASSERT_TRUE(booleanField != nullptr);
ASSERT_EQ(booleanField->GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
ASSERT_EQ(booleanField->GetBoolean(h_klass.Get()), false);
- mirror::ArtField* byteField = h_klass->FindDeclaredStaticField("byteField", "B");
+ ArtField* byteField = h_klass->FindDeclaredStaticField("byteField", "B");
ASSERT_TRUE(byteField != nullptr);
ASSERT_EQ(byteField->GetTypeAsPrimitiveType(), Primitive::kPrimByte);
ASSERT_EQ(byteField->GetByte(h_klass.Get()), 0);
- mirror::ArtField* charField = h_klass->FindDeclaredStaticField("charField", "C");
+ ArtField* charField = h_klass->FindDeclaredStaticField("charField", "C");
ASSERT_TRUE(charField != nullptr);
ASSERT_EQ(charField->GetTypeAsPrimitiveType(), Primitive::kPrimChar);
ASSERT_EQ(charField->GetChar(h_klass.Get()), 0u);
- mirror::ArtField* shortField = h_klass->FindDeclaredStaticField("shortField", "S");
+ ArtField* shortField = h_klass->FindDeclaredStaticField("shortField", "S");
ASSERT_TRUE(shortField != nullptr);
ASSERT_EQ(shortField->GetTypeAsPrimitiveType(), Primitive::kPrimShort);
ASSERT_EQ(shortField->GetShort(h_klass.Get()), 0);
- mirror::ArtField* intField = h_klass->FindDeclaredStaticField("intField", "I");
+ ArtField* intField = h_klass->FindDeclaredStaticField("intField", "I");
ASSERT_TRUE(intField != nullptr);
ASSERT_EQ(intField->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
ASSERT_EQ(intField->GetInt(h_klass.Get()), 0);
- mirror::ArtField* longField = h_klass->FindDeclaredStaticField("longField", "J");
+ ArtField* longField = h_klass->FindDeclaredStaticField("longField", "J");
ASSERT_TRUE(longField != nullptr);
ASSERT_EQ(longField->GetTypeAsPrimitiveType(), Primitive::kPrimLong);
ASSERT_EQ(longField->GetLong(h_klass.Get()), static_cast<int64_t>(0));
- mirror::ArtField* floatField = h_klass->FindDeclaredStaticField("floatField", "F");
+ ArtField* floatField = h_klass->FindDeclaredStaticField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(floatField->GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
ASSERT_FLOAT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
- mirror::ArtField* doubleField = h_klass->FindDeclaredStaticField("doubleField", "D");
+ ArtField* doubleField = h_klass->FindDeclaredStaticField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(doubleField->GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
ASSERT_DOUBLE_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
- mirror::ArtField* objectField = h_klass->FindDeclaredStaticField("objectField",
+ ArtField* objectField = h_klass->FindDeclaredStaticField("objectField",
"Ljava/lang/Object;");
ASSERT_TRUE(objectField != nullptr);
ASSERT_EQ(objectField->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
@@ -283,47 +284,47 @@
ASSERT_TRUE(h_instance.Get() != nullptr);
// Lookup fields.
- mirror::ArtField* booleanField = h_klass->FindDeclaredInstanceField("booleanField", "Z");
+ ArtField* booleanField = h_klass->FindDeclaredInstanceField("booleanField", "Z");
ASSERT_TRUE(booleanField != nullptr);
ASSERT_EQ(booleanField->GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
ASSERT_EQ(booleanField->GetBoolean(h_instance.Get()), false);
- mirror::ArtField* byteField = h_klass->FindDeclaredInstanceField("byteField", "B");
+ ArtField* byteField = h_klass->FindDeclaredInstanceField("byteField", "B");
ASSERT_TRUE(byteField != nullptr);
ASSERT_EQ(byteField->GetTypeAsPrimitiveType(), Primitive::kPrimByte);
ASSERT_EQ(byteField->GetByte(h_instance.Get()), 0);
- mirror::ArtField* charField = h_klass->FindDeclaredInstanceField("charField", "C");
+ ArtField* charField = h_klass->FindDeclaredInstanceField("charField", "C");
ASSERT_TRUE(charField != nullptr);
ASSERT_EQ(charField->GetTypeAsPrimitiveType(), Primitive::kPrimChar);
ASSERT_EQ(charField->GetChar(h_instance.Get()), 0u);
- mirror::ArtField* shortField = h_klass->FindDeclaredInstanceField("shortField", "S");
+ ArtField* shortField = h_klass->FindDeclaredInstanceField("shortField", "S");
ASSERT_TRUE(shortField != nullptr);
ASSERT_EQ(shortField->GetTypeAsPrimitiveType(), Primitive::kPrimShort);
ASSERT_EQ(shortField->GetShort(h_instance.Get()), 0);
- mirror::ArtField* intField = h_klass->FindDeclaredInstanceField("intField", "I");
+ ArtField* intField = h_klass->FindDeclaredInstanceField("intField", "I");
ASSERT_TRUE(intField != nullptr);
ASSERT_EQ(intField->GetTypeAsPrimitiveType(), Primitive::kPrimInt);
ASSERT_EQ(intField->GetInt(h_instance.Get()), 0);
- mirror::ArtField* longField = h_klass->FindDeclaredInstanceField("longField", "J");
+ ArtField* longField = h_klass->FindDeclaredInstanceField("longField", "J");
ASSERT_TRUE(longField != nullptr);
ASSERT_EQ(longField->GetTypeAsPrimitiveType(), Primitive::kPrimLong);
ASSERT_EQ(longField->GetLong(h_instance.Get()), static_cast<int64_t>(0));
- mirror::ArtField* floatField = h_klass->FindDeclaredInstanceField("floatField", "F");
+ ArtField* floatField = h_klass->FindDeclaredInstanceField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(floatField->GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
ASSERT_FLOAT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
- mirror::ArtField* doubleField = h_klass->FindDeclaredInstanceField("doubleField", "D");
+ ArtField* doubleField = h_klass->FindDeclaredInstanceField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(doubleField->GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
ASSERT_DOUBLE_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
- mirror::ArtField* objectField = h_klass->FindDeclaredInstanceField("objectField",
+ ArtField* objectField = h_klass->FindDeclaredInstanceField("objectField",
"Ljava/lang/Object;");
ASSERT_TRUE(objectField != nullptr);
ASSERT_EQ(objectField->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
@@ -381,63 +382,63 @@
ASSERT_FALSE(soa.Self()->IsExceptionPending());
// Lookup fields.
- mirror::ArtField* booleanArrayField = h_klass->FindDeclaredStaticField("booleanArrayField", "[Z");
+ ArtField* booleanArrayField = h_klass->FindDeclaredStaticField("booleanArrayField", "[Z");
ASSERT_TRUE(booleanArrayField != nullptr);
mirror::BooleanArray* booleanArray = booleanArrayField->GetObject(h_klass.Get())->AsBooleanArray();
ASSERT_TRUE(booleanArray != nullptr);
ASSERT_EQ(booleanArray->GetLength(), 1);
ASSERT_EQ(booleanArray->GetWithoutChecks(0), false);
- mirror::ArtField* byteArrayField = h_klass->FindDeclaredStaticField("byteArrayField", "[B");
+ ArtField* byteArrayField = h_klass->FindDeclaredStaticField("byteArrayField", "[B");
ASSERT_TRUE(byteArrayField != nullptr);
mirror::ByteArray* byteArray = byteArrayField->GetObject(h_klass.Get())->AsByteArray();
ASSERT_TRUE(byteArray != nullptr);
ASSERT_EQ(byteArray->GetLength(), 1);
ASSERT_EQ(byteArray->GetWithoutChecks(0), 0);
- mirror::ArtField* charArrayField = h_klass->FindDeclaredStaticField("charArrayField", "[C");
+ ArtField* charArrayField = h_klass->FindDeclaredStaticField("charArrayField", "[C");
ASSERT_TRUE(charArrayField != nullptr);
mirror::CharArray* charArray = charArrayField->GetObject(h_klass.Get())->AsCharArray();
ASSERT_TRUE(charArray != nullptr);
ASSERT_EQ(charArray->GetLength(), 1);
ASSERT_EQ(charArray->GetWithoutChecks(0), 0u);
- mirror::ArtField* shortArrayField = h_klass->FindDeclaredStaticField("shortArrayField", "[S");
+ ArtField* shortArrayField = h_klass->FindDeclaredStaticField("shortArrayField", "[S");
ASSERT_TRUE(shortArrayField != nullptr);
mirror::ShortArray* shortArray = shortArrayField->GetObject(h_klass.Get())->AsShortArray();
ASSERT_TRUE(shortArray != nullptr);
ASSERT_EQ(shortArray->GetLength(), 1);
ASSERT_EQ(shortArray->GetWithoutChecks(0), 0);
- mirror::ArtField* intArrayField = h_klass->FindDeclaredStaticField("intArrayField", "[I");
+ ArtField* intArrayField = h_klass->FindDeclaredStaticField("intArrayField", "[I");
ASSERT_TRUE(intArrayField != nullptr);
mirror::IntArray* intArray = intArrayField->GetObject(h_klass.Get())->AsIntArray();
ASSERT_TRUE(intArray != nullptr);
ASSERT_EQ(intArray->GetLength(), 1);
ASSERT_EQ(intArray->GetWithoutChecks(0), 0);
- mirror::ArtField* longArrayField = h_klass->FindDeclaredStaticField("longArrayField", "[J");
+ ArtField* longArrayField = h_klass->FindDeclaredStaticField("longArrayField", "[J");
ASSERT_TRUE(longArrayField != nullptr);
mirror::LongArray* longArray = longArrayField->GetObject(h_klass.Get())->AsLongArray();
ASSERT_TRUE(longArray != nullptr);
ASSERT_EQ(longArray->GetLength(), 1);
ASSERT_EQ(longArray->GetWithoutChecks(0), static_cast<int64_t>(0));
- mirror::ArtField* floatArrayField = h_klass->FindDeclaredStaticField("floatArrayField", "[F");
+ ArtField* floatArrayField = h_klass->FindDeclaredStaticField("floatArrayField", "[F");
ASSERT_TRUE(floatArrayField != nullptr);
mirror::FloatArray* floatArray = floatArrayField->GetObject(h_klass.Get())->AsFloatArray();
ASSERT_TRUE(floatArray != nullptr);
ASSERT_EQ(floatArray->GetLength(), 1);
ASSERT_FLOAT_EQ(floatArray->GetWithoutChecks(0), static_cast<float>(0.0f));
- mirror::ArtField* doubleArrayField = h_klass->FindDeclaredStaticField("doubleArrayField", "[D");
+ ArtField* doubleArrayField = h_klass->FindDeclaredStaticField("doubleArrayField", "[D");
ASSERT_TRUE(doubleArrayField != nullptr);
mirror::DoubleArray* doubleArray = doubleArrayField->GetObject(h_klass.Get())->AsDoubleArray();
ASSERT_TRUE(doubleArray != nullptr);
ASSERT_EQ(doubleArray->GetLength(), 1);
ASSERT_DOUBLE_EQ(doubleArray->GetWithoutChecks(0), static_cast<double>(0.0f));
- mirror::ArtField* objectArrayField = h_klass->FindDeclaredStaticField("objectArrayField",
+ ArtField* objectArrayField = h_klass->FindDeclaredStaticField("objectArrayField",
"[Ljava/lang/Object;");
ASSERT_TRUE(objectArrayField != nullptr);
mirror::ObjectArray<mirror::Object>* objectArray =
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 8a23ff7..a303aa4 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,10 +25,10 @@
#include <unistd.h>
#include <memory>
+#include "art_field-inl.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -364,7 +364,7 @@
return result;
}
-std::string PrettyField(mirror::ArtField* f, bool with_type) {
+std::string PrettyField(ArtField* f, bool with_type) {
if (f == NULL) {
return "null";
}
@@ -1440,32 +1440,58 @@
}
}
-std::string GetDalvikCacheOrDie(const char* subdir, const bool create_if_absent) {
+static std::string GetDalvikCacheImpl(const char* subdir,
+ const bool create_if_absent,
+ const bool abort_on_error) {
CHECK(subdir != nullptr);
const char* android_data = GetAndroidData();
const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
const std::string dalvik_cache = dalvik_cache_root + subdir;
- if (create_if_absent && !OS::DirectoryExists(dalvik_cache.c_str())) {
+ if (!OS::DirectoryExists(dalvik_cache.c_str())) {
+ if (!create_if_absent) {
+ // TODO: Check callers. Traditional behavior is to not to abort, even when abort_on_error.
+ return "";
+ }
+
// Don't create the system's /data/dalvik-cache/... because it needs special permissions.
- if (strcmp(android_data, "/data") != 0) {
- int result = mkdir(dalvik_cache_root.c_str(), 0700);
- if (result != 0 && errno != EEXIST) {
- PLOG(FATAL) << "Failed to create dalvik-cache directory " << dalvik_cache_root;
- return "";
+ if (strcmp(android_data, "/data") == 0) {
+ if (abort_on_error) {
+ LOG(FATAL) << "Failed to find dalvik-cache directory " << dalvik_cache
+ << ", cannot create /data dalvik-cache.";
+ UNREACHABLE();
}
- result = mkdir(dalvik_cache.c_str(), 0700);
- if (result != 0) {
+ return "";
+ }
+
+ int result = mkdir(dalvik_cache_root.c_str(), 0700);
+ if (result != 0 && errno != EEXIST) {
+ if (abort_on_error) {
+ PLOG(FATAL) << "Failed to create dalvik-cache root directory " << dalvik_cache_root;
+ UNREACHABLE();
+ }
+ return "";
+ }
+
+ result = mkdir(dalvik_cache.c_str(), 0700);
+ if (result != 0) {
+ if (abort_on_error) {
PLOG(FATAL) << "Failed to create dalvik-cache directory " << dalvik_cache;
- return "";
+ UNREACHABLE();
}
- } else {
- LOG(FATAL) << "Failed to find dalvik-cache directory " << dalvik_cache;
return "";
}
}
return dalvik_cache;
}
+std::string GetDalvikCache(const char* subdir, const bool create_if_absent) {
+ return GetDalvikCacheImpl(subdir, create_if_absent, false);
+}
+
+std::string GetDalvikCacheOrDie(const char* subdir, const bool create_if_absent) {
+ return GetDalvikCacheImpl(subdir, create_if_absent, true);
+}
+
bool GetDalvikCacheFilename(const char* location, const char* cache_location,
std::string* filename, std::string* error_msg) {
if (location[0] != '/') {
diff --git a/runtime/utils.h b/runtime/utils.h
index e6a6b1d..6708c67 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -33,10 +33,10 @@
namespace art {
+class ArtField;
class DexFile;
namespace mirror {
-class ArtField;
class ArtMethod;
class Class;
class Object;
@@ -343,7 +343,7 @@
// Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
// "int a.b.C.f" (depending on the value of 'with_type').
-std::string PrettyField(mirror::ArtField* f, bool with_type = true)
+std::string PrettyField(ArtField* f, bool with_type = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true);
@@ -511,6 +511,9 @@
// Find $ANDROID_DATA, /data, or return nullptr.
const char* GetAndroidDataSafe(std::string* error_msg);
+// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
+// could not be found (or created).
+std::string GetDalvikCache(const char* subdir, bool create_if_absent = true);
// Returns the dalvik-cache location, or dies trying. subdir will be
// appended to the cache location.
std::string GetDalvikCacheOrDie(const char* subdir, bool create_if_absent = true);
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 833427e..6ccbd13 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -16,6 +16,7 @@
#include "utils.h"
+#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "mirror/array.h"
#include "mirror/array-inl.h"
@@ -146,7 +147,7 @@
mirror::Class* java_lang_String = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/String;");
- mirror::ArtField* f;
+ ArtField* f;
f = java_lang_String->FindDeclaredInstanceField("count", "I");
EXPECT_EQ("int java.lang.String.count", PrettyField(f));
EXPECT_EQ("java.lang.String.count", PrettyField(f, false));
@@ -366,6 +367,15 @@
GetDalvikCacheFilenameOrDie("/system/framework/boot.oat", "/foo").c_str());
}
+TEST_F(UtilsTest, GetDalvikCache) {
+ EXPECT_STREQ("", GetDalvikCache("should-not-exist123", false).c_str());
+
+ EXPECT_STREQ((android_data_ + "/dalvik-cache/.").c_str(), GetDalvikCache(".", false).c_str());
+ EXPECT_STREQ((android_data_ + "/dalvik-cache/should-not-be-there").c_str(),
+ GetDalvikCache("should-not-be-there", true).c_str());
+}
+
TEST_F(UtilsTest, GetSystemImageFilename) {
EXPECT_STREQ("/system/framework/arm/boot.art",
GetSystemImageFilename("/system/framework/boot.art", kArm).c_str());
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9fc2658..065df05 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -18,6 +18,7 @@
#include <iostream>
+#include "art_field-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "class_linker.h"
@@ -30,7 +31,6 @@
#include "indenter.h"
#include "intern_table.h"
#include "leb128.h"
-#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
@@ -451,7 +451,7 @@
Verify();
}
-mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
+ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
uint32_t dex_pc) {
Thread* self = Thread::Current();
StackHandleScope<3> hs(self);
@@ -464,7 +464,7 @@
return verifier.FindAccessedFieldAtDexPc(dex_pc);
}
-mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
+ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
CHECK(code_item_ != nullptr); // This only makes sense for methods with code.
// Strictly speaking, we ought to be able to get away with doing a subset of the full method
@@ -3788,7 +3788,7 @@
}
}
-mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
+ArtField* MethodVerifier::GetStaticField(int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
@@ -3802,8 +3802,8 @@
return nullptr; // Can't resolve Class so no more to do here, will do checking at runtime.
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
- class_loader_);
+ ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
+ class_loader_);
if (field == nullptr) {
VLOG(verifier) << "Unable to resolve static field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
@@ -3823,7 +3823,7 @@
return field;
}
-mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
+ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
@@ -3837,8 +3837,8 @@
return nullptr; // Can't resolve Class so no more to do here
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
- class_loader_);
+ ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
+ class_loader_);
if (field == nullptr) {
VLOG(verifier) << "Unable to resolve instance field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
@@ -3894,7 +3894,7 @@
void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- mirror::ArtField* field;
+ ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
@@ -3914,12 +3914,8 @@
}
}
- mirror::Class* field_type_class;
- {
- StackHandleScope<1> hs(self_);
- HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
- field_type_class = can_load_classes_ ? h_field->GetType<true>() : h_field->GetType<false>();
- }
+ mirror::Class* field_type_class =
+ can_load_classes_ ? field->GetType<true>() : field->GetType<false>();
if (field_type_class != nullptr) {
field_type = ®_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
@@ -3988,7 +3984,7 @@
}
}
-mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
+ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
RegisterLine* reg_line) {
DCHECK(IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) << inst->Opcode();
const RegType& object_type = reg_line->GetRegisterType(this, inst->VRegB_22c());
@@ -3997,8 +3993,7 @@
return nullptr;
}
uint32_t field_offset = static_cast<uint32_t>(inst->VRegC_22c());
- mirror::ArtField* const f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
- field_offset);
+ ArtField* const f = ArtField::FindInstanceFieldWithOffset(object_type.GetClass(), field_offset);
DCHECK(f == nullptr || f->GetOffset().Uint32Value() == field_offset);
if (f == nullptr) {
VLOG(verifier) << "Failed to find instance field at offset '" << field_offset
@@ -4012,7 +4007,7 @@
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
- mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
+ ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
if (field == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
return;
@@ -4030,12 +4025,8 @@
// Get the field type.
const RegType* field_type;
{
- mirror::Class* field_type_class;
- {
- StackHandleScope<1> hs(Thread::Current());
- HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
- field_type_class = can_load_classes_ ? h_field->GetType<true>() : h_field->GetType<false>();
- }
+ mirror::Class* field_type_class = can_load_classes_ ? field->GetType<true>() :
+ field->GetType<false>();
if (field_type_class != nullptr) {
field_type = ®_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 8c0321e..cd414c2 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -191,7 +191,7 @@
// Returns the accessed field corresponding to the quick instruction's field
// offset at 'dex_pc' in method 'm'.
- static mirror::ArtField* FindAccessedFieldAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc)
+ static ArtField* FindAccessedFieldAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the invoked method corresponding to the quick instruction's vtable
@@ -250,7 +250,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the accessed field of a quick field access (iget/iput-quick) or nullptr
// if it cannot be found.
- mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
+ ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Is the method being verified a constructor?
@@ -301,7 +301,7 @@
void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc)
+ ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
@@ -525,11 +525,11 @@
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
- mirror::ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
+ ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
- mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget/sget/iput/sput instruction.
enum class FieldAccessType { // private
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index 4d781c3..a8db069 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -17,6 +17,7 @@
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
+import java.util.Map;
public class Main {
public static void main(String[] args) throws Exception {
@@ -26,6 +27,7 @@
return;
}
testMethodTracing();
+ testRuntimeStat();
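+ // Also exercise the map-based variant (assumed intended; testRuntimeStats()
+ // below is otherwise never called).
+ testRuntimeStats();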
}
private static File createTempFile() throws Exception {
@@ -109,10 +111,108 @@
tempFile.delete();
}
+ private static void checkNumber(String s) throws Exception {
+ if (s == null) {
+ System.out.println("Got null string");
+ return;
+ }
+ long n = Long.valueOf(s);
+ if (n < 0) {
+ System.out.println("Got negative number " + n);
+ }
+ }
+
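+ // A histogram string is assumed to be a comma-separated list of "key:value"
+ // buckets with non-negative values and non-negative, non-decreasing keys,
+ // e.g. "0:17,1:2,3:1".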
+ private static void checkHistogram(String s) throws Exception {
+ if (s == null || s.length() == 0) {
+ System.out.println("Got null or empty string");
+ return;
+ }
+ String[] buckets = s.split(",");
+ long last_key = 0;
+ for (int i = 0; i < buckets.length; ++i) {
+ String bucket = buckets[i];
+ if (bucket.length() == 0) {
+ System.out.println("Got empty bucket");
+ continue;
+ }
+ String[] kv = bucket.split(":");
+ if (kv.length != 2 || kv[0].length() == 0 || kv[1].length() == 0) {
+ System.out.println("Got bad bucket " + bucket);
+ continue;
+ }
+ long key = Long.valueOf(kv[0]);
+ long value = Long.valueOf(kv[1]);
+ if (key < 0 || value < 0) {
+ System.out.println("Got negative key or value " + bucket);
+ continue;
+ }
+ if (key < last_key) {
+ System.out.println("Got decreasing key " + bucket);
+ continue;
+ }
+ last_key = key;
+ }
+ }
+
+ private static void testRuntimeStat() throws Exception {
+ // Invoke at least one GC and wait for 20 seconds or so, so that we get at
+ // least one bucket in the histograms.
+ for (int i = 0; i < 20; ++i) {
+ Runtime.getRuntime().gc();
+ Thread.sleep(1000L);
+ }
+ String gc_count = VMDebug.getRuntimeStat("art.gc.gc-count");
+ String gc_time = VMDebug.getRuntimeStat("art.gc.gc-time");
+ String bytes_allocated = VMDebug.getRuntimeStat("art.gc.bytes-allocated");
+ String bytes_freed = VMDebug.getRuntimeStat("art.gc.bytes-freed");
+ String blocking_gc_count = VMDebug.getRuntimeStat("art.gc.blocking-gc-count");
+ String blocking_gc_time = VMDebug.getRuntimeStat("art.gc.blocking-gc-time");
+ String gc_count_rate_histogram = VMDebug.getRuntimeStat("art.gc.gc-count-rate-histogram");
+ String blocking_gc_count_rate_histogram =
+ VMDebug.getRuntimeStat("art.gc.blocking-gc-count-rate-histogram");
+ checkNumber(gc_count);
+ checkNumber(gc_time);
+ checkNumber(bytes_allocated);
+ checkNumber(bytes_freed);
+ checkNumber(blocking_gc_count);
+ checkNumber(blocking_gc_time);
+ checkHistogram(gc_count_rate_histogram);
+ checkHistogram(blocking_gc_count_rate_histogram);
+ }
+
+ private static void testRuntimeStats() throws Exception {
+ // Invoke at least one GC and wait for 20 seconds or so, so that we get at
+ // least one bucket in the histograms.
+ for (int i = 0; i < 20; ++i) {
+ Runtime.getRuntime().gc();
+ Thread.sleep(1000L);
+ }
+ Map<String, String> map = VMDebug.getRuntimeStats();
+ String gc_count = map.get("art.gc.gc-count");
+ String gc_time = map.get("art.gc.gc-time");
+ String bytes_allocated = map.get("art.gc.bytes-allocated");
+ String bytes_freed = map.get("art.gc.bytes-freed");
+ String blocking_gc_count = map.get("art.gc.blocking-gc-count");
+ String blocking_gc_time = map.get("art.gc.blocking-gc-time");
+ String gc_count_rate_histogram = map.get("art.gc.gc-count-rate-histogram");
+ String blocking_gc_count_rate_histogram =
+ map.get("art.gc.blocking-gc-count-rate-histogram");
+ checkNumber(gc_count);
+ checkNumber(gc_time);
+ checkNumber(bytes_allocated);
+ checkNumber(bytes_freed);
+ checkNumber(blocking_gc_count);
+ checkNumber(blocking_gc_time);
+ checkHistogram(gc_count_rate_histogram);
+ checkHistogram(blocking_gc_count_rate_histogram);
+ }
+
private static class VMDebug {
private static final Method startMethodTracingMethod;
private static final Method stopMethodTracingMethod;
private static final Method getMethodTracingModeMethod;
+ private static final Method getRuntimeStatMethod;
+ private static final Method getRuntimeStatsMethod;
static {
try {
Class c = Class.forName("dalvik.system.VMDebug");
@@ -120,6 +220,8 @@
Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
+ getRuntimeStatMethod = c.getDeclaredMethod("getRuntimeStat", String.class);
+ getRuntimeStatsMethod = c.getDeclaredMethod("getRuntimeStats");
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -136,5 +238,11 @@
public static int getMethodTracingMode() throws Exception {
return (int) getMethodTracingModeMethod.invoke(null);
}
+ public static String getRuntimeStat(String statName) throws Exception {
+ return (String) getRuntimeStatMethod.invoke(null, statName);
+ }
+ public static Map<String, String> getRuntimeStats() throws Exception {
+ return (Map<String, String>) getRuntimeStatsMethod.invoke(null);
+ }
}
}
diff --git a/test/104-growth-limit/src/Main.java b/test/104-growth-limit/src/Main.java
index 55469db..d666377 100644
--- a/test/104-growth-limit/src/Main.java
+++ b/test/104-growth-limit/src/Main.java
@@ -21,8 +21,14 @@
public class Main {
public static void main(String[] args) throws Exception {
-
int alloc1 = 1;
+ // Set up reflection ahead of the allocation loop to prevent an OOME caused by
+ // allocations from Class.forName or getDeclaredMethod.
+ // Reflective equivalent of: dalvik.system.VMRuntime.getRuntime().clearGrowthLimit();
+ final Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
+ final Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
+ final Object runtime = get_runtime.invoke(null);
+ final Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
try {
List<byte[]> l = new ArrayList<byte[]>();
while (true) {
@@ -33,13 +39,7 @@
} catch (OutOfMemoryError e) {
}
// Expand the heap to the maximum size.
- // Reflective equivalent of: dalvik.system.VMRuntime.getRuntime().clearGrowthLimit();
- Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
- Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
- Object runtime = get_runtime.invoke(null);
- Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
clear_growth_limit.invoke(runtime);
-
int alloc2 = 1;
try {
List<byte[]> l = new ArrayList<byte[]>();
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 1f0017e..3cbcebb 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -309,6 +309,457 @@
return arg ^ -1;
}
+ /**
+ * Test that an addition or subtraction operation with both inputs negated is
+ * optimized to use a single negation after the operation.
+ * The transformation tested is implemented in
+ * `InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop`.
+ */
+
+ // CHECK-START: int Main.AddNegs1(int, int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Arg2]] ]
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: Return [ [[Add]] ]
+
+ // CHECK-START: int Main.AddNegs1(int, int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-NOT: Neg
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[Arg1]] [[Arg2]] ]
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Add]] ]
+ // CHECK-DAG: Return [ [[Neg]] ]
+
+ public static int AddNegs1(int arg1, int arg2) {
+ return -arg1 + -arg2;
+ }
+
+ /**
+ * This is similar to the test-case AddNegs1, but the negations have
+ * multiple uses.
+ * The transformation tested is implemented in
+ * `InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop`.
+ * The current code won't perform the previous optimization. The
+ * transformations do not look at other uses of their inputs. As they don't
+ * know what will happen with other uses, they do not take the risk of
+ * increasing the register pressure by creating or extending live ranges.
+ */
+
+ // CHECK-START: int Main.AddNegs2(int, int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Arg2]] ]
+ // CHECK-DAG: [[Add1:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: [[Add2:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Add1]] [[Add2]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ // CHECK-START: int Main.AddNegs2(int, int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Arg2]] ]
+ // CHECK-DAG: [[Add1:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: [[Add2:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-NOT: Neg
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Add1]] [[Add2]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ public static int AddNegs2(int arg1, int arg2) {
+ int temp1 = -arg1;
+ int temp2 = -arg2;
+ return (temp1 + temp2) | (temp1 + temp2);
+ }
+
+ /**
+ * This follows test-cases AddNegs1 and AddNegs2.
+ * The transformation tested is implemented in
+ * `InstructionSimplifierVisitor::TryMoveNegOnInputsAfterBinop`.
+ * The optimization should not happen if it would move an additional
+ * instruction into the loop.
+ */
+
+ // CHECK-START: long Main.AddNegs3(long, long) instruction_simplifier (before)
+ // -------------- Arguments and initial negation operations.
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg1:j\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Neg2:j\d+]] Neg [ [[Arg2]] ]
+ // CHECK: Goto
+ // -------------- Loop
+ // CHECK: SuspendCheck
+ // CHECK: [[Add:j\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK: Goto
+
+ // CHECK-START: long Main.AddNegs3(long, long) instruction_simplifier (after)
+ // -------------- Arguments and initial negation operations.
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg1:j\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Neg2:j\d+]] Neg [ [[Arg2]] ]
+ // CHECK: Goto
+ // -------------- Loop
+ // CHECK: SuspendCheck
+ // CHECK: [[Add:j\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-NOT: Neg
+ // CHECK: Goto
+
+ public static long AddNegs3(long arg1, long arg2) {
+ long res = 0;
+ long n_arg1 = -arg1;
+ long n_arg2 = -arg2;
+ for (long i = 0; i < 1; i++) {
+ res += n_arg1 + n_arg2 + i;
+ }
+ return res;
+ }
+
+ /**
+ * Test the simplification of an addition with a negated argument into a
+ * subtraction.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitAdd`.
+ */
+
+ // CHECK-START: long Main.AddNeg1(long, long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Add:j\d+]] Add [ [[Neg]] [[Arg2]] ]
+ // CHECK-DAG: Return [ [[Add]] ]
+
+ // CHECK-START: long Main.AddNeg1(long, long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Arg2]] [[Arg1]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: long Main.AddNeg1(long, long) instruction_simplifier (after)
+ // CHECK-NOT: Neg
+ // CHECK-NOT: Add
+
+ public static long AddNeg1(long arg1, long arg2) {
+ return -arg1 + arg2;
+ }
+
+ /**
+ * This is similar to the test-case AddNeg1, but the negation has two uses.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitAdd`.
+ * The current code won't perform the previous optimization. The
+ * transformations do not look at other uses of their inputs. As they don't
+ * know what will happen with other uses, they do not take the risk of
+ * increasing the register pressure by creating or extending live ranges.
+ */
+
+ // CHECK-START: long Main.AddNeg2(long, long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg2]] ]
+ // CHECK-DAG: [[Add1:j\d+]] Add [ [[Arg1]] [[Neg]] ]
+ // CHECK-DAG: [[Add2:j\d+]] Add [ [[Arg1]] [[Neg]] ]
+ // CHECK-DAG: [[Res:j\d+]] Or [ [[Add1]] [[Add2]] ]
+ // CHECK-DAG: Return [ [[Res]] ]
+
+ // CHECK-START: long Main.AddNeg2(long, long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg2]] ]
+ // CHECK-DAG: [[Add1:j\d+]] Add [ [[Arg1]] [[Neg]] ]
+ // CHECK-DAG: [[Add2:j\d+]] Add [ [[Arg1]] [[Neg]] ]
+ // CHECK-DAG: [[Res:j\d+]] Or [ [[Add1]] [[Add2]] ]
+ // CHECK-DAG: Return [ [[Res]] ]
+
+ // CHECK-START: long Main.AddNeg2(long, long) instruction_simplifier (after)
+ // CHECK-NOT: Sub
+
+ public static long AddNeg2(long arg1, long arg2) {
+ long temp = -arg2;
+ return (arg1 + temp) | (arg1 + temp);
+ }
+
+ /**
+ * Test simplification of the `-(-var)` pattern.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitNeg`.
+ */
+
+ // CHECK-START: long Main.NegNeg1(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg1:j\d+]] Neg [ [[Arg]] ]
+ // CHECK-DAG: [[Neg2:j\d+]] Neg [ [[Neg1]] ]
+ // CHECK-DAG: Return [ [[Neg2]] ]
+
+ // CHECK-START: long Main.NegNeg1(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ // CHECK-START: long Main.NegNeg1(long) instruction_simplifier (after)
+ // CHECK-NOT: Neg
+
+ public static long NegNeg1(long arg) {
+ return -(-arg);
+ }
+
+ /**
+ * Test 'multi-step' simplification, where a first transformation yields a
+ * new simplification possibility for the current instruction.
+ * The transformations tested are implemented in `InstructionSimplifierVisitor::VisitNeg`
+ * and in `InstructionSimplifierVisitor::VisitAdd`.
+ */
+
+ // CHECK-START: int Main.NegNeg2(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg]] ]
+ // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Neg1]] ]
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: Return [ [[Add]] ]
+
+ // CHECK-START: int Main.NegNeg2(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: int Main.NegNeg2(int) instruction_simplifier (after)
+ // CHECK-NOT: Neg
+ // CHECK-NOT: Add
+
+ public static int NegNeg2(int arg) {
+ int temp = -arg;
+ return temp + -temp;
+ }
+
+ /**
+ * Test another 'multi-step' simplification, where a first transformation
+ * yields a new simplification possibility for the current instruction.
+ * The transformations tested are implemented in `InstructionSimplifierVisitor::VisitNeg`
+ * and in `InstructionSimplifierVisitor::VisitSub`.
+ */
+
+ // CHECK-START: long Main.NegNeg3(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg]] ]
+ // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Const0]] [[Neg]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: long Main.NegNeg3(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ // CHECK-START: long Main.NegNeg3(long) instruction_simplifier (after)
+ // CHECK-NOT: Neg
+ // CHECK-NOT: Sub
+
+ public static long NegNeg3(long arg) {
+ return 0 - -arg;
+ }
+
+ /**
+ * Test that a negated subtraction is simplified to a subtraction with its
+ * arguments reversed.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitNeg`.
+ */
+
+ // CHECK-START: int Main.NegSub1(int, int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg1]] [[Arg2]] ]
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Sub]] ]
+ // CHECK-DAG: Return [ [[Neg]] ]
+
+ // CHECK-START: int Main.NegSub1(int, int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg2]] [[Arg1]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: int Main.NegSub1(int, int) instruction_simplifier (after)
+ // CHECK-NOT: Neg
+
+ public static int NegSub1(int arg1, int arg2) {
+ return -(arg1 - arg2);
+ }
+
+ /**
+ * This is similar to the test-case NegSub1, but the subtraction has
+ * multiple uses.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitNeg`.
+ * The current code won't perform the previous optimization. The
+ * transformations do not look at other uses of their inputs. As they don't
+ * know what will happen with other uses, they do not take the risk of
+ * increasing the register pressure by creating or extending live ranges.
+ */
+
+ // CHECK-START: int Main.NegSub2(int, int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg1]] [[Arg2]] ]
+ // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Sub]] ]
+ // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Sub]] ]
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ // CHECK-START: int Main.NegSub2(int, int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg1]] [[Arg2]] ]
+ // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Sub]] ]
+ // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Sub]] ]
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ public static int NegSub2(int arg1, int arg2) {
+ int temp = arg1 - arg2;
+ return -temp | -temp;
+ }
+
+ /**
+ * Test simplification of the `~~var` pattern.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitNot`.
+ */
+
+ // CHECK-START: long Main.NotNot1(long) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: [[ConstF1:j\d+]] LongConstant -1
+ // CHECK-DAG: [[Xor1:j\d+]] Xor [ [[Arg]] [[ConstF1]] ]
+ // CHECK-DAG: [[Xor2:j\d+]] Xor [ [[Xor1]] [[ConstF1]] ]
+ // CHECK-DAG: Return [ [[Xor2]] ]
+
+ // CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:j\d+]] ParameterValue
+ // CHECK-DAG: Return [ [[Arg]] ]
+
+ // CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
+ // CHECK-NOT: Xor
+
+ public static long NotNot1(long arg) {
+ return ~~arg;
+ }
+
+ // CHECK-START: int Main.NotNot2(int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[ConstF1:i\d+]] IntConstant -1
+ // CHECK-DAG: [[Xor1:i\d+]] Xor [ [[Arg]] [[ConstF1]] ]
+ // CHECK-DAG: [[Xor2:i\d+]] Xor [ [[Xor1]] [[ConstF1]] ]
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[Xor1]] [[Xor2]] ]
+ // CHECK-DAG: Return [ [[Add]] ]
+
+ // CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg:i\d+]] ParameterValue
+ // CHECK-DAG: [[Not:i\d+]] Not [ [[Arg]] ]
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[Not]] [[Arg]] ]
+ // CHECK-DAG: Return [ [[Add]] ]
+
+ // CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
+ // CHECK-NOT: Xor
+
+ public static int NotNot2(int arg) {
+ int temp = ~arg;
+ return temp + ~temp;
+ }
+
+ /**
+ * Test the simplification of a subtraction with a negated argument.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitSub`.
+ */
+
+ // CHECK-START: int Main.SubNeg1(int, int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK-DAG: Return [ [[Sub]] ]
+
+ // CHECK-START: int Main.SubNeg1(int, int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Add:i\d+]] Add [ [[Arg1]] [[Arg2]] ]
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Add]] ]
+ // CHECK-DAG: Return [ [[Neg]] ]
+
+ // CHECK-START: int Main.SubNeg1(int, int) instruction_simplifier (after)
+ // CHECK-NOT: Sub
+
+ public static int SubNeg1(int arg1, int arg2) {
+ return -arg1 - arg2;
+ }
+
+ /**
+ * This is similar to the test-case SubNeg1, but the negation has
+ * multiple uses.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitSub`.
+ * The current code won't perform the previous optimization. The
+ * transformations do not look at other uses of their inputs. As they don't
+ * know what will happen with other uses, they do not take the risk of
+ * increasing the register pressure by creating or extending live ranges.
+ */
+
+ // CHECK-START: int Main.SubNeg2(int, int) instruction_simplifier (before)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Sub1:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK-DAG: [[Sub2:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Sub1]] [[Sub2]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ // CHECK-START: int Main.SubNeg2(int, int) instruction_simplifier (after)
+ // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: [[Sub1:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK-DAG: [[Sub2:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK-DAG: [[Or:i\d+]] Or [ [[Sub1]] [[Sub2]] ]
+ // CHECK-DAG: Return [ [[Or]] ]
+
+ // CHECK-START: int Main.SubNeg2(int, int) instruction_simplifier (after)
+ // CHECK-NOT: Add
+
+ public static int SubNeg2(int arg1, int arg2) {
+ int temp = -arg1;
+ return (temp - arg2) | (temp - arg2);
+ }
+
+ /**
+ * This follows test-cases SubNeg1 and SubNeg2.
+ * The transformation tested is implemented in `InstructionSimplifierVisitor::VisitSub`.
+ * The optimization should not happen if it would move an additional
+ * instruction into the loop.
+ */
+
+ // CHECK-START: long Main.SubNeg3(long, long) instruction_simplifier (before)
+ // -------------- Arguments and initial negation operation.
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg1]] ]
+ // CHECK: Goto
+ // -------------- Loop
+ // CHECK: SuspendCheck
+ // CHECK: [[Sub:j\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK: Goto
+
+ // CHECK-START: long Main.SubNeg3(long, long) instruction_simplifier (after)
+ // -------------- Arguments and initial negation operation.
+ // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
+ // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
+ // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: Goto
+ // -------------- Loop
+ // CHECK: SuspendCheck
+ // CHECK: [[Sub:j\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK-NOT: Neg
+ // CHECK: Goto
+
+ public static long SubNeg3(long arg1, long arg2) {
+ long res = 0;
+ long temp = -arg1;
+ for (long i = 0; i < 1; i++) {
+ res += temp - arg2 - i;
+ }
+ return res;
+ }
+
public static void main(String[] args) {
int arg = 123456;
@@ -328,5 +779,20 @@
assertLongEquals(UShr0(arg), arg);
assertIntEquals(Xor0(arg), arg);
assertIntEquals(XorAllOnes(arg), ~arg);
+ assertIntEquals(AddNegs1(arg, arg + 1), -(arg + arg + 1));
+ assertIntEquals(AddNegs2(arg, arg + 1), -(arg + arg + 1));
+ assertLongEquals(AddNegs3(arg, arg + 1), -(2 * arg + 1));
+ assertLongEquals(AddNeg1(arg, arg + 1), 1);
+ assertLongEquals(AddNeg2(arg, arg + 1), -1);
+ assertLongEquals(NegNeg1(arg), arg);
+ assertIntEquals(NegNeg2(arg), 0);
+ assertLongEquals(NegNeg3(arg), arg);
+ assertIntEquals(NegSub1(arg, arg + 1), 1);
+ assertIntEquals(NegSub2(arg, arg + 1), 1);
+ assertLongEquals(NotNot1(arg), arg);
+ assertIntEquals(NotNot2(arg), -1);
+ assertIntEquals(SubNeg1(arg, arg + 1), -(arg + arg + 1));
+ assertIntEquals(SubNeg2(arg, arg + 1), -(arg + arg + 1));
+ assertLongEquals(SubNeg3(arg, arg + 1), -(2 * arg + 1));
}
}
diff --git a/test/468-bool-simplifier-regression/expected.txt b/test/468-checker-bool-simplifier-regression/expected.txt
similarity index 100%
rename from test/468-bool-simplifier-regression/expected.txt
rename to test/468-checker-bool-simplifier-regression/expected.txt
diff --git a/test/468-bool-simplifier-regression/info.txt b/test/468-checker-bool-simplifier-regression/info.txt
similarity index 100%
rename from test/468-bool-simplifier-regression/info.txt
rename to test/468-checker-bool-simplifier-regression/info.txt
diff --git a/test/468-bool-simplifier-regression/smali/TestCase.smali b/test/468-checker-bool-simplifier-regression/smali/TestCase.smali
similarity index 100%
rename from test/468-bool-simplifier-regression/smali/TestCase.smali
rename to test/468-checker-bool-simplifier-regression/smali/TestCase.smali
diff --git a/test/468-bool-simplifier-regression/src/Main.java b/test/468-checker-bool-simplifier-regression/src/Main.java
similarity index 64%
rename from test/468-bool-simplifier-regression/src/Main.java
rename to test/468-checker-bool-simplifier-regression/src/Main.java
index 1dd27c9..65f20b3 100644
--- a/test/468-bool-simplifier-regression/src/Main.java
+++ b/test/468-checker-bool-simplifier-regression/src/Main.java
@@ -17,6 +17,20 @@
import java.lang.reflect.*;
public class Main {
+
+ // CHECK-START: boolean TestCase.testCase() boolean_simplifier (before)
+ // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
+ // CHECK-DAG: [[Value:z\d+]] StaticFieldGet
+ // CHECK-DAG: If [ [[Value]] ]
+ // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const1]] [[Const0]] ]
+ // CHECK-DAG: Return [ [[Phi]] ]
+
+ // CHECK-START: boolean TestCase.testCase() boolean_simplifier (after)
+ // CHECK-DAG: [[Value:z\d+]] StaticFieldGet
+ // CHECK-DAG: [[Not:z\d+]] BooleanNot [ [[Value]] ]
+ // CHECK-DAG: Return [ [[Not]] ]
+
public static boolean runTest(boolean input) throws Exception {
Class<?> c = Class.forName("TestCase");
Method m = c.getMethod("testCase");
diff --git a/test/472-type-propagation/expected.txt b/test/472-type-propagation/expected.txt
new file mode 100644
index 0000000..0b29bb1
--- /dev/null
+++ b/test/472-type-propagation/expected.txt
@@ -0,0 +1,2 @@
+4.3
+1.2
diff --git a/test/472-type-propagation/info.txt b/test/472-type-propagation/info.txt
new file mode 100644
index 0000000..b86e5a2
--- /dev/null
+++ b/test/472-type-propagation/info.txt
@@ -0,0 +1,3 @@
+Regression test for optimizing's type propagation:
+If a phi requests its inputs to be of a certain type, the inputs need
+to propagate that type to their users, as those users might be phis.
diff --git a/test/472-type-propagation/src/Main.java b/test/472-type-propagation/src/Main.java
new file mode 100644
index 0000000..f9e302f
--- /dev/null
+++ b/test/472-type-propagation/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public static void main(String[] args) {
+ ssaBuilderDouble(new double[] { 1.2, 4.3, 5.2 });
+ ssaBuilderDouble(new double[] { 1.2, 4.3, 5.2, 6.8 });
+ }
+
+ public static void ssaBuilderDouble(double[] array) {
+ double x;
+ if (array.length > 3) {
+ x = array[0];
+ } else {
+ x = array[1];
+ }
+ array[2] = x;
+ System.out.println(x);
+ }
+}
diff --git a/test/473-remove-dead-block/expected.txt b/test/473-remove-dead-block/expected.txt
new file mode 100644
index 0000000..c09201e
--- /dev/null
+++ b/test/473-remove-dead-block/expected.txt
@@ -0,0 +1 @@
+123368133
diff --git a/test/473-remove-dead-block/info.txt b/test/473-remove-dead-block/info.txt
new file mode 100644
index 0000000..81de4e6
--- /dev/null
+++ b/test/473-remove-dead-block/info.txt
@@ -0,0 +1,3 @@
+Regression test for optimizing's dead block removal:
+When a dead block is removed from its predecessors, it must also be removed
+from its successors; otherwise the CFG remains in an unexpected shape and
+later crashes the compiler.
diff --git a/test/473-remove-dead-block/src/Main.java b/test/473-remove-dead-block/src/Main.java
new file mode 100644
index 0000000..cca2976
--- /dev/null
+++ b/test/473-remove-dead-block/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public static void main(String[] args) {
+ System.out.println(test(false, 5));
+ }
+
+ public static int test(boolean b, int i1) {
+ int j = 4;
+ int s1 = 26294;
+
+ for (int i = 25; i > 1; --i) {
+ if (b) continue;
+ // javac/dx will remove the catch information, but
+ // keep the catch code around. The optimizing compiler
+ // used to crash in the presence of dead blocks like
+ // the code in this catch block.
+ try {
+ i1 = i1 * 26295 + (s1 / 26295);
+ } catch (Throwable exc2) {
+ for (j = 1; j < 39; ++j) {
+ j++;
+ }
+ }
+ }
+ return i1;
+ }
+}
diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java
index fab153b..8b4a9a4 100644
--- a/test/MyClassNatives/MyClassNatives.java
+++ b/test/MyClassNatives/MyClassNatives.java
@@ -80,6 +80,7 @@
Object o248, Object o249, Object o250, Object o251, Object o252, Object o253);
native void withoutImplementation();
+ native Object withoutImplementationRefReturn();
native static void stackArgsIntsFirst(int i1, int i2, int i3, int i4, int i5, int i6, int i7,
int i8, int i9, int i10, float f1, float f2, float f3, float f4, float f5, float f6,
diff --git a/tools/analyze-init-failures.py b/tools/analyze-init-failures.py
index cca05e1..60c7dc5 100755
--- a/tools/analyze-init-failures.py
+++ b/tools/analyze-init-failures.py
@@ -25,7 +25,7 @@
_CLASS_RE = re.compile(r'^L(.*);$')
-_ERROR_LINE_RE = re.compile(r'^java.lang.InternalError: (.*)')
+_ERROR_LINE_RE = re.compile(r'^dalvik.system.TransactionAbortError: (.*)')
_STACK_LINE_RE = re.compile(r'^\s*at\s[^\s]*\s([^\s]*)')
def Confused(filename, line_number, line):
diff --git a/tools/stream-trace-converter.py b/tools/stream-trace-converter.py
new file mode 100755
index 0000000..951b05b
--- /dev/null
+++ b/tools/stream-trace-converter.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Script that parses a trace filed produced in streaming mode. The file is broken up into
+ a header and body part, which, when concatenated, make up a non-streaming trace file that
+ can be used with traceview."""
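+# Usage: stream-trace-converter.py <trace-file>
+# Writes <trace-file>.header and <trace-file>.body next to the input; concatenate
+# them to obtain a file usable with traceview.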
+
+import sys
+
+class MyException(Exception):
+ pass
+
+class BufferUnderrun(Exception):
+ pass
+
+def ReadShortLE(f):
+ byte1 = f.read(1)
+ if not byte1:
+ raise BufferUnderrun()
+ byte2 = f.read(1)
+ if not byte2:
+ raise BufferUnderrun()
+ return ord(byte1) + (ord(byte2) << 8);
+
+def WriteShortLE(f, val):
+ bytes = [ (val & 0xFF), ((val >> 8) & 0xFF) ]
+ asbytearray = bytearray(bytes)
+ f.write(asbytearray)
+
+def ReadIntLE(f):
+ byte1 = f.read(1)
+ if not byte1:
+ raise BufferUnderrun()
+ byte2 = f.read(1)
+ if not byte2:
+ raise BufferUnderrun()
+ byte3 = f.read(1)
+ if not byte3:
+ raise BufferUnderrun()
+ byte4 = f.read(1)
+ if not byte4:
+ raise BufferUnderrun()
+ return ord(byte1) + (ord(byte2) << 8) + (ord(byte3) << 16) + (ord(byte4) << 24);
+
+def WriteIntLE(f, val):
+ bytes = [ (val & 0xFF), ((val >> 8) & 0xFF), ((val >> 16) & 0xFF), ((val >> 24) & 0xFF) ]
+ asbytearray = bytearray(bytes)
+ f.write(asbytearray)
+
+def Copy(input, output, length):
+ buf = input.read(length)
+ if len(buf) != length:
+ raise BufferUnderrun()
+ output.write(buf)
+
+class Rewriter:
+
+ def PrintHeader(self, header):
+ header.write('*version\n');
+ header.write('3\n');
+ header.write('data-file-overflow=false\n');
+ header.write('clock=dual\n');
+ header.write('vm=art\n');
+
+ def ProcessDataHeader(self, input, body):
+ magic = ReadIntLE(input)
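+ # 0x574f4c53 is the ASCII bytes 'SLOW' read as a little-endian integer.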
+ if magic != 0x574f4c53:
+ raise MyException("Magic wrong")
+
+ WriteIntLE(body, magic)
+
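+ # Streaming-mode traces set the high nibble of the version short; verify it,
+ # then clear it to recover the actual trace version.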
+ version = ReadShortLE(input)
+ if (version & 0xf0) != 0xf0:
+ raise MyException("Does not seem to be a streaming trace: %d." % version)
+ version = version ^ 0xf0
+
+ if version != 3:
+ raise MyException("Only support version 3")
+
+ WriteShortLE(body, version)
+
+ # read offset
+ offsetToData = ReadShortLE(input) - 16
+ WriteShortLE(body, offsetToData + 16)
+
+ # copy startWhen
+ Copy(input, body, 8)
+
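+ # Only version 3 can reach this point (enforced above), so the record size is
+ # read from the header; the version 1/2 sizes are kept for reference.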
+ if version == 1:
+ self._mRecordSize = 9;
+ elif version == 2:
+ self._mRecordSize = 10;
+ else:
+ self._mRecordSize = ReadShortLE(input)
+ WriteShortLE(body, self._mRecordSize)
+ offsetToData -= 2;
+
+ # Skip over offsetToData bytes
+ Copy(input, body, offsetToData)
+
+ def ProcessMethod(self, input):
+ stringLength = ReadShortLE(input)
+ str = input.read(stringLength)
+ self._methods.append(str)
+ print 'New method: %s' % str
+
+ def ProcessThread(self, input):
+ tid = ReadShortLE(input)
+ stringLength = ReadShortLE(input)
+ str = input.read(stringLength)
+ self._threads.append('%d\t%s\n' % (tid, str))
+ print 'New thread: %d/%s' % (tid, str)
+
+ def ProcessSpecial(self, input):
+ code = ord(input.read(1))
+ if code == 1:
+ self.ProcessMethod(input)
+ elif code == 2:
+ self.ProcessThread(input)
+ else:
+ raise MyException("Unknown special!")
+
+ def Process(self, input, body):
+ try:
+ while True:
+ threadId = ReadShortLE(input)
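+ # A thread id of 0 marks a special record (a method or thread definition).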
+ if threadId == 0:
+ self.ProcessSpecial(input)
+ else:
+ # Regular record; just copy it through.
+ WriteShortLE(body, threadId)
+ Copy(input, body, self._mRecordSize - 2)
+ except BufferUnderrun:
+ print 'Buffer underrun, file was probably truncated. Results should still be usable.'
+
+ def Finalize(self, header):
+ header.write('*threads\n')
+ for t in self._threads:
+ header.write(t)
+ header.write('*methods\n')
+ for m in self._methods:
+ header.write(m)
+ header.write('*end\n')
+
+ def ProcessFile(self, filename):
+ input = open(filename, 'rb') # Input file
+ header = open(filename + '.header', 'w') # Header part
+ body = open(filename + '.body', 'wb') # Body part
+
+ self.PrintHeader(header)
+
+ self.ProcessDataHeader(input, body)
+
+ self._methods = []
+ self._threads = []
+ self.Process(input, body)
+
+ self.Finalize(header)
+
+ input.close()
+ header.close()
+ body.close()
+
+def main():
+ Rewriter().ProcessFile(sys.argv[1])
+ header_name = sys.argv[1] + '.header'
+ body_name = sys.argv[1] + '.body'
+ print 'Results have been written to %s and %s.' % (header_name, body_name)
+ print 'Concatenate the files to get a result usable with traceview.'
+ sys.exit(0)
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file