Merge "Update ElfWriterMclinker for klp"
diff --git a/Android.mk b/Android.mk
index 612fc40..4d9f622 100644
--- a/Android.mk
+++ b/Android.mk
@@ -55,12 +55,15 @@
rm -f $(TARGET_OUT_JAVA_LIBRARIES)/*.odex
rm -f $(TARGET_OUT_JAVA_LIBRARIES)/*.oat
rm -f $(TARGET_OUT_JAVA_LIBRARIES)/*.art
+ rm -f $(DEXPREOPT_PRODUCT_DIR_FULL_PATH)/$(DEXPREOPT_BOOT_JAR_DIR)/*.oat
+ rm -f $(DEXPREOPT_PRODUCT_DIR_FULL_PATH)/$(DEXPREOPT_BOOT_JAR_DIR)/*.art
rm -f $(TARGET_OUT_UNSTRIPPED)/system/framework/*.odex
rm -f $(TARGET_OUT_UNSTRIPPED)/system/framework/*.oat
rm -f $(TARGET_OUT_APPS)/*.odex
rm -f $(TARGET_OUT_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/javalib.odex
rm -f $(TARGET_OUT_INTERMEDIATES)/APPS/*_intermediates/*.odex
rm -rf /tmp/test-*/dalvik-cache/*@classes.dex
+ rm -rf /tmp/android-data/dalvik-cache/*@classes.dex
.PHONY: clean-oat-target
clean-oat-target:
@@ -71,9 +74,9 @@
adb shell rm $(ART_TEST_DIR)/*.odex
adb shell rm $(ART_TEST_DIR)/*.oat
adb shell rm $(ART_TEST_DIR)/*.art
- adb shell rm $(DALVIK_CACHE_DIR)/*.dex
- adb shell rm $(DALVIK_CACHE_DIR)/*.oat
- adb shell rm $(DALVIK_CACHE_DIR)/*.art
+ adb shell rm $(ART_DALVIK_CACHE_DIR)/*.dex
+ adb shell rm $(ART_DALVIK_CACHE_DIR)/*.oat
+ adb shell rm $(ART_DALVIK_CACHE_DIR)/*.art
adb shell rm $(DEXPREOPT_BOOT_JAR_DIR)/*.oat
adb shell rm $(DEXPREOPT_BOOT_JAR_DIR)/*.art
adb shell rm system/app/*.odex
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 30d7dcb..415d810 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -87,7 +87,12 @@
# Clang build.
# ART_TARGET_CLANG := true
-# ART_HOST_CLANG := true
+ifeq ($(HOST_OS),darwin)
+ART_HOST_CLANG := true
+endif
+
+# directory used for dalvik-cache on device
+ART_DALVIK_CACHE_DIR := /data/dalvik-cache
# directory used for gtests on device
ART_NATIVETEST_DIR := /data/nativetest/art
@@ -116,7 +121,7 @@
-Wall \
-Werror \
-Wextra \
- -Wstrict-aliasing=3 \
+ -Wstrict-aliasing \
-fstrict-aliasing
ifeq ($(ART_SMALL_MODE),true)
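Note on the warning change above: plain -Wstrict-aliasing is portable across GCC and Clang, whereas the =3 level is a GCC-ism; this is presumably relevant now that darwin host builds use Clang (see the ART_HOST_CLANG hunk), though the commit does not state the motivation. A minimal C++ illustration of the type punning this warning family targets (illustrative only, not part of the patch):

    #include <cstdint>

    float BitsToFloat(uint32_t bits) {
      // Reading a uint32_t object through a float lvalue violates strict aliasing;
      // under -fstrict-aliasing the compiler may assume the two never alias.
      return *reinterpret_cast<float*>(&bits);  // -Wstrict-aliasing can flag this
    }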
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 364a8bc..f5bb85a 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -141,25 +141,24 @@
CompilationUnit::~CompilationUnit() {
}
+// TODO: Add a cumulative version of logging, and combine with dex2oat --dump-timing
void CompilationUnit::StartTimingSplit(const char* label) {
- if (compiler_driver->GetDumpPasses()) {
+ if (enable_debug & (1 << kDebugTimings)) {
timings.StartSplit(label);
}
}
void CompilationUnit::NewTimingSplit(const char* label) {
- if (compiler_driver->GetDumpPasses()) {
+ if (enable_debug & (1 << kDebugTimings)) {
timings.NewSplit(label);
}
}
void CompilationUnit::EndTiming() {
- if (compiler_driver->GetDumpPasses()) {
+ if (enable_debug & (1 << kDebugTimings)) {
timings.EndSplit();
- if (enable_debug & (1 << kDebugTimings)) {
- LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
- LOG(INFO) << Dumpable<TimingLogger>(timings);
- }
+ LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
+ LOG(INFO) << Dumpable<TimingLogger>(timings);
}
}
@@ -317,9 +316,6 @@
}
cu.EndTiming();
- compiler.GetTimingsLogger().Start();
- compiler.GetTimingsLogger().AddLogger(cu.timings);
- compiler.GetTimingsLogger().End();
return result;
}
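With this change, per-method timing splits are gated on the compilation unit's debug bitmask instead of the driver's dump-passes flag. A one-line sketch of turning them on (how enable_debug is normally populated is an assumption here):

    cu.enable_debug |= (1 << kDebugTimings);  // EndTiming() then logs the TIMINGS splits per method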
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 8eb6684..8ce1206 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -18,12 +18,7 @@
#define ART_COMPILER_DEX_FRONTEND_H_
#include "dex_file.h"
-#include "dex_instruction.h"
-
-
-
-
-
+#include "invoke_type.h"
namespace llvm {
class Module;
@@ -82,9 +77,6 @@
kDebugTimings
};
-class DexFileToMethodInlinerMap;
-class CompilerDriver;
-
class LLVMInfo {
public:
LLVMInfo();
@@ -113,8 +105,8 @@
UniquePtr<art::llvm::IRBuilder> ir_builder_;
};
-struct CompilationUnit;
-struct BasicBlock;
+struct CompiledMethod;
+class CompilerDriver;
} // namespace art
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 29554c0..2ce7ecd 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -440,6 +440,20 @@
PushPointer(code_buffer_, &id);
data_lir = NEXT_LIR(data_lir);
}
+ // Push class literals.
+ data_lir = class_literal_list_;
+ while (data_lir != NULL) {
+ uint32_t target = data_lir->operands[0];
+ cu_->compiler_driver->AddClassPatch(cu_->dex_file,
+ cu_->class_def_idx,
+ cu_->method_idx,
+ target,
+ code_buffer_.size());
+ const DexFile::TypeId& id = cu_->dex_file->GetTypeId(target);
+ // Use a unique value based on the target to ensure code deduplication works.
+ PushPointer(code_buffer_, &id);
+ data_lir = NEXT_LIR(data_lir);
+ }
}
/* Write the switch tables to the output stream */
@@ -772,6 +786,7 @@
offset = AssignLiteralOffsetCommon(literal_list_, offset);
offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset);
offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset);
+ offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset);
return offset;
}
@@ -960,6 +975,7 @@
: Backend(arena),
literal_list_(NULL),
method_literal_list_(NULL),
+ class_literal_list_(NULL),
code_literal_list_(NULL),
first_fixup_(NULL),
cu_(cu),
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3bd0298..daf21df 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -19,6 +19,7 @@
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
+#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -883,13 +884,53 @@
// alloc will always check for resolution; do we also need to verify
// access because the verifier was unable to?
ThreadOffset func_offset(-1);
- if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
- cu_->method_idx, *cu_->dex_file, type_idx)) {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
+ const DexFile* dex_file = cu_->dex_file;
+ CompilerDriver* driver = cu_->compiler_driver;
+ if (driver->CanAccessInstantiableTypeWithoutChecks(
+ cu_->method_idx, *dex_file, type_idx)) {
+ bool is_type_initialized;
+ bool use_direct_type_ptr;
+ uintptr_t direct_type_ptr;
+ if (kEmbedClassInCode &&
+ driver->CanEmbedTypeInCode(*dex_file, type_idx,
+ &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
+ // The fast path.
+ if (!use_direct_type_ptr) {
+ // Use the literal pool and a PC-relative load from a data word.
+ LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0);
+ if (data_target == nullptr) {
+ data_target = AddWordData(&class_literal_list_, type_idx);
+ }
+ LIR* load_pc_rel = OpPcRelLoad(TargetReg(kArg0), data_target);
+ AppendLIR(load_pc_rel);
+ if (!is_type_initialized) {
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
+ CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
+ } else {
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
+ CallRuntimeHelperRegMethod(func_offset, TargetReg(kArg0), true);
+ }
+ } else {
+ // Use the direct pointer.
+ if (!is_type_initialized) {
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);
+ CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
+ } else {
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized);
+ CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
+ }
+ }
+ } else {
+ // The slow path.
+ DCHECK_EQ(func_offset.Int32Value(), -1);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
+ CallRuntimeHelperImmMethod(func_offset, type_idx, true);
+ }
+ DCHECK_NE(func_offset.Int32Value(), -1);
} else {
func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
+ CallRuntimeHelperImmMethod(func_offset, type_idx, true);
}
- CallRuntimeHelperImmMethod(func_offset, type_idx, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
}
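Condensed, the rewritten GenNewInstance path above picks among four entrypoints. A hedged C++ sketch of the selection (ChooseAllocEntrypoint is a hypothetical helper, not in the patch; its predicates come from CanAccessInstantiableTypeWithoutChecks and CanEmbedTypeInCode):

    ThreadOffset ChooseAllocEntrypoint(bool can_access, bool embeddable, bool is_type_initialized) {
      if (!can_access) {
        return QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);  // verifier couldn't prove access
      }
      if (!embeddable) {
        return QUICK_ENTRYPOINT_OFFSET(pAllocObject);  // slow path: resolve from type_idx
      }
      return is_type_initialized
          ? QUICK_ENTRYPOINT_OFFSET(pAllocObjectInitialized)  // skip resolution and init checks
          : QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved);    // skip the resolution check only
    }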
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index d942a24..f865207 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -142,6 +142,17 @@
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
+void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(helper_offset);
+ DCHECK_NE(TargetReg(kArg1), arg0);
+ if (TargetReg(kArg0) != arg0) {
+ OpRegCopy(TargetReg(kArg0), arg0);
+ }
+ LoadCurrMethodDirect(TargetReg(kArg1));
+ ClobberCallerSave();
+ CallHelper(r_tgt, helper_offset, safepoint_pc);
+}
+
void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
RegLocation arg1, bool safepoint_pc) {
int r_tgt = CallHelperSetup(helper_offset);
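The new CallRuntimeHelperRegMethod is consumed by the fast path in gen_common.cc above: the class pointer is already in kArg0, and the helper loads the current method into kArg1 before the downcall:

    // As invoked from GenNewInstance (see the gen_common.cc hunk).
    CallRuntimeHelperRegMethod(QUICK_ENTRYPOINT_OFFSET(pAllocObjectResolved),
                               TargetReg(kArg0), true);  // true = mark a safepoint PC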
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index c157327..f9d9e9e 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -529,6 +529,7 @@
bool safepoint_pc);
void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0,
bool safepoint_pc);
+ void CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset,
RegLocation arg0, RegLocation arg1,
bool safepoint_pc);
@@ -855,6 +856,7 @@
// TODO: add accessors for these.
LIR* literal_list_; // Constants.
LIR* method_literal_list_; // Method literals requiring patching.
+ LIR* class_literal_list_; // Class literals requiring patching.
LIR* code_literal_list_; // Code literals requiring patching.
LIR* first_fixup_; // Doubly-linked list of LIR nodes requiring fixups.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 714dc4c..f390b41 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -341,7 +341,7 @@
CompilerBackend compiler_backend, InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
bool image, DescriptorSet* image_classes, size_t thread_count,
- bool dump_stats, bool dump_passes, CumulativeLogger* timer)
+ bool dump_stats)
: verified_methods_data_(verified_methods_data),
method_inliner_map_(method_inliner_map),
compiler_backend_(compiler_backend),
@@ -356,8 +356,6 @@
start_ns_(0),
stats_(new AOTCompilationStats),
dump_stats_(dump_stats),
- dump_passes_(dump_passes),
- timings_logger_(timer),
compiler_library_(NULL),
compiler_(NULL),
compiler_context_(NULL),
@@ -441,6 +439,10 @@
MutexLock mu(self, compiled_methods_lock_);
STLDeleteElements(&methods_to_patch_);
}
+ {
+ MutexLock mu(self, compiled_methods_lock_);
+ STLDeleteElements(&classes_to_patch_);
+ }
CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key");
typedef void (*UninitCompilerContextFn)(CompilerDriver&);
UninitCompilerContextFn uninit_compiler_context;
@@ -908,6 +910,51 @@
return result;
}
+bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
+ bool* is_type_initialized, bool* use_direct_type_ptr,
+ uintptr_t* direct_type_ptr) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+ mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+ if (resolved_class == nullptr) {
+ return false;
+ }
+ const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+ if (compiling_boot) {
+ // boot -> boot class pointers.
+ // True if the class is in the image at boot compiling time.
+ const bool is_image_class = IsImage() && IsImageClass(
+ dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_));
+ // True if pc relative load works.
+ const bool support_boot_image_fixup = GetSupportBootImageFixup();
+ if (is_image_class && support_boot_image_fixup) {
+ *is_type_initialized = resolved_class->IsInitialized();
+ *use_direct_type_ptr = false;
+ *direct_type_ptr = 0;
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ // True if the class is in the image at app compiling time.
+ const bool class_in_image =
+ Runtime::Current()->GetHeap()->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
+ if (class_in_image) {
+ // boot -> app class pointers.
+ *is_type_initialized = resolved_class->IsInitialized();
+ *use_direct_type_ptr = true;
+ *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
+ return true;
+ } else {
+ // app -> app class pointers.
+ // Give up because the app does not have an image and the class
+ // isn't created at compile time. TODO: implement this
+ // if/when each app gets an image.
+ return false;
+ }
+ }
+}
+
static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
SirtRef<mirror::DexCache>& dex_cache,
const DexCompilationUnit* mUnit)
@@ -960,21 +1007,8 @@
ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
if (referrer_class != NULL) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- bool access_ok = referrer_class->CanAccess(fields_class) &&
- referrer_class->CanAccessMember(fields_class,
- resolved_field->GetAccessFlags());
- if (!access_ok) {
- // The referring class can't access the resolved field, this may occur as a result of a
- // protected field being made public by a sub-class. Resort to the dex file to determine
- // the correct class for the access check.
- const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile();
- mirror::Class* dex_fields_class = mUnit->GetClassLinker()->ResolveType(dex_file,
- dex_file.GetFieldId(field_idx).class_idx_,
- referrer_class);
- access_ok = referrer_class->CanAccess(dex_fields_class) &&
- referrer_class->CanAccessMember(dex_fields_class,
- resolved_field->GetAccessFlags());
- }
+ bool access_ok =
+ referrer_class->CanAccessResolvedField<false>(fields_class, resolved_field, field_idx);
bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() &&
fields_class != referrer_class;
if (access_ok && !is_write_to_final_from_wrong_class) {
@@ -1020,23 +1054,8 @@
stats_->ResolvedLocalStaticField();
return true; // fast path
} else {
- bool access_ok = referrer_class->CanAccess(fields_class) &&
- referrer_class->CanAccessMember(fields_class,
- resolved_field->GetAccessFlags());
- if (!access_ok) {
- // The referring class can't access the resolved field, this may occur as a result of a
- // protected field being made public by a sub-class. Resort to the dex file to determine
- // the correct class for the access check. Don't change the field's class as that is
- // used to identify the SSB.
- const DexFile& dex_file = *referrer_class->GetDexCache()->GetDexFile();
- mirror::Class* dex_fields_class =
- mUnit->GetClassLinker()->ResolveType(dex_file,
- dex_file.GetFieldId(field_idx).class_idx_,
- referrer_class);
- access_ok = referrer_class->CanAccess(dex_fields_class) &&
- referrer_class->CanAccessMember(dex_fields_class,
- resolved_field->GetAccessFlags());
- }
+ bool access_ok =
+ referrer_class->CanAccessResolvedField<false>(fields_class, resolved_field, field_idx);
bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal();
if (access_ok && !is_write_to_final_from_wrong_class) {
// We have the resolved field, we must make it into a index for the referrer
@@ -1219,20 +1238,8 @@
bool icce = resolved_method->CheckIncompatibleClassChange(*invoke_type);
if (referrer_class != NULL && !icce) {
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- if (!referrer_class->CanAccess(methods_class) ||
- !referrer_class->CanAccessMember(methods_class,
- resolved_method->GetAccessFlags())) {
- // The referring class can't access the resolved method, this may occur as a result of a
- // protected method being made public by implementing an interface that re-declares the
- // method public. Resort to the dex file to determine the correct class for the access
- // check.
- uint16_t class_idx =
- target_method->dex_file->GetMethodId(target_method->dex_method_index).class_idx_;
- methods_class = mUnit->GetClassLinker()->ResolveType(*target_method->dex_file,
- class_idx, referrer_class);
- }
- if (referrer_class->CanAccess(methods_class) &&
- referrer_class->CanAccessMember(methods_class, resolved_method->GetAccessFlags())) {
+ if (referrer_class->CanAccessResolvedMethod<false>(methods_class, resolved_method,
+ target_method->dex_method_index)) {
const bool enableFinalBasedSharpening = enable_devirtualization;
// Sharpen a virtual call into a direct call when the target is known not to have been
// overridden (ie is final).
@@ -1333,13 +1340,13 @@
InvokeType target_invoke_type,
size_t literal_offset) {
MutexLock mu(Thread::Current(), compiled_methods_lock_);
- code_to_patch_.push_back(new PatchInformation(dex_file,
- referrer_class_def_idx,
- referrer_method_idx,
- referrer_invoke_type,
- target_method_idx,
- target_invoke_type,
- literal_offset));
+ code_to_patch_.push_back(new CallPatchInformation(dex_file,
+ referrer_class_def_idx,
+ referrer_method_idx,
+ referrer_invoke_type,
+ target_method_idx,
+ target_invoke_type,
+ literal_offset));
}
void CompilerDriver::AddMethodPatch(const DexFile* dex_file,
uint16_t referrer_class_def_idx,
@@ -1349,13 +1356,25 @@
InvokeType target_invoke_type,
size_t literal_offset) {
MutexLock mu(Thread::Current(), compiled_methods_lock_);
- methods_to_patch_.push_back(new PatchInformation(dex_file,
- referrer_class_def_idx,
- referrer_method_idx,
- referrer_invoke_type,
- target_method_idx,
- target_invoke_type,
- literal_offset));
+ methods_to_patch_.push_back(new CallPatchInformation(dex_file,
+ referrer_class_def_idx,
+ referrer_method_idx,
+ referrer_invoke_type,
+ target_method_idx,
+ target_invoke_type,
+ literal_offset));
+}
+void CompilerDriver::AddClassPatch(const DexFile* dex_file,
+ uint16_t referrer_class_def_idx,
+ uint32_t referrer_method_idx,
+ uint32_t target_type_idx,
+ size_t literal_offset) {
+ MutexLock mu(Thread::Current(), compiled_methods_lock_);
+ classes_to_patch_.push_back(new TypePatchInformation(dex_file,
+ referrer_class_def_idx,
+ referrer_method_idx,
+ target_type_idx,
+ literal_offset));
}
class ParallelCompilationManager {
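Taken together, CanEmbedTypeInCode and AddClassPatch give backends this contract; a sketch mirroring the gen_common.cc hunk (variable names as used there):

    bool is_type_initialized, use_direct_type_ptr;
    uintptr_t direct_type_ptr;
    if (driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized,
                                   &use_direct_type_ptr, &direct_type_ptr)) {
      if (use_direct_type_ptr) {
        // App compile, class in the boot image: embed direct_type_ptr as an immediate.
      } else {
        // Boot compile: emit a class literal and record it with AddClassPatch so
        // ImageWriter can later rewrite it to the image address of the class.
      }
    }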
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index aabdf2f..eef94a1 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -22,7 +22,6 @@
#include <vector>
#include "base/mutex.h"
-#include "base/timing_logger.h"
#include "class_reference.h"
#include "compiled_class.h"
#include "compiled_method.h"
@@ -98,8 +97,7 @@
CompilerBackend compiler_backend, InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
bool image, DescriptorSet* image_classes,
- size_t thread_count, bool dump_stats, bool dump_passes,
- CumulativeLogger* timer);
+ size_t thread_count, bool dump_stats);
~CompilerDriver();
@@ -191,6 +189,10 @@
uint32_t type_idx)
LOCKS_EXCLUDED(Locks::mutator_lock_);
+ bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
+ bool* is_type_initialized, bool* use_direct_type_ptr,
+ uintptr_t* direct_type_ptr);
+
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
int* field_offset, bool* is_volatile)
@@ -230,6 +232,12 @@
InvokeType target_invoke_type,
size_t literal_offset)
LOCKS_EXCLUDED(compiled_methods_lock_);
+ void AddClassPatch(const DexFile* dex_file,
+ uint16_t referrer_class_def_idx,
+ uint32_t referrer_method_idx,
+ uint32_t target_method_idx,
+ size_t literal_offset)
+ LOCKS_EXCLUDED(compiled_methods_lock_);
void SetBitcodeFileName(std::string const& filename);
@@ -269,14 +277,8 @@
return thread_count_;
}
- bool GetDumpPasses() const {
- return dump_passes_;
- }
-
- CumulativeLogger& GetTimingsLogger() const {
- return *timings_logger_;
- }
-
+ class CallPatchInformation;
+ class TypePatchInformation;
class PatchInformation {
public:
const DexFile& GetDexFile() const {
@@ -288,6 +290,48 @@
uint32_t GetReferrerMethodIdx() const {
return referrer_method_idx_;
}
+ size_t GetLiteralOffset() const {
+ return literal_offset_;
+ }
+
+ virtual bool IsCall() const {
+ return false;
+ }
+ virtual bool IsType() const {
+ return false;
+ }
+ virtual const CallPatchInformation* AsCall() const {
+ LOG(FATAL) << "Unreachable";
+ return nullptr;
+ }
+ virtual const TypePatchInformation* AsType() const {
+ LOG(FATAL) << "Unreachable";
+ return nullptr;
+ }
+
+ protected:
+ PatchInformation(const DexFile* dex_file,
+ uint16_t referrer_class_def_idx,
+ uint32_t referrer_method_idx,
+ size_t literal_offset)
+ : dex_file_(dex_file),
+ referrer_class_def_idx_(referrer_class_def_idx),
+ referrer_method_idx_(referrer_method_idx),
+ literal_offset_(literal_offset) {
+ CHECK(dex_file_ != NULL);
+ }
+ virtual ~PatchInformation() {}
+
+ const DexFile* const dex_file_;
+ const uint16_t referrer_class_def_idx_;
+ const uint32_t referrer_method_idx_;
+ const size_t literal_offset_;
+
+ friend class CompilerDriver;
+ };
+
+ class CallPatchInformation : public PatchInformation {
+ public:
InvokeType GetReferrerInvokeType() const {
return referrer_invoke_type_;
}
@@ -297,46 +341,76 @@
InvokeType GetTargetInvokeType() const {
return target_invoke_type_;
}
- size_t GetLiteralOffset() const {;
- return literal_offset_;
+
+ const CallPatchInformation* AsCall() const {
+ return this;
+ }
+ bool IsCall() const {
+ return true;
}
private:
- PatchInformation(const DexFile* dex_file,
- uint16_t referrer_class_def_idx,
- uint32_t referrer_method_idx,
- InvokeType referrer_invoke_type,
- uint32_t target_method_idx,
- InvokeType target_invoke_type,
- size_t literal_offset)
- : dex_file_(dex_file),
- referrer_class_def_idx_(referrer_class_def_idx),
- referrer_method_idx_(referrer_method_idx),
- referrer_invoke_type_(referrer_invoke_type),
- target_method_idx_(target_method_idx),
- target_invoke_type_(target_invoke_type),
- literal_offset_(literal_offset) {
- CHECK(dex_file_ != NULL);
+ CallPatchInformation(const DexFile* dex_file,
+ uint16_t referrer_class_def_idx,
+ uint32_t referrer_method_idx,
+ InvokeType referrer_invoke_type,
+ uint32_t target_method_idx,
+ InvokeType target_invoke_type,
+ size_t literal_offset)
+ : PatchInformation(dex_file, referrer_class_def_idx,
+ referrer_method_idx, literal_offset),
+ referrer_invoke_type_(referrer_invoke_type),
+ target_method_idx_(target_method_idx),
+ target_invoke_type_(target_invoke_type) {
}
- const DexFile* const dex_file_;
- const uint16_t referrer_class_def_idx_;
- const uint32_t referrer_method_idx_;
const InvokeType referrer_invoke_type_;
const uint32_t target_method_idx_;
const InvokeType target_invoke_type_;
- const size_t literal_offset_;
friend class CompilerDriver;
- DISALLOW_COPY_AND_ASSIGN(PatchInformation);
+ DISALLOW_COPY_AND_ASSIGN(CallPatchInformation);
};
- const std::vector<const PatchInformation*>& GetCodeToPatch() const {
+ class TypePatchInformation : public PatchInformation {
+ public:
+ uint32_t GetTargetTypeIdx() const {
+ return target_type_idx_;
+ }
+
+ bool IsType() const {
+ return true;
+ }
+ const TypePatchInformation* AsType() const {
+ return this;
+ }
+
+ private:
+ TypePatchInformation(const DexFile* dex_file,
+ uint16_t referrer_class_def_idx,
+ uint32_t referrer_method_idx,
+ uint32_t target_type_idx,
+ size_t literal_offset)
+ : PatchInformation(dex_file, referrer_class_def_idx,
+ referrer_method_idx, literal_offset),
+ target_type_idx_(target_type_idx) {
+ }
+
+ const uint32_t target_type_idx_;
+
+ friend class CompilerDriver;
+ DISALLOW_COPY_AND_ASSIGN(TypePatchInformation);
+ };
+
+ const std::vector<const CallPatchInformation*>& GetCodeToPatch() const {
return code_to_patch_;
}
- const std::vector<const PatchInformation*>& GetMethodsToPatch() const {
+ const std::vector<const CallPatchInformation*>& GetMethodsToPatch() const {
return methods_to_patch_;
}
+ const std::vector<const TypePatchInformation*>& GetClassesToPatch() const {
+ return classes_to_patch_;
+ }
// Checks if class specified by type_idx is one of the image_classes_
bool IsImageClass(const char* descriptor) const;
@@ -408,8 +482,9 @@
static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- std::vector<const PatchInformation*> code_to_patch_;
- std::vector<const PatchInformation*> methods_to_patch_;
+ std::vector<const CallPatchInformation*> code_to_patch_;
+ std::vector<const CallPatchInformation*> methods_to_patch_;
+ std::vector<const TypePatchInformation*> classes_to_patch_;
VerifiedMethodsData* verified_methods_data_;
DexFileToMethodInlinerMap* method_inliner_map_;
@@ -446,9 +521,6 @@
UniquePtr<AOTCompilationStats> stats_;
bool dump_stats_;
- const bool dump_passes_;
-
- CumulativeLogger* const timings_logger_;
typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
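The PatchInformation split supports checked downcasts via IsCall()/IsType(); a hypothetical consumer (Describe is not in the patch) dispatching the same way ImageWriter::SetPatchLocation now does:

    void Describe(const CompilerDriver::PatchInformation* patch) {
      if (patch->IsCall()) {
        const CompilerDriver::CallPatchInformation* call = patch->AsCall();
        // ... call->GetTargetMethodIdx(), call->GetTargetInvokeType() ...
      } else if (patch->IsType()) {
        const CompilerDriver::TypePatchInformation* type = patch->AsType();
        // ... type->GetTargetTypeIdx() ...
      }
    }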
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 0ef4185..dbc986a 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -44,7 +44,7 @@
size_t& oat_data_offset);
// Returns the runtime oat_data address for an opened ElfFile.
- static llvm::ELF::Elf32_Addr GetOatDataAddress(ElfFile* elf_file);
+ static ::llvm::ELF::Elf32_Addr GetOatDataAddress(ElfFile* elf_file);
protected:
ElfWriter(const CompilerDriver& driver, File* elf_file);
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
index fa7e9ca..8ee7231 100644
--- a/compiler/elf_writer_mclinker.h
+++ b/compiler/elf_writer_mclinker.h
@@ -69,7 +69,7 @@
void FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t FixupCompiledCodeOffset(ElfFile& elf_file,
- llvm::ELF::Elf32_Addr oatdata_address,
+ ::llvm::ELF::Elf32_Addr oatdata_address,
const CompiledCode& compiled_code);
#endif
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 556dec2..09bb70c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -734,7 +734,7 @@
}
}
-static ArtMethod* GetTargetMethod(const CompilerDriver::PatchInformation* patch)
+static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
@@ -757,15 +757,34 @@
return method;
}
+static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+ SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(patch->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ Class* klass = class_linker->ResolveType(patch->GetDexFile(),
+ patch->GetTargetTypeIdx(),
+ dex_cache,
+ class_loader);
+ CHECK(klass != NULL)
+ << patch->GetDexFile().GetLocation() << " " << patch->GetTargetTypeIdx();
+ CHECK(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx()) == klass)
+ << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
+ << PrettyClass(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx())) << " "
+ << PrettyClass(klass);
+ return klass;
+}
+
void ImageWriter::PatchOatCodeAndMethods() {
Thread* self = Thread::Current();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
- typedef std::vector<const CompilerDriver::PatchInformation*> Patches;
- const Patches& code_to_patch = compiler_driver_.GetCodeToPatch();
+ typedef std::vector<const CompilerDriver::CallPatchInformation*> CallPatches;
+ const CallPatches& code_to_patch = compiler_driver_.GetCodeToPatch();
for (size_t i = 0; i < code_to_patch.size(); i++) {
- const CompilerDriver::PatchInformation* patch = code_to_patch[i];
+ const CompilerDriver::CallPatchInformation* patch = code_to_patch[i];
ArtMethod* target = GetTargetMethod(patch);
uint32_t code = reinterpret_cast<uint32_t>(class_linker->GetOatCodeFor(target));
uint32_t code_base = reinterpret_cast<uint32_t>(&oat_file_->GetOatHeader());
@@ -773,13 +792,21 @@
SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetOatAddress(code_offset)));
}
- const Patches& methods_to_patch = compiler_driver_.GetMethodsToPatch();
+ const CallPatches& methods_to_patch = compiler_driver_.GetMethodsToPatch();
for (size_t i = 0; i < methods_to_patch.size(); i++) {
- const CompilerDriver::PatchInformation* patch = methods_to_patch[i];
+ const CompilerDriver::CallPatchInformation* patch = methods_to_patch[i];
ArtMethod* target = GetTargetMethod(patch);
SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetImageAddress(target)));
}
+ const std::vector<const CompilerDriver::TypePatchInformation*>& classes_to_patch =
+ compiler_driver_.GetClassesToPatch();
+ for (size_t i = 0; i < classes_to_patch.size(); i++) {
+ const CompilerDriver::TypePatchInformation* patch = classes_to_patch[i];
+ Class* target = GetTargetType(patch);
+ SetPatchLocation(patch, reinterpret_cast<uint32_t>(GetImageAddress(target)));
+ }
+
// Update the image header with the new checksum after patching
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
image_header->SetOatChecksum(oat_file_->GetOatHeader().GetChecksum());
@@ -796,13 +823,26 @@
uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uint32_t>(oat_code) & ~0x1);
uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset());
if (kIsDebugBuild) {
- const DexFile::MethodId& id = patch->GetDexFile().GetMethodId(patch->GetTargetMethodIdx());
- uint32_t expected = reinterpret_cast<uint32_t>(&id);
- uint32_t actual = *patch_location;
- CHECK(actual == expected || actual == value) << std::hex
- << "actual=" << actual
- << "expected=" << expected
- << "value=" << value;
+ if (patch->IsCall()) {
+ const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall();
+ const DexFile::MethodId& id = cpatch->GetDexFile().GetMethodId(cpatch->GetTargetMethodIdx());
+ uint32_t expected = reinterpret_cast<uint32_t>(&id);
+ uint32_t actual = *patch_location;
+ CHECK(actual == expected || actual == value) << std::hex
+ << "actual=" << actual
+ << "expected=" << expected
+ << "value=" << value;
+ }
+ if (patch->IsType()) {
+ const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
+ const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
+ uint32_t expected = reinterpret_cast<uint32_t>(&id);
+ uint32_t actual = *patch_location;
+ CHECK(actual == expected || actual == value) << std::hex
+ << "actual=" << actual
+ << "expected=" << expected
+ << "value=" << value;
+ }
}
*patch_location = value;
oat_header.UpdateChecksum(patch_location, sizeof(value));
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 12d8212..2434262 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -82,11 +82,10 @@
verified_methods_data_.reset(new VerifiedMethodsData);
method_inliner_map_.reset(compiler_backend == kQuick ? new DexFileToMethodInlinerMap : nullptr);
callbacks_.Reset(verified_methods_data_.get(), method_inliner_map_.get());
- CumulativeLogger timer("Compilation times");
compiler_driver_.reset(new CompilerDriver(verified_methods_data_.get(),
method_inliner_map_.get(),
compiler_backend, insn_set,
- insn_features, false, NULL, 2, true, true, &timer));
+ insn_features, false, NULL, 2, true));
jobject class_loader = NULL;
if (kCompile) {
TimingLogger timings("OatTest::WriteRead", false, false);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 20fafe2..26fac23 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -249,9 +249,7 @@
bool image,
UniquePtr<CompilerDriver::DescriptorSet>& image_classes,
bool dump_stats,
- bool dump_passes,
- TimingLogger& timings,
- CumulativeLogger& compiler_phases_timings) {
+ TimingLogger& timings) {
// SirtRef and ClassLoader creation needs to come after Runtime::Create
jobject class_loader = NULL;
Thread* self = Thread::Current();
@@ -278,9 +276,7 @@
image,
image_classes.release(),
thread_count_,
- dump_stats,
- dump_passes,
- &compiler_phases_timings));
+ dump_stats));
if (compiler_backend_ == kPortable) {
driver->SetBitcodeFileName(bitcode_filename);
@@ -654,7 +650,6 @@
static int dex2oat(int argc, char** argv) {
TimingLogger timings("compiler", false, false);
- CumulativeLogger compiler_phases_timings("compilation times");
InitLogging(argv);
@@ -708,7 +703,6 @@
bool is_host = false;
bool dump_stats = false;
bool dump_timing = false;
- bool dump_passes = false;
bool dump_slow_timing = kIsDebugBuild;
bool watch_dog_enabled = !kIsTargetBuild;
@@ -802,8 +796,6 @@
runtime_args.push_back(argv[i]);
} else if (option == "--dump-timing") {
dump_timing = true;
- } else if (option == "--dump-passes") {
- dump_passes = true;
} else if (option == "--dump-stats") {
dump_stats = true;
} else {
@@ -1075,9 +1067,7 @@
image,
image_classes,
dump_stats,
- dump_passes,
- timings,
- compiler_phases_timings));
+ timings));
if (compiler.get() == NULL) {
LOG(ERROR) << "Failed to create oat file: " << oat_location;
@@ -1153,9 +1143,6 @@
if (dump_timing || (dump_slow_timing && timings.GetTotalNs() > MsToNs(1000))) {
LOG(INFO) << Dumpable<TimingLogger>(timings);
}
- if (dump_passes) {
- LOG(INFO) << Dumpable<CumulativeLogger>(compiler.get()->GetTimingsLogger());
- }
return EXIT_SUCCESS;
}
@@ -1198,9 +1185,6 @@
if (dump_timing || (dump_slow_timing && timings.GetTotalNs() > MsToNs(1000))) {
LOG(INFO) << Dumpable<TimingLogger>(timings);
}
- if (dump_passes) {
- LOG(INFO) << Dumpable<CumulativeLogger>(compiler_phases_timings);
- }
// Everything was successfully written, do an explicit exit here to avoid running Runtime
// destructors that take time (bug 10645725) unless we're a debug build or running on valgrind.
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index d32f998..4fdcb35 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -17,6 +17,10 @@
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
// Called by managed code to allocate an object.
TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+// Called by managed code to allocate an object of a resolved class.
+TWO_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+// Called by managed code to allocate an object of an initialized class.
+TWO_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
// Called by managed code to allocate an object when the caller doesn't know whether it has access
// to the created type.
TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
diff --git a/runtime/arch/quick_alloc_entrypoints.cc b/runtime/arch/quick_alloc_entrypoints.cc
index 457c73a..0fad822 100644
--- a/runtime/arch/quick_alloc_entrypoints.cc
+++ b/runtime/arch/quick_alloc_entrypoints.cc
@@ -21,12 +21,16 @@
extern "C" void* art_quick_alloc_array##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_alloc_object_resolved##suffix(void* klass, void* method); \
+extern "C" void* art_quick_alloc_object_initialized##suffix(void* klass, void* method); \
extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, void* method); \
extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, void* method); \
+extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(void* klass, void* method); \
+extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(void* klass, void* method); \
extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, void* method); \
extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \
@@ -35,6 +39,8 @@
qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
+ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
+ qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
@@ -42,6 +48,8 @@
qpoints->pAllocArray = art_quick_alloc_array##suffix; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
qpoints->pAllocObject = art_quick_alloc_object##suffix; \
+ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
+ qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
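For one allocator these macros mechanically declare and wire the two new object entrypoints; an abridged expansion, assuming the _dlmalloc suffix used by the x86 entrypoints below:

    extern "C" void* art_quick_alloc_object_resolved_dlmalloc(void* klass, void* method);
    extern "C" void* art_quick_alloc_object_initialized_dlmalloc(void* klass, void* method);
    // ... and in the non-instrumented selection branch:
    qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved_dlmalloc;
    qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized_dlmalloc;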
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 3701b22..9ec1995 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -31,11 +31,26 @@
// are mangled with an extra underscore prefix. The use of $x for arguments
// means that literals need to be represented with $$x in macros.
#define SYMBOL(name) _ ## name
+ #define PLT_SYMBOL(name) _ ## name
#define VAR(name,index) SYMBOL($index)
+ #define PLT_VAR(name, index) SYMBOL($index)
#define REG_VAR(name,index) %$index
#define CALL_MACRO(name,index) $index
#define LITERAL(value) $value
#define MACRO_LITERAL(value) $$value
+
+ // Mac OS's assembler doesn't like cfi_* directives
+ #define CFI_STARTPROC
+ #define CFI_ENDPROC
+ #define CFI_ADJUST_CFA_OFFSET(size)
+ #define CFI_DEF_CFA(reg,size)
+ #define CFI_DEF_CFA_REGISTER(reg)
+ #define CFI_RESTORE(reg)
+ #define CFI_REL_OFFSET(reg,size)
+
+ // Mac OS's assembler doesn't support certain directives
+ #define FUNCTION_TYPE(name)
+ #define SIZE(name)
#else
// Regular gas(1) lets you name macro parameters.
#define MACRO0(macro_name) .macro macro_name
@@ -51,11 +66,25 @@
// special character meaning care needs to be taken when passing registers as macro arguments.
.altmacro
#define SYMBOL(name) name
+ #define PLT_SYMBOL(name) name@PLT
#define VAR(name,index) name&
+ #define PLT_VAR(name, index) name&@PLT
#define REG_VAR(name,index) %name
#define CALL_MACRO(name,index) name&
#define LITERAL(value) $value
#define MACRO_LITERAL(value) $value
+
+ // CFI support
+ #define CFI_STARTPROC .cfi_startproc
+ #define CFI_ENDPROC .cfi_endproc
+ #define CFI_ADJUST_CFA_OFFSET(size) .cfi_adjust_cfa_offset size
+ #define CFI_DEF_CFA(reg,size) .cfi_def_cfa reg,size
+ #define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
+ #define CFI_RESTORE(reg) .cfi_restore reg
+ #define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
+
+ #define FUNCTION_TYPE(name) .type name&, @function
+ #define SIZE(name) .size name, .-name
#endif
/* Cache alignment for function entry */
@@ -64,40 +93,40 @@
END_MACRO
MACRO1(DEFINE_FUNCTION, c_name)
- .type VAR(c_name, 0), @function
+ FUNCTION_TYPE(\c_name)
.globl VAR(c_name, 0)
ALIGN_FUNCTION_ENTRY
VAR(c_name, 0):
- .cfi_startproc
+ CFI_STARTPROC
END_MACRO
MACRO1(END_FUNCTION, c_name)
- .cfi_endproc
- .size \c_name, .-\c_name
+ CFI_ENDPROC
+ SIZE(\c_name)
END_MACRO
MACRO1(PUSH, reg)
pushl REG_VAR(reg, 0)
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset REG_VAR(reg, 0), 0
+ CFI_ADJUST_CFA_OFFSET(4)
+ CFI_REL_OFFSET(REG_VAR(reg, 0), 0)
END_MACRO
MACRO1(POP, reg)
popl REG_VAR(reg,0)
- .cfi_adjust_cfa_offset -4
- .cfi_restore REG_VAR(reg,0)
+ CFI_ADJUST_CFA_OFFSET(-4)
+ CFI_RESTORE(REG_VAR(reg,0))
END_MACRO
MACRO1(UNIMPLEMENTED,name)
- .type VAR(name, 0), @function
+ FUNCTION_TYPE(\name)
.globl VAR(name, 0)
ALIGN_FUNCTION_ENTRY
VAR(name, 0):
- .cfi_startproc
+ CFI_STARTPROC
int3
int3
- .cfi_endproc
- .size \name, .-\name
+ CFI_ENDPROC
+ SIZE(\name)
END_MACRO
MACRO0(SETUP_GOT_NOSAVE)
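The effect of the macro indirection above: on Linux/ELF, PUSH ebp now expands (mechanically, from the definitions) to

    pushl %ebp
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset %ebp, 0

while on Mac OS the CFI_* macros expand to nothing, leaving only the pushl, so the same source assembles under both toolchains.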
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index e394819..72047d5 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -21,14 +21,14 @@
*/
DEFINE_FUNCTION art_jni_dlsym_lookup_stub
subl LITERAL(4), %esp // align stack
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT // pushes ebx
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
- call SYMBOL(artFindNativeMethod)@PLT // (Thread*)
+ CFI_ADJUST_CFA_OFFSET(4)
+ call PLT_SYMBOL(artFindNativeMethod) // (Thread*)
UNDO_SETUP_GOT
addl LITERAL(8), %esp // restore the stack
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
cmpl LITERAL(0), %eax // check if returned method code is null
je no_native_code_found // if null, jump to return to handle
jmp *%eax // otherwise, tail call to intended method
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index a1f6b2d..48de7c1 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -31,7 +31,7 @@
PUSH ebp // save ebp
PUSH ebx // save ebx
mov %esp, %ebp // copy value of stack pointer into base pointer
- .cfi_def_cfa_register ebp
+ CFI_DEF_CFA_REGISTER(ebp)
mov 20(%ebp), %ebx // get arg array size
addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
@@ -42,7 +42,7 @@
pushl 20(%ebp) // push size of region to memcpy
pushl 16(%ebp) // push arg array as source of memcpy
pushl %eax // push stack pointer as destination of memcpy
- call SYMBOL(memcpy)@PLT // (void*, const void*, size_t)
+ call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
addl LITERAL(12), %esp // pop arguments to memcpy
mov 12(%ebp), %eax // move method pointer into eax
mov %eax, (%esp) // push method pointer onto stack
@@ -69,7 +69,7 @@
DEFINE_FUNCTION art_portable_proxy_invoke_handler
PUSH ebp // Set up frame.
movl %esp, %ebp
- .cfi_def_cfa_register %ebp
+ CFI_DEF_CFA_REGISTER(%ebp)
subl LITERAL(4), %esp // Align stack
SETUP_GOT // pushes ebx
leal 8(%ebp), %edx // %edx = ArtMethod** called_addr
@@ -79,11 +79,11 @@
pushl %fs:THREAD_SELF_OFFSET // Pass thread.
pushl %ecx // Pass receiver.
pushl %eax // Pass called.
- call SYMBOL(artPortableProxyInvokeHandler)@PLT // (called, receiver, Thread*, &called)
+ call PLT_SYMBOL(artPortableProxyInvokeHandler) // (called, receiver, Thread*, &called)
UNDO_SETUP_GOT
leave
- .cfi_restore %ebp
- .cfi_def_cfa %esp, 4
+ CFI_RESTORE(%ebp)
+ CFI_DEF_CFA(%esp, 4)
movd %eax, %xmm0 // Place return value also into floating point return value.
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
@@ -93,7 +93,7 @@
DEFINE_FUNCTION art_portable_resolution_trampoline
PUSH ebp // Set up frame.
movl %esp, %ebp
- .cfi_def_cfa_register %ebp
+ CFI_DEF_CFA_REGISTER(%ebp)
subl LITERAL(4), %esp // Align stack
SETUP_GOT // pushes ebx
leal 8(%ebp), %edx // %edx = ArtMethod** called_addr
@@ -103,11 +103,11 @@
pushl %fs:THREAD_SELF_OFFSET // Pass thread.
pushl %ecx // Pass receiver.
pushl %eax // Pass called.
- call SYMBOL(artPortableResolutionTrampoline)@PLT // (called, receiver, Thread*, &called)
+ call PLT_SYMBOL(artPortableResolutionTrampoline) // (called, receiver, Thread*, &called)
UNDO_SETUP_GOT
leave
- .cfi_restore %ebp
- .cfi_def_cfa %esp, 4
+ CFI_RESTORE(%ebp)
+ CFI_DEF_CFA(%esp, 4)
testl %eax, %eax
jz resolve_fail
jmp * %eax
@@ -118,7 +118,7 @@
DEFINE_FUNCTION art_portable_to_interpreter_bridge
PUSH ebp // Set up frame.
movl %esp, %ebp
- .cfi_def_cfa_register %ebp
+ CFI_DEF_CFA_REGISTER(%ebp)
subl LITERAL(8), %esp // Align stack
SETUP_GOT
leal 8(%ebp), %edx // %edx = ArtMethod** called_addr
@@ -126,10 +126,10 @@
pushl %edx // Pass called_addr.
pushl %fs:THREAD_SELF_OFFSET // Pass thread.
pushl %eax // Pass called.
- call SYMBOL(artPortableToInterpreterBridge)@PLT // (called, Thread*, &called)
+ call PLT_SYMBOL(artPortableToInterpreterBridge) // (called, Thread*, &called)
UNDO_SETUP_GOT
leave
- .cfi_restore %ebp
- .cfi_def_cfa %esp, 4
+ CFI_RESTORE(%ebp)
+ CFI_DEF_CFA(%esp, 4)
ret
END_FUNCTION art_portable_to_interpreter_bridge
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 69738ba..48c7d8d 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -16,8 +16,6 @@
#include "asm_support_x86.S"
-#include "arch/quick_alloc_entrypoints.S"
-
// For x86, the CFA is esp+4, the address above the pushed return address on the stack.
/*
@@ -29,7 +27,7 @@
PUSH esi
PUSH ebp
subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
- .cfi_adjust_cfa_offset 16
+ CFI_ADJUST_CFA_OFFSET(16)
END_MACRO
/*
@@ -41,7 +39,7 @@
PUSH esi
PUSH ebp
subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
- .cfi_adjust_cfa_offset 16
+ CFI_ADJUST_CFA_OFFSET(16)
END_MACRO
MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
@@ -49,7 +47,7 @@
POP ebp // Restore callee saves (ebx is saved/restored by the upcall)
POP esi
POP edi
- .cfi_adjust_cfa_offset -28
+ CFI_ADJUST_CFA_OFFSET(-28)
END_MACRO
/*
@@ -68,7 +66,7 @@
MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
addl MACRO_LITERAL(4), %esp // Remove padding
- .cfi_adjust_cfa_offset -4
+ CFI_ADJUST_CFA_OFFSET(-4)
POP ecx // Restore args except eax
POP edx
POP ebx
@@ -86,12 +84,12 @@
mov %esp, %ecx
// Outgoing argument set up
subl MACRO_LITERAL(8), %esp // Alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH ecx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call SYMBOL(artDeliverPendingExceptionFromCode)@PLT // artDeliverPendingExceptionFromCode(Thread*, SP)
+ call PLT_SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
int3 // unreached
END_MACRO
@@ -101,12 +99,12 @@
mov %esp, %ecx
// Outgoing argument set up
subl MACRO_LITERAL(8), %esp // alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH ecx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call VAR(cxx_name, 1)@PLT // cxx_name(Thread*, SP)
+ call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
int3 // unreached
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -119,10 +117,10 @@
PUSH eax // alignment padding
PUSH ecx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call VAR(cxx_name, 1)@PLT // cxx_name(arg1, Thread*, SP)
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
int3 // unreached
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -134,11 +132,11 @@
// Outgoing argument set up
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call VAR(cxx_name, 1)@PLT // cxx_name(arg1, arg2, Thread*, SP)
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
int3 // unreached
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -206,18 +204,18 @@
// Outgoing argument set up
SETUP_GOT_NOSAVE
subl MACRO_LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
pushl 32(%edx) // pass caller Method*
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 1)@PLT // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
movl %edx, %edi // save code pointer in EDI
addl MACRO_LITERAL(36), %esp // Pop arguments skip eax
- .cfi_adjust_cfa_offset -36
+ CFI_ADJUST_CFA_OFFSET(-36)
POP ecx // Restore args except eax
POP edx
POP ebx
@@ -231,7 +229,7 @@
ret
1:
addl MACRO_LITERAL(4), %esp // Pop code pointer off stack
- .cfi_adjust_cfa_offset -4
+ CFI_ADJUST_CFA_OFFSET(-4)
DELIVER_PENDING_EXCEPTION
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -259,7 +257,7 @@
PUSH ebp // save ebp
PUSH ebx // save ebx
mov %esp, %ebp // copy value of stack pointer into base pointer
- .cfi_def_cfa_register ebp
+ CFI_DEF_CFA_REGISTER(ebp)
mov 20(%ebp), %ebx // get arg array size
addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
@@ -270,7 +268,7 @@
pushl 20(%ebp) // push size of region to memcpy
pushl 16(%ebp) // push arg array as source of memcpy
pushl %eax // push stack pointer as destination of memcpy
- call SYMBOL(memcpy)@PLT // (void*, const void*, size_t)
+ call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
addl LITERAL(12), %esp // pop arguments to memcpy
movl LITERAL(0), (%esp) // store NULL for method*
mov 12(%ebp), %eax // move method pointer into eax
@@ -279,7 +277,7 @@
mov 12(%esp), %ebx // copy arg3 into ebx
call *METHOD_CODE_OFFSET(%eax) // call the method
mov %ebp, %esp // restore stack pointer
- .cfi_def_cfa_register esp
+ CFI_DEF_CFA_REGISTER(esp)
POP ebx // pop ebx
POP ebp // pop ebp
mov 20(%esp), %ecx // get result pointer
@@ -303,13 +301,13 @@
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
// Outgoing argument set up
subl MACRO_LITERAL(8), %esp // push padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
- call VAR(cxx_name, 1)@PLT // cxx_name(Thread*, SP)
+ CFI_ADJUST_CFA_OFFSET(4)
+ call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -324,11 +322,11 @@
PUSH eax // push padding
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
- call VAR(cxx_name, 1)@PLT // cxx_name(arg1, Thread*, SP)
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -342,12 +340,12 @@
// Outgoing argument set up
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 1)@PLT // cxx_name(arg1, arg2, Thread*, SP)
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -359,17 +357,17 @@
mov %esp, %ebx // remember SP
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers EBX
- call VAR(cxx_name, 1)@PLT // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
addl MACRO_LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -401,7 +399,98 @@
END_MACRO
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+// TODO: use arch/quick_alloc_entrypoints.S. Currently we don't, because we need concatenation
+// macros to work around differences between OS X's as and binutils as (OS X's as lacks named
+// macro arguments, and the VAR macro won't concatenate arguments properly). This also breaks
+// multi-line macros that use each other, hence one macro per line below.
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check ## c_suffix, artAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
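
Note: with the token-pasting macros above, a single instantiation such as
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc) expands to

    TWO_ARG_DOWNCALL art_quick_alloc_object_dlmalloc, artAllocObjectFromCodeDlMalloc, RETURN_IF_RESULT_IS_NON_ZERO

so each (allocator, instrumentation) pair gets its own art_quick_* assembly stub
bound to the matching artAlloc*FromCode C++ entrypoint generated further below.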
@@ -445,11 +534,11 @@
PUSH eax // push padding
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call artLockObjectFromCode@PLT // artLockObjectFromCode(object, Thread*, SP)
- addl MACRO_LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ call PLT_SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
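
The CFI_ADJUST_CFA_OFFSET and PLT_SYMBOL wrappers introduced here abstract over
assembler differences. A minimal sketch of plausible definitions, assuming an
Apple-versus-ELF split (illustrative only; the real definitions live in the x86
asm-support header and may differ):

    #if defined(__APPLE__)
        #define PLT_SYMBOL(name) _ ## name    // Mach-O prefixes an underscore; no ELF PLT.
        #define CFI_ADJUST_CFA_OFFSET(size)   // OS X as may lack CFI directive support.
    #else
        #define PLT_SYMBOL(name) name@PLT     // ELF PIC call through the PLT.
        #define CFI_ADJUST_CFA_OFFSET(size) .cfi_adjust_cfa_offset size
    #endif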
@@ -479,11 +568,11 @@
PUSH eax // push padding
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call artUnlockObjectFromCode@PLT // artUnlockObjectFromCode(object, Thread*, SP)
- addl MACRO_LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ call PLT_SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
@@ -493,9 +582,9 @@
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
- call SYMBOL(artIsAssignableFromCode)@PLT // (Class* klass, Class* ref_klass)
+ call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
addl LITERAL(12), %esp // pop arguments
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_is_assignable
@@ -504,26 +593,26 @@
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
- call SYMBOL(artIsAssignableFromCode)@PLT // (Class* klass, Class* ref_klass)
+ call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testl %eax, %eax
jz 1f // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
ret
1:
POP eax // pop arguments
POP ecx
addl LITERAL(4), %esp
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %esp, %edx
// Outgoing argument set up
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call SYMBOL(artThrowClassCastException)@PLT // (Class* a, Class* b, Thread*, SP)
+ call PLT_SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_check_cast
@@ -568,14 +657,14 @@
PUSH ecx
PUSH edx
subl LITERAL(8), %esp // alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
pushl CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass arg1 - component type of the array
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artIsAssignableFromCode)@PLT // (Class* a, Class* b)
+ call PLT_SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
addl LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
testl %eax, %eax
jz throw_array_store_exception
POP edx
@@ -595,10 +684,10 @@
// Outgoing argument set up
PUSH ecx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg2 - value
PUSH eax // pass arg1 - array
- call SYMBOL(artThrowArrayStoreException)@PLT // (array, value, Thread*, SP)
+ call PLT_SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_aput_obj
@@ -607,9 +696,9 @@
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call SYMBOL(memcpy)@PLT // (void*, const void*, size_t)
+ call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
addl LITERAL(12), %esp // pop arguments
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_memcpy
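
SETUP_GOT_NOSAVE precedes each PLT_SYMBOL call because the i386 ELF PIC
convention expects %ebx to hold the GOT address when calling through the PLT,
which is also why every use is annotated "clobbers EBX". A hypothetical
expansion of such a macro (the real one may differ):

    call __x86.get_pc_thunk.bx           // thunk body: movl (%esp), %ebx; ret
    addl $_GLOBAL_OFFSET_TABLE_, %ebx    // %ebx now addresses the GOT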
@@ -617,17 +706,17 @@
DEFINE_FUNCTION art_quick_fmod
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass arg4 b.hi
PUSH edx // pass arg3 b.lo
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(fmod)@PLT // (jdouble a, jdouble b)
+ call PLT_SYMBOL(fmod) // (jdouble a, jdouble b)
fstpl (%esp) // pop return value off fp stack
movsd (%esp), %xmm0 // place into %xmm0
addl LITERAL(28), %esp // pop arguments
- .cfi_adjust_cfa_offset -28
+ CFI_ADJUST_CFA_OFFSET(-28)
ret
END_FUNCTION art_quick_fmod
@@ -636,11 +725,11 @@
PUSH ecx // pass arg2 b
PUSH eax // pass arg1 a
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(fmodf)@PLT // (jfloat a, jfloat b)
+ call PLT_SYMBOL(fmodf) // (jfloat a, jfloat b)
fstps (%esp) // pop return value off fp stack
movss (%esp), %xmm0 // place into %xmm0
addl LITERAL(12), %esp // pop arguments
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_fmodf
@@ -651,7 +740,7 @@
fstpl (%esp) // pop value off fp stack as double
movsd (%esp), %xmm0 // place into %xmm0
addl LITERAL(8), %esp // pop arguments
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
ret
END_FUNCTION art_quick_l2d
@@ -662,7 +751,7 @@
fstps (%esp) // pop value off fp stack as a single
movss (%esp), %xmm0 // place into %xmm0
addl LITERAL(8), %esp // pop argument
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
ret
END_FUNCTION art_quick_l2f
@@ -671,20 +760,20 @@
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(art_d2l)@PLT // (jdouble a)
+ call PLT_SYMBOL(art_d2l) // (jdouble a)
addl LITERAL(12), %esp // pop arguments
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_d2l
DEFINE_FUNCTION art_quick_f2l
subl LITERAL(8), %esp // alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
SETUP_GOT_NOSAVE // clobbers EBX
PUSH eax // pass arg1 a
- call SYMBOL(art_f2l)@PLT // (jfloat a)
+ call PLT_SYMBOL(art_f2l) // (jfloat a)
addl LITERAL(12), %esp // pop arguments
- .cfi_adjust_cfa_offset -12
+ CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_f2l
@@ -704,29 +793,29 @@
DEFINE_FUNCTION art_quick_ldiv
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass arg4 b.hi
PUSH edx // pass arg3 b.lo
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artLdiv)@PLT // (jlong a, jlong b)
+ call PLT_SYMBOL(artLdiv) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
- .cfi_adjust_cfa_offset -28
+ CFI_ADJUST_CFA_OFFSET(-28)
ret
END_FUNCTION art_quick_ldiv
DEFINE_FUNCTION art_quick_lmod
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass arg4 b.hi
PUSH edx // pass arg3 b.lo
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artLmod)@PLT // (jlong a, jlong b)
+ call PLT_SYMBOL(artLmod) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
- .cfi_adjust_cfa_offset -28
+ CFI_ADJUST_CFA_OFFSET(-28)
ret
END_FUNCTION art_quick_lmod
@@ -782,19 +871,19 @@
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
mov %esp, %ebx // remember SP
subl LITERAL(8), %esp // alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
mov 32(%ebx), %ebx // get referrer
PUSH ebx // pass referrer
PUSH edx // pass new_val
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artSet32InstanceFromCode)@PLT // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ call PLT_SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set32_instance
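
The popped byte counts in these stubs can be checked against the pushes; for
art_quick_set32_instance above:

    8 (alignment padding) + 4 (SP) + 4 (Thread*) + 4 (referrer)
      + 4 (new_val) + 4 (object) + 4 (field_idx) = 32

which matches addl LITERAL(32), %esp and CFI_ADJUST_CFA_OFFSET(-32).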
@@ -802,19 +891,19 @@
DEFINE_FUNCTION art_quick_set64_instance
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
subl LITERAL(8), %esp // alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH esp // pass SP-8
addl LITERAL(8), (%esp) // fix SP on stack by adding 8
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass high half of new_val
PUSH edx // pass low half of new_val
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artSet64InstanceFromCode)@PLT // (field_idx, Object*, new_val, Thread*, SP)
+ call PLT_SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_instance
@@ -823,19 +912,19 @@
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
mov %esp, %ebx // remember SP
subl LITERAL(8), %esp // alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
mov 32(%ebx), %ebx // get referrer
PUSH ebx // pass referrer
PUSH edx // pass new_val
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artSetObjInstanceFromCode)@PLT // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ call PLT_SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set_obj_instance
@@ -845,17 +934,17 @@
mov %esp, %ebx // remember SP
mov 32(%esp), %edx // get referrer
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass referrer
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artGet32InstanceFromCode)@PLT // (field_idx, Object*, referrer, Thread*, SP)
+ call PLT_SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get32_instance
@@ -865,17 +954,17 @@
mov %esp, %ebx // remember SP
mov 32(%esp), %edx // get referrer
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass referrer
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artGet64InstanceFromCode)@PLT // (field_idx, Object*, referrer, Thread*, SP)
+ call PLT_SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get64_instance
@@ -885,17 +974,17 @@
mov %esp, %ebx // remember SP
mov 32(%esp), %edx // get referrer
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass referrer
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artGetObjInstanceFromCode)@PLT // (field_idx, Object*, referrer, Thread*, SP)
+ call PLT_SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get_obj_instance
@@ -905,17 +994,17 @@
mov %esp, %ebx // remember SP
mov 32(%esp), %edx // get referrer
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass referrer
PUSH ecx // pass new_val
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artSet32StaticFromCode)@PLT // (field_idx, new_val, referrer, Thread*, SP)
+ call PLT_SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set32_static
@@ -924,19 +1013,19 @@
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
mov %esp, %ebx // remember SP
subl LITERAL(8), %esp // alignment padding
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
mov 32(%ebx), %ebx // get referrer
PUSH edx // pass high half of new_val
PUSH ecx // pass low half of new_val
PUSH ebx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artSet64StaticFromCode)@PLT // (field_idx, referrer, new_val, Thread*, SP)
+ call PLT_SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
@@ -946,15 +1035,15 @@
mov %esp, %ebx // remember SP
mov 32(%esp), %edx // get referrer
subl LITERAL(12), %esp // alignment padding
- .cfi_adjust_cfa_offset 12
+ CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass referrer
PUSH ecx // pass new_val
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artSetObjStaticFromCode)@PLT // (field_idx, new_val, referrer, Thread*, SP)
+ call PLT_SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
@@ -966,13 +1055,13 @@
mov 32(%esp), %ecx // get referrer
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artGet32StaticFromCode)@PLT // (field_idx, referrer, Thread*, SP)
+ call PLT_SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get32_static
@@ -983,13 +1072,13 @@
mov 32(%esp), %ecx // get referrer
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artGet64StaticFromCode)@PLT // (field_idx, referrer, Thread*, SP)
+ call PLT_SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get64_static
@@ -1000,13 +1089,13 @@
mov 32(%esp), %ecx // get referrer
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artGetObjStaticFromCode)@PLT // (field_idx, referrer, Thread*, SP)
+ call PLT_SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get_obj_static
@@ -1015,16 +1104,16 @@
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
PUSH esp // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver
PUSH eax // pass proxy method
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artQuickProxyInvokeHandler)@PLT // (proxy method, receiver, Thread*, SP)
+ call PLT_SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
addl LITERAL(44), %esp // pop arguments
- .cfi_adjust_cfa_offset -44
+ CFI_ADJUST_CFA_OFFSET(-44)
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_proxy_invoke_handler
@@ -1046,11 +1135,11 @@
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
PUSH esp // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver
PUSH eax // pass method
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artQuickResolutionTrampoline)@PLT // (Method* called, receiver, Thread*, SP)
+ call PLT_SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
test %eax, %eax // if code pointer is NULL goto deliver pending exception
@@ -1074,15 +1163,15 @@
PUSH eax // alignment padding
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass method
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artQuickToInterpreterBridge)@PLT // (method, Thread*, SP)
+ call PLT_SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
addl LITERAL(16), %esp // pop arguments
- .cfi_adjust_cfa_offset -16
+ CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_to_interpreter_bridge
@@ -1095,20 +1184,20 @@
movl %esp, %edx // Save SP.
PUSH eax // Save eax which will be clobbered by the callee-save method.
subl LITERAL(8), %esp // Align stack.
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
pushl 40(%esp) // Pass LR.
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // Pass SP.
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // Pass receiver.
PUSH eax // Pass Method*.
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artInstrumentationMethodEntryFromCode)@PLT // (Method*, Object*, Thread*, SP, LR)
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
    addl LITERAL(28), %esp        // Pop arguments up to saved Method*.
movl 28(%esp), %edi // Restore edi.
movl %eax, 28(%esp) // Place code* over edi, just under return pc.
- movl LITERAL(SYMBOL(art_quick_instrumentation_exit)@PLT), 32(%esp)
+ movl LITERAL(PLT_SYMBOL(art_quick_instrumentation_exit)), 32(%esp)
// Place instrumentation exit as return pc.
movl (%esp), %eax // Restore eax.
movl 8(%esp), %ecx // Restore ecx.
@@ -1125,32 +1214,32 @@
SETUP_REF_ONLY_CALLEE_SAVE_FRAME
mov %esp, %ecx // Remember SP
subl LITERAL(8), %esp // Save float return value.
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
movd %xmm0, (%esp)
PUSH edx // Save gpr return value.
PUSH eax
subl LITERAL(8), %esp // Align stack
movd %xmm0, (%esp)
subl LITERAL(8), %esp // Pass float return value.
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
movd %xmm0, (%esp)
PUSH edx // Pass gpr return value.
PUSH eax
PUSH ecx // Pass SP.
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current.
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artInstrumentationMethodExitFromCode)@PLT // (Thread*, SP, gpr_result, fpr_result)
+ call PLT_SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
mov %eax, %ecx // Move returned link register.
addl LITERAL(32), %esp // Pop arguments.
- .cfi_adjust_cfa_offset -32
+ CFI_ADJUST_CFA_OFFSET(-32)
movl %edx, %ebx // Move returned link register for deopt
// (ebx is pretending to be our LR).
POP eax // Restore gpr return value.
POP edx
movd (%esp), %xmm0 // Restore fpr return value.
addl LITERAL(8), %esp
- .cfi_adjust_cfa_offset -8
+ CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
addl LITERAL(4), %esp // Remove fake return pc.
jmp *%ecx // Return.
@@ -1165,12 +1254,12 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
mov %esp, %ecx // Remember SP.
subl LITERAL(8), %esp // Align stack.
- .cfi_adjust_cfa_offset 8
+ CFI_ADJUST_CFA_OFFSET(8)
PUSH ecx // Pass SP.
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
- .cfi_adjust_cfa_offset 4
+ CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers EBX
- call SYMBOL(artDeoptimize)@PLT // artDeoptimize(Thread*, SP)
+ call PLT_SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
int3 // Unreachable.
END_FUNCTION art_quick_deoptimize
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 3aabc8d..15554ac 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -177,36 +177,43 @@
static const char gHexDigit[] = "0123456789abcdef";
const unsigned char* addr = reinterpret_cast<const unsigned char*>(address_);
- char out[76]; /* exact fit */
- unsigned int offset; /* offset to show while printing */
+  // 01234560: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff  0123456789abcdef
+  char out[(kBitsPerWord / 4) +  /* offset digits */
+           1 +                   /* colon */
+           (16 * 3) +            /* 16 hex bytes, each "xx " */
+           2 +                   /* white space */
+           16 +                  /* 16 ASCII characters */
+           1                     /* \0 */ ];
+ size_t offset; /* offset to show while printing */
if (show_actual_addresses_) {
- offset = reinterpret_cast<int>(addr);
+ offset = reinterpret_cast<size_t>(addr);
} else {
offset = 0;
}
memset(out, ' ', sizeof(out)-1);
- out[8] = ':';
+ out[kBitsPerWord / 4] = ':';
out[sizeof(out)-1] = '\0';
size_t byte_count = byte_count_;
- int gap = static_cast<int>(offset & 0x0f);
+ size_t gap = offset & 0x0f;
while (byte_count) {
- unsigned int line_offset = offset & ~0x0f;
+ size_t line_offset = offset & ~0x0f;
char* hex = out;
- char* asc = out + 59;
+    char* asc = out + (kBitsPerWord / 4) + /* offset digits */ 1 + /* colon */
+                (16 * 3) + /* 16 hex bytes, each "xx " */ 2 /* white space */;
- for (int i = 0; i < 8; i++) {
- *hex++ = gHexDigit[line_offset >> 28];
+ for (int i = 0; i < (kBitsPerWord / 4); i++) {
+ *hex++ = gHexDigit[line_offset >> (kBitsPerWord - 4)];
line_offset <<= 4;
}
hex++;
hex++;
- int count = std::min(static_cast<int>(byte_count), 16 - gap);
- CHECK_NE(count, 0);
- CHECK_LE(count + gap, 16);
+ size_t count = std::min(byte_count, 16 - gap);
+ CHECK_NE(count, 0U);
+ CHECK_LE(count + gap, 16U);
if (gap) {
/* only on first line */
@@ -214,8 +221,8 @@
asc += gap;
}
- int i;
- for (i = gap ; i < count+gap; i++) {
+ size_t i;
+ for (i = gap ; i < count + gap; i++) {
*hex++ = gHexDigit[*addr >> 4];
*hex++ = gHexDigit[*addr & 0x0f];
hex++;
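
The new buffer sizing reproduces the old constant on 32-bit hosts and scales it
for 64-bit ones: with kBitsPerWord == 32 the total is 8 + 1 + 48 + 2 + 16 + 1 =
76 bytes, exactly the old "char out[76]; /* exact fit */", and with
kBitsPerWord == 64 it becomes 16 + 1 + 48 + 2 + 16 + 1 = 84 bytes.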
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 29b3981..30bf623 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -17,6 +17,9 @@
#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
#define ART_RUNTIME_BASE_MUTEX_INL_H_
+#define __STDC_FORMAT_MACROS 1
+#include <inttypes.h>
+
#include "mutex.h"
#define ATRACE_TAG ATRACE_TAG_DALVIK
@@ -96,7 +99,7 @@
blocked_tid_(kLogLockContentions ? blocked_tid : 0),
owner_tid_(kLogLockContentions ? owner_tid : 0),
start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
- std::string msg = StringPrintf("Lock contention on %s (owner tid: %llu)",
+ std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
mutex->GetName(), owner_tid);
ATRACE_BEGIN(msg.c_str());
}
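
The PRIu64 change generalizes beyond this call site: "%llu" is only correct
where unsigned long long and uint64_t coincide, while the <inttypes.h> macros
expand to the right conversion specifier on every platform. A standalone
illustration (not ART code):

    #define __STDC_FORMAT_MACROS 1  // In C++, must precede <inttypes.h> to expose PRI* macros.
    #include <inttypes.h>
    #include <cstdio>

    int main() {
      uint64_t owner_tid = 1234567890123ULL;
      std::printf("Lock contention (owner tid: %" PRIu64 ")\n", owner_tid);
      return 0;
    }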
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index b1117a2..b5d9fdf 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4186,7 +4186,7 @@
mirror::Class* referring_class = referrer->GetDeclaringClass();
if (!referring_class->CanAccess(methods_class)) {
ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
- referrer, resolved, type);
+ resolved, type);
return NULL;
} else if (!referring_class->CanAccessMember(methods_class,
resolved->GetAccessFlags())) {
diff --git a/runtime/common_test.h b/runtime/common_test.h
index a75a513..ee95d5b 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -474,13 +474,12 @@
}
}
class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
- CumulativeLogger timer("Compilation times");
compiler_driver_.reset(new CompilerDriver(verified_methods_data_.get(),
method_inliner_map_.get(),
compiler_backend, instruction_set,
instruction_set_features,
true, new CompilerDriver::DescriptorSet,
- 2, true, true, &timer));
+ 2, true));
}
// We typically don't generate an image in unit tests, disable this optimization by default.
compiler_driver_->SetSupportBootImageFixup(false);
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 0419dab..dd832df 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -136,7 +136,6 @@
}
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
- const mirror::ArtMethod* caller,
const mirror::ArtMethod* called,
InvokeType type) {
std::ostringstream msg;
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 3164f30..7f13891 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -76,7 +76,6 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
- const mirror::ArtMethod* caller,
const mirror::ArtMethod* called,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index dc9d337..528e112 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -113,13 +113,13 @@
}
bool DexFileVerifier::CheckPointerRange(const void* start, const void* end, const char* label) {
- uint32_t range_start = reinterpret_cast<uint32_t>(start);
- uint32_t range_end = reinterpret_cast<uint32_t>(end);
- uint32_t file_start = reinterpret_cast<uint32_t>(begin_);
- uint32_t file_end = file_start + size_;
+ const byte* range_start = reinterpret_cast<const byte*>(start);
+ const byte* range_end = reinterpret_cast<const byte*>(end);
+ const byte* file_start = reinterpret_cast<const byte*>(begin_);
+ const byte* file_end = file_start + size_;
if (UNLIKELY((range_start < file_start) || (range_start > file_end) ||
(range_end < file_start) || (range_end > file_end))) {
- ErrorStringPrintf("Bad range for %s: %x to %x", label,
+ ErrorStringPrintf("Bad range for %s: %zx to %zx", label,
range_start - file_start, range_end - file_start);
return false;
}
@@ -284,7 +284,7 @@
for (uint32_t i = 0; i < handlers_size; i++) {
bool catch_all;
- uint32_t offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(handlers_base);
+ size_t offset = ptr_ - handlers_base;
int32_t size = DecodeSignedLeb128(&ptr_);
if (UNLIKELY((size < -65536) || (size > 65536))) {
@@ -299,7 +299,7 @@
catch_all = false;
}
- handler_offsets[i] = offset;
+ handler_offsets[i] = static_cast<uint32_t>(offset);
while (size-- > 0) {
uint32_t type_idx = DecodeUnsignedLeb128(&ptr_);
@@ -386,14 +386,14 @@
return true;
}
-bool DexFileVerifier::CheckPadding(uint32_t offset, uint32_t aligned_offset) {
+bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
if (offset < aligned_offset) {
if (!CheckPointerRange(begin_ + offset, begin_ + aligned_offset, "section")) {
return false;
}
while (offset < aligned_offset) {
if (UNLIKELY(*ptr_ != '\0')) {
- ErrorStringPrintf("Non-zero padding %x before section start at %x", *ptr_, offset);
+ ErrorStringPrintf("Non-zero padding %x before section start at %zx", *ptr_, offset);
return false;
}
ptr_++;
@@ -634,7 +634,7 @@
}
// try_items are 4-byte aligned. Verify the spacer is 0.
- if ((((uint32_t) &insns[insns_size] & 3) != 0) && (insns[insns_size] != 0)) {
+ if (((reinterpret_cast<uintptr_t>(&insns[insns_size]) & 3) != 0) && (insns[insns_size] != 0)) {
ErrorStringPrintf("Non-zero padding: %x", insns[insns_size]);
return false;
}
@@ -975,9 +975,9 @@
return true;
}
-bool DexFileVerifier::CheckIntraSectionIterate(uint32_t offset, uint32_t count, uint16_t type) {
+bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t count, uint16_t type) {
// Get the right alignment mask for the type of section.
- uint32_t alignment_mask;
+ size_t alignment_mask;
switch (type) {
case DexFile::kDexTypeClassDataItem:
case DexFile::kDexTypeStringDataItem:
@@ -993,7 +993,7 @@
// Iterate through the items in the section.
for (uint32_t i = 0; i < count; i++) {
- uint32_t aligned_offset = (offset + alignment_mask) & ~alignment_mask;
+ size_t aligned_offset = (offset + alignment_mask) & ~alignment_mask;
// Check the padding between items.
if (!CheckPadding(offset, aligned_offset)) {
@@ -1134,7 +1134,7 @@
offset_to_type_map_.Put(aligned_offset, type);
}
- aligned_offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
+ aligned_offset = ptr_ - begin_;
if (UNLIKELY(aligned_offset > size_)) {
ErrorStringPrintf("Item %d at ends out of bounds", i);
return false;
@@ -1146,7 +1146,7 @@
return true;
}
-bool DexFileVerifier::CheckIntraIdSection(uint32_t offset, uint32_t count, uint16_t type) {
+bool DexFileVerifier::CheckIntraIdSection(size_t offset, uint32_t count, uint16_t type) {
uint32_t expected_offset;
uint32_t expected_size;
@@ -1183,7 +1183,7 @@
// Check that the offset and size are what were expected from the header.
if (UNLIKELY(offset != expected_offset)) {
- ErrorStringPrintf("Bad offset for section: got %x, expected %x", offset, expected_offset);
+ ErrorStringPrintf("Bad offset for section: got %zx, expected %x", offset, expected_offset);
return false;
}
if (UNLIKELY(count != expected_size)) {
@@ -1194,13 +1194,13 @@
return CheckIntraSectionIterate(offset, count, type);
}
-bool DexFileVerifier::CheckIntraDataSection(uint32_t offset, uint32_t count, uint16_t type) {
- uint32_t data_start = header_->data_off_;
- uint32_t data_end = data_start + header_->data_size_;
+bool DexFileVerifier::CheckIntraDataSection(size_t offset, uint32_t count, uint16_t type) {
+ size_t data_start = header_->data_off_;
+ size_t data_end = data_start + header_->data_size_;
// Sanity check the offset of the section.
if (UNLIKELY((offset < data_start) || (offset > data_end))) {
- ErrorStringPrintf("Bad offset for data subsection: %x", offset);
+ ErrorStringPrintf("Bad offset for data subsection: %zx", offset);
return false;
}
@@ -1208,9 +1208,9 @@
return false;
}
- uint32_t next_offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
+ size_t next_offset = ptr_ - begin_;
if (next_offset > data_end) {
- ErrorStringPrintf("Out-of-bounds end of data subsection: %x", next_offset);
+ ErrorStringPrintf("Out-of-bounds end of data subsection: %zx", next_offset);
return false;
}
@@ -1222,7 +1222,7 @@
const DexFile::MapItem* item = map->list_;
uint32_t count = map->size_;
- uint32_t offset = 0;
+ size_t offset = 0;
ptr_ = begin_;
// Check the items listed in the map.
@@ -1235,7 +1235,7 @@
if (!CheckPadding(offset, section_offset)) {
return false;
} else if (UNLIKELY(offset > section_offset)) {
- ErrorStringPrintf("Section overlap or out-of-order map: %x, %x", offset, section_offset);
+ ErrorStringPrintf("Section overlap or out-of-order map: %zx, %x", offset, section_offset);
return false;
}
@@ -1262,7 +1262,7 @@
if (!CheckIntraIdSection(section_offset, section_count, type)) {
return false;
}
- offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
+ offset = ptr_ - begin_;
break;
case DexFile::kDexTypeMapList:
if (UNLIKELY(section_count != 1)) {
@@ -1290,7 +1290,7 @@
if (!CheckIntraDataSection(section_offset, section_count, type)) {
return false;
}
- offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
+ offset = ptr_ - begin_;
break;
default:
ErrorStringPrintf("Unknown map item type %x", type);
@@ -1303,14 +1303,14 @@
return true;
}
-bool DexFileVerifier::CheckOffsetToTypeMap(uint32_t offset, uint16_t type) {
+bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
auto it = offset_to_type_map_.find(offset);
if (UNLIKELY(it == offset_to_type_map_.end())) {
- ErrorStringPrintf("No data map entry found @ %x; expected %x", offset, type);
+ ErrorStringPrintf("No data map entry found @ %zx; expected %x", offset, type);
return false;
}
if (UNLIKELY(it->second != type)) {
- ErrorStringPrintf("Unexpected data map entry @ %x; expected %x, found %x",
+ ErrorStringPrintf("Unexpected data map entry @ %zx; expected %x, found %x",
offset, type, it->second);
return false;
}
@@ -1784,9 +1784,9 @@
return true;
}
-bool DexFileVerifier::CheckInterSectionIterate(uint32_t offset, uint32_t count, uint16_t type) {
+bool DexFileVerifier::CheckInterSectionIterate(size_t offset, uint32_t count, uint16_t type) {
// Get the right alignment mask for the type of section.
- uint32_t alignment_mask;
+ size_t alignment_mask;
switch (type) {
case DexFile::kDexTypeClassDataItem:
alignment_mask = sizeof(uint8_t) - 1;
@@ -1871,7 +1871,7 @@
}
previous_item_ = prev_ptr;
- offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
+ offset = ptr_ - begin_;
}
return true;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 4b8b80a..3337785 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -53,7 +53,7 @@
bool CheckClassDataItemField(uint32_t idx, uint32_t access_flags, bool expect_static);
bool CheckClassDataItemMethod(uint32_t idx, uint32_t access_flags, uint32_t code_offset,
bool expect_direct);
- bool CheckPadding(uint32_t offset, uint32_t aligned_offset);
+ bool CheckPadding(size_t offset, uint32_t aligned_offset);
bool CheckEncodedValue();
bool CheckEncodedArray();
bool CheckEncodedAnnotation();
@@ -65,12 +65,12 @@
bool CheckIntraAnnotationItem();
bool CheckIntraAnnotationsDirectoryItem();
- bool CheckIntraSectionIterate(uint32_t offset, uint32_t count, uint16_t type);
- bool CheckIntraIdSection(uint32_t offset, uint32_t count, uint16_t type);
- bool CheckIntraDataSection(uint32_t offset, uint32_t count, uint16_t type);
+ bool CheckIntraSectionIterate(size_t offset, uint32_t count, uint16_t type);
+ bool CheckIntraIdSection(size_t offset, uint32_t count, uint16_t type);
+ bool CheckIntraDataSection(size_t offset, uint32_t count, uint16_t type);
bool CheckIntraSection();
- bool CheckOffsetToTypeMap(uint32_t offset, uint16_t type);
+ bool CheckOffsetToTypeMap(size_t offset, uint16_t type);
uint16_t FindFirstClassDataDefiner(const byte* ptr) const;
uint16_t FindFirstAnnotationsDirectoryDefiner(const byte* ptr) const;
@@ -85,7 +85,7 @@
bool CheckInterClassDataItem();
bool CheckInterAnnotationsDirectoryItem();
- bool CheckInterSectionIterate(uint32_t offset, uint32_t count, uint16_t type);
+ bool CheckInterSectionIterate(size_t offset, uint32_t count, uint16_t type);
bool CheckInterSection();
void ErrorStringPrintf(const char* fmt, ...)
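
The common thread in the verifier changes: offsets were previously computed by
casting pointers to uint32_t, which truncates addresses on a 64-bit build. A
simplified before/after sketch, not the verifier's actual code (ART spells
uint8_t as "byte"):

    #include <cstddef>
    #include <cstdint>

    static size_t FileOffset(const uint8_t* begin, const uint8_t* ptr) {
      // Old shape, 32-bit only (truncates, and a reinterpret_cast from a
      // pointer to a smaller integer type is ill-formed anyway):
      //   uint32_t off = reinterpret_cast<uint32_t>(ptr) - reinterpret_cast<uint32_t>(begin);
      // New shape: pointer subtraction is width-agnostic, and the resulting
      // size_t prints portably with "%zx".
      return static_cast<size_t>(ptr - begin);
    }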
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 8304229..5ee750f 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -74,21 +74,48 @@
}
if (UNLIKELY(!klass->IsInitialized())) {
SirtRef<mirror::Class> sirt_klass(self, klass);
- // The class initializer might cause a GC.
+      // EnsureInitialized (the class initializer) might cause a GC, which
+      // may cause us to suspend, meaning that another thread may try to
+ // change the allocator while we are stuck in the entrypoints of
+ // an old allocator. Also, the class initialization may fail. To
+ // handle these cases we mark the slow path boolean as true so
+ // that the caller knows to check the allocator type to see if it
+ // has changed and to null-check the return value in case the
+ // initialization fails.
+ *slow_path = true;
if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
- // TODO: EnsureInitialized may cause us to suspend meaning that another thread may try to
- // change the allocator while we are stuck in the entrypoints of an old allocator. To handle
- // this case we mark the slow path boolean as true so that the caller knows to check the
- // allocator type to see if it has changed.
- *slow_path = true;
return sirt_klass.get();
}
return klass;
}
+// TODO: Fix no thread safety analysis when annotalysis is smarter.
+ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
+ Thread* self, bool* slow_path)
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (UNLIKELY(!klass->IsInitialized())) {
+ SirtRef<mirror::Class> sirt_class(self, klass);
+    // EnsureInitialized (the class initializer) might cause a GC, which
+    // may cause us to suspend, meaning that another thread may try to
+ // change the allocator while we are stuck in the entrypoints of
+ // an old allocator. Also, the class initialization may fail. To
+ // handle these cases we mark the slow path boolean as true so
+ // that the caller knows to check the allocator type to see if it
+ // has changed and to null-check the return value in case the
+ // initialization fails.
+ *slow_path = true;
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_class, true, true)) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr; // Failure
+ }
+ return sirt_class.get();
+ }
+ return klass;
+}
+
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
@@ -112,6 +139,40 @@
return klass->Alloc<kInstrumented>(self, allocator_type);
}
+// Given the context of a calling Method and a resolved class, create an instance.
+// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
+template <bool kInstrumented>
+ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type)
+ NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(klass != nullptr);
+ bool slow_path = false;
+ klass = CheckClassInitializedForObjectAlloc(klass, self, &slow_path);
+ if (UNLIKELY(slow_path)) {
+ if (klass == nullptr) {
+ return nullptr;
+ }
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ return klass->Alloc<kInstrumented>(self, heap->GetCurrentAllocator());
+ }
+ return klass->Alloc<kInstrumented>(self, allocator_type);
+}
+
+// Given the context of a calling Method and an initialized class, create an instance.
+// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
+template <bool kInstrumented>
+ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type)
+ NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(klass != nullptr);
+ return klass->Alloc<kInstrumented>(self, allocator_type);
+}
+
// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
@@ -226,28 +287,14 @@
return nullptr;
}
mirror::Class* referring_class = referrer->GetDeclaringClass();
- if (UNLIKELY(!referring_class->CanAccess(fields_class) ||
- !referring_class->CanAccessMember(fields_class,
- resolved_field->GetAccessFlags()))) {
- // The referring class can't access the resolved field, this may occur as a result of a
- // protected field being made public by a sub-class. Resort to the dex file to determine
- // the correct class for the access check.
- const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile();
- fields_class = class_linker->ResolveType(dex_file,
- dex_file.GetFieldId(field_idx).class_idx_,
- referring_class);
- if (UNLIKELY(!referring_class->CanAccess(fields_class))) {
- ThrowIllegalAccessErrorClass(referring_class, fields_class);
- return nullptr; // failure
- } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class,
- resolved_field->GetAccessFlags()))) {
- ThrowIllegalAccessErrorField(referring_class, resolved_field);
- return nullptr; // failure
- }
+ if (UNLIKELY(!referring_class->CanAccessResolvedField<true>(fields_class, resolved_field,
+ field_idx))) {
+ DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
+ return nullptr; // Failure.
}
if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) {
ThrowIllegalAccessErrorFinalField(referrer, resolved_field);
- return nullptr; // failure
+ return nullptr; // Failure.
} else {
FieldHelper fh(resolved_field);
if (UNLIKELY(fh.IsPrimitiveType() != is_primitive ||
@@ -259,7 +306,7 @@
expected_size * (32 / sizeof(int32_t)),
is_primitive ? "primitive" : "non-primitive",
PrettyField(resolved_field, true).c_str());
- return nullptr; // failure
+ return nullptr; // Failure.
}
}
}
@@ -277,7 +324,7 @@
return resolved_field;
} else {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind
- return nullptr; // failure
+ return nullptr; // Failure.
}
}
}
@@ -330,26 +377,12 @@
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
mirror::Class* referring_class = referrer->GetDeclaringClass();
- if (UNLIKELY(!referring_class->CanAccess(methods_class) ||
- !referring_class->CanAccessMember(methods_class,
- resolved_method->GetAccessFlags()))) {
- // The referring class can't access the resolved method, this may occur as a result of a
- // protected method being made public by implementing an interface that re-declares the
- // method public. Resort to the dex file to determine the correct class for the access check
- const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile();
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- methods_class = class_linker->ResolveType(dex_file,
- dex_file.GetMethodId(method_idx).class_idx_,
- referring_class);
- if (UNLIKELY(!referring_class->CanAccess(methods_class))) {
- ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
- referrer, resolved_method, type);
- return nullptr; // Failure.
- } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class,
- resolved_method->GetAccessFlags()))) {
- ThrowIllegalAccessErrorMethod(referring_class, resolved_method);
- return nullptr; // Failure.
- }
+ bool can_access_resolved_method =
+ referring_class->CanAccessResolvedMethod<true, type>(methods_class, resolved_method,
+ method_idx);
+ if (UNLIKELY(!can_access_resolved_method)) {
+ DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
+ return nullptr; // Failure.
}
}
switch (type) {
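
Together with the assembly stubs earlier in this change, object allocation now
has three tiers; a sketch of the contract each tier assumes (presumably
selected by the compiler when it can prove the stronger precondition):

    // art_quick_alloc_object:             takes a type_idx; resolves the class,
    //                                     checks initialization, then allocates.
    // art_quick_alloc_object_resolved:    takes a resolved Class*; still runs
    //                                     CheckClassInitializedForObjectAlloc.
    // art_quick_alloc_object_initialized: takes an initialized Class*; calls
    //                                     klass->Alloc() directly.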
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index b1dca77..5657092 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -30,6 +30,18 @@
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
+extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
+} \
+extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
+} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
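
For example, instantiating this macro with suffix = DlMalloc and
suffix2 = Instrumented emits

    extern "C" mirror::Object* artAllocObjectFromCodeResolvedDlMallocInstrumented(
        mirror::Class* klass, mirror::ArtMethod* method, Thread* self, mirror::ArtMethod** sp);

which is exactly the symbol that the assembly side's
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
stub downcalls into.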
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 1ba2066..bbbc8f2 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -42,6 +42,8 @@
void* (*pAllocArray)(uint32_t, void*, int32_t);
void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
void* (*pAllocObject)(uint32_t, void*);
+ void* (*pAllocObjectResolved)(void*, void*);
+ void* (*pAllocObjectInitialized)(void*, void*);
void* (*pAllocObjectWithAccessCheck)(uint32_t, void*);
void* (*pCheckAndAllocArray)(uint32_t, void*, int32_t);
void* (*pCheckAndAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 1dde18d..01c70fa 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -18,7 +18,6 @@
#define ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
#include "base/logging.h"
-#include "cutils/atomic-inline.h"
#include "utils.h"
namespace art {
@@ -40,7 +39,7 @@
if ((old_word & mask) != 0) {
return true;
}
- } while (UNLIKELY(android_atomic_cas(old_word, old_word | mask, address) != 0));
+ } while (!__sync_bool_compare_and_swap(address, old_word, old_word | mask));
return false;
}
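
The replacement swaps Android's libcutils CAS (which returns 0 on success) for
the GCC/Clang builtin (which returns true on success), hence the inverted loop
condition. A standalone sketch of the same lock-free test-and-set pattern (not
ART code):

    #include <cstdint>

    // Returns true if the bit was already set, false if this call set it.
    static bool AtomicTestAndSetBit(uintptr_t* address, uintptr_t mask) {
      uintptr_t old_word;
      do {
        old_word = *address;
        if ((old_word & mask) != 0) {
          return true;  // Already set; no store needed.
        }
        // Retry if another thread changed *address between the read and the CAS.
      } while (!__sync_bool_compare_and_swap(address, old_word, old_word | mask));
      return false;
    }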
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6d30e1c..fc772b1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -78,7 +78,8 @@
CollectorType post_zygote_collector_type, CollectorType background_collector_type,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_log_threshold, size_t long_gc_log_threshold,
- bool ignore_max_footprint, bool use_tlab)
+ bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
+ bool verify_post_gc_heap)
: non_moving_space_(nullptr),
rosalloc_space_(nullptr),
dlmalloc_space_(nullptr),
@@ -118,11 +119,9 @@
gc_memory_overhead_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
- verify_pre_gc_heap_(false),
- verify_post_gc_heap_(false),
+ verify_pre_gc_heap_(verify_pre_gc_heap),
+ verify_post_gc_heap_(verify_post_gc_heap),
verify_mod_union_table_(false),
- min_alloc_space_size_for_sticky_gc_(1112 * MB),
- min_remaining_space_for_sticky_gc_(1 * MB),
last_trim_time_ms_(0),
allocation_rate_(0),
      /* For GC-a-lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
@@ -222,9 +221,17 @@
// Compute heap capacity. Continuous spaces are sorted in order of Begin().
CHECK(!continuous_spaces_.empty());
+
+ std::string error_str;
+ post_zygote_non_moving_space_mem_map_.reset(
+ MemMap::MapAnonymous("post zygote non-moving space", nullptr, 64 * MB,
+ PROT_READ | PROT_WRITE, &error_str));
+ CHECK(post_zygote_non_moving_space_mem_map_.get() != nullptr) << error_str;
// Relies on the spaces being sorted.
- byte* heap_begin = continuous_spaces_.front()->Begin();
- byte* heap_end = continuous_spaces_.back()->Limit();
+ byte* heap_begin = std::min(post_zygote_non_moving_space_mem_map_->Begin(),
+ continuous_spaces_.front()->Begin());
+ byte* heap_end = std::max(post_zygote_non_moving_space_mem_map_->End(),
+ continuous_spaces_.back()->Limit());
size_t heap_capacity = heap_end - heap_begin;
// Allocate the card table.
@@ -772,7 +779,7 @@
}
bool Heap::IsHeapAddress(const mirror::Object* obj) const {
- if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) {
+ if (kMovingCollector && bump_pointer_space_ && bump_pointer_space_->HasAddress(obj)) {
return true;
}
// TODO: This probably doesn't work for large objects.
@@ -781,21 +788,27 @@
bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
bool search_live_stack, bool sorted) {
- // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
- if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+ if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+ return false;
+ }
+ if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
+ mirror::Class* klass = obj->GetClass();
+ if (obj == klass) {
+ return true;
+ }
+ return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
+ } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
return false;
}
space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
space::DiscontinuousSpace* d_space = NULL;
- if (c_space != NULL) {
+ if (c_space != nullptr) {
if (c_space->GetLiveBitmap()->Test(obj)) {
return true;
}
- } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) {
- return true;
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
- if (d_space != NULL) {
+ if (d_space != nullptr) {
if (d_space->GetLiveObjects()->Test(obj)) {
return true;
}
@@ -828,13 +841,13 @@
}
// We need to check the bitmaps again since there is a race where we mark something as live and
// then clear the stack containing it.
- if (c_space != NULL) {
+ if (c_space != nullptr) {
if (c_space->GetLiveBitmap()->Test(obj)) {
return true;
}
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
- if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
+ if (d_space != nullptr && d_space->GetLiveObjects()->Test(obj)) {
return true;
}
}
@@ -849,6 +862,17 @@
VerifyObjectBody(obj);
}
+bool Heap::VerifyClassClass(const mirror::Class* c) const {
+ // Note: we don't use the accessors here as they have internal sanity checks that we don't want
+ // to run
+ const byte* raw_addr =
+ reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
+ const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
+ return c_c == c_c_c;
+}
+
void Heap::DumpSpaces(std::ostream& stream) {
for (const auto& space : continuous_spaces_) {
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -880,14 +904,7 @@
} else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
}
- // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
- // Note: we don't use the accessors here as they have internal sanity checks
- // that we don't want to run
- raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
- const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
- raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
- const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
- CHECK_EQ(c_c, c_c_c);
+ CHECK(VerifyClassClass(c));
if (verify_object_mode_ > kVerifyAllFast) {
// TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
@@ -1275,10 +1292,6 @@
}
}
-static void MarkInBitmapCallback(mirror::Object* obj, void* arg) {
- reinterpret_cast<accounting::SpaceBitmap*>(arg)->Set(obj);
-}
-
// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
class ZygoteCompactingCollector : public collector::SemiSpace {
public:
@@ -1347,6 +1360,9 @@
forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
if (to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
+ } else {
+ GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
+ GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
}
} else {
size_t size = it->first;
@@ -1398,10 +1414,7 @@
// Update the end and write out image.
non_moving_space_->SetEnd(target_space.End());
non_moving_space_->SetLimit(target_space.Limit());
- accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
- // Record the allocations in the bitmap.
VLOG(heap) << "Zygote size " << non_moving_space_->Size() << " bytes";
- target_space.Walk(MarkInBitmapCallback, bitmap);
}
// Turn the current alloc space into a zygote space and obtain the new alloc space composed of
// the remaining available heap memory.
@@ -1438,9 +1451,10 @@
}
// Can't use RosAlloc for non moving space due to thread local buffers.
// TODO: Non limited space for non-movable objects?
- space::MallocSpace* new_non_moving_space
- = space::DlMallocSpace::Create("Non moving dlmalloc space", 2 * MB, 64 * MB, 64 * MB,
- nullptr);
+ MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
+ space::MallocSpace* new_non_moving_space =
+ space::DlMallocSpace::CreateFromMemMap(mem_map, "Non moving dlmalloc space", kPageSize,
+ 2 * MB, mem_map->Size(), mem_map->Size());
AddSpace(new_non_moving_space, false);
CHECK(new_non_moving_space != nullptr) << "Failed to create new non-moving space";
new_non_moving_space->SetFootprintLimit(new_non_moving_space->Capacity());
@@ -1667,57 +1681,65 @@
void operator()(const mirror::Object* obj, const mirror::Object* ref,
const MemberOffset& offset, bool /* is_static */) const
NO_THREAD_SAFETY_ANALYSIS {
- // Verify that the reference is live.
- if (UNLIKELY(ref != NULL && !IsLive(ref))) {
+ if (ref == nullptr || IsLive(ref)) {
+      // The reference is either null or live; nothing to report.
+ return;
+ }
+ if (!failed_) {
+        // Print the message only on the first failure, to prevent spam.
+ LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
+ failed_ = true;
+ }
+ if (obj != nullptr) {
accounting::CardTable* card_table = heap_->GetCardTable();
accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
accounting::ObjectStack* live_stack = heap_->live_stack_.get();
- if (!failed_) {
- // Print message on only on first failure to prevent spam.
- LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
- failed_ = true;
+ byte* card_addr = card_table->CardFromAddr(obj);
+ LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
+ << offset << "\n card value = " << static_cast<int>(*card_addr);
+ if (heap_->IsValidObjectAddress(obj->GetClass())) {
+ LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
+ } else {
+ LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
}
- if (obj != nullptr) {
- byte* card_addr = card_table->CardFromAddr(obj);
- LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
- << offset << "\n card value = " << static_cast<int>(*card_addr);
- if (heap_->IsValidObjectAddress(obj->GetClass())) {
- LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
+
+      // Attempt to find the class inside the recently freed objects.
+ space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
+ if (ref_space != nullptr && ref_space->IsMallocSpace()) {
+ space::MallocSpace* space = ref_space->AsMallocSpace();
+ mirror::Class* ref_class = space->FindRecentFreedObject(ref);
+ if (ref_class != nullptr) {
+ LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
+ << PrettyClass(ref_class);
} else {
- LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
+ LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
}
+ }
- // Attmept to find the class inside of the recently freed objects.
- space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
- if (ref_space != nullptr && ref_space->IsMallocSpace()) {
- space::MallocSpace* space = ref_space->AsMallocSpace();
- mirror::Class* ref_class = space->FindRecentFreedObject(ref);
- if (ref_class != nullptr) {
- LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
- << PrettyClass(ref_class);
- } else {
- LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
- }
+ if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
+ ref->GetClass()->IsClass()) {
+ LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
+ } else {
+ LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
+ << ") is not a valid heap address";
+ }
+
+ card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
+ void* cover_begin = card_table->AddrFromCard(card_addr);
+ void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
+ accounting::CardTable::kCardSize);
+ LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
+ << "-" << cover_end;
+ accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
+
+ if (bitmap == nullptr) {
+ LOG(ERROR) << "Object " << obj << " has no bitmap";
+ if (!heap_->VerifyClassClass(obj->GetClass())) {
+ LOG(ERROR) << "Object " << obj << " failed class verification!";
}
-
- if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
- ref->GetClass()->IsClass()) {
- LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
- } else {
- LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
- << ") is not a valid heap address";
- }
-
- card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
- void* cover_begin = card_table->AddrFromCard(card_addr);
- void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
- accounting::CardTable::kCardSize);
- LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
- << "-" << cover_end;
- accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
-
+ } else {
// Print out how the object is live.
- if (bitmap != NULL && bitmap->Test(obj)) {
+ if (bitmap->Test(obj)) {
LOG(ERROR) << "Object " << obj << " found in live bitmap";
}
if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
@@ -1737,17 +1759,17 @@
byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
card_table->Scan(bitmap, byte_cover_begin,
byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
-
- // Search to see if any of the roots reference our object.
- void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
- Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
-
- // Search to see if any of the roots reference our reference.
- arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
- Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
- } else {
- LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
}
+
+ // Search to see if any of the roots reference our object.
+ void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
+ Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
+
+ // Search to see if any of the roots reference our reference.
+ arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
+ Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
+ } else {
+ LOG(ERROR) << "Root " << ref << " is dead with type " << PrettyTypeOf(ref);
}
}
@@ -1848,6 +1870,7 @@
LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
*failed_ = true;
} else if (!card_table->IsDirty(obj)) {
+ // TODO: Check mod-union tables.
// Card should be either kCardDirty if it got re-dirtied after we aged it, or
// kCardDirty - 1 if it didn't get touched since we aged it.
accounting::ObjectStack* live_stack = heap_->live_stack_.get();
@@ -1965,7 +1988,7 @@
// were dirty before the GC started.
// TODO: Don't need to use atomic.
// The races are we either end up with: Aged card, unaged card. Since we have the checkpoint
- // roots and then we scan / update mod union tables after. We will always scan either card.//
+ // roots and then we scan / update mod union tables after. We will always scan either card.
// If we end up with the non-aged card, we scan it in the pause.
card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0c3db86..26bbacd 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -149,7 +149,8 @@
CollectorType post_zygote_collector_type, CollectorType background_collector_type,
size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
size_t long_pause_threshold, size_t long_gc_threshold,
- bool ignore_max_footprint, bool use_tlab);
+ bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
+ bool verify_post_gc_heap);
~Heap();
@@ -209,6 +210,8 @@
VerifyObjectImpl(o);
}
}
+ // Check that c.getClass() == c.getClass().getClass().
+ bool VerifyClassClass(const mirror::Class* c) const;
// Check sanity of all live references.
void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
@@ -691,6 +694,9 @@
// don't have to worry about virtual address space fragmentation.
UniquePtr<MemMap> allocator_mem_map_;
+  // The mem-map which we will use for the non-moving space after the zygote is done forking.
+ UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_;
+
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
bool concurrent_gc_;
@@ -801,14 +807,6 @@
// Parallel GC data structures.
UniquePtr<ThreadPool> thread_pool_;
- // Sticky mark bits GC has some overhead, so if we have less a few megabytes of AllocSpace then
- // it's probably better to just do a partial GC.
- const size_t min_alloc_space_size_for_sticky_gc_;
-
- // Minimum remaining size for sticky GC. Since sticky GC doesn't free up as much memory as a
- // normal GC, it is important to not use it when we are almost out of memory.
- const size_t min_remaining_space_for_sticky_gc_;
-
// The last time a heap trim occurred.
uint64_t last_trim_time_ms_;
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index ac20972..74a0274 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -34,10 +34,9 @@
if (UNLIKELY(new_end > growth_end_)) {
return nullptr;
}
- // TODO: Use a cas which always equals the size of pointers.
- } while (android_atomic_cas(reinterpret_cast<int32_t>(old_end),
- reinterpret_cast<int32_t>(new_end),
- reinterpret_cast<volatile int32_t*>(&end_)) != 0);
+ } while (!__sync_bool_compare_and_swap(reinterpret_cast<volatile intptr_t*>(&end_),
+ reinterpret_cast<intptr_t>(old_end),
+ reinterpret_cast<intptr_t>(new_end)));
return reinterpret_cast<mirror::Object*>(old_end);
}
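Replacing android_atomic_cas with __sync_bool_compare_and_swap makes the compare-and-swap operate on pointer-sized words, so the allocation fast path also works on 64-bit builds. A minimal standalone sketch of the lock-free bump-allocation loop; simplified globals stand in for the space's members, and alignment handling is omitted:

// Word-sized CAS bump allocation: retry while another thread advanced end_
// between our read and our compare-and-swap.
#include <cstddef>
#include <cstdint>

static volatile intptr_t end_;  // current bump pointer
static intptr_t growth_end_;    // hard limit of the space

void* BumpAlloc(size_t num_bytes) {
  intptr_t old_end;
  intptr_t new_end;
  do {
    old_end = end_;
    new_end = old_end + static_cast<intptr_t>(num_bytes);
    if (new_end > growth_end_) {
      return nullptr;  // space exhausted; caller falls back or triggers GC
    }
  } while (!__sync_bool_compare_and_swap(&end_, old_end, new_end));
  return reinterpret_cast<void*>(old_end);
}

int main() {
  static char buffer[1024];
  end_ = reinterpret_cast<intptr_t>(buffer);
  growth_end_ = end_ + sizeof(buffer);
  return BumpAlloc(64) != nullptr ? 0 : 1;
}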
diff --git a/runtime/globals.h b/runtime/globals.h
index a0d7e48..b1ccbdc 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -88,6 +88,10 @@
// True if we allow moving methods.
static constexpr bool kMovingMethods = false;
+// If true, the quick compiler embeds class pointers in the compiled
+// code, if possible.
+static constexpr bool kEmbedClassInCode = true;
+
} // namespace art
#endif // ART_RUNTIME_GLOBALS_H_
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index e0fab8c..cd44ebc 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -22,6 +22,7 @@
#include "art_field.h"
#include "art_method.h"
#include "class_loader.h"
+#include "common_throws.h"
#include "dex_cache.h"
#include "gc/heap-inl.h"
#include "iftable.h"
@@ -202,6 +203,68 @@
return IsArrayAssignableFromArray(src);
}
+template <bool throw_on_failure>
+inline bool Class::CanAccessResolvedField(Class* access_to, ArtField* field,
+ uint32_t field_idx) {
+ if (UNLIKELY(!this->CanAccess(access_to))) {
+ // The referrer class can't access the field's declaring class but may still be able
+ // to access the field if the FieldId specifies an accessible subclass of the declaring
+ // class rather than the declaring class itself.
+ DexCache* referrer_dex_cache = this->GetDexCache();
+ uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetFieldId(field_idx).class_idx_;
+    // The referenced class has already been resolved with the field, so get it from the dex cache.
+ Class* dex_access_to = referrer_dex_cache->GetResolvedType(class_idx);
+ DCHECK(dex_access_to != nullptr);
+ if (UNLIKELY(!this->CanAccess(dex_access_to))) {
+ if (throw_on_failure) {
+ ThrowIllegalAccessErrorClass(this, dex_access_to);
+ }
+ return false;
+ }
+ DCHECK_EQ(this->CanAccessMember(access_to, field->GetAccessFlags()),
+ this->CanAccessMember(dex_access_to, field->GetAccessFlags()));
+ }
+ if (LIKELY(this->CanAccessMember(access_to, field->GetAccessFlags()))) {
+ return true;
+ }
+ if (throw_on_failure) {
+ ThrowIllegalAccessErrorField(this, field);
+ }
+ return false;
+}
+
+template <bool throw_on_failure, InvokeType throw_invoke_type>
+inline bool Class::CanAccessResolvedMethod(Class* access_to, ArtMethod* method,
+ uint32_t method_idx) {
+ COMPILE_ASSERT(throw_on_failure || throw_invoke_type == kStatic, non_default_throw_invoke_type);
+ if (UNLIKELY(!this->CanAccess(access_to))) {
+ // The referrer class can't access the method's declaring class but may still be able
+ // to access the method if the MethodId specifies an accessible subclass of the declaring
+ // class rather than the declaring class itself.
+ DexCache* referrer_dex_cache = this->GetDexCache();
+ uint32_t class_idx = referrer_dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_;
+    // The referenced class has already been resolved with the method, so get it from the dex cache.
+ Class* dex_access_to = referrer_dex_cache->GetResolvedType(class_idx);
+ DCHECK(dex_access_to != nullptr);
+ if (UNLIKELY(!this->CanAccess(dex_access_to))) {
+ if (throw_on_failure) {
+ ThrowIllegalAccessErrorClassForMethodDispatch(this, dex_access_to,
+ method, throw_invoke_type);
+ }
+ return false;
+ }
+ DCHECK_EQ(this->CanAccessMember(access_to, method->GetAccessFlags()),
+ this->CanAccessMember(dex_access_to, method->GetAccessFlags()));
+ }
+ if (LIKELY(this->CanAccessMember(access_to, method->GetAccessFlags()))) {
+ return true;
+ }
+ if (throw_on_failure) {
+ ThrowIllegalAccessErrorMethod(this, method);
+ }
+ return false;
+}
+
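Both helpers use a bool template parameter so one body serves two call sites: a silent accessibility query and a checking path that raises IllegalAccessError. A standalone sketch of that pattern; the types and the printed stand-in for throwing are illustrative, not ART code:

// One implementation, two instantiations: the reporting branch is dead code
// in the <false> variant and costs nothing there.
#include <iostream>

struct Member {
  bool accessible;
};

template <bool throw_on_failure>
bool CanAccessMember(const Member& m) {
  if (m.accessible) {
    return true;
  }
  if (throw_on_failure) {
    std::cout << "would throw IllegalAccessError here\n";
  }
  return false;
}

int main() {
  Member hidden{false};
  CanAccessMember<false>(hidden);  // quiet query, e.g. during compilation
  CanAccessMember<true>(hidden);   // runtime check that reports the failure
  return 0;
}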
inline bool Class::IsSubClass(const Class* klass) const {
DCHECK(!IsInterface()) << PrettyClass(this);
DCHECK(!IsArrayClass()) << PrettyClass(this);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 9aa23d9..d751363 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_CLASS_H_
#include "gc/heap.h"
+#include "invoke_type.h"
#include "modifiers.h"
#include "object.h"
#include "primitive.h"
@@ -449,6 +450,20 @@
return this->IsInSamePackage(access_to);
}
+ // Can this class access a resolved field?
+  // Note that access to the field's class is checked and this may require looking up the class
+ // referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
+ template <bool throw_on_failure>
+ bool CanAccessResolvedField(Class* access_to, ArtField* field,
+ uint32_t field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can this class access a resolved method?
+  // Note that access to the method's class is checked and this may require looking up the class
+ // referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
+ template <bool throw_on_failure, InvokeType throw_invoke_type = kStatic>
+ bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
+ uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool IsSubClass(const Class* klass) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2af569a..2591224 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -357,24 +357,17 @@
}
static gc::CollectorType ParseCollectorType(const std::string& option) {
- std::vector<std::string> gc_options;
- Split(option, ',', gc_options);
- gc::CollectorType collector_type = gc::kCollectorTypeNone;
- for (size_t i = 0; i < gc_options.size(); ++i) {
- if (gc_options[i] == "MS" || gc_options[i] == "nonconcurrent") {
- collector_type = gc::kCollectorTypeMS;
- } else if (gc_options[i] == "CMS" || gc_options[i] == "concurrent") {
- collector_type = gc::kCollectorTypeCMS;
- } else if (gc_options[i] == "SS") {
- collector_type = gc::kCollectorTypeSS;
- } else if (gc_options[i] == "GSS") {
- collector_type = gc::kCollectorTypeGSS;
- } else {
- LOG(WARNING) << "Ignoring unknown -Xgc option: " << gc_options[i];
- return gc::kCollectorTypeNone;
- }
+ if (option == "MS" || option == "nonconcurrent") {
+ return gc::kCollectorTypeMS;
+ } else if (option == "CMS" || option == "concurrent") {
+ return gc::kCollectorTypeCMS;
+ } else if (option == "SS") {
+ return gc::kCollectorTypeSS;
+ } else if (option == "GSS") {
+ return gc::kCollectorTypeGSS;
+ } else {
+ return gc::kCollectorTypeNone;
}
- return collector_type;
}
Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, bool ignore_unrecognized) {
@@ -409,6 +402,8 @@
parsed->max_spins_before_thin_lock_inflation_ = Monitor::kDefaultMaxSpinsBeforeThinLockInflation;
parsed->low_memory_mode_ = false;
parsed->use_tlab_ = false;
+ parsed->verify_pre_gc_heap_ = false;
+ parsed->verify_post_gc_heap_ = kIsDebugBuild;
parsed->compiler_callbacks_ = nullptr;
parsed->is_zygote_ = false;
@@ -594,15 +589,31 @@
} else if (option == "-Xint") {
parsed->interpreter_only_ = true;
} else if (StartsWith(option, "-Xgc:")) {
- gc::CollectorType collector_type = ParseCollectorType(option.substr(strlen("-Xgc:")));
- if (collector_type != gc::kCollectorTypeNone) {
- parsed->collector_type_ = collector_type;
+ std::vector<std::string> gc_options;
+ Split(option.substr(strlen("-Xgc:")), ',', gc_options);
+ for (const std::string& gc_option : gc_options) {
+ gc::CollectorType collector_type = ParseCollectorType(gc_option);
+ if (collector_type != gc::kCollectorTypeNone) {
+ parsed->collector_type_ = collector_type;
+ } else if (gc_option == "preverify") {
+ parsed->verify_pre_gc_heap_ = true;
+ } else if (gc_option == "nopreverify") {
+ parsed->verify_pre_gc_heap_ = false;
+ } else if (gc_option == "postverify") {
+ parsed->verify_post_gc_heap_ = true;
+ } else if (gc_option == "nopostverify") {
+ parsed->verify_post_gc_heap_ = false;
+ } else {
+ LOG(WARNING) << "Ignoring unknown -Xgc option: " << gc_option;
+ }
}
} else if (StartsWith(option, "-XX:BackgroundGC=")) {
- gc::CollectorType collector_type = ParseCollectorType(
- option.substr(strlen("-XX:BackgroundGC=")));
+ const std::string substring = option.substr(strlen("-XX:BackgroundGC="));
+ gc::CollectorType collector_type = ParseCollectorType(substring);
if (collector_type != gc::kCollectorTypeNone) {
parsed->background_collector_type_ = collector_type;
+ } else {
+ LOG(WARNING) << "Ignoring unknown -XX:BackgroundGC option: " << substring;
}
} else if (option == "-XX:+DisableExplicitGC") {
parsed->is_explicit_gc_disabled_ = true;
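With the comma-splitting moved out of ParseCollectorType, one -Xgc: value can now combine a collector name with heap-verification toggles. A standalone sketch of how such a flag decomposes; the parsing below is simplified and the output is illustrative only:

// Decomposing e.g. "-Xgc:SS,preverify,nopostverify" into its tokens.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  std::string option = "SS,preverify,nopostverify";  // payload after -Xgc:
  std::vector<std::string> gc_options;
  std::stringstream ss(option);
  for (std::string tok; std::getline(ss, tok, ','); ) {
    gc_options.push_back(tok);
  }
  for (const std::string& gc_option : gc_options) {
    if (gc_option == "MS" || gc_option == "nonconcurrent" ||
        gc_option == "CMS" || gc_option == "concurrent" ||
        gc_option == "SS" || gc_option == "GSS") {
      std::cout << "collector type: " << gc_option << "\n";
    } else if (gc_option == "preverify" || gc_option == "nopreverify" ||
               gc_option == "postverify" || gc_option == "nopostverify") {
      std::cout << "verification toggle: " << gc_option << "\n";
    } else {
      std::cout << "ignoring unknown -Xgc option: " << gc_option << "\n";
    }
  }
  return 0;
}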
@@ -987,7 +998,9 @@
options->long_pause_log_threshold_,
options->long_gc_log_threshold_,
options->ignore_max_footprint_,
- options->use_tlab_);
+ options->use_tlab_,
+ options->verify_pre_gc_heap_,
+ options->verify_post_gc_heap_);
dump_gc_performance_on_shutdown_ = options->dump_gc_performance_on_shutdown_;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 30ab787..9d48631 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -106,6 +106,8 @@
bool interpreter_only_;
bool is_explicit_gc_disabled_;
bool use_tlab_;
+ bool verify_pre_gc_heap_;
+ bool verify_post_gc_heap_;
size_t long_pause_log_threshold_;
size_t long_gc_log_threshold_;
bool dump_gc_performance_on_shutdown_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 621e350..e7fd660 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1626,6 +1626,8 @@
QUICK_ENTRY_POINT_INFO(pAllocArray),
QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck),
QUICK_ENTRY_POINT_INFO(pAllocObject),
+ QUICK_ENTRY_POINT_INFO(pAllocObjectResolved),
+ QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized),
QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck),
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 5cf234d..74c3e33 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -25,47 +25,47 @@
namespace art {
namespace verifier {
-const DexFile::CodeItem* MethodVerifier::CodeItem() const {
+inline const DexFile::CodeItem* MethodVerifier::CodeItem() const {
return code_item_;
}
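Each definition in this hunk gains the inline keyword; without it, a function defined in an -inl.h header pulled into several translation units breaks the one-definition rule at link time. A minimal standalone illustration of the correct pattern; the header name is hypothetical:

// In a hypothetical twice-inl.h: inline lets every .cc that includes the
// header carry its own copy of the definition without a link-time clash.
inline int Twice(int x) {
  return 2 * x;
}

int main() {
  return Twice(21) == 42 ? 0 : 1;
}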
-RegisterLine* MethodVerifier::GetRegLine(uint32_t dex_pc) {
+inline RegisterLine* MethodVerifier::GetRegLine(uint32_t dex_pc) {
return reg_table_.GetLine(dex_pc);
}
-const InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) const {
+inline const InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) const {
return insn_flags_[index];
}
-mirror::ClassLoader* MethodVerifier::GetClassLoader() {
+inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
return class_loader_->get();
}
-mirror::DexCache* MethodVerifier::GetDexCache() {
+inline mirror::DexCache* MethodVerifier::GetDexCache() {
return dex_cache_->get();
}
-MethodReference MethodVerifier::GetMethodReference() const {
+inline MethodReference MethodVerifier::GetMethodReference() const {
return MethodReference(dex_file_, dex_method_idx_);
}
-uint32_t MethodVerifier::GetAccessFlags() const {
+inline uint32_t MethodVerifier::GetAccessFlags() const {
return method_access_flags_;
}
-bool MethodVerifier::HasCheckCasts() const {
+inline bool MethodVerifier::HasCheckCasts() const {
return has_check_casts_;
}
-bool MethodVerifier::HasVirtualOrInterfaceInvokes() const {
+inline bool MethodVerifier::HasVirtualOrInterfaceInvokes() const {
return has_virtual_or_interface_invokes_;
}
-bool MethodVerifier::HasFailures() const {
+inline bool MethodVerifier::HasFailures() const {
return !failure_messages_.empty();
}
-const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
+inline const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
DCHECK(!HasFailures());
const RegType& result = ResolveClassAndCheckAccess(class_idx);
DCHECK(!HasFailures());