Merge "ARM: Generate $t mapping symbol to indicate thumb2 code"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1d8b65a..2939961 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -64,6 +64,7 @@
ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
+ART_GTEST_dex_method_iterator_test_TARGET_DEPS := $(TARGET_CORE_JARS)
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
@@ -200,7 +201,7 @@
LOCAL_CFLAGS := $(ART_TARGET_CFLAGS)
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
-LOCAL_SHARED_LIBRARIES := libcutils libartd libartd-compiler libdl
+LOCAL_SHARED_LIBRARIES := libartd libartd-compiler libdl
LOCAL_STATIC_LIBRARIES += libgtest_libc++
LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
@@ -216,8 +217,7 @@
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
LOCAL_SHARED_LIBRARIES := libartd libartd-compiler
-LOCAL_STATIC_LIBRARIES := libcutils
-LOCAL_STATIC_LIBRARIES += libgtest_libc++_host
+LOCAL_STATIC_LIBRARIES := libgtest_libc++_host
LOCAL_LDLIBS += -ldl -lpthread
LOCAL_MULTILIB := both
LOCAL_CLANG := $(ART_HOST_CLANG)
@@ -237,6 +237,11 @@
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
+ART_GTEST_TARGET_ANDROID_ROOT := '/system'
+ifneq ($(ART_TEST_ANDROID_ROOT),)
+ ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
+endif
+
# Define a make rule for a target device gtest.
# $(1): gtest name - the name of the test we're building such as leb128_test.
# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
@@ -250,7 +255,8 @@
$$(ART_GTEST_$(1)_TARGET_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
- $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
+ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar
.PHONY: $$(gtest_rule)
$$(gtest_rule): test-art-target-sync
@@ -258,7 +264,7 @@
$(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID
$(hide) adb shell chmod 755 $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
- (adb shell "LD_LIBRARY_PATH=$(3) \
+ (adb shell "LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
$(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
&& (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \
&& $$(call ART_TEST_PASSED,$$@)) \
@@ -385,8 +391,7 @@
else # host
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
- LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libz-host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
+ LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
@@ -421,6 +426,8 @@
art_gtest_extra_c_includes :=
art_gtest_extra_shared_libraries :=
art_gtest_name :=
+ library_path :=
+ 2nd_library_path :=
endef # define-art-gtest
ifeq ($(ART_BUILD_TARGET),true)
@@ -512,6 +519,7 @@
ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
+ART_GTEST_TARGET_ANDROID_ROOT :=
ART_GTEST_class_linker_test_DEX_DEPS :=
ART_GTEST_compiler_driver_test_DEX_DEPS :=
ART_GTEST_dex_file_test_DEX_DEPS :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index bded51b..d29d248 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -37,7 +37,8 @@
core_pic_infix :=
ifeq ($(1),optimizing)
- core_compile_options += --compiler-backend=Optimizing
+ # TODO: Use optimizing once all backends can compile a boot image.
+ core_compile_options += --compiler-backend=Quick
core_infix := -optimizing
endif
ifeq ($(1),interpreter)
@@ -125,7 +126,7 @@
core_pic_infix :=
ifeq ($(1),optimizing)
- core_compile_options += --compiler-backend=Optimizing
+ core_compile_options += --compiler-backend=Quick
core_infix := -optimizing
endif
ifeq ($(1),interpreter)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 172c96c..3edd913 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -261,15 +261,18 @@
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
ifeq ($$(art_target_or_host),host)
- LOCAL_LDLIBS += -ldl -lpthread
+ # For compiler driver TLS.
+ LOCAL_LDLIBS += -lpthread
endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+ # Vixl assembly support for ARM64 targets.
+ LOCAL_SHARED_LIBRARIES += libvixl
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libvixl
+ # For atrace.
+ LOCAL_SHARED_LIBRARIES += libcutils
include $(BUILD_SHARED_LIBRARY)
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
endif
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c61e991..ac72a33 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -122,8 +122,8 @@
return -1;
}
-size_t CodeGenerator::FindTwoFreeConsecutiveEntries(bool* array, size_t length) {
- for (size_t i = 0; i < length - 1; ++i) {
+size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
+ for (size_t i = 0; i < length - 1; i += 2) {
if (!array[i] && !array[i + 1]) {
array[i] = true;
array[i + 1] = true;
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index bf9d2c0..71f0b1b 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -190,7 +190,7 @@
virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;
static size_t FindFreeEntry(bool* array, size_t length);
- static size_t FindTwoFreeConsecutiveEntries(bool* array, size_t length);
+ static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a06860a..9ed1969 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -296,7 +296,8 @@
}
case Primitive::kPrimDouble: {
- int reg = FindTwoFreeConsecutiveEntries(blocked_fpu_registers_, kNumberOfSRegisters);
+ int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
+ DCHECK_EQ(reg % 2, 0);
return Location::FpuRegisterPairLocation(reg, reg + 1);
}
@@ -341,6 +342,14 @@
blocked_fpu_registers_[S21] = true;
blocked_fpu_registers_[S22] = true;
blocked_fpu_registers_[S23] = true;
+ blocked_fpu_registers_[S24] = true;
+ blocked_fpu_registers_[S25] = true;
+ blocked_fpu_registers_[S26] = true;
+ blocked_fpu_registers_[S27] = true;
+ blocked_fpu_registers_[S28] = true;
+ blocked_fpu_registers_[S29] = true;
+ blocked_fpu_registers_[S30] = true;
+ blocked_fpu_registers_[S31] = true;
UpdateBlockedPairRegisters();
}
@@ -446,7 +455,7 @@
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(stack_index);
+ return Location::QuickParameter(index, stack_index);
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
@@ -561,12 +570,13 @@
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ Mov(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ LoadFromOffset(kLoadWord, destination.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
} else {
DCHECK(source.IsDoubleStackSlot());
if (destination.AsRegisterPairLow<Register>() == R1) {
@@ -588,20 +598,21 @@
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegisterPair()) {
- __ Mov(calling_convention.GetRegisterAt(argument_index),
+ __ Mov(calling_convention.GetRegisterAt(register_index),
source.AsRegisterPairLow<Register>());
__ StoreToOffset(kStoreWord, source.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1));
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else {
DCHECK(source.IsDoubleStackSlot());
__ LoadFromOffset(
- kLoadWord, calling_convention.GetRegisterAt(argument_index), SP, source.GetStackIndex());
+ kLoadWord, calling_convention.GetRegisterAt(register_index), SP, source.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0, SP, source.GetHighStackIndex(kArmWordSize));
- __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(stack_index + 1));
}
} else {
DCHECK(destination.IsDoubleStackSlot());
@@ -616,11 +627,12 @@
}
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
- __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(argument_index),
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
+ __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(register_index),
SP, destination.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0,
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
__ StoreToOffset(kStoreWord, R0, SP, destination.GetHighStackIndex(kArmWordSize));
} else if (source.IsFpuRegisterPair()) {
__ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index fe999c2..5d504c6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -538,7 +538,6 @@
M(DoubleConstant) \
M(Div) \
M(FloatConstant) \
- M(Mul) \
M(LoadClass) \
M(Neg) \
M(NewArray) \
@@ -986,6 +985,44 @@
// Will be generated at use site.
}
+void LocationsBuilderARM64::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 267edca..2d6d14f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -393,7 +393,9 @@
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(index);
+ // On X86, the register index and stack index of a quick parameter is the same, since
+    // we are passing floating point values in core registers.
+ return Location::QuickParameter(index, index);
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -453,12 +455,13 @@
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ movl(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ movl(destination.AsRegisterPairHigh<Register>(), Address(ESP,
- calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
+ calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
__ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex()));
@@ -467,19 +470,20 @@
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegister()) {
- __ movl(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
- __ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)),
+ __ movl(calling_convention.GetRegisterAt(register_index), source.AsRegisterPairLow<Register>());
+ __ movl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)),
source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movl(calling_convention.GetRegisterAt(argument_index),
+ __ movl(calling_convention.GetRegisterAt(register_index),
Address(ESP, source.GetStackIndex()));
__ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
- __ popl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)));
+ __ popl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)));
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
@@ -495,10 +499,11 @@
source.AsRegisterPairHigh<Register>());
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
__ movl(Address(ESP, destination.GetStackIndex()),
- calling_convention.GetRegisterAt(argument_index));
- DCHECK_EQ(calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize(),
+ calling_convention.GetRegisterAt(register_index));
+ DCHECK_EQ(calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize(),
static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize)));
} else if (source.IsFpuRegister()) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 03951e2..803a09b 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -408,11 +408,7 @@
MUL_TEST(LONG, MulLong);
#endif
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnMulIntLit8) {
-#else
TEST(CodegenTest, ReturnMulIntLit8) {
-#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
@@ -421,11 +417,7 @@
TestCode(data, true, 12);
}
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnMulIntLit16) {
-#else
TEST(CodegenTest, ReturnMulIntLit16) {
-#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 94aded6..d7295aa 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -228,13 +228,18 @@
return GetPayload() - kStackIndexBias + word_size;
}
- static Location QuickParameter(uint32_t parameter_index) {
- return Location(kQuickParameter, parameter_index);
+ static Location QuickParameter(uint16_t register_index, uint16_t stack_index) {
+ return Location(kQuickParameter, register_index << 16 | stack_index);
}
- uint32_t GetQuickParameterIndex() const {
+ uint32_t GetQuickParameterRegisterIndex() const {
DCHECK(IsQuickParameter());
- return GetPayload();
+ return GetPayload() >> 16;
+ }
+
+ uint32_t GetQuickParameterStackIndex() const {
+ DCHECK(IsQuickParameter());
+ return GetPayload() & 0xFFFF;
}
bool IsQuickParameter() const {
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index 2ef826b..4f39c42 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -38,8 +38,8 @@
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler libziparchive-host,art/compiler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler libziparchive-host,art/compiler,host,debug))
endif
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index eb3b024..f2dd1ee 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -84,11 +84,11 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
include external/libcxx/libcxx.mk
+ # For disassembler_arm64.
+ LOCAL_SHARED_LIBRARIES += libvixl
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libvixl
include $(BUILD_SHARED_LIBRARY)
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
include $(BUILD_HOST_SHARED_LIBRARY)
endif
endef
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index 25d10bd..a8f120f 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -25,14 +25,14 @@
$(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libart-disassembler libart-compiler,art/disassembler art/compiler,target,ndebug))
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler libart-compiler,art/disassembler art/compiler,target,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler libartd-compiler,art/disassembler art/compiler,target,debug))
endif
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler libart-compiler,art/disassembler art/compiler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler libart-compiler,art/disassembler art/compiler,host,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler libartd-compiler,art/disassembler art/compiler,host,debug))
endif
########################################################################
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 6f6dcbc..a2fc24a 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -457,15 +457,21 @@
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES)
LOCAL_C_INCLUDES += art/sigchainlib
- LOCAL_SHARED_LIBRARIES += liblog libnativehelper libnativebridge
+ LOCAL_SHARED_LIBRARIES := libnativehelper libnativebridge libsigchain
include external/libcxx/libcxx.mk
LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libdl libutils libsigchain
+ LOCAL_SHARED_LIBRARIES += libdl
+ # ZipArchive support, the order matters here to get all symbols.
LOCAL_STATIC_LIBRARIES := libziparchive libz
+ # For android::FileMap used by libziparchive.
+ LOCAL_SHARED_LIBRARIES += libutils
+ # For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
+ LOCAL_SHARED_LIBRARIES += libcutils
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
- LOCAL_SHARED_LIBRARIES += libsigchain
+ LOCAL_SHARED_LIBRARIES += libziparchive-host
+ # For ashmem_create_region.
+ LOCAL_STATIC_LIBRARIES += libcutils
endif
ifeq ($$(ART_USE_PORTABLE_COMPILER),true)
include $$(LLVM_GEN_INTRINSICS_MK)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d9061c8..5409d54 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4373,7 +4373,7 @@
// Send a series of heap segment chunks.
HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
if (native) {
-#ifdef USE_DLMALLOC
+#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC)
dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
#else
UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index c820b19..c7ecbc8 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -30,10 +30,12 @@
#include "../../bionic/libc/upstream-dlmalloc/malloc.h"
+#ifdef HAVE_ANDROID_OS
// Define dlmalloc routines from bionic that cannot be included directly because of redefining
// symbols from the include above.
extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg);
extern "C" int dlmalloc_trim(size_t);
+#endif
// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
// pages back to the kernel.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index c0008aa..6730dfe 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1017,6 +1017,8 @@
// We never move things in the native heap, so we can finish the GC at this point.
FinishGC(self, collector::kGcTypeNone);
size_t native_reclaimed = 0;
+
+#ifdef HAVE_ANDROID_OS
// Only trim the native heap if we don't care about pauses.
if (!CareAboutPauseTimes()) {
#if defined(USE_DLMALLOC)
@@ -1029,6 +1031,7 @@
UNIMPLEMENTED(WARNING) << "Add trimming support";
#endif
}
+#endif // HAVE_ANDROID_OS
uint64_t end_ns = NanoTime();
VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
<< ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 8012451..6e792d4 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -88,6 +88,23 @@
}
}
+// Note: returns true on failure.
+ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
+ const char* error_msg, uint32_t work_insn_idx) {
+ if (kIsDebugBuild) {
+ // In a debug build, abort if the error condition is wrong.
+ DCHECK(condition) << error_msg << work_insn_idx;
+ } else {
+ // In a non-debug build, just fail the class.
+ if (!condition) {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << error_msg << work_insn_idx;
+ return true;
+ }
+ }
+
+ return false;
+}
+
MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
mirror::Class* klass,
bool allow_soft_failures,
@@ -2014,7 +2031,11 @@
while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
instance_of_idx--;
}
- CHECK(insn_flags_[instance_of_idx].IsOpcode());
+ if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+ "Unable to get previous instruction of if-eqz/if-nez for work index ",
+ work_insn_idx_)) {
+ break;
+ }
} else {
break;
}
@@ -2072,7 +2093,11 @@
while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
move_idx--;
}
- CHECK(insn_flags_[move_idx].IsOpcode());
+ if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+ "Unable to get previous instruction of if-eqz/if-nez for work index ",
+ work_insn_idx_)) {
+ break;
+ }
const Instruction* move_inst = Instruction::At(code_item_->insns_ + move_idx);
switch (move_inst->Opcode()) {
case Instruction::MOVE_OBJECT:
@@ -3035,7 +3060,12 @@
// odd case, but nothing to do
} else {
common_super = &common_super->Merge(exception, &reg_types_);
- CHECK(reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super));
+ if (FailOrAbort(this,
+ reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super),
+ "java.lang.Throwable is not assignable-from common_super at ",
+ work_insn_idx_)) {
+ break;
+ }
}
}
}
@@ -3360,18 +3390,32 @@
if (klass->IsInterface()) {
// Derive Object.class from Class.class.getSuperclass().
mirror::Class* object_klass = klass->GetClass()->GetSuperClass();
- CHECK(object_klass->IsObjectClass());
+ if (FailOrAbort(this, object_klass->IsObjectClass(),
+ "Failed to find Object class in quickened invoke receiver",
+ work_insn_idx_)) {
+ return nullptr;
+ }
dispatch_class = object_klass;
} else {
dispatch_class = klass;
}
- CHECK(dispatch_class->HasVTable()) << PrettyDescriptor(dispatch_class);
+ if (FailOrAbort(this, dispatch_class->HasVTable(),
+ "Receiver class has no vtable for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength())
- << PrettyDescriptor(klass) << " in method "
- << PrettyMethod(dex_method_idx_, *dex_file_, true);
+ if (FailOrAbort(this, static_cast<int32_t>(vtable_index) < dispatch_class->GetVTableLength(),
+ "Receiver class has not enough vtable slots for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
- CHECK(!self_->IsExceptionPending());
+ if (FailOrAbort(this, !Thread::Current()->IsExceptionPending(),
+ "Unexpected exception pending for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
return res_method;
}
@@ -3384,7 +3428,14 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
return nullptr;
}
- CHECK(!res_method->IsDirect() && !res_method->IsStatic());
+ if (FailOrAbort(this, !res_method->IsDirect(), "Quick-invoked method is direct at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
+ if (FailOrAbort(this, !res_method->IsStatic(), "Quick-invoked method is static at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
diff --git a/test/419-long-parameter/expected.txt b/test/419-long-parameter/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/419-long-parameter/expected.txt
diff --git a/test/419-long-parameter/info.txt b/test/419-long-parameter/info.txt
new file mode 100644
index 0000000..5eac977
--- /dev/null
+++ b/test/419-long-parameter/info.txt
@@ -0,0 +1,3 @@
+Regression test for the long parameter passed both in stack and register
+on 32bits architectures. The move to hard float ABI makes it so that the
+register index does not necessarily match the stack index anymore.
diff --git a/test/419-long-parameter/src/Main.java b/test/419-long-parameter/src/Main.java
new file mode 100644
index 0000000..808b7f6
--- /dev/null
+++ b/test/419-long-parameter/src/Main.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ if ($opt$TestCallee(1.0, 2.0, 1L, 2L) != 1L) {
+ throw new Error("Unexpected result");
+ }
+ if ($opt$TestCaller() != 1L) {
+ throw new Error("Unexpected result");
+ }
+ }
+
+ public static long $opt$TestCallee(double a, double b, long c, long d) {
+ return d - c;
+ }
+
+ public static long $opt$TestCaller() {
+ return $opt$TestCallee(1.0, 2.0, 1L, 2L);
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0efd71b..b281110 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -308,23 +308,33 @@
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
003-omnibus-opcodes \
+ 004-JniTest \
006-args \
+ 007-count10 \
011-array-copy \
018-stack-overflow \
+ 028-array-write \
036-finalizer \
044-proxy \
+ 067-preemptive-unpark \
070-nio-buffer \
072-precise-gc \
+ 080-oom-throw \
082-inline-execute \
083-compiler-regressions \
+ 084-class-init \
+ 085-old-style-inner-class \
093-serialization \
096-array-copy-concurrent-gc \
+ 098-ddmc \
100-reflect2 \
106-exceptions2 \
107-int-math2 \
+ 110-field-access \
121-modifiers \
122-npe \
123-compiler-regressions-mt \
+ 301-abstract-protected \
405-optimizing-long-allocator \
407-arrays \
410-floats \
@@ -334,8 +344,11 @@
414-static-fields \
414-optimizing-arith-sub \
415-optimizing-arith-neg \
+ 416-optimizing-arith-not \
417-optimizing-arith-div \
+ 419-long-parameter \
700-LoadArgRegs \
+ 702-LargeBranchOffset \
800-smali
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))