Merge "Fixed ArtMethod::GetQuickFrameInfo() for proxy methods"
diff --git a/Android.mk b/Android.mk
index 282b179..d11d011 100644
--- a/Android.mk
+++ b/Android.mk
@@ -23,6 +23,7 @@
#
include $(art_path)/build/Android.common_path.mk
+include $(art_path)/build/Android.oat.mk
# Following the example of build's dont_bother for clean targets.
art_dont_bother := false
@@ -41,20 +42,14 @@
.PHONY: clean-oat-host
clean-oat-host:
- rm -f $(HOST_CORE_IMG_OUT)
- rm -f $(HOST_CORE_OAT_OUT)
+ rm -f $(HOST_CORE_IMG_OUTS)
+ rm -f $(HOST_CORE_OAT_OUTS)
rm -f $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/*.odex
ifneq ($(HOST_PREFER_32_BIT),true)
- rm -f $(2ND_HOST_CORE_IMG_OUT)
- rm -f $(2ND_HOST_CORE_OAT_OUT)
rm -f $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/*.odex
endif
- rm -f $(TARGET_CORE_IMG_OUT)
- rm -f $(TARGET_CORE_OAT_OUT)
-ifdef TARGET_2ND_ARCH
- rm -f $(2ND_TARGET_CORE_IMG_OUT)
- rm -f $(2ND_TARGET_CORE_OAT_OUT)
-endif
+ rm -f $(TARGET_CORE_IMG_OUTS)
+ rm -f $(TARGET_CORE_OAT_OUTS)
rm -rf $(DEXPREOPT_PRODUCT_DIR_FULL_PATH)
rm -f $(TARGET_OUT_UNSTRIPPED)/system/framework/*.odex
rm -f $(TARGET_OUT_UNSTRIPPED)/system/framework/*/*.oat
@@ -109,7 +104,6 @@
include $(art_path)/patchoat/Android.mk
include $(art_path)/dalvikvm/Android.mk
include $(art_path)/tools/Android.mk
-include $(art_path)/build/Android.oat.mk
include $(art_path)/sigchainlib/Android.mk
@@ -143,10 +137,17 @@
# Sync test files to the target, depends upon all things that must be pushed to the target.
.PHONY: test-art-target-sync
+ifeq ($(ART_TEST_ANDROID_ROOT),)
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
adb root
adb wait-for-device remount
adb sync
+else
+test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
+ adb root
+ adb wait-for-device push $(ANDROID_PRODUCT_OUT)/system $(ART_TEST_ANDROID_ROOT)
+ adb push $(ANDROID_PRODUCT_OUT)/data /data
+endif
# Undefine variable now it has served its purpose.
TEST_ART_TARGET_SYNC_DEPS :=
@@ -365,10 +366,10 @@
build-art: build-art-host build-art-target
.PHONY: build-art-host
-build-art-host: $(HOST_OUT_EXECUTABLES)/art $(ART_HOST_DEPENDENCIES) $(HOST_CORE_IMG_OUT) $(2ND_HOST_CORE_IMG_OUT)
+build-art-host: $(HOST_OUT_EXECUTABLES)/art $(ART_HOST_DEPENDENCIES) $(HOST_CORE_IMG_OUTS)
.PHONY: build-art-target
-build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUT) $(2ND_TARGET_CORE_IMG_OUT)
+build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUTS)
########################################################################
# targets to switch back and forth from libdvm to libart
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 39a734d..39e78fa 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -42,10 +42,17 @@
$(error Do not know what to do with this multi-target configuration!)
endif
else
- ART_PHONY_TEST_TARGET_SUFFIX := 32
- 2ND_ART_PHONY_TEST_TARGET_SUFFIX :=
- ART_TARGET_ARCH_32 := $(TARGET_ARCH)
- ART_TARGET_ARCH_64 :=
+ ifneq ($(filter %64,$(TARGET_ARCH)),)
+ ART_PHONY_TEST_TARGET_SUFFIX := 64
+ 2ND_ART_PHONY_TEST_TARGET_SUFFIX :=
+ ART_TARGET_ARCH_32 :=
+ ART_TARGET_ARCH_64 := $(TARGET_ARCH)
+ else
+ ART_PHONY_TEST_TARGET_SUFFIX := 32
+ 2ND_ART_PHONY_TEST_TARGET_SUFFIX :=
+ ART_TARGET_ARCH_32 := $(TARGET_ARCH)
+ ART_TARGET_ARCH_64 :=
+ endif
endif
ART_HOST_SHLIB_EXTENSION := $(HOST_SHLIB_SUFFIX)
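
Note on the single-arch logic above: $(filter %64,$(TARGET_ARCH)) is non-empty exactly when the lone target architecture name ends in "64" (arm64, x86_64, mips64), which routes the build to the 64-bit test suffix. A minimal sketch of the pattern, with a hypothetical helper variable:

    TARGET_ARCH := arm64
    is_64bit := $(filter %64,$(TARGET_ARCH))  # "arm64" -> non-empty, 64-bit branch taken
    # With TARGET_ARCH := x86, is_64bit would be empty and the 32-bit branch is taken.
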
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 0e930ee..7b38e5e 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -84,13 +84,6 @@
endif
#
-# Used to enable optimizing compiler
-#
-ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
-DEX2OAT_FLAGS := --compiler-backend=Optimizing
-endif
-
-#
# Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
#
art_default_gc_type ?= CMS
@@ -102,6 +95,9 @@
-include $(LLVM_ROOT_PATH)/llvm.mk
endif
+ART_HOST_CFLAGS :=
+ART_TARGET_CFLAGS :=
+
# Clang build support.
# Host.
@@ -160,15 +156,38 @@
# Enable float equality warnings.
art_clang_cflags += -Wfloat-equal
+# Enable warning of converting ints to void*.
+art_clang_cflags += -Wint-to-void-pointer-cast
+
+# GCC-only warnings.
+art_gcc_cflags := -Wunused-but-set-parameter
+# Suggest const: too many false positives, but good for a trial run.
+# -Wsuggest-attribute=const
+# Useless casts: too many, as we need to be 32/64 agnostic, but the compiler knows.
+# -Wuseless-cast
+# Zero-as-null: Have to convert all NULL and "diagnostic ignore" all includes like libnativehelper
+# that are still stuck pre-C++11.
+# -Wzero-as-null-pointer-constant \
+# Suggest final: Have to move to a more recent GCC.
+# -Wsuggest-final-types
+
+
ifeq ($(ART_HOST_CLANG),true)
- ART_HOST_CFLAGS += $(art_clang_cflags)
+ # Bug: 15446488. We don't omit the frame pointer to work around
+ # clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
+ ART_HOST_CFLAGS += $(art_clang_cflags) -fno-omit-frame-pointer
+else
+ ART_HOST_CFLAGS += $(art_gcc_cflags)
endif
ifeq ($(ART_TARGET_CLANG),true)
ART_TARGET_CFLAGS += $(art_clang_cflags)
+else
+ ART_TARGET_CFLAGS += $(art_gcc_cflags)
endif
-# Clear local variable now its use has ended.
+# Clear local variables now their use has ended.
art_clang_cflags :=
+art_gcc_cflags :=
ART_CPP_EXTENSION := .cc
@@ -188,14 +207,19 @@
-Wall \
-Werror \
-Wextra \
- -Wno-sign-promo \
- -Wno-unused-parameter \
-Wstrict-aliasing \
-fstrict-aliasing \
-Wunreachable-code \
+ -Wno-conversion-null \
+ -Wredundant-decls \
+ -Wshadow \
-fvisibility=protected \
$(art_default_gc_type_cflags)
+# Missing declarations: too many at the moment, as we use "extern" quite a bit.
+# -Wmissing-declarations \
+
+
ifeq ($(ART_SMALL_MODE),true)
art_cflags += -DART_SMALL_MODE=1
endif
@@ -219,20 +243,22 @@
ifeq ($(HOST_OS),linux)
# Larger frame-size for host clang builds today
- art_host_non_debug_cflags += -Wframe-larger-than=2600
+ ifndef SANITIZE_HOST
+ art_host_non_debug_cflags += -Wframe-larger-than=2700
+ endif
art_target_non_debug_cflags += -Wframe-larger-than=1728
endif
ifndef LIBART_IMG_HOST_BASE_ADDRESS
$(error LIBART_IMG_HOST_BASE_ADDRESS unset)
endif
-ART_HOST_CFLAGS := $(art_cflags) -DANDROID_SMP=1 -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
+ART_HOST_CFLAGS += $(art_cflags) -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
-ART_TARGET_CFLAGS := $(art_cflags) -DART_TARGET -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS)
+ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS)
ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags)
@@ -257,18 +283,6 @@
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA)
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
-ifeq ($(TARGET_CPU_SMP),true)
- ART_TARGET_CFLAGS += -DANDROID_SMP=1
-else
- ifeq ($(TARGET_CPU_SMP),false)
- ART_TARGET_CFLAGS += -DANDROID_SMP=0
- else
- $(warning TARGET_CPU_SMP should be (true|false), found $(TARGET_CPU_SMP))
- # Make sure we emit barriers for the worst case.
- ART_TARGET_CFLAGS += -DANDROID_SMP=1
- endif
-endif
-
# To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16"
# ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 10695b6..281d189 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -47,25 +47,33 @@
2ND_TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core.oat
endif
+CORE_OAT_SUFFIX := .oat
+
# Core.oat locations under the out directory.
-HOST_CORE_OAT_OUT := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core.oat
+HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core
ifneq ($(HOST_PREFER_32_BIT),true)
-2ND_HOST_CORE_OAT_OUT := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core.oat
+2ND_HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core
endif
-TARGET_CORE_OAT_OUT := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core.oat
+HOST_CORE_OAT_OUTS :=
+TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core
ifdef TARGET_2ND_ARCH
-2ND_TARGET_CORE_OAT_OUT := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core.oat
+2ND_TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core
endif
+TARGET_CORE_OAT_OUTS :=
+
+CORE_IMG_SUFFIX := .art
# Core.art locations under the out directory.
-HOST_CORE_IMG_OUT := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core.art
+HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core
ifneq ($(HOST_PREFER_32_BIT),true)
-2ND_HOST_CORE_IMG_OUT := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core.art
+2ND_HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core
endif
-TARGET_CORE_IMG_OUT := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core.art
+HOST_CORE_IMG_OUTS :=
+TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core
ifdef TARGET_2ND_ARCH
-2ND_TARGET_CORE_IMG_OUT := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core.art
+2ND_TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core
endif
+TARGET_CORE_IMG_OUTS :=
# Oat location of core.art.
HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
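
The *_OUT_BASE / *_SUFFIX split above lets Android.oat.mk (further down in this change) compose per-configuration file names as base + compiler infix + pic infix + suffix. A sketch of the composition for the host primary arch, with the infixes defined later in Android.oat.mk:

    # core_image_name := $(HOST_CORE_IMG_OUT_BASE)$(core_infix)$(core_pic_infix)$(CORE_IMG_SUFFIX)
    #   default / no-pic     -> .../<arch>/core.art
    #   optimizing / pic     -> .../<arch>/core-optimizing-pic.art
    #   interpreter / no-pic -> .../<arch>/core-interpreter.art
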
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index ee72706..2493565 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -63,6 +63,12 @@
# Do you want optimizing compiler tests run?
ART_TEST_OPTIMIZING ?= $(ART_TEST_FULL)
+# Do we want to test a PIC-compiled core image?
+ART_TEST_PIC_IMAGE ?= $(ART_TEST_FULL)
+
+# Do we want to test PIC-compiled tests ("apps")?
+ART_TEST_PIC_TEST ?= $(ART_TEST_FULL)
+
# Do you want tracing tests run?
ART_TEST_TRACE ?= $(ART_TEST_FULL)
@@ -78,6 +84,9 @@
# Do you want run-tests with relocation disabled run?
ART_TEST_RUN_TEST_NO_RELOCATE ?= $(ART_TEST_FULL)
+# Do you want run-tests with prebuilding?
+ART_TEST_RUN_TEST_PREBUILD ?= true
+
# Do you want run-tests with no prebuilding enabled run?
ART_TEST_RUN_TEST_NO_PREBUILD ?= $(ART_TEST_FULL)
@@ -90,6 +99,15 @@
# Do you want run-tests without a dex2oat?
ART_TEST_RUN_TEST_NO_DEX2OAT ?= $(ART_TEST_FULL)
+# Do you want run-tests with libartd.so?
+ART_TEST_RUN_TEST_DEBUG ?= true
+
+# Do you want run-tests with libart.so?
+ART_TEST_RUN_TEST_NDEBUG ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with the host/target's second arch?
+ART_TEST_RUN_TEST_2ND_ARCH ?= true
+
# Do you want failed tests to have their artifacts cleaned up?
ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true
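
The new knobs follow the existing ART_TEST_* convention: they use ?= so they can be overridden from the environment or a local make fragment before the ART_TEST_FULL-driven defaults apply. A brief usage sketch with hypothetical overrides:

    # e.g. exported in the environment or set in a local buildspec:
    ART_TEST_PIC_IMAGE := true        # also exercise the PIC-compiled core images
    ART_TEST_PIC_TEST := true         # run the PIC-compiled run-tests
    ART_TEST_RUN_TEST_NDEBUG := true  # additionally run tests against libart.so
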
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 81f3297..86f445f 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -54,9 +54,10 @@
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
- LOCAL_SRC_FILES := $$(art_source) ../sigchainlib/sigchain.cc
+ LOCAL_SRC_FILES := $$(art_source)
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime $$(art_c_includes)
LOCAL_SHARED_LIBRARIES += $$(art_shared_libraries)
+ LOCAL_WHOLE_STATIC_LIBRARIES += libsigchain
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := $$(art_executable)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 38d3f1c..7e28b37 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -48,7 +48,7 @@
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod
-ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Nested
+ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
@@ -59,22 +59,30 @@
ART_GTEST_transaction_test_DEX_DEPS := Transaction
# The elf writer test has dependencies on core.oat.
-ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
-ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_OAT_OUT) $(2ND_TARGET_CORE_OAT_OUT)
-ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
-ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
-ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
+ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
+ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32)
+
+# TODO: document why this is needed.
+ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/arch/arch_test.cc \
+ runtime/arch/instruction_set_test.cc \
+ runtime/arch/instruction_set_features_test.cc \
runtime/arch/memcmp16_test.cc \
runtime/arch/stub_test.cc \
+ runtime/arch/arm/instruction_set_features_arm_test.cc \
+ runtime/arch/arm64/instruction_set_features_arm64_test.cc \
+ runtime/arch/mips/instruction_set_features_mips_test.cc \
+ runtime/arch/x86/instruction_set_features_x86_test.cc \
+ runtime/arch/x86_64/instruction_set_features_x86_64_test.cc \
runtime/barrier_test.cc \
runtime/base/bit_field_test.cc \
runtime/base/bit_vector_test.cc \
+ runtime/base/hash_set_test.cc \
runtime/base/hex_dump_test.cc \
runtime/base/histogram_test.cc \
runtime/base/mutex_test.cc \
@@ -82,7 +90,6 @@
runtime/base/stringprintf_test.cc \
runtime/base/timing_logger_test.cc \
runtime/base/unix_file/fd_file_test.cc \
- runtime/base/unix_file/mapped_file_test.cc \
runtime/base/unix_file/null_file_test.cc \
runtime/base/unix_file/random_access_file_utils_test.cc \
runtime/base/unix_file/string_file_test.cc \
@@ -109,8 +116,9 @@
runtime/handle_scope_test.cc \
runtime/indenter_test.cc \
runtime/indirect_reference_table_test.cc \
- runtime/instruction_set_test.cc \
runtime/intern_table_test.cc \
+ runtime/interpreter/safe_math_test.cc \
+ runtime/java_vm_ext_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
@@ -141,7 +149,7 @@
compiler/oat_test.cc \
compiler/optimizing/codegen_test.cc \
compiler/optimizing/dead_code_elimination_test.cc \
- compiler/optimizing/constant_propagation_test.cc \
+ compiler/optimizing/constant_folding_test.cc \
compiler/optimizing/dominator_test.cc \
compiler/optimizing/find_loops_test.cc \
compiler/optimizing/graph_checker_test.cc \
@@ -184,7 +192,9 @@
COMPILER_GTEST_HOST_SRC_FILES := \
$(COMPILER_GTEST_COMMON_SRC_FILES) \
- compiler/utils//assembler_thumb_test.cc \
+ compiler/utils/arm/assembler_arm32_test.cc \
+ compiler/utils/arm/assembler_thumb2_test.cc \
+ compiler/utils/assembler_thumb_test.cc \
compiler/utils/x86/assembler_x86_test.cc \
compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -200,8 +210,8 @@
LOCAL_CFLAGS := $(ART_TARGET_CFLAGS)
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
-LOCAL_SHARED_LIBRARIES := libcutils libartd libartd-compiler libdl
-LOCAL_STATIC_LIBRARIES += libgtest_libc++
+LOCAL_SHARED_LIBRARIES := libartd libartd-compiler libdl
+LOCAL_STATIC_LIBRARIES += libgtest
LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
@@ -216,8 +226,7 @@
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
LOCAL_SHARED_LIBRARIES := libartd libartd-compiler
-LOCAL_STATIC_LIBRARIES := libcutils
-LOCAL_STATIC_LIBRARIES += libgtest_libc++_host
+LOCAL_STATIC_LIBRARIES := libgtest_host
LOCAL_LDLIBS += -ldl -lpthread
LOCAL_MULTILIB := both
LOCAL_CLANG := $(ART_HOST_CLANG)
@@ -237,9 +246,15 @@
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
+ART_GTEST_TARGET_ANDROID_ROOT := '/system'
+ifneq ($(ART_TEST_ANDROID_ROOT),)
+ ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
+endif
+
# Define a make rule for a target device gtest.
# $(1): gtest name - the name of the test we're building such as leb128_test.
# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
+# $(3): LD_LIBRARY_PATH or undefined - used in case libartd.so is not in /system/lib/
define define-art-gtest-rule-target
gtest_rule := test-art-target-gtest-$(1)$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
@@ -249,7 +264,8 @@
$$(ART_GTEST_$(1)_TARGET_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
- $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
+ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar
.PHONY: $$(gtest_rule)
$$(gtest_rule): test-art-target-sync
@@ -257,7 +273,8 @@
$(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID
$(hide) adb shell chmod 755 $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
- (adb shell "$(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
+ (adb shell "LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
+ $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
&& (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \
&& $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@))
@@ -332,9 +349,10 @@
LOCAL_MODULE_TAGS := tests
endif
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
- LOCAL_SRC_FILES := $$(art_gtest_filename) sigchainlib/sigchain.cc
+ LOCAL_SRC_FILES := $$(art_gtest_filename)
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES) art/runtime $$(art_gtest_extra_c_includes)
LOCAL_SHARED_LIBRARIES += libartd $$(art_gtest_extra_shared_libraries) libart-gtest
+ LOCAL_WHOLE_STATIC_LIBRARIES += libsigchain
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
@@ -355,12 +373,26 @@
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
include $$(BUILD_EXECUTABLE)
+ library_path :=
+ 2nd_library_path :=
+ ifneq ($$(ART_TEST_ANDROID_ROOT),)
+ ifdef TARGET_2ND_ARCH
+ 2nd_library_path := $$(ART_TEST_ANDROID_ROOT)/lib
+ library_path := $$(ART_TEST_ANDROID_ROOT)/lib64
+ else
+ ifneq ($(filter %64,$(TARGET_ARCH)),)
+ library_path := $$(ART_TEST_ANDROID_ROOT)/lib64
+ else
+ library_path := $$(ART_TEST_ANDROID_ROOT)/lib
+ endif
+ endif
+ endif
ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
ifdef TARGET_2ND_ARCH
- $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),2ND_))
+ $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),2ND_,$$(2nd_library_path)))
endif
- $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),))
+ $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),,$$(library_path)))
# A rule to run the different architecture versions of the gtest.
.PHONY: test-art-target-gtest-$$(art_gtest_name)
@@ -372,8 +404,7 @@
else # host
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
- LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libz-host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
+ LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
@@ -408,6 +439,8 @@
art_gtest_extra_c_includes :=
art_gtest_extra_shared_libraries :=
art_gtest_name :=
+ library_path :=
+ 2nd_library_path :=
endef # define-art-gtest
ifeq ($(ART_BUILD_TARGET),true)
@@ -499,6 +532,7 @@
ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
+ART_GTEST_TARGET_ANDROID_ROOT :=
ART_GTEST_class_linker_test_DEX_DEPS :=
ART_GTEST_compiler_driver_test_DEX_DEPS :=
ART_GTEST_dex_file_test_DEX_DEPS :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 2becbb8..e8b363b 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -23,74 +23,210 @@
include art/build/Android.common_build.mk
+ifeq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
+ DEX2OAT_HOST_INSTRUCTION_SET_FEATURES := default
+endif
+ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
+ $(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES := default
+endif
+
# Use dex2oat debug version for better error reporting
-# $(1): 2ND_ or undefined, 2ND_ for 32-bit host builds.
+# $(1): compiler - default, optimizing or interpreter.
+# $(2): pic/no-pic
+# $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
# run-test --no-image
define create-core-oat-host-rules
-$$($(1)HOST_CORE_IMG_OUT): $$(HOST_CORE_DEX_LOCATIONS) $$(DEX2OAT_DEPENDENCY)
+ core_compile_options :=
+ core_image_name :=
+ core_oat_name :=
+ core_infix :=
+ core_pic_infix :=
+ core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
+
+ ifeq ($(1),optimizing)
+ core_compile_options += --compiler-backend=Optimizing
+ # With the optimizing compiler, we want to rerun dex2oat whenever there is
+ # a dex2oat change to catch regressions early.
+ core_dex2oat_dependency := $(DEX2OAT)
+ core_infix := -optimizing
+ endif
+ ifeq ($(1),interpreter)
+ core_compile_options += --compiler-filter=interpret-only
+ core_infix := -interpreter
+ endif
+ ifeq ($(1),default)
+ # Default has no infix, no compile options.
+ endif
+ ifneq ($(filter-out default interpreter optimizing,$(1)),)
+ # Technically this test is not precise, but hopefully good enough.
+ $$(error found $(1) expected default, interpreter or optimizing)
+ endif
+
+ ifeq ($(2),pic)
+ core_compile_options += --compile-pic
+ core_pic_infix := -pic
+ endif
+ ifeq ($(2),no-pic)
+ # No change for non-pic
+ endif
+ ifneq ($(filter-out pic no-pic,$(2)),)
+ # Technically this test is not precise, but hopefully good enough.
+ $$(error found $(2) expected pic or no-pic)
+ endif
+
+ core_image_name := $($(3)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_IMG_SUFFIX)
+ core_oat_name := $($(3)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_OAT_SUFFIX)
+
+ # Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
+ ifeq ($(3),)
+ HOST_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name)
+ else
+ HOST_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
+ endif
+ HOST_CORE_IMG_OUTS += $$(core_image_name)
+ HOST_CORE_OAT_OUTS += $$(core_oat_name)
+
+$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
+$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
+$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
+$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
@echo "host dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
- $$(hide) $$(DEX2OAT) $$(DEX2OAT_FLAGS) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
+ $$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
- $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$($(1)HOST_CORE_OAT_OUT) \
- --oat-location=$$($(1)HOST_CORE_OAT) --image=$$($(1)HOST_CORE_IMG_OUT) \
- --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(1)ART_HOST_ARCH) \
- --instruction-set-features=$$($(1)HOST_INSTRUCTION_SET_FEATURES) \
- --host --android-root=$$(HOST_OUT) --include-patch-information
+ $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+ --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
+ --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(3)ART_HOST_ARCH) \
+ --instruction-set-features=$$($(3)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) \
+ --host --android-root=$$(HOST_OUT) --include-patch-information \
+ $$(PRIVATE_CORE_COMPILE_OPTIONS)
-# This "renaming" eases declaration in art/Android.mk
-HOST_CORE_IMG_OUT$($(1)ART_PHONY_TEST_HOST_SUFFIX) := $($(1)HOST_CORE_IMG_OUT)
+$$(core_oat_name): $$(core_image_name)
-$$($(1)HOST_CORE_OAT_OUT): $$($(1)HOST_CORE_IMG_OUT)
+ # Clean up locally used variables.
+ core_dex2oat_dependency :=
+ core_compile_options :=
+ core_image_name :=
+ core_oat_name :=
+ core_infix :=
+ core_pic_infix :=
endef # create-core-oat-host-rules
-$(eval $(call create-core-oat-host-rules,))
-ifneq ($(HOST_PREFER_32_BIT),true)
-$(eval $(call create-core-oat-host-rules,2ND_))
-endif
+# $(1): compiler - default, optimizing or interpreter.
+define create-core-oat-host-rule-combination
+ $(call create-core-oat-host-rules,$(1),no-pic,)
+ $(call create-core-oat-host-rules,$(1),pic,)
+
+ ifneq ($(HOST_PREFER_32_BIT),true)
+ $(call create-core-oat-host-rules,$(1),no-pic,2ND_)
+ $(call create-core-oat-host-rules,$(1),pic,2ND_)
+ endif
+endef
+
+$(eval $(call create-core-oat-host-rule-combination,default))
+$(eval $(call create-core-oat-host-rule-combination,optimizing))
+$(eval $(call create-core-oat-host-rule-combination,interpreter))
+
define create-core-oat-target-rules
-$$($(1)TARGET_CORE_IMG_OUT): $$(TARGET_CORE_DEX_FILES) $$(DEX2OAT_DEPENDENCY)
+ core_compile_options :=
+ core_image_name :=
+ core_oat_name :=
+ core_infix :=
+ core_pic_infix :=
+ core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
+
+ ifeq ($(1),optimizing)
+ ifeq ($($(3)TARGET_ARCH),arm64)
+ # TODO: Enable image generation on arm64 once the backend
+ # is on par with other architectures.
+ core_compile_options += --compiler-backend=Quick
+ else
+ core_compile_options += --compiler-backend=Optimizing
+ # With the optimizing compiler, we want to rerun dex2oat whenever there is
+ # a dex2oat change to catch regressions early.
+ core_dex2oat_dependency := $(DEX2OAT)
+ endif
+ core_infix := -optimizing
+ endif
+ ifeq ($(1),interpreter)
+ core_compile_options += --compiler-filter=interpret-only
+ core_infix := -interpreter
+ endif
+ ifeq ($(1),default)
+ # Default has no infix, no compile options.
+ endif
+ ifneq ($(filter-out default interpreter optimizing,$(1)),)
+ # Technically this test is not precise, but hopefully good enough.
+ $$(error found $(1) expected default, interpreter or optimizing)
+ endif
+
+ ifeq ($(2),pic)
+ core_compile_options += --compile-pic
+ core_pic_infix := -pic
+ endif
+ ifeq ($(2),no-pic)
+ # No change for non-pic
+ endif
+ ifneq ($(filter-out pic no-pic,$(2)),)
+ # Technically this test is not precise, but hopefully good enough.
+ $$(error found $(2) expected pic or no-pic)
+ endif
+
+ core_image_name := $($(3)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_IMG_SUFFIX)
+ core_oat_name := $($(3)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(CORE_OAT_SUFFIX)
+
+ # Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
+ ifeq ($(3),)
+ ifdef TARGET_2ND_ARCH
+ TARGET_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name)
+ else
+ TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
+ endif
+ else
+ TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
+ endif
+ TARGET_CORE_IMG_OUTS += $$(core_image_name)
+ TARGET_CORE_OAT_OUTS += $$(core_oat_name)
+
+$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
+$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
+$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
+$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
@echo "target dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
- $$(hide) $$(DEX2OAT) $$(DEX2OAT_FLAGS) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
+ $$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
- $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$($(1)TARGET_CORE_OAT_OUT) \
- --oat-location=$$($(1)TARGET_CORE_OAT) --image=$$($(1)TARGET_CORE_IMG_OUT) \
- --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(1)TARGET_ARCH) \
- --instruction-set-features=$$($(1)TARGET_INSTRUCTION_SET_FEATURES) \
- --android-root=$$(PRODUCT_OUT)/system --include-patch-information
+ $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+ --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
+ --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(3)TARGET_ARCH) \
+ --instruction-set-features=$$($(3)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
+ --android-root=$$(PRODUCT_OUT)/system --include-patch-information \
+ $$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
-# This "renaming" eases declaration in art/Android.mk
-TARGET_CORE_IMG_OUT$($(1)ART_PHONY_TEST_TARGET_SUFFIX) := $($(1)TARGET_CORE_IMG_OUT)
+$$(core_oat_name): $$(core_image_name)
-$$($(1)TARGET_CORE_OAT_OUT): $$($(1)TARGET_CORE_IMG_OUT)
+ # Clean up locally used variables.
+ core_dex2oat_dependency :=
+ core_compile_options :=
+ core_image_name :=
+ core_oat_name :=
+ core_infix :=
+ core_pic_infix :=
endef # create-core-oat-target-rules
-ifdef TARGET_2ND_ARCH
-$(eval $(call create-core-oat-target-rules,2ND_))
-endif
-$(eval $(call create-core-oat-target-rules,))
+# $(1): compiler - default, optimizing or interpreter.
+define create-core-oat-target-rule-combination
+ $(call create-core-oat-target-rules,$(1),no-pic,)
+ $(call create-core-oat-target-rules,$(1),pic,)
+ ifdef TARGET_2ND_ARCH
+ $(call create-core-oat-target-rules,$(1),no-pic,2ND_)
+ $(call create-core-oat-target-rules,$(1),pic,2ND_)
+ endif
+endef
-ifeq ($(ART_BUILD_HOST),true)
-include $(CLEAR_VARS)
-LOCAL_MODULE := core.art-host
-LOCAL_MODULE_TAGS := optional
-LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += $(HOST_CORE_IMG_OUT)
-include $(BUILD_PHONY_PACKAGE)
-endif # ART_BUILD_HOST
-
-# If we aren't building the host toolchain, skip building the target core.art.
-ifeq ($(ART_BUILD_TARGET),true)
-include $(CLEAR_VARS)
-LOCAL_MODULE := core.art
-LOCAL_MODULE_TAGS := optional
-LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_CORE_IMG_OUT)
-include $(BUILD_PHONY_PACKAGE)
-endif # ART_BUILD_TARGET
+$(eval $(call create-core-oat-target-rule-combination,default))
+$(eval $(call create-core-oat-target-rule-combination,optimizing))
+$(eval $(call create-core-oat-target-rule-combination,interpreter))
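
With the rule combinations above, Android.oat.mk no longer publishes a single HOST/TARGET_CORE_IMG_OUT; each (compiler, pic, bitness) configuration gets its own variable, plus the aggregate *_OUTS lists, which is what the Android.mk and Android.gtest.mk hunks earlier in this change consume. A sketch of a consumer, mirroring those hunks (my_test is a hypothetical name):

    # A specific configuration, as Android.gtest.mk does for elf_writer_test:
    my_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
    # Everything at once, as Android.mk does for build-art-host and clean-oat-host:
    build-art-host: $(HOST_CORE_IMG_OUTS)
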
diff --git a/compiler/Android.mk b/compiler/Android.mk
index f413576..eb9ad47 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -58,22 +58,22 @@
dex/quick/x86/target_x86.cc \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
- dex/mir_dataflow.cc \
- dex/mir_field_info.cc \
- dex/mir_method_info.cc \
- dex/mir_optimization.cc \
dex/bb_optimizations.cc \
dex/compiler_ir.cc \
+ dex/frontend.cc \
+ dex/mir_analysis.cc \
+ dex/mir_dataflow.cc \
+ dex/mir_field_info.cc \
+ dex/mir_graph.cc \
+ dex/mir_method_info.cc \
+ dex/mir_optimization.cc \
dex/post_opt_passes.cc \
dex/pass_driver_me_opts.cc \
dex/pass_driver_me_post_opt.cc \
- dex/frontend.cc \
- dex/mir_graph.cc \
- dex/mir_analysis.cc \
+ dex/ssa_transformation.cc \
dex/verified_method.cc \
dex/verification_results.cc \
dex/vreg_analysis.cc \
- dex/ssa_transformation.cc \
dex/quick_compiler_callbacks.cc \
driver/compiler_driver.cc \
driver/dex_compilation_unit.cc \
@@ -88,9 +88,10 @@
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_arm.cc \
+ optimizing/code_generator_arm64.cc \
optimizing/code_generator_x86.cc \
optimizing/code_generator_x86_64.cc \
- optimizing/constant_propagation.cc \
+ optimizing/constant_folding.cc \
optimizing/dead_code_elimination.cc \
optimizing/graph_checker.cc \
optimizing/graph_visualizer.cc \
@@ -98,6 +99,7 @@
optimizing/instruction_simplifier.cc \
optimizing/locations.cc \
optimizing/nodes.cc \
+ optimizing/optimization.cc \
optimizing/optimizing_compiler.cc \
optimizing/parallel_move_resolver.cc \
optimizing/prepare_for_register_allocation.cc \
@@ -131,6 +133,7 @@
file_output_stream.cc \
image_writer.cc \
oat_writer.cc \
+ output_stream.cc \
vector_output_stream.cc
ifeq ($(ART_SEA_IR_MODE),true)
@@ -166,7 +169,18 @@
endif
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/compiler_enums.h
+ dex/quick/arm/arm_lir.h \
+ dex/quick/arm64/arm64_lir.h \
+ dex/quick/mips/mips_lir.h \
+ dex/quick/resource_mask.h \
+ dex/compiler_enums.h \
+ dex/global_value_numbering.h \
+ dex/pass_me.h \
+ driver/compiler_driver.h \
+ driver/compiler_options.h \
+ image_writer.h \
+ optimizing/locations.h \
+ utils/arm/constants_arm.h
# $(1): target or host
# $(2): ndebug or debug
@@ -193,7 +207,9 @@
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart-compiler
LOCAL_SHARED_LIBRARIES += libart
- LOCAL_FDO_SUPPORT := true
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_FDO_SUPPORT := true
+ endif
else # debug
LOCAL_MODULE := libartd-compiler
LOCAL_SHARED_LIBRARIES += libartd
@@ -231,9 +247,6 @@
endif
endif
- # TODO: clean up the compilers and remove this.
- LOCAL_CFLAGS += -Wno-unused-parameter
-
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
LOCAL_SHARED_LIBRARIES += libLLVM
LOCAL_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
@@ -259,15 +272,18 @@
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
ifeq ($$(art_target_or_host),host)
- LOCAL_LDLIBS += -ldl -lpthread
+ # For compiler driver TLS.
+ LOCAL_LDLIBS += -lpthread
endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+ # Vixl assembly support for ARM64 targets.
+ LOCAL_SHARED_LIBRARIES += libvixl
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libvixl
+ # For atrace.
+ LOCAL_SHARED_LIBRARIES += libcutils
include $(BUILD_SHARED_LIBRARY)
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
endif
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 359d6af..97387a1 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -16,6 +16,7 @@
#include "common_compiler_test.h"
+#include "arch/instruction_set_features.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/quick_compiler_callbacks.h"
@@ -111,7 +112,7 @@
#else
// Only warn if not Intel as Intel doesn't have cache flush instructions.
#if !defined(__i386__) && !defined(__x86_64__)
- LOG(WARNING) << "UNIMPLEMENTED: cache flush";
+ UNIMPLEMENTED(WARNING) << "cache flush";
#endif
#endif
}
@@ -144,8 +145,7 @@
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(
- runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
@@ -157,7 +157,7 @@
method_inliner_map_.get(),
compiler_kind, instruction_set,
instruction_set_features_.get(),
- true, new std::set<std::string>,
+ true, new std::set<std::string>, nullptr,
2, true, true, timer_.get(), ""));
}
// We typically don't generate an image in unit tests, disable this optimization by default.
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index cdae8d2..7f76eef 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -21,7 +21,7 @@
#include <string>
#include <vector>
-#include "instruction_set.h"
+#include "arch/instruction_set.h"
#include "method_reference.h"
#include "utils.h"
#include "utils/array_ref.h"
@@ -162,7 +162,7 @@
}
this->resize(i + 1);
- for (size_t i = size(); --i >= 1; ) {
+ for (i = size(); --i >= 1; ) {
(*this)[i].from_ -= (*this)[i-1].from_;
(*this)[i].to_ -= (*this)[i-1].to_;
}
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index fbfd8e6..b9fcf5b 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -25,13 +25,24 @@
namespace art {
#ifdef ART_SEA_IR_MODE
-extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
+constexpr bool kCanUseSeaIR = true;
+#else
+constexpr bool kCanUseSeaIR = false;
+#endif
+
+extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t access_flags ATTRIBUTE_UNUSED,
+ art::InvokeType invoke_type ATTRIBUTE_UNUSED,
+ uint16_t class_def_idx ATTRIBUTE_UNUSED,
+ uint32_t method_idx ATTRIBUTE_UNUSED,
+ jobject class_loader ATTRIBUTE_UNUSED,
+ const art::DexFile& dex_file ATTRIBUTE_UNUSED)
+#ifdef ART_SEA_IR_MODE
+; // NOLINT(whitespace/semicolon)
+#else
+{
+ UNREACHABLE();
+}
#endif
@@ -42,19 +53,18 @@
uint32_t method_idx,
jobject class_loader,
const art::DexFile& dex_file) {
-#ifdef ART_SEA_IR_MODE
- bool use_sea = (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
- if (use_sea) {
- LOG(INFO) << "Using SEA IR to compile..." << std::endl;
- return SeaIrCompileMethod(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
+ bool use_sea = kCanUseSeaIR &&
+ (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
+ if (use_sea) {
+ LOG(INFO) << "Using SEA IR to compile..." << std::endl;
+ return SeaIrCompileMethod(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
}
-#endif
return nullptr;
}
@@ -75,8 +85,30 @@
default:
LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
- return nullptr;
+}
+
+bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item,
+ uint32_t method_idx,
+ const DexFile& dex_file) {
+ /*
+ * Skip compilation for pathologically large methods - either by instruction count or num vregs.
+ * Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter
+ * of that, which also guarantees we cannot overflow our 16-bit internal Quick SSA name space.
+ */
+ if (code_item.insns_size_in_code_units_ >= UINT16_MAX / 4) {
+ LOG(INFO) << "Method exceeds compiler instruction limit: "
+ << code_item.insns_size_in_code_units_
+ << " in " << PrettyMethod(method_idx, dex_file);
+ return true;
+ }
+ if (code_item.registers_size_ >= UINT16_MAX / 4) {
+ LOG(INFO) << "Method exceeds compiler virtual register limit: "
+ << code_item.registers_size_ << " in " << PrettyMethod(method_idx, dex_file);
+ return true;
+ }
+ return false;
}
} // namespace art
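
For scale, UINT16_MAX / 4 is 16383, so any method with 16383 or more code units or virtual registers is rejected up front; staying within a quarter of the 16-bit range is what keeps the Quick compiler's internal 16-bit SSA name space from overflowing.
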
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 05fa858..c2c15ff 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -115,6 +115,7 @@
*/
virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
const {
+ UNUSED(driver);
return nullptr;
}
@@ -122,6 +123,12 @@
return nullptr;
}
+ // Returns whether the method to compile is such a pathological case that
+ // it's not worth compiling.
+ static bool IsPathologicalCase(const DexFile::CodeItem& code_item,
+ uint32_t method_idx,
+ const DexFile& dex_file);
+
protected:
explicit Compiler(CompilerDriver* driver, uint64_t warning) :
driver_(driver), maximum_compilation_time_before_warning_(warning) {
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
index cab3427..9cad933 100644
--- a/compiler/dex/backend.h
+++ b/compiler/dex/backend.h
@@ -38,7 +38,7 @@
/*
* Return the number of reservable vector registers supported
- * @param long_or_fp ‘true’ if floating point computations will be
+ * @param long_or_fp, true if floating point computations will be
* executed or the operations will be long type while vector
* registers are reserved.
* @return the number of vector registers that are available
@@ -46,7 +46,10 @@
* are held back to generate scalar code without exhausting vector
* registers, if scalar code also uses the vector registers.
*/
- virtual int NumReservableVectorRegisters(bool long_or_fp) { return 0; }
+ virtual int NumReservableVectorRegisters(bool long_or_fp) {
+ UNUSED(long_or_fp);
+ return 0;
+ }
protected:
explicit Backend(ArenaAllocator* arena) : arena_(arena) {}
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 78da420..b56fd6f 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -28,6 +28,7 @@
kRefReg,
kAnyReg,
};
+std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs);
enum BitsUsed {
kSize32Bits,
@@ -37,6 +38,7 @@
kSize512Bits,
kSize1024Bits,
};
+std::ostream& operator<<(std::ostream& os, const BitsUsed& rhs);
enum SpecialTargetRegister {
kSelf, // Thread pointer.
@@ -60,6 +62,14 @@
kFArg5,
kFArg6,
kFArg7,
+ kFArg8,
+ kFArg9,
+ kFArg10,
+ kFArg11,
+ kFArg12,
+ kFArg13,
+ kFArg14,
+ kFArg15,
kRet0,
kRet1,
kInvokeTgt,
@@ -67,6 +77,7 @@
kHiddenFpArg,
kCount
};
+std::ostream& operator<<(std::ostream& os, const SpecialTargetRegister& code);
enum RegLocationType {
kLocDalvikFrame = 0, // Normal Dalvik register
@@ -74,6 +85,7 @@
kLocCompilerTemp,
kLocInvalid
};
+std::ostream& operator<<(std::ostream& os, const RegLocationType& rhs);
enum BBType {
kNullBlock,
@@ -83,6 +95,7 @@
kExceptionHandling,
kDead,
};
+std::ostream& operator<<(std::ostream& os, const BBType& code);
// Shared pseudo opcodes - must be < 0.
enum LIRPseudoOpcode {
@@ -103,6 +116,7 @@
kPseudoEHBlockLabel = -2,
kPseudoNormalBlockLabel = -1,
};
+std::ostream& operator<<(std::ostream& os, const LIRPseudoOpcode& rhs);
enum ExtendedMIROpcode {
kMirOpFirst = kNumPackedOpcodes,
@@ -305,7 +319,9 @@
kMIRNullCheckOnly,
kMIRIgnoreRangeCheck,
kMIRRangeCheckOnly,
- kMIRIgnoreClInitCheck,
+ kMIRClassIsInitialized,
+ kMIRClassIsInDexCache,
+ kMirIgnoreDivZeroCheck,
kMIRInlined, // Invoke is inlined (ie dead).
kMIRInlinedPred, // Invoke is inlined via prediction.
kMIRCallee, // Instruction is inlined from callee.
@@ -324,11 +340,13 @@
kPackedSwitch,
kSparseSwitch,
};
+std::ostream& operator<<(std::ostream& os, const BlockListType& rhs);
enum AssemblerStatus {
kSuccess,
kRetryAll,
};
+std::ostream& operator<<(std::ostream& os, const AssemblerStatus& rhs);
enum OpSize {
kWord, // Natural word size of target (32/64).
@@ -342,7 +360,6 @@
kUnsignedByte,
kSignedByte,
};
-
std::ostream& operator<<(std::ostream& os, const OpSize& kind);
enum OpKind {
@@ -384,6 +401,7 @@
kOpBx,
kOpInvalid,
};
+std::ostream& operator<<(std::ostream& os, const OpKind& rhs);
enum MoveType {
kMov8GP, // Move 8-bit general purpose register.
@@ -400,8 +418,7 @@
kMovLo128FP, // Move low 64-bits of 128-bit FP register.
kMovHi128FP, // Move high 64-bits of 128-bit FP register.
};
-
-std::ostream& operator<<(std::ostream& os, const OpKind& kind);
+std::ostream& operator<<(std::ostream& os, const MoveType& kind);
enum ConditionCode {
kCondEq, // equal
@@ -423,7 +440,6 @@
kCondAl, // always
kCondNv, // never
};
-
std::ostream& operator<<(std::ostream& os, const ConditionCode& kind);
// Target specific condition encodings
@@ -445,7 +461,6 @@
kArmCondAl = 0xe, // 1110
kArmCondNv = 0xf, // 1111
};
-
std::ostream& operator<<(std::ostream& os, const ArmConditionCode& kind);
enum X86ConditionCode {
@@ -493,7 +508,6 @@
kX86CondNle = 0xf, // not-less-than
kX86CondG = kX86CondNle, // greater
};
-
std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
enum DividePattern {
@@ -502,7 +516,6 @@
Divide5,
Divide7,
};
-
std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
/**
@@ -528,7 +541,6 @@
kAnyAny,
kNTStoreStore,
};
-
std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
enum OpFeatureFlags {
@@ -585,6 +597,7 @@
kDefHi,
kDefLo
};
+std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& rhs);
enum SelectInstructionKind {
kSelectNone,
@@ -592,36 +605,34 @@
kSelectMove,
kSelectGoto
};
-
std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
// LIR fixup kinds for Arm
enum FixupKind {
kFixupNone,
- kFixupLabel, // For labels we just adjust the offset.
- kFixupLoad, // Mostly for immediates.
- kFixupVLoad, // FP load which *may* be pc-relative.
- kFixupCBxZ, // Cbz, Cbnz.
- kFixupTBxZ, // Tbz, Tbnz.
- kFixupPushPop, // Not really pc relative, but changes size based on args.
- kFixupCondBranch, // Conditional branch
- kFixupT1Branch, // Thumb1 Unconditional branch
- kFixupT2Branch, // Thumb2 Unconditional branch
- kFixupBlx1, // Blx1 (start of Blx1/Blx2 pair).
- kFixupBl1, // Bl1 (start of Bl1/Bl2 pair).
- kFixupAdr, // Adr.
- kFixupMovImmLST, // kThumb2MovImm16LST.
- kFixupMovImmHST, // kThumb2MovImm16HST.
- kFixupAlign4, // Align to 4-byte boundary.
+ kFixupLabel, // For labels we just adjust the offset.
+ kFixupLoad, // Mostly for immediates.
+ kFixupVLoad, // FP load which *may* be pc-relative.
+ kFixupCBxZ, // Cbz, Cbnz.
+ kFixupTBxZ, // Tbz, Tbnz.
+ kFixupPushPop, // Not really pc relative, but changes size based on args.
+ kFixupCondBranch, // Conditional branch
+ kFixupT1Branch, // Thumb1 Unconditional branch
+ kFixupT2Branch, // Thumb2 Unconditional branch
+ kFixupBlx1, // Blx1 (start of Blx1/Blx2 pair).
+ kFixupBl1, // Bl1 (start of Bl1/Bl2 pair).
+ kFixupAdr, // Adr.
+ kFixupMovImmLST, // kThumb2MovImm16LST.
+ kFixupMovImmHST, // kThumb2MovImm16HST.
+ kFixupAlign4, // Align to 4-byte boundary.
+ kFixupA53Erratum835769, // Cortex A53 Erratum 835769.
};
-
std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
enum VolatileKind {
kNotVolatile, // Load/Store is not volatile
kVolatile // Load/Store is volatile
};
-
std::ostream& operator<<(std::ostream& os, const VolatileKind& kind);
enum WideKind {
@@ -629,7 +640,6 @@
kWide, // Wide view
kRef // Ref width
};
-
std::ostream& operator<<(std::ostream& os, const WideKind& kind);
} // namespace art
diff --git a/compiler/dex/compiler_ir.cc b/compiler/dex/compiler_ir.cc
index 909c995..a2b3fe4 100644
--- a/compiler/dex/compiler_ir.cc
+++ b/compiler/dex/compiler_ir.cc
@@ -16,6 +16,7 @@
#include "compiler_ir.h"
+#include "base/dumpable.h"
#include "backend.h"
#include "frontend.h"
#include "mir_graph.h"
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f9a05c2..205a521 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -282,10 +282,11 @@
} // namespace art
extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item,
- uint32_t access_flags, art::InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const art::DexFile& dex_file,
- art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ uint32_t access_flags, art::InvokeType invoke_type,
+ uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
+ const art::DexFile& dex_file,
+ art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 2e21d05..3f6231c 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -19,6 +19,7 @@
#include <cstdint>
#include "backend.h"
+#include "base/dumpable.h"
#include "compiler.h"
#include "compiler_internals.h"
#include "driver/compiler_driver.h"
@@ -83,27 +84,12 @@
jobject class_loader, const DexFile& dex_file,
void* llvm_compilation_unit) {
VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
- /*
- * Skip compilation for pathologically large methods - either by instruction count or num vregs.
- * Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter
- * of that, which also guarantees we cannot overflow our 16-bit internal SSA name space.
- */
- if (code_item->insns_size_in_code_units_ >= UINT16_MAX / 4) {
- LOG(INFO) << "Method exceeds compiler instruction limit: "
- << code_item->insns_size_in_code_units_
- << " in " << PrettyMethod(method_idx, dex_file);
- return NULL;
- }
- if (code_item->registers_size_ >= UINT16_MAX / 4) {
- LOG(INFO) << "Method exceeds compiler virtual register limit: "
- << code_item->registers_size_ << " in " << PrettyMethod(method_idx, dex_file);
- return NULL;
- }
-
- if (!driver.GetCompilerOptions().IsCompilationEnabled()) {
+ if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
return nullptr;
}
+ DCHECK(driver.GetCompilerOptions().IsCompilationEnabled());
+
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
CompilationUnit cu(driver.GetArenaPool());
@@ -134,15 +120,8 @@
(cu.enable_debug & (1 << kDebugVerbose));
}
- if (gVerboseMethods.size() != 0) {
- cu.verbose = false;
- for (size_t i = 0; i < gVerboseMethods.size(); ++i) {
- if (PrettyMethod(method_idx, dex_file).find(gVerboseMethods[i])
- != std::string::npos) {
- cu.verbose = true;
- break;
- }
- }
+ if (driver.GetCompilerOptions().HasVerboseMethods()) {
+ cu.verbose = driver.GetCompilerOptions().IsVerboseMethod(PrettyMethod(method_idx, dex_file));
}
if (cu.verbose) {
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index f0f7a70..d311bc7 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -70,12 +70,7 @@
DCHECK(work_lvn_.get() == nullptr);
work_lvn_.reset(new (allocator) LocalValueNumbering(this, bb->id, allocator));
if (bb->block_type == kEntryBlock) {
- if ((cu_->access_flags & kAccStatic) == 0) {
- // If non-static method, mark "this" as non-null
- int this_reg = cu_->mir_graph->GetFirstInVR();
- uint16_t value_name = work_lvn_->GetSRegValueName(this_reg);
- work_lvn_->SetValueNameNullChecked(value_name);
- }
+ work_lvn_->PrepareEntryBlock();
DCHECK(bb->first_mir_insn == nullptr); // modifications_allowed_ is irrelevant.
} else {
// To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
@@ -127,12 +122,6 @@
CHECK(!merge_lvns_.empty());
if (merge_lvns_.size() == 1u) {
work_lvn_->MergeOne(*merge_lvns_[0], merge_type);
- BasicBlock* pred_bb = mir_graph_->GetBasicBlock(merge_lvns_[0]->Id());
- if (HasNullCheckLastInsn(pred_bb, bb->id)) {
- int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
- uint16_t value_name = merge_lvns_[0]->GetSRegValueName(s_reg);
- work_lvn_->SetValueNameNullChecked(value_name);
- }
} else {
work_lvn_->Merge(merge_type);
}
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index df554cd..72d1112 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -19,14 +19,14 @@
#include "base/macros.h"
#include "compiler_internals.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
class LocalValueNumbering;
class MirFieldInfo;
-class GlobalValueNumbering {
+class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
public:
enum Mode {
kModeGvn,
@@ -55,33 +55,17 @@
}
// Allow modifications.
- void StartPostProcessing() {
- DCHECK(Good());
- DCHECK_EQ(mode_, kModeGvn);
- mode_ = kModeGvnPostProcessing;
- }
+ void StartPostProcessing();
bool CanModify() const {
return modifications_allowed_ && Good();
}
- // GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
static constexpr uint16_t kNoValue = 0xffffu;
// Allocate a new value name.
- uint16_t NewValueName() {
- DCHECK_NE(mode_, kModeGvnPostProcessing);
- ++last_value_;
- return last_value_;
- }
+ uint16_t NewValueName();
// Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
@@ -105,6 +89,19 @@
return res;
}
+  // Look up a value in the global value map; don't add a new entry if there was none before.
+ uint16_t FindValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
+ uint16_t res;
+ uint64_t key = BuildKey(op, operand1, operand2, modifier);
+ ValueMap::iterator lb = global_value_map_.lower_bound(key);
+ if (lb != global_value_map_.end() && lb->first == key) {
+ res = lb->second;
+ } else {
+ res = kNoValue;
+ }
+ return res;
+ }
+
// Check if the exact value is stored in the global value map.
bool HasValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
uint16_t value) const {
@@ -215,7 +212,7 @@
}
CompilationUnit* const cu_;
- MIRGraph* mir_graph_;
+ MIRGraph* const mir_graph_;
ScopedArenaAllocator* const allocator_;
// The maximum number of nested loops that we accept for GVN.
@@ -253,9 +250,23 @@
ScopedArenaVector<const LocalValueNumbering*> merge_lvns_; // Not owning.
friend class LocalValueNumbering;
+ friend class GlobalValueNumberingTest;
DISALLOW_COPY_AND_ASSIGN(GlobalValueNumbering);
};
+std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
+
+inline void GlobalValueNumbering::StartPostProcessing() {
+ DCHECK(Good());
+ DCHECK_EQ(mode_, kModeGvn);
+ mode_ = kModeGvnPostProcessing;
+}
+
+inline uint16_t GlobalValueNumbering::NewValueName() {
+ DCHECK_NE(mode_, kModeGvnPostProcessing);
+ ++last_value_;
+ return last_value_;
+}
} // namespace art
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 82a11a5..35d5b99 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -25,6 +25,8 @@
class GlobalValueNumberingTest : public testing::Test {
protected:
+ static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
+
struct IFieldDef {
uint16_t field_idx;
uintptr_t declaring_dex_file;
@@ -125,6 +127,8 @@
{ bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
#define DEF_MOVE(bb, opcode, reg, src) \
{ bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
#define DEF_PHI2(bb, reg, src1, src2) \
{ bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
@@ -157,7 +161,8 @@
MirSFieldLoweringInfo field_info(def->field_idx);
// Mark even unresolved fields as initialized.
field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
- MirSFieldLoweringInfo::kFlagIsInitialized;
+ MirSFieldLoweringInfo::kFlagClassIsInitialized;
+ // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
@@ -341,6 +346,8 @@
cu_.mir_graph->ssa_base_vregs_.push_back(i);
cu_.mir_graph->ssa_subscripts_.push_back(0);
}
+ // Set shorty for a void-returning method without arguments.
+ cu_.shorty = "V";
}
static constexpr size_t kMaxSsaRegs = 16384u;
@@ -356,6 +363,8 @@
ArenaBitVector* live_in_v_;
};
+constexpr uint16_t GlobalValueNumberingTest::kNoValue;
+
class GlobalValueNumberingTestDiamond : public GlobalValueNumberingTest {
public:
GlobalValueNumberingTestDiamond();
@@ -981,6 +990,92 @@
EXPECT_EQ(value_names_[18], value_names_[21]);
}
+TEST_F(GlobalValueNumberingTestDiamond, PhiWide) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000),
+ DEF_CONST_WIDE(4, Instruction::CONST_WIDE, 2u, 2000),
+ DEF_CONST_WIDE(5, Instruction::CONST_WIDE, 4u, 3000),
+ DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 6u, 0u),
+ DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 8u, 2u),
+ DEF_MOVE_WIDE(5, Instruction::MOVE_WIDE, 10u, 0u),
+ DEF_MOVE_WIDE(5, Instruction::MOVE_WIDE, 12u, 4u),
+ DEF_PHI2(6, 14u, 6u, 10u), // Same as CONST_WIDE 0u (1000).
+ DEF_PHI2(6, 15u, 7u, 11u), // Same as CONST_WIDE 0u (1000), high word.
+ DEF_PHI2(6, 16u, 6u, 0u), // Same as CONST_WIDE 0u (1000).
+ DEF_PHI2(6, 17u, 7u, 1u), // Same as CONST_WIDE 0u (1000), high word.
+ DEF_PHI2(6, 18u, 0u, 10u), // Same as CONST_WIDE 0u (1000).
+ DEF_PHI2(6, 19u, 1u, 11u), // Same as CONST_WIDE 0u (1000), high word.
+ DEF_PHI2(6, 20u, 8u, 10u), // Merge 2u (2000) and 0u (1000).
+ DEF_PHI2(6, 21u, 9u, 11u), // Merge 2u (2000) and 0u (1000), high word.
+ DEF_PHI2(6, 22u, 2u, 10u), // Merge 2u (2000) and 0u (1000).
+ DEF_PHI2(6, 23u, 3u, 11u), // Merge 2u (2000) and 0u (1000), high word.
+ DEF_PHI2(6, 24u, 8u, 0u), // Merge 2u (2000) and 0u (1000).
+ DEF_PHI2(6, 25u, 9u, 1u), // Merge 2u (2000) and 0u (1000), high word.
+ DEF_PHI2(6, 26u, 2u, 0u), // Merge 2u (2000) and 0u (1000).
+ DEF_PHI2(6, 27u, 5u, 1u), // Merge 2u (2000) and 0u (1000), high word.
+ DEF_PHI2(6, 28u, 6u, 12u), // Merge 0u (1000) and 4u (3000).
+ DEF_PHI2(6, 29u, 7u, 13u), // Merge 0u (1000) and 4u (3000), high word.
+ DEF_PHI2(6, 30u, 0u, 12u), // Merge 0u (1000) and 4u (3000).
+ DEF_PHI2(6, 31u, 1u, 13u), // Merge 0u (1000) and 4u (3000), high word.
+ DEF_PHI2(6, 32u, 6u, 4u), // Merge 0u (1000) and 4u (3000).
+ DEF_PHI2(6, 33u, 7u, 5u), // Merge 0u (1000) and 4u (3000), high word.
+ DEF_PHI2(6, 34u, 0u, 4u), // Merge 0u (1000) and 4u (3000).
+ DEF_PHI2(6, 35u, 1u, 5u), // Merge 0u (1000) and 4u (3000), high word.
+ DEF_PHI2(6, 36u, 8u, 12u), // Merge 2u (2000) and 4u (3000).
+ DEF_PHI2(6, 37u, 9u, 13u), // Merge 2u (2000) and 4u (3000), high word.
+ DEF_PHI2(6, 38u, 2u, 12u), // Merge 2u (2000) and 4u (3000).
+ DEF_PHI2(6, 39u, 3u, 13u), // Merge 2u (2000) and 4u (3000), high word.
+ DEF_PHI2(6, 40u, 8u, 4u), // Merge 2u (2000) and 4u (3000).
+ DEF_PHI2(6, 41u, 9u, 5u), // Merge 2u (2000) and 4u (3000), high word.
+ DEF_PHI2(6, 42u, 2u, 4u), // Merge 2u (2000) and 4u (3000).
+ DEF_PHI2(6, 43u, 3u, 5u), // Merge 2u (2000) and 4u (3000), high word.
+ };
+
+ PrepareMIRs(mirs);
+ PerformGVN();
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_EQ(value_names_[0], value_names_[7]);
+ EXPECT_EQ(value_names_[0], value_names_[9]);
+ EXPECT_EQ(value_names_[0], value_names_[11]);
+ EXPECT_NE(value_names_[13], value_names_[0]);
+ EXPECT_NE(value_names_[13], value_names_[1]);
+ EXPECT_NE(value_names_[13], value_names_[2]);
+ EXPECT_EQ(value_names_[13], value_names_[15]);
+ EXPECT_EQ(value_names_[13], value_names_[17]);
+ EXPECT_EQ(value_names_[13], value_names_[19]);
+ EXPECT_NE(value_names_[21], value_names_[0]);
+ EXPECT_NE(value_names_[21], value_names_[1]);
+ EXPECT_NE(value_names_[21], value_names_[2]);
+ EXPECT_NE(value_names_[21], value_names_[13]);
+ EXPECT_EQ(value_names_[21], value_names_[23]);
+ EXPECT_EQ(value_names_[21], value_names_[25]);
+ EXPECT_EQ(value_names_[21], value_names_[27]);
+ EXPECT_NE(value_names_[29], value_names_[0]);
+ EXPECT_NE(value_names_[29], value_names_[1]);
+ EXPECT_NE(value_names_[29], value_names_[2]);
+ EXPECT_NE(value_names_[29], value_names_[13]);
+ EXPECT_NE(value_names_[29], value_names_[21]);
+ EXPECT_EQ(value_names_[29], value_names_[31]);
+ EXPECT_EQ(value_names_[29], value_names_[33]);
+ EXPECT_EQ(value_names_[29], value_names_[35]);
+ // High words should get kNoValue.
+ EXPECT_EQ(value_names_[8], kNoValue);
+ EXPECT_EQ(value_names_[10], kNoValue);
+ EXPECT_EQ(value_names_[12], kNoValue);
+ EXPECT_EQ(value_names_[14], kNoValue);
+ EXPECT_EQ(value_names_[16], kNoValue);
+ EXPECT_EQ(value_names_[18], kNoValue);
+ EXPECT_EQ(value_names_[20], kNoValue);
+ EXPECT_EQ(value_names_[22], kNoValue);
+ EXPECT_EQ(value_names_[24], kNoValue);
+ EXPECT_EQ(value_names_[26], kNoValue);
+ EXPECT_EQ(value_names_[28], kNoValue);
+ EXPECT_EQ(value_names_[30], kNoValue);
+ EXPECT_EQ(value_names_[32], kNoValue);
+ EXPECT_EQ(value_names_[34], kNoValue);
+ EXPECT_EQ(value_names_[36], kNoValue);
+}
+
TEST_F(GlobalValueNumberingTestLoop, NonAliasingIFields) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, false }, // Int.
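
For context on the PhiWide test above: a wide (64-bit) value occupies two consecutive 32-bit virtual registers, which is why DEF_MOVE_WIDE copies {src, src + 1} and why the high-word phis are asserted separately. A tiny illustrative sketch of that register pairing (not part of the test infrastructure):

#include <cstdint>
#include <iostream>
#include <vector>

void SetWide(std::vector<uint32_t>* vregs, size_t low_reg, int64_t value) {
  (*vregs)[low_reg] = static_cast<uint32_t>(value);             // Low word.
  (*vregs)[low_reg + 1] = static_cast<uint32_t>(value >> 32);   // High word.
}

int64_t GetWide(const std::vector<uint32_t>& vregs, size_t low_reg) {
  return static_cast<int64_t>(vregs[low_reg]) |
         (static_cast<int64_t>(vregs[low_reg + 1]) << 32);
}

int main() {
  std::vector<uint32_t> vregs(4, 0u);
  SetWide(&vregs, 0u, 1000);                 // Mirrors CONST_WIDE ... 1000 in the test.
  std::cout << GetWide(vregs, 0u) << "\n";   // 1000
  return 0;
}
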
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 8b7ae20..c1ce2ac 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -107,7 +107,8 @@
class LocalValueNumbering::NonAliasingArrayVersions {
public:
- static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+ static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
uint16_t array) {
return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
}
@@ -129,8 +130,9 @@
gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t array) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
+ uint16_t array ATTRIBUTE_UNUSED) {
return false; // Not affected by global_memory_version_.
}
@@ -164,8 +166,9 @@
return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
}
- static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type, uint16_t location) {
+ static uint16_t LookupMergeValue(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED, uint16_t location) {
// If the location is non-aliasing in lvn, use the non-aliasing value.
uint16_t array = gvn->GetArrayLocationBase(location);
if (lvn->IsNonAliasingArray(array, type)) {
@@ -176,8 +179,11 @@
gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED) {
return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
}
@@ -374,6 +380,12 @@
range_checked_ = other.range_checked_;
null_checked_ = other.null_checked_;
+ const BasicBlock* pred_bb = gvn_->GetBasicBlock(other.Id());
+ if (GlobalValueNumbering::HasNullCheckLastInsn(pred_bb, Id())) {
+ int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
+ null_checked_.insert(other.GetOperandValue(s_reg));
+ }
+
if (merge_type == kCatchMerge) {
// Memory is clobbered. Use new memory version and don't merge aliasing locations.
global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
@@ -465,10 +477,7 @@
DCHECK(mir != nullptr);
// Only INVOKEs can leak and clobber non-aliasing references if they throw.
if ((mir->dalvikInsn.FlagsOf() & Instruction::kInvoke) != 0) {
- for (uint16_t i = 0u; i != mir->ssa_rep->num_uses; ++i) {
- uint16_t value_name = lvn->GetOperandValue(mir->ssa_rep->uses[i]);
- non_aliasing_refs_.erase(value_name);
- }
+ HandleInvokeArgs(mir, lvn);
}
}
}
@@ -681,7 +690,7 @@
const BasicBlock* least_entries_bb = gvn_->GetBasicBlock(least_entries_lvn->Id());
if (gvn_->HasNullCheckLastInsn(least_entries_bb, id_)) {
int s_reg = least_entries_bb->last_mir_insn->ssa_rep->uses[0];
- uint32_t value_name = least_entries_lvn->GetSRegValueName(s_reg);
+ uint32_t value_name = least_entries_lvn->GetOperandValue(s_reg);
merge_names_.clear();
merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
@@ -778,9 +787,9 @@
if (same_version) {
// Find the first non-null values.
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it != (lvn->*map_ptr).end()) {
- cmp_values = &it->second;
+ auto value = (lvn->*map_ptr).find(key);
+ if (value != (lvn->*map_ptr).end()) {
+ cmp_values = &value->second;
break;
}
}
@@ -790,21 +799,21 @@
// field version and the values' memory_version_before_stores, last_stored_value
// and store_loc_set are identical.
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it == (lvn->*map_ptr).end()) {
+ auto value = (lvn->*map_ptr).find(key);
+ if (value == (lvn->*map_ptr).end()) {
if (cmp_values->memory_version_before_stores != kNoValue) {
same_version = false;
break;
}
- } else if (cmp_values->last_stored_value != it->second.last_stored_value ||
- cmp_values->memory_version_before_stores != it->second.memory_version_before_stores ||
- cmp_values->store_loc_set != it->second.store_loc_set) {
+ } else if (cmp_values->last_stored_value != value->second.last_stored_value ||
+ cmp_values->memory_version_before_stores != value->second.memory_version_before_stores ||
+ cmp_values->store_loc_set != value->second.store_loc_set) {
same_version = false;
break;
- } else if (it->second.last_load_memory_version != kNoValue) {
+ } else if (value->second.last_load_memory_version != kNoValue) {
DCHECK(load_memory_version_for_same_version == kNoValue ||
- load_memory_version_for_same_version == it->second.last_load_memory_version);
- load_memory_version_for_same_version = it->second.last_load_memory_version;
+ load_memory_version_for_same_version == value->second.last_load_memory_version);
+ load_memory_version_for_same_version = value->second.last_load_memory_version;
}
}
}
@@ -819,12 +828,12 @@
if (!cmp_values->load_value_map.empty()) {
my_values->load_value_map = cmp_values->load_value_map;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it == (lvn->*map_ptr).end() || it->second.load_value_map.empty()) {
+ auto value = (lvn->*map_ptr).find(key);
+ if (value == (lvn->*map_ptr).end() || value->second.load_value_map.empty()) {
my_values->load_value_map.clear();
break;
}
- InPlaceIntersectMaps(&my_values->load_value_map, it->second.load_value_map);
+ InPlaceIntersectMaps(&my_values->load_value_map, value->second.load_value_map);
if (my_values->load_value_map.empty()) {
break;
}
@@ -838,20 +847,20 @@
// Calculate the locations that have been either read from or written to in each incoming LVN.
bool first_lvn = true;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it == (lvn->*map_ptr).end()) {
+ auto value = (lvn->*map_ptr).find(key);
+ if (value == (lvn->*map_ptr).end()) {
my_values->load_value_map.clear();
break;
}
if (first_lvn) {
first_lvn = false;
// Copy the first LVN's locations. Values will be overwritten later.
- my_values->load_value_map = it->second.load_value_map;
- for (uint16_t location : it->second.store_loc_set) {
+ my_values->load_value_map = value->second.load_value_map;
+ for (uint16_t location : value->second.store_loc_set) {
my_values->load_value_map.Put(location, 0u);
}
} else {
- IntersectAliasingValueLocations(my_values, &it->second);
+ IntersectAliasingValueLocations(my_values, &value->second);
}
}
// Calculate merged values for the intersection.
@@ -953,6 +962,26 @@
AliasingArrayVersions>>();
}
+void LocalValueNumbering::PrepareEntryBlock() {
+ uint32_t vreg = gvn_->GetMirGraph()->GetFirstInVR();
+ CompilationUnit* cu = gvn_->GetCompilationUnit();
+ const char* shorty = cu->shorty;
+ ++shorty; // Skip return value.
+ if ((cu->access_flags & kAccStatic) == 0) {
+    // If this is a non-static method, mark "this" as non-null.
+ uint16_t value_name = GetOperandValue(vreg);
+ ++vreg;
+ null_checked_.insert(value_name);
+ }
+ for ( ; *shorty != 0; ++shorty, ++vreg) {
+ if (*shorty == 'J' || *shorty == 'D') {
+ uint16_t value_name = GetOperandValueWide(vreg);
+ SetOperandValueWide(vreg, value_name);
+ ++vreg;
+ }
+ }
+}
+
uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
DCHECK(null_checked_.find(res) == null_checked_.end());
@@ -1039,12 +1068,30 @@
}
}
+void LocalValueNumbering::HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn) {
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* uses_end = uses + mir->ssa_rep->num_uses;
+ while (uses != uses_end) {
+ uint16_t sreg = *uses;
+ ++uses;
+ // Avoid LookupValue() so that we don't store new values in the global value map.
+ auto local_it = mir_lvn->sreg_value_map_.find(sreg);
+ if (local_it != mir_lvn->sreg_value_map_.end()) {
+ non_aliasing_refs_.erase(local_it->second);
+ } else {
+ uint16_t value_name = gvn_->FindValue(kNoValue, sreg, kNoValue, kNoValue);
+ if (value_name != kNoValue) {
+ non_aliasing_refs_.erase(value_name);
+ }
+ }
+ }
+}
+
uint16_t LocalValueNumbering::HandlePhi(MIR* mir) {
if (gvn_->merge_lvns_.empty()) {
// Running LVN without a full GVN?
return kNoValue;
}
- int16_t num_uses = mir->ssa_rep->num_uses;
int32_t* uses = mir->ssa_rep->uses;
// Try to find out if this is merging wide regs.
if (mir->ssa_rep->defs[0] != 0 &&
@@ -1052,18 +1099,20 @@
// This is the high part of a wide reg. Ignore the Phi.
return kNoValue;
}
- bool wide = false;
- for (int16_t i = 0; i != num_uses; ++i) {
- if (sreg_wide_value_map_.count(uses[i]) != 0u) {
- wide = true;
- break;
- }
+ BasicBlockId* incoming = mir->meta.phi_incoming;
+ int16_t pos = 0;
+ // Check if we're merging a wide value based on the first merged LVN.
+ const LocalValueNumbering* first_lvn = gvn_->merge_lvns_[0];
+ DCHECK_LT(pos, mir->ssa_rep->num_uses);
+ while (incoming[pos] != first_lvn->Id()) {
+ ++pos;
+ DCHECK_LT(pos, mir->ssa_rep->num_uses);
}
+ int first_s_reg = uses[pos];
+ bool wide = (first_lvn->sreg_wide_value_map_.count(first_s_reg) != 0u);
// Iterate over *merge_lvns_ and skip incoming sregs for BBs without associated LVN.
uint16_t value_name = kNoValue;
merge_names_.clear();
- BasicBlockId* incoming = mir->meta.phi_incoming;
- int16_t pos = 0;
bool same_values = true;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
DCHECK_LT(pos, mir->ssa_rep->num_uses);
@@ -1271,7 +1320,8 @@
uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
if (!field_info.IsResolved() || field_info.IsVolatile() ||
- (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0)) {
+ (!field_info.IsClassInitialized() &&
+ (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0)) {
// Volatile SGETs (and unresolved fields are potentially volatile) have acquire semantics
// and class initialization can call arbitrary functions, we need to wipe aliasing values.
HandleInvokeOrClInitOrAcquireOp(mir);
@@ -1307,7 +1357,8 @@
void LocalValueNumbering::HandleSPut(MIR* mir, uint16_t opcode) {
const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
- if (!field_info.IsInitialized() && (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
+ if (!field_info.IsClassInitialized() &&
+ (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
// Class initialization can call arbitrary functions, we need to wipe aliasing values.
HandleInvokeOrClInitOrAcquireOp(mir);
}
@@ -1468,10 +1519,7 @@
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE:
// Make ref args aliasing.
- for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
- uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
- non_aliasing_refs_.erase(reg);
- }
+ HandleInvokeArgs(mir, this);
HandleInvokeOrClInitOrAcquireOp(mir);
break;
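
The PrepareEntryBlock() logic above walks the method shorty: the first character is the return type, every following character is one argument, 'J' (long) and 'D' (double) arguments take two vregs, and a non-static method gets an implicit, always non-null "this" in the first in-vreg. A minimal sketch of that walk; kAccStatic matches the standard Java access flag, the helper name is made up.

#include <cstdint>
#include <iostream>

constexpr uint32_t kAccStatic = 0x0008;

// Returns the number of argument vregs and reports whether vreg 0 holds "this".
uint32_t CountInVRegs(const char* shorty, uint32_t access_flags, bool* has_this) {
  uint32_t vregs = 0u;
  *has_this = (access_flags & kAccStatic) == 0u;
  if (*has_this) {
    ++vregs;  // "this" is an implicit reference argument.
  }
  for (const char* p = shorty + 1; *p != '\0'; ++p) {  // Skip the return type.
    vregs += (*p == 'J' || *p == 'D') ? 2u : 1u;
  }
  return vregs;
}

int main() {
  bool has_this = false;
  // "VJI" = returns void, takes a long and an int; non-static, so "this" is added.
  uint32_t n = CountInVRegs("VJI", 0u, &has_this);
  std::cout << n << " in-vregs, has_this=" << has_this << "\n";  // 4 in-vregs, has_this=1
  return 0;
}
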
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index c60da32..979fd5a 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,7 @@
#include "compiler_internals.h"
#include "global_value_numbering.h"
-#include "utils/scoped_arena_allocator.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
@@ -31,7 +30,7 @@
// Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;
-class LocalValueNumbering {
+class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
private:
static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
@@ -44,14 +43,6 @@
bool Equals(const LocalValueNumbering& other) const;
- uint16_t GetSRegValueName(uint16_t s_reg) const {
- return GetOperandValue(s_reg);
- }
-
- void SetValueNameNullChecked(uint16_t value_name) {
- null_checked_.insert(value_name);
- }
-
bool IsValueNullChecked(uint16_t value_name) const {
return null_checked_.find(value_name) != null_checked_.end();
}
@@ -73,17 +64,10 @@
void MergeOne(const LocalValueNumbering& other, MergeType merge_type);
void Merge(MergeType merge_type); // Merge gvn_->merge_lvns_.
+ void PrepareEntryBlock();
uint16_t GetValueNumber(MIR* mir);
- // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
// A set of value names.
typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
@@ -121,18 +105,22 @@
}
void SetOperandValue(uint16_t s_reg, uint16_t value) {
+ DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
SetOperandValueImpl(s_reg, value, &sreg_value_map_);
}
uint16_t GetOperandValue(int s_reg) const {
+ DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
return GetOperandValueImpl(s_reg, &sreg_value_map_);
}
void SetOperandValueWide(uint16_t s_reg, uint16_t value) {
+ DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
SetOperandValueImpl(s_reg, value, &sreg_wide_value_map_);
}
uint16_t GetOperandValueWide(int s_reg) const {
+ DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
return GetOperandValueImpl(s_reg, &sreg_wide_value_map_);
}
@@ -300,6 +288,7 @@
void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
void HandlePutObject(MIR* mir);
void HandleEscapingRef(uint16_t base);
+ void HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn);
uint16_t HandlePhi(MIR* mir);
uint16_t HandleAGet(MIR* mir, uint16_t opcode);
void HandleAPut(MIR* mir, uint16_t opcode);
@@ -355,7 +344,7 @@
GlobalValueNumbering* gvn_;
// We're using the block id as a 16-bit operand value for some lookups.
- COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), BasicBlockId_must_be_16_bit);
+ static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "BasicBlockId must be 16 bit");
BasicBlockId id_;
SregValueMap sreg_value_map_;
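
The new DCHECKs in SetOperandValue()/SetOperandValueWide() above enforce that an s_reg is tracked in either the narrow map or the wide map, never both. A small sketch of that invariant using plain asserts; the class and member names are illustrative.

#include <cassert>
#include <cstdint>
#include <map>

class SregValues {
 public:
  void SetNarrow(uint16_t s_reg, uint16_t value) {
    assert(wide_.count(s_reg) == 0u);    // Must not already be tracked as wide.
    narrow_[s_reg] = value;
  }
  void SetWide(uint16_t s_reg, uint16_t value) {
    assert(narrow_.count(s_reg) == 0u);  // Must not already be tracked as narrow.
    wide_[s_reg] = value;
  }

 private:
  std::map<uint16_t, uint16_t> narrow_;
  std::map<uint16_t, uint16_t> wide_;
};

int main() {
  SregValues values;
  values.SetNarrow(3u, 100u);
  values.SetWide(4u, 200u);
  // values.SetWide(3u, 300u);  // Would trip the assertion: 3u is already narrow.
  return 0;
}
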
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 33d6c14..824c323 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -114,7 +114,8 @@
MirSFieldLoweringInfo field_info(def->field_idx);
// Mark even unresolved fields as initialized.
field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
- MirSFieldLoweringInfo::kFlagIsInitialized;
+ MirSFieldLoweringInfo::kFlagClassIsInitialized;
+ // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by LVN.
if (def->declaring_dex_file != 0u) {
field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
field_info.declaring_field_idx_ = def->declaring_field_idx;
@@ -172,7 +173,7 @@
void MakeSFieldUninitialized(uint32_t sfield_index) {
CHECK_LT(sfield_index, cu_.mir_graph->sfield_lowering_infos_.size());
cu_.mir_graph->sfield_lowering_infos_[sfield_index].flags_ &=
- ~MirSFieldLoweringInfo::kFlagIsInitialized;
+ ~MirSFieldLoweringInfo::kFlagClassIsInitialized;
}
void PerformLVN() {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index ee48796..44f69ba 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1112,14 +1112,11 @@
return true;
}
- if (!compiler_options.IsCompilationEnabled()) {
- *skip_message = "Compilation disabled";
- return true;
- }
+ DCHECK(compiler_options.IsCompilationEnabled());
// Set up compilation cutoffs based on current filter mode.
- size_t small_cutoff = 0;
- size_t default_cutoff = 0;
+ size_t small_cutoff;
+ size_t default_cutoff;
switch (compiler_filter) {
case CompilerOptions::kBalanced:
small_cutoff = compiler_options.GetSmallMethodThreshold();
@@ -1136,6 +1133,7 @@
break;
default:
LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
+ UNREACHABLE();
}
// If size < cutoff, assume we'll compile - but allow removal.
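
The mir_analysis.cc hunk above replaces zero-initialized cutoffs with a switch that must assign them on every path, ending the impossible default case with LOG(FATAL) plus UNREACHABLE(). A self-contained sketch of that pattern, with abort() standing in for LOG(FATAL) and the GCC/Clang builtin __builtin_unreachable() standing in for UNREACHABLE():

#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum class Filter { kBalanced, kSpeed };

size_t CutoffFor(Filter filter) {
  size_t small_cutoff;  // Deliberately uninitialized; every case must set it.
  switch (filter) {
    case Filter::kBalanced:
      small_cutoff = 100u;
      break;
    case Filter::kSpeed:
      small_cutoff = 1000u;
      break;
    default:
      std::fprintf(stderr, "Unexpected filter\n");
      std::abort();
      __builtin_unreachable();  // Tells the compiler this path never falls through.
  }
  return small_cutoff;
}

int main() {
  std::printf("%zu\n", CutoffFor(Filter::kBalanced));  // 100
  return 0;
}
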
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 0a6924c..5b7ac3c 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -319,46 +319,46 @@
DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 60 SGET vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_UMS,
+ DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 61 SGET_WIDE vAA, field@BBBB
- DF_DA | DF_A_WIDE | DF_SFIELD | DF_UMS,
+ DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
// 62 SGET_OBJECT vAA, field@BBBB
- DF_DA | DF_REF_A | DF_SFIELD | DF_UMS,
+ DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
// 63 SGET_BOOLEAN vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_UMS,
+ DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 64 SGET_BYTE vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_UMS,
+ DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 65 SGET_CHAR vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_UMS,
+ DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 66 SGET_SHORT vAA, field@BBBB
- DF_DA | DF_SFIELD | DF_UMS,
+ DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 67 SPUT vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_UMS,
+ DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 68 SPUT_WIDE vAA, field@BBBB
- DF_UA | DF_A_WIDE | DF_SFIELD | DF_UMS,
+ DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
// 69 SPUT_OBJECT vAA, field@BBBB
- DF_UA | DF_REF_A | DF_SFIELD | DF_UMS,
+ DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
// 6A SPUT_BOOLEAN vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_UMS,
+ DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 6B SPUT_BYTE vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_UMS,
+ DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 6C SPUT_CHAR vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_UMS,
+ DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 6D SPUT_SHORT vAA, field@BBBB
- DF_UA | DF_SFIELD | DF_UMS,
+ DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
// 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
@@ -370,7 +370,7 @@
DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
// 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_UMS,
+ DF_FORMAT_35C | DF_CLINIT | DF_UMS,
// 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
@@ -388,7 +388,7 @@
DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
// 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_UMS,
+ DF_FORMAT_3RC | DF_CLINIT | DF_UMS,
// 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
@@ -718,10 +718,10 @@
DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// E5 SGET_VOLATILE
- DF_DA | DF_SFIELD | DF_UMS,
+ DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
// E6 SPUT_VOLATILE
- DF_UA | DF_SFIELD | DF_UMS,
+ DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
// E7 IGET_OBJECT_VOLATILE
DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
@@ -733,10 +733,10 @@
DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// EA SGET_WIDE_VOLATILE
- DF_DA | DF_A_WIDE | DF_SFIELD | DF_UMS,
+ DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
// EB SPUT_WIDE_VOLATILE
- DF_UA | DF_A_WIDE | DF_SFIELD | DF_UMS,
+ DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
// EC BREAKPOINT
DF_NOP,
@@ -790,10 +790,10 @@
DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
// FD SGET_OBJECT_VOLATILE
- DF_DA | DF_REF_A | DF_SFIELD | DF_UMS,
+ DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
// FE SPUT_OBJECT_VOLATILE
- DF_UA | DF_REF_A | DF_SFIELD | DF_UMS,
+ DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
// FF UNUSED_FF
DF_NOP,
@@ -1343,7 +1343,7 @@
* counts explicitly used s_regs. A later phase will add implicit
* counts for things such as Method*, null-checked references, etc.
*/
-void MIRGraph::CountUses(struct BasicBlock* bb) {
+void MIRGraph::CountUses(class BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
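
The mir_dataflow.cc table above tags every SGET/SPUT/INVOKE_STATIC row with a new DF_CLINIT bit in its 64-bit attribute mask. A toy sketch of that per-opcode bitmask table; the bit positions and opcodes here are made up:

#include <cstdint>
#include <cstdio>

enum DataFlowBit {
  kDefinesA = 0,
  kUsesStaticField,
  kCanInitializeClass,
};

constexpr uint64_t DF_DA = UINT64_C(1) << kDefinesA;
constexpr uint64_t DF_SFIELD = UINT64_C(1) << kUsesStaticField;
constexpr uint64_t DF_CLINIT = UINT64_C(1) << kCanInitializeClass;

// One row per (toy) opcode, mirroring the big table in mir_dataflow.cc.
constexpr uint64_t kOpcodeAttributes[] = {
  /* SGET */ DF_DA | DF_SFIELD | DF_CLINIT,
  /* MOVE */ DF_DA,
};

int main() {
  for (size_t i = 0; i < sizeof(kOpcodeAttributes) / sizeof(kOpcodeAttributes[0]); ++i) {
    bool clinit = (kOpcodeAttributes[i] & DF_CLINIT) != 0u;
    std::printf("opcode %zu can init class: %d\n", i, clinit);
  }
  return 0;
}
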
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 68247b7..1db3b5b 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -62,7 +62,7 @@
compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
&it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
- it->field_offset_ = resolved_field->GetOffset();
+ it->field_offset_ = compiler_driver->GetFieldOffset(resolved_field);
std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx);
it->flags_ = 0u | // Without kFlagIsStatic.
@@ -94,7 +94,7 @@
Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
- Handle<mirror::Class> referrer_class(hs.NewHandle(
+ Handle<mirror::Class> referrer_class_handle(hs.NewHandle(
compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
@@ -110,16 +110,27 @@
&it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field) ? 1u : 0u;
- bool is_referrers_class, is_initialized;
+ mirror::Class* referrer_class = referrer_class_handle.Get();
std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
- dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx, &it->field_offset_,
- &it->storage_index_, &is_referrers_class, &is_initialized);
- it->flags_ = kFlagIsStatic |
+ dex_cache.Get(), referrer_class, resolved_field, field_idx, &it->storage_index_);
+ uint16_t flags = kFlagIsStatic |
(is_volatile ? kFlagIsVolatile : 0u) |
(fast_path.first ? kFlagFastGet : 0u) |
- (fast_path.second ? kFlagFastPut : 0u) |
- (is_referrers_class ? kFlagIsReferrersClass : 0u) |
- (is_initialized ? kFlagIsInitialized : 0u);
+ (fast_path.second ? kFlagFastPut : 0u);
+ if (fast_path.first) {
+ it->field_offset_ = compiler_driver->GetFieldOffset(resolved_field);
+ bool is_referrers_class =
+ compiler_driver->IsStaticFieldInReferrerClass(referrer_class, resolved_field);
+ bool is_class_initialized =
+ compiler_driver->IsStaticFieldsClassInitialized(referrer_class, resolved_field);
+ bool is_class_in_dex_cache = !is_referrers_class && // If referrer's class, we don't care.
+ compiler_driver->CanAssumeTypeIsPresentInDexCache(*dex_cache->GetDexFile(),
+ it->storage_index_);
+ flags |= (is_referrers_class ? kFlagIsReferrersClass : 0u) |
+ (is_class_initialized ? kFlagClassIsInitialized : 0u) |
+ (is_class_in_dex_cache ? kFlagClassIsInDexCache : 0u);
+ }
+ it->flags_ = flags;
}
}
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 1842a16..e97f7a0 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -130,7 +130,7 @@
kBitFastPut,
kIFieldLoweringInfoBitEnd
};
- COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kIFieldLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
@@ -173,8 +173,12 @@
return (flags_ & kFlagIsReferrersClass) != 0u;
}
- bool IsInitialized() const {
- return (flags_ & kFlagIsInitialized) != 0u;
+ bool IsClassInitialized() const {
+ return (flags_ & kFlagClassIsInitialized) != 0u;
+ }
+
+ bool IsClassInDexCache() const {
+ return (flags_ & kFlagClassIsInDexCache) != 0u;
}
MemberOffset FieldOffset() const {
@@ -190,14 +194,16 @@
kBitFastGet = kFieldInfoBitEnd,
kBitFastPut,
kBitIsReferrersClass,
- kBitIsInitialized,
+ kBitClassIsInitialized,
+ kBitClassIsInDexCache,
kSFieldLoweringInfoBitEnd
};
- COMPILE_ASSERT(kSFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kSFieldLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
- static constexpr uint16_t kFlagIsInitialized = 1u << kBitIsInitialized;
+ static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
+ static constexpr uint16_t kFlagClassIsInDexCache = 1u << kBitClassIsInDexCache;
// The member offset of the field, 0u if unresolved.
MemberOffset field_offset_;
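
The renamed flags above follow the usual layout: bit positions come from an enum, each flag is 1u << bit, and a static_assert checks that the whole set still fits the 16-bit flags_ word. A self-contained sketch of that convention with illustrative names only:

#include <cstdint>
#include <cstdio>

class FieldInfo {
 public:
  bool IsClassInitialized() const { return (flags_ & kFlagClassIsInitialized) != 0u; }
  bool IsClassInDexCache() const { return (flags_ & kFlagClassIsInDexCache) != 0u; }
  void SetFlags(uint16_t flags) { flags_ = flags; }

  enum {
    kBitIsStatic = 0,
    kBitIsVolatile,
    kBitClassIsInitialized,
    kBitClassIsInDexCache,
    kBitEnd
  };
  static_assert(kBitEnd <= 16, "Too many flags for a uint16_t");

  static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
  static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
  static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
  static constexpr uint16_t kFlagClassIsInDexCache = 1u << kBitClassIsInDexCache;

 private:
  uint16_t flags_ = 0u;
};

int main() {
  FieldInfo info;
  info.SetFlags(FieldInfo::kFlagIsStatic | FieldInfo::kFlagClassIsInitialized);
  std::printf("initialized=%d in_dex_cache=%d\n",
              info.IsClassInitialized(), info.IsClassInDexCache());  // 1 0
  return 0;
}
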
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 3fa80b9..f69d63c 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -97,11 +97,6 @@
max_nested_loops_(0u),
i_dom_list_(NULL),
temp_scoped_alloc_(),
- temp_insn_data_(nullptr),
- temp_bit_vector_size_(0u),
- temp_bit_vector_(nullptr),
- temp_bit_matrix_(nullptr),
- temp_gvn_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
try_block_addr_(NULL),
entry_block_(NULL),
@@ -133,6 +128,7 @@
sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
gen_suspend_test_list_(arena->Adapter()) {
+ memset(&temp_, 0, sizeof(temp_));
use_counts_.reserve(256);
raw_use_counts_.reserve(256);
block_list_.reserve(100);
@@ -302,28 +298,28 @@
* (by the caller)
* Utilizes a map for fast lookup of the typical cases.
*/
-BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
+BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
BasicBlock** immed_pred_block_p) {
if (code_offset >= current_code_item_->insns_size_in_code_units_) {
- return NULL;
+ return nullptr;
}
int block_id = dex_pc_to_block_map_[code_offset];
BasicBlock* bb = GetBasicBlock(block_id);
- if ((bb != NULL) && (bb->start_offset == code_offset)) {
+ if ((bb != nullptr) && (bb->start_offset == code_offset)) {
// Does this containing block start with the desired instruction?
return bb;
}
// No direct hit.
if (!create) {
- return NULL;
+ return nullptr;
}
- if (bb != NULL) {
+ if (bb != nullptr) {
// The target exists somewhere in an existing block.
- return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : NULL);
+ return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
}
// Create a new block.
@@ -360,8 +356,7 @@
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- FindBlock(address, false /* split */, true /*create*/,
- /* immed_pred_block_p */ NULL);
+ FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr);
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -466,7 +461,7 @@
LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
}
CountBranch(target);
- BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
+ BasicBlock* taken_block = FindBlock(target, /* create */ true,
/* immed_pred_block_p */ &cur_block);
cur_block->taken = taken_block->id;
taken_block->predecessors.push_back(cur_block->id);
@@ -474,19 +469,6 @@
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
BasicBlock* fallthrough_block = FindBlock(cur_offset + width,
- /*
- * If the method is processed
- * in sequential order from the
- * beginning, we don't need to
- * specify split for continue
- * blocks. However, this
- * routine can be called by
- * compileLoop, which starts
- * parsing the method from an
- * arbitrary address in the
- * method body.
- */
- true,
/* create */
true,
/* immed_pred_block_p */
@@ -494,8 +476,7 @@
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
return cur_block;
}
@@ -503,6 +484,7 @@
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags) {
+ UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
int size;
@@ -554,8 +536,8 @@
cur_block->successor_blocks.reserve(size);
for (i = 0; i < size; i++) {
- BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
- /* create */ true, /* immed_pred_block_p */ &cur_block);
+ BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
+ /* immed_pred_block_p */ &cur_block);
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
@@ -568,8 +550,8 @@
}
/* Fall-through case */
- BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
- /* create */ true, /* immed_pred_block_p */ NULL);
+ BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
+ /* immed_pred_block_p */ nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
@@ -579,6 +561,7 @@
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags, ArenaBitVector* try_block_addr,
const uint16_t* code_ptr, const uint16_t* code_end) {
+ UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
@@ -593,8 +576,8 @@
}
for (; iterator.HasNext(); iterator.Next()) {
- BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
- false /* creat */, NULL /* immed_pred_block_p */);
+ BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
+ nullptr /* immed_pred_block_p */);
if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
// Don't allow monitor-exit to catch its own exception, http://b/15745363 .
@@ -629,8 +612,7 @@
cur_block->explicit_throw = true;
if (code_ptr < code_end) {
// Force creation of new block following THROW via side-effect.
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
if (!in_try_block) {
// Don't split a THROW that can't rethrow - we're done.
@@ -813,8 +795,7 @@
* Create a fallthrough block for real instructions
* (incl. NOP).
*/
- FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
} else if (flags & Instruction::kThrow) {
cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
@@ -837,8 +818,8 @@
}
}
current_offset_ += width;
- BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */
- false, /* immed_pred_block_p */ NULL);
+ BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
+ /* immed_pred_block_p */ nullptr);
if (next_block) {
/*
* The next instruction could be the target of a previously parsed
@@ -934,7 +915,7 @@
bb->first_mir_insn ? " | " : " ");
for (mir = bb->first_mir_insn; mir; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
- fprintf(file, " {%04x %s %s %s %s %s %s %s\\l}%s\\\n", mir->offset,
+ fprintf(file, " {%04x %s %s %s %s %s %s %s %s\\l}%s\\\n", mir->offset,
mir->ssa_rep ? GetDalvikDisassembly(mir) :
!MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
Instruction::Name(mir->dalvikInsn.opcode) :
@@ -944,7 +925,8 @@
(mir->optimization_flags & MIR_IGNORE_SUSPEND_CHECK) != 0 ? " no_suspendcheck" : " ",
(mir->optimization_flags & MIR_STORE_NON_TEMPORAL) != 0 ? " non_temporal" : " ",
(mir->optimization_flags & MIR_CALLEE) != 0 ? " inlined" : " ",
- (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0 ? " no_clinit" : " ",
+ (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0 ? " cl_inited" : " ",
+ (mir->optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0 ? " cl_in_cache" : " ",
mir->next ? " | " : " ");
}
fprintf(file, " }\"];\n\n");
@@ -1227,6 +1209,10 @@
int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
+ if (opcode < kMirOpFirst) {
+ return; // It is not an extended instruction.
+ }
+
decoded_mir->append(extended_mir_op_names_[opcode - kMirOpFirst]);
switch (opcode) {
@@ -1362,9 +1348,10 @@
decoded_mir->append(", ");
decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
}
- decoded_mir->append(StringPrintf(" = vect%d", mir->dalvikInsn.vB));
+ decoded_mir->append(StringPrintf(" = vect%d (extr_idx:%d)", mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
} else {
- decoded_mir->append(StringPrintf(" v%d = vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+ decoded_mir->append(StringPrintf(" v%d = vect%d (extr_idx:%d)", mir->dalvikInsn.vA,
+ mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
}
FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
break;
@@ -1695,9 +1682,9 @@
void MIRGraph::SSATransformationStart() {
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = GetNumOfCodeAndTempVRs();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);
+ temp_.ssa.num_vregs = GetNumOfCodeAndTempVRs();
+ temp_.ssa.work_live_vregs = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false, kBitMapRegisterV);
}
void MIRGraph::SSATransformationEnd() {
@@ -1706,9 +1693,9 @@
VerifyDataflow();
}
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- temp_bit_matrix_ = nullptr; // Def block matrix.
+ temp_.ssa.num_vregs = 0u;
+ temp_.ssa.work_live_vregs = nullptr;
+ temp_.ssa.def_block_matrix = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index a405af1..d77ad6f 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -69,6 +69,7 @@
kUsesMethodStar, // Implicit use of Method*.
kUsesIField, // Accesses an instance field (IGET/IPUT).
kUsesSField, // Accesses a static field (SGET/SPUT).
+ kCanInitializeClass, // Can trigger class initialization (SGET/SPUT/INVOKE_STATIC).
kDoLVN, // Worth computing local value numbers.
};
@@ -105,6 +106,7 @@
#define DF_UMS (UINT64_C(1) << kUsesMethodStar)
#define DF_IFIELD (UINT64_C(1) << kUsesIField)
#define DF_SFIELD (UINT64_C(1) << kUsesSField)
+#define DF_CLINIT (UINT64_C(1) << kCanInitializeClass)
#define DF_LVN (UINT64_C(1) << kDoLVN)
#define DF_HAS_USES (DF_UA | DF_UB | DF_UC)
@@ -146,7 +148,9 @@
#define MIR_NULL_CHECK_ONLY (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY (1 << kMIRRangeCheckOnly)
-#define MIR_IGNORE_CLINIT_CHECK (1 << kMIRIgnoreClInitCheck)
+#define MIR_CLASS_IS_INITIALIZED (1 << kMIRClassIsInitialized)
+#define MIR_CLASS_IS_IN_DEX_CACHE (1 << kMIRClassIsInDexCache)
+#define MIR_IGNORE_DIV_ZERO_CHECK (1 << kMirIgnoreDivZeroCheck)
#define MIR_INLINED (1 << kMIRInlined)
#define MIR_INLINED_PRED (1 << kMIRInlinedPred)
#define MIR_CALLEE (1 << kMIRCallee)
@@ -224,7 +228,8 @@
* The Midlevel Intermediate Representation node, which may be largely considered a
* wrapper around a Dalvik byte code.
*/
-struct MIR {
+class MIR : public ArenaObject<kArenaAllocMIR> {
+ public:
/*
* TODO: remove embedded DecodedInstruction to save space, keeping only opcode. Recover
* additional fields on as-needed basis. Question: how to support MIR Pseudo-ops; probably
@@ -340,16 +345,12 @@
MIR* Copy(CompilationUnit *c_unit);
MIR* Copy(MIRGraph* mir_Graph);
-
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
- }
- static void operator delete(void* p) {} // Nop.
};
struct SuccessorBlockInfo;
-struct BasicBlock {
+class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
+ public:
BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
: id(block_id),
dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
@@ -453,10 +454,8 @@
MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
bool IsExceptionBlock() const;
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
- }
- static void operator delete(void* p) {} // Nop.
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
/*
@@ -544,7 +543,7 @@
/* Find existing block */
BasicBlock* FindBlock(DexOffset code_offset) {
- return FindBlock(code_offset, false, false, NULL);
+ return FindBlock(code_offset, false, NULL);
}
const uint16_t* GetCurrentInsns() const {
@@ -623,7 +622,7 @@
return def_count_;
}
- ArenaAllocator* GetArena() {
+ ArenaAllocator* GetArena() const {
return arena_;
}
@@ -1131,7 +1130,7 @@
* @brief Count the uses in the BasicBlock
* @param bb the BasicBlock
*/
- void CountUses(struct BasicBlock* bb);
+ void CountUses(class BasicBlock* bb);
static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
static uint64_t GetDataFlowAttributes(MIR* mir);
@@ -1204,8 +1203,7 @@
bool ContentIsInsn(const uint16_t* code_ptr);
BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
BasicBlock** immed_pred_block_p);
- BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
- BasicBlock** immed_pred_block_p);
+ BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p);
void ProcessTryCatchBlocks();
bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
@@ -1229,7 +1227,7 @@
void ComputeDomPostOrderTraversal(BasicBlock* bb);
int GetSSAUseCount(int s_reg);
bool BasicBlockOpt(BasicBlock* bb);
- bool BuildExtendedBBList(struct BasicBlock* bb);
+ bool BuildExtendedBBList(class BasicBlock* bb);
bool FillDefBlockMatrix(BasicBlock* bb);
void InitializeDominationInfo(BasicBlock* bb);
bool ComputeblockIDom(BasicBlock* bb);
@@ -1262,7 +1260,7 @@
ArenaVector<BasicBlockId> dom_post_order_traversal_;
ArenaVector<BasicBlockId> topological_order_;
// Indexes in topological_order_ need to be only as big as the BasicBlockId.
- COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), assuming_16_bit_BasicBlockId);
+ static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "Assuming 16 bit BasicBlockId");
// For each loop head, remember the past-the-end index of the end of the loop. 0 if not loop head.
ArenaVector<uint16_t> topological_order_loop_ends_;
// Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
@@ -1272,15 +1270,38 @@
size_t max_nested_loops_;
int* i_dom_list_;
std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
- uint16_t* temp_insn_data_;
- uint32_t temp_bit_vector_size_;
- ArenaBitVector* temp_bit_vector_;
- // temp_bit_matrix_ used as one of
- // - def_block_matrix: original num registers x num_blocks_,
- // - ending_null_check_matrix: num_blocks_ x original num registers,
- // - ending_clinit_check_matrix: num_blocks_ x unique class count.
- ArenaBitVector** temp_bit_matrix_;
- std::unique_ptr<GlobalValueNumbering> temp_gvn_;
+ // Union of temporaries used by different passes.
+ union {
+ // Class init check elimination.
+ struct {
+ size_t num_class_bits; // 2 bits per class: class initialized and class in dex cache.
+ ArenaBitVector* work_classes_to_check;
+ ArenaBitVector** ending_classes_to_check_matrix; // num_blocks_ x num_class_bits.
+ uint16_t* indexes;
+ } cice;
+ // Null check elimination.
+ struct {
+ size_t num_vregs;
+ ArenaBitVector* work_vregs_to_check;
+ ArenaBitVector** ending_vregs_to_check_matrix; // num_blocks_ x num_vregs.
+ } nce;
+ // Special method inlining.
+ struct {
+ size_t num_indexes;
+ ArenaBitVector* processed_indexes;
+ uint16_t* lowering_infos;
+ } smi;
+ // SSA transformation.
+ struct {
+ size_t num_vregs;
+ ArenaBitVector* work_live_vregs;
+ ArenaBitVector** def_block_matrix; // num_vregs x num_blocks_.
+ } ssa;
+ // Global value numbering.
+ struct {
+ GlobalValueNumbering* gvn;
+ } gvn;
+ } temp_;
static const int kInvalidEntry = -1;
ArenaVector<BasicBlock*> block_list_;
ArenaBitVector* try_block_addr_;
@@ -1301,7 +1322,7 @@
int method_sreg_;
unsigned int attributes_;
Checkstats* checkstats_;
- ArenaAllocator* arena_;
+ ArenaAllocator* const arena_;
int backward_branches_;
int forward_branches_;
size_t num_non_special_compiler_temps_; // Keeps track of allocated non-special compiler temps. These are VRs that are in compiler temp region on stack.
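
The temp_ union introduced above relies on compiler passes running one at a time, so their scratch state can share storage and be memset to zero once in the constructor. A minimal sketch of that union-of-temporaries idea; the field names are illustrative:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

struct PassTemporaries {
  union {
    struct {                 // Null check elimination scratch state.
      size_t num_vregs;
      uint32_t* work_vregs_to_check;
    } nce;
    struct {                 // SSA transformation scratch state.
      size_t num_vregs;
      uint32_t* work_live_vregs;
    } ssa;
  } temp;

  PassTemporaries() {
    std::memset(&temp, 0, sizeof(temp));  // Safe: only trivially copyable members.
  }
};

int main() {
  PassTemporaries t;
  t.temp.nce.num_vregs = 16u;   // One pass uses the storage...
  t.temp.nce.num_vregs = 0u;    // ...and clears it before the next pass reuses it.
  t.temp.ssa.num_vregs = 32u;
  std::printf("%zu\n", t.temp.ssa.num_vregs);  // 32
  return 0;
}
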
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index cc2bd95..b234950 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -76,14 +76,16 @@
int fast_path_flags = compiler_driver->IsFastInvoke(
soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
&target_method, devirt_target, &it->direct_code_, &it->direct_method_);
- bool needs_clinit =
- compiler_driver->NeedsClassInitialization(referrer_class.Get(), resolved_method);
+ bool is_referrers_class = (referrer_class.Get() == resolved_method->GetDeclaringClass());
+ bool is_class_initialized =
+ compiler_driver->IsMethodsClassInitialized(referrer_class.Get(), resolved_method);
uint16_t other_flags = it->flags_ &
- ~(kFlagFastPath | kFlagNeedsClassInitialization | (kInvokeTypeMask << kBitSharpTypeBegin));
+ ~(kFlagFastPath | kFlagClassIsInitialized | (kInvokeTypeMask << kBitSharpTypeBegin));
it->flags_ = other_flags |
(fast_path_flags != 0 ? kFlagFastPath : 0u) |
(static_cast<uint16_t>(invoke_type) << kBitSharpTypeBegin) |
- (needs_clinit ? kFlagNeedsClassInitialization : 0u);
+ (is_referrers_class ? kFlagIsReferrersClass : 0u) |
+ (is_class_initialized ? kFlagClassIsInitialized : 0u);
it->target_dex_file_ = target_method.dex_file;
it->target_method_idx_ = target_method.dex_method_index;
it->stats_flags_ = fast_path_flags;
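
The flag update above first masks out the bits this pass owns (other_flags = flags_ & ~(...)) and then ORs in the recomputed ones, so unrelated bits survive. A short sketch of that masked rewrite with made-up flag constants:

#include <cstdint>
#include <cstdio>

constexpr uint16_t kFlagFastPath = 1u << 0;
constexpr uint16_t kFlagIsReferrersClass = 1u << 1;
constexpr uint16_t kFlagClassIsInitialized = 1u << 2;
constexpr uint16_t kFlagUnrelated = 1u << 3;  // Owned by some other pass; must survive.

uint16_t UpdateFlags(uint16_t flags, bool fast_path, bool referrers_class, bool initialized) {
  uint16_t other_flags =
      flags & ~(kFlagFastPath | kFlagIsReferrersClass | kFlagClassIsInitialized);
  return other_flags |
         (fast_path ? kFlagFastPath : 0u) |
         (referrers_class ? kFlagIsReferrersClass : 0u) |
         (initialized ? kFlagClassIsInitialized : 0u);
}

int main() {
  uint16_t flags = kFlagUnrelated | kFlagFastPath;  // Stale fast-path bit set.
  flags = UpdateFlags(flags, false, true, true);
  std::printf("0x%x\n", flags);  // 0xe: unrelated bit kept, fast-path cleared, new bits set.
  return 0;
}
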
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index efe92f3..08fb103 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -60,7 +60,7 @@
kBitIsStatic = 0,
kMethodInfoBitEnd
};
- COMPILE_ASSERT(kMethodInfoBitEnd <= 16, too_many_flags);
+ static_assert(kMethodInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
MirMethodInfo(uint16_t method_idx, uint16_t flags)
@@ -123,8 +123,12 @@
return (flags_ & kFlagFastPath) != 0u;
}
- bool NeedsClassInitialization() const {
- return (flags_ & kFlagNeedsClassInitialization) != 0u;
+ bool IsReferrersClass() const {
+ return (flags_ & kFlagIsReferrersClass) != 0;
+ }
+
+ bool IsClassInitialized() const {
+ return (flags_ & kFlagClassIsInitialized) != 0u;
}
InvokeType GetInvokeType() const {
@@ -162,17 +166,19 @@
kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3, // 3 bits for invoke type.
kBitSharpTypeBegin,
kBitSharpTypeEnd = kBitSharpTypeBegin + 3, // 3 bits for sharp type.
- kBitNeedsClassInitialization = kBitSharpTypeEnd,
- kMethodLoweringInfoEnd
+ kBitIsReferrersClass = kBitSharpTypeEnd,
+ kBitClassIsInitialized,
+ kMethodLoweringInfoBitEnd
};
- COMPILE_ASSERT(kMethodLoweringInfoEnd <= 16, too_many_flags);
+ static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
- static constexpr uint16_t kFlagNeedsClassInitialization = 1u << kBitNeedsClassInitialization;
+ static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
+ static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
static constexpr uint16_t kInvokeTypeMask = 7u;
- COMPILE_ASSERT((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
- assert_invoke_type_bits_ok);
- COMPILE_ASSERT((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
- assert_sharp_type_bits_ok);
+ static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
+ "assert invoke type bits failed");
+ static_assert((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
+ "assert sharp type bits failed");
uintptr_t direct_code_;
uintptr_t direct_method_;
@@ -185,7 +191,7 @@
uint16_t vtable_idx_;
int stats_flags_;
- friend class ClassInitCheckEliminationTest;
+ friend class MirOptimizationTest;
};
} // namespace art
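
On the COMPILE_ASSERT to static_assert migration seen throughout these headers: the condition stays the same, only the second argument becomes a human-readable string instead of a legal identifier. A tiny example with an illustrative bit layout:

#include <cstdint>

enum {
  kBitInvokeTypeBegin = 0,
  kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3,  // 3 bits for the invoke type.
};

constexpr uint16_t kInvokeTypeMask = 7u;

// Old style:  COMPILE_ASSERT(condition, legal_identifier_as_message);
// New style:
static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
              "invoke type mask must cover exactly 3 bits");

int main() { return 0; }
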
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 96505ab..d025d08 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -16,12 +16,11 @@
#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
+#include "dataflow_iterator-inl.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/global_value_numbering.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "quick/dex_file_method_inliner.h"
+#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
#include "utils/scoped_arena_containers.h"
@@ -215,8 +214,8 @@
kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
};
-COMPILE_ASSERT(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
- if_ccz_ccodes_size1);
+static_assert(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
+ "if_ccz_ccodes_size1");
static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
@@ -226,12 +225,12 @@
return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
}
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, check_if_eqz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, check_if_nez_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, check_if_ltz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, check_if_gez_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_gtz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, "if_eqz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, "if_nez ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, "if_ltz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, "if_gez ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, "if_gtz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, "if_lez ccode");
int MIRGraph::GetSSAUseCount(int s_reg) {
DCHECK_LT(static_cast<size_t>(s_reg), ssa_subscripts_.size());
@@ -660,7 +659,7 @@
}
/* Collect stats on number of checks removed */
-void MIRGraph::CountChecks(struct BasicBlock* bb) {
+void MIRGraph::CountChecks(class BasicBlock* bb) {
if (bb->data_flow_info != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
if (mir->ssa_rep == NULL) {
@@ -750,7 +749,7 @@
}
/* Combine any basic blocks terminated by instructions that we now know can't throw */
-void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
+void MIRGraph::CombineBlocks(class BasicBlock* bb) {
// Loop here to allow combining a sequence of blocks
while ((bb->block_type == kDalvikByteCode) &&
(bb->last_mir_insn != nullptr) &&
@@ -772,14 +771,14 @@
if ((df_attributes & DF_IFIELD) != 0) {
// Combine only if fast, otherwise weird things can happen.
const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(throw_insn);
- ok = (df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet();
+ ok = (df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut();
} else if ((df_attributes & DF_SFIELD) != 0) {
// Combine only if fast, otherwise weird things can happen.
const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(throw_insn);
- bool fast = ((df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet());
+ bool fast = ((df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut());
// Don't combine if the SGET/SPUT can call <clinit>().
- bool clinit = !field_info.IsInitialized() &&
- (throw_insn->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0;
+ bool clinit = !field_info.IsClassInitialized() &&
+ (throw_insn->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0;
ok = fast && !clinit;
} else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
// Only AGET/APUT have range checks. We have processed the AGET/APUT null check above.
@@ -892,12 +891,12 @@
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = GetNumOfCodeVRs();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
- temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_.nce.num_vregs = GetNumOfCodeVRs();
+ temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
+ temp_.nce.ending_vregs_to_check_matrix = static_cast<ArenaBitVector**>(
temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
- std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
+ std::fill_n(temp_.nce.ending_vregs_to_check_matrix, GetNumBlocks(), nullptr);
// reset MIR_MARK
AllNodesIterator iter(this);
@@ -920,7 +919,7 @@
return false;
}
- ArenaBitVector* vregs_to_check = temp_bit_vector_;
+ ArenaBitVector* vregs_to_check = temp_.nce.work_vregs_to_check;
/*
* Set initial state. Catch blocks don't need any special treatment.
*/
@@ -941,7 +940,7 @@
// Starting state is union of all incoming arcs.
bool copied_first = false;
for (BasicBlockId pred_id : bb->predecessors) {
- if (temp_bit_matrix_[pred_id] == nullptr) {
+ if (temp_.nce.ending_vregs_to_check_matrix[pred_id] == nullptr) {
continue;
}
BasicBlock* pred_bb = GetBasicBlock(pred_id);
@@ -963,9 +962,9 @@
}
if (!copied_first) {
copied_first = true;
- vregs_to_check->Copy(temp_bit_matrix_[pred_id]);
+ vregs_to_check->Copy(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
} else {
- vregs_to_check->Union(temp_bit_matrix_[pred_id]);
+ vregs_to_check->Union(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
}
if (null_check_insn != nullptr) {
vregs_to_check->ClearBit(null_check_insn->dalvikInsn.vA);
@@ -1058,27 +1057,27 @@
// Did anything change?
bool nce_changed = false;
- ArenaBitVector* old_ending_ssa_regs_to_check = temp_bit_matrix_[bb->id];
+ ArenaBitVector* old_ending_ssa_regs_to_check = temp_.nce.ending_vregs_to_check_matrix[bb->id];
if (old_ending_ssa_regs_to_check == nullptr) {
DCHECK(temp_scoped_alloc_.get() != nullptr);
nce_changed = vregs_to_check->GetHighestBitSet() != -1;
- temp_bit_matrix_[bb->id] = vregs_to_check;
+ temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
// Create a new vregs_to_check for next BB.
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
+ temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
} else if (!vregs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
nce_changed = true;
- temp_bit_matrix_[bb->id] = vregs_to_check;
- temp_bit_vector_ = old_ending_ssa_regs_to_check; // Reuse for vregs_to_check for next BB.
+ temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
+ temp_.nce.work_vregs_to_check = old_ending_ssa_regs_to_check; // Reuse for next BB.
}
return nce_changed;
}
void MIRGraph::EliminateNullChecksEnd() {
// Clean up temporaries.
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- temp_bit_matrix_ = nullptr;
+ temp_.nce.num_vregs = 0u;
+ temp_.nce.work_vregs_to_check = nullptr;
+ temp_.nce.ending_vregs_to_check_matrix = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
@@ -1087,7 +1086,7 @@
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
- COMPILE_ASSERT(kMarkToIgnoreNullCheckShift > 0, check_valid_shift_right);
+ static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
(mir->optimization_flags & MIR_MARK) >> kMarkToIgnoreNullCheckShift;
mir->optimization_flags |= mirMarkAdjustedToIgnoreNullCheck;
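The transfer above relies on MIR_MARK sitting at a higher bit position than MIR_IGNORE_NULL_CHECK, so a right shift moves the temporary mark onto the ignore-null-check flag. A minimal standalone sketch of the idiom; the bit positions below are assumptions for illustration, not the real enumerator values.

#include <cassert>
#include <cstdint>

// Assumed bit positions; the real values come from the MIR optimization flag enum.
constexpr int kMIRIgnoreNullCheck = 0;
constexpr int kMIRMark = 3;
constexpr uint16_t MIR_IGNORE_NULL_CHECK = 1u << kMIRIgnoreNullCheck;
constexpr uint16_t MIR_MARK = 1u << kMIRMark;

uint16_t TransferMarkToIgnoreNullCheck(uint16_t flags) {
  constexpr int kShift = kMIRMark - kMIRIgnoreNullCheck;
  static_assert(kShift > 0, "Not a valid right-shift");
  // Shift the temporary MARK bit down onto the IGNORE_NULL_CHECK bit and merge it in.
  return flags | ((flags & MIR_MARK) >> kShift);
}

int main() {
  assert((TransferMarkToIgnoreNullCheck(MIR_MARK) & MIR_IGNORE_NULL_CHECK) != 0);
  assert((TransferMarkToIgnoreNullCheck(0u) & MIR_IGNORE_NULL_CHECK) == 0);
  return 0;
}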
@@ -1116,7 +1115,7 @@
bool MIRGraph::EliminateClassInitChecksGate() {
if ((cu_->disable_opt & (1 << kClassInitCheckElimination)) != 0 ||
- !cu_->mir_graph->HasStaticFieldAccess()) {
+ (merged_df_flags_ & DF_CLINIT) == 0) {
return false;
}
@@ -1125,8 +1124,9 @@
// Each insn we use here has at least 2 code units, offset/2 will be a unique index.
const size_t end = (GetNumDalvikInsns() + 1u) / 2u;
- temp_insn_data_ = static_cast<uint16_t*>(
- temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
+ temp_.cice.indexes = static_cast<uint16_t*>(
+ temp_scoped_alloc_->Alloc(end * sizeof(*temp_.cice.indexes), kArenaAllocGrowableArray));
+ std::fill_n(temp_.cice.indexes, end, 0xffffu);
uint32_t unique_class_count = 0u;
{
@@ -1162,8 +1162,7 @@
if (mir->dalvikInsn.opcode >= Instruction::SGET &&
mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
- uint16_t index = 0xffffu;
- if (!field_info.IsInitialized()) {
+ if (!field_info.IsReferrersClass()) {
DCHECK_LT(class_to_index_map.size(), 0xffffu);
MapEntry entry = {
// Treat unresolved fields as if each had its own class.
@@ -1173,10 +1172,24 @@
: field_info.FieldIndex(),
static_cast<uint16_t>(class_to_index_map.size())
};
- index = class_to_index_map.insert(entry).first->index;
+ uint16_t index = class_to_index_map.insert(entry).first->index;
+ // Using offset/2 for index into temp_.cice.indexes.
+ temp_.cice.indexes[mir->offset / 2u] = index;
}
- // Using offset/2 for index into temp_insn_data_.
- temp_insn_data_[mir->offset / 2u] = index;
+ } else if (mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
+ mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE) {
+ const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
+ DCHECK(method_info.IsStatic());
+ if (method_info.FastPath() && !method_info.IsReferrersClass()) {
+ MapEntry entry = {
+ method_info.DeclaringDexFile(),
+ method_info.DeclaringClassIndex(),
+ static_cast<uint16_t>(class_to_index_map.size())
+ };
+ uint16_t index = class_to_index_map.insert(entry).first->index;
+ // Using offset/2 for index into temp_.cice.indexes.
+ temp_.cice.indexes[mir->offset / 2u] = index;
+ }
}
}
}
@@ -1186,18 +1199,19 @@
if (unique_class_count == 0u) {
// All SGET/SPUTs refer to initialized classes. Nothing to do.
- temp_insn_data_ = nullptr;
+ temp_.cice.indexes = nullptr;
temp_scoped_alloc_.reset();
return false;
}
- temp_bit_vector_size_ = unique_class_count;
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
- temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ // 2 bits for each class: is class initialized, is class in dex cache.
+ temp_.cice.num_class_bits = 2u * unique_class_count;
+ temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
+ temp_.cice.ending_classes_to_check_matrix = static_cast<ArenaBitVector**>(
temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
- std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
- DCHECK_GT(temp_bit_vector_size_, 0u);
+ std::fill_n(temp_.cice.ending_classes_to_check_matrix, GetNumBlocks(), nullptr);
+ DCHECK_GT(temp_.cice.num_class_bits, 0u);
return true;
}
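A minimal sketch of the two-bits-per-class encoding introduced above, under the assumption that index is the per-class value stored in temp_.cice.indexes: bit 2*index tracks whether the class may still need an initialization check, bit 2*index+1 whether it may still need a dex-cache check.

#include <cassert>
#include <cstdint>

// Hypothetical helpers; the pass itself works on an ArenaBitVector rather than a plain mask.
constexpr uint32_t ClinitCheckBit(uint32_t index) { return 2u * index; }
constexpr uint32_t DexCacheCheckBit(uint32_t index) { return 2u * index + 1u; }

int main() {
  uint64_t classes_to_check = ~uint64_t{0};  // Entry block: every check is still required.
  const uint32_t index = 3u;                 // Some class index assigned by the gate.
  // An SGET/SPUT on that class clears both bits, so later accesses can skip both checks;
  // an INVOKE_STATIC would clear only the initialization bit.
  classes_to_check &= ~(uint64_t{1} << ClinitCheckBit(index));
  classes_to_check &= ~(uint64_t{1} << DexCacheCheckBit(index));
  assert((classes_to_check & (uint64_t{1} << ClinitCheckBit(index))) == 0u);
  assert((classes_to_check & (uint64_t{1} << DexCacheCheckBit(index))) == 0u);
  return 0;
}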
@@ -1215,30 +1229,22 @@
/*
* Set initial state. Catch blocks don't need any special treatment.
*/
- ArenaBitVector* classes_to_check = temp_bit_vector_;
+ ArenaBitVector* classes_to_check = temp_.cice.work_classes_to_check;
DCHECK(classes_to_check != nullptr);
if (bb->block_type == kEntryBlock) {
- classes_to_check->SetInitialBits(temp_bit_vector_size_);
- } else if (bb->predecessors.size() == 1) {
- BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
- // pred_bb must have already been processed at least once.
- DCHECK(pred_bb != nullptr);
- DCHECK(temp_bit_matrix_[pred_bb->id] != nullptr);
- classes_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
+ classes_to_check->SetInitialBits(temp_.cice.num_class_bits);
} else {
// Starting state is union of all incoming arcs.
bool copied_first = false;
for (BasicBlockId pred_id : bb->predecessors) {
- BasicBlock* pred_bb = GetBasicBlock(pred_id);
- DCHECK(pred_bb != nullptr);
- if (temp_bit_matrix_[pred_bb->id] == nullptr) {
+ if (temp_.cice.ending_classes_to_check_matrix[pred_id] == nullptr) {
continue;
}
if (!copied_first) {
copied_first = true;
- classes_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
+ classes_to_check->Copy(temp_.cice.ending_classes_to_check_matrix[pred_id]);
} else {
- classes_to_check->Union(temp_bit_matrix_[pred_bb->id]);
+ classes_to_check->Union(temp_.cice.ending_classes_to_check_matrix[pred_id]);
}
}
DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
@@ -1247,51 +1253,75 @@
  // Walk through the instructions in the block, updating as necessary.
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- if (mir->dalvikInsn.opcode >= Instruction::SGET &&
- mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
- uint16_t index = temp_insn_data_[mir->offset / 2u];
- if (index != 0xffffu) {
- if (mir->dalvikInsn.opcode >= Instruction::SGET &&
- mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
- if (!classes_to_check->IsBitSet(index)) {
- // Eliminate the class init check.
- mir->optimization_flags |= MIR_IGNORE_CLINIT_CHECK;
- } else {
- // Do the class init check.
- mir->optimization_flags &= ~MIR_IGNORE_CLINIT_CHECK;
- }
+ uint16_t index = temp_.cice.indexes[mir->offset / 2u];
+ if (index != 0xffffu) {
+ bool check_initialization = false;
+ bool check_dex_cache = false;
+
+ // NOTE: index != 0xffff does not guarantee that this is an SGET/SPUT/INVOKE_STATIC.
+ // Dex instructions with width 1 can have the same offset/2.
+
+ if (mir->dalvikInsn.opcode >= Instruction::SGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ check_initialization = true;
+ check_dex_cache = true;
+ } else if (mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
+ mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE) {
+ check_initialization = true;
+ // NOTE: INVOKE_STATIC doesn't guarantee that the type will be in the dex cache.
+ }
+
+ if (check_dex_cache) {
+ uint32_t check_dex_cache_index = 2u * index + 1u;
+ if (!classes_to_check->IsBitSet(check_dex_cache_index)) {
+ // Eliminate the class init check.
+ mir->optimization_flags |= MIR_CLASS_IS_IN_DEX_CACHE;
+ } else {
+ // Do the class init check.
+ mir->optimization_flags &= ~MIR_CLASS_IS_IN_DEX_CACHE;
+ }
+ classes_to_check->ClearBit(check_dex_cache_index);
+ }
+ if (check_initialization) {
+ uint32_t check_clinit_index = 2u * index;
+ if (!classes_to_check->IsBitSet(check_clinit_index)) {
+ // Eliminate the class init check.
+ mir->optimization_flags |= MIR_CLASS_IS_INITIALIZED;
+ } else {
+ // Do the class init check.
+ mir->optimization_flags &= ~MIR_CLASS_IS_INITIALIZED;
}
// Mark the class as initialized.
- classes_to_check->ClearBit(index);
+ classes_to_check->ClearBit(check_clinit_index);
}
}
}
// Did anything change?
bool changed = false;
- ArenaBitVector* old_ending_classes_to_check = temp_bit_matrix_[bb->id];
+ ArenaBitVector* old_ending_classes_to_check = temp_.cice.ending_classes_to_check_matrix[bb->id];
if (old_ending_classes_to_check == nullptr) {
DCHECK(temp_scoped_alloc_.get() != nullptr);
changed = classes_to_check->GetHighestBitSet() != -1;
- temp_bit_matrix_[bb->id] = classes_to_check;
+ temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
// Create a new classes_to_check for next BB.
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
} else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
changed = true;
- temp_bit_matrix_[bb->id] = classes_to_check;
- temp_bit_vector_ = old_ending_classes_to_check; // Reuse for classes_to_check for next BB.
+ temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
+ temp_.cice.work_classes_to_check = old_ending_classes_to_check; // Reuse for next BB.
}
return changed;
}
void MIRGraph::EliminateClassInitChecksEnd() {
// Clean up temporaries.
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- temp_bit_matrix_ = nullptr;
- DCHECK(temp_insn_data_ != nullptr);
- temp_insn_data_ = nullptr;
+ temp_.cice.num_class_bits = 0u;
+ temp_.cice.work_classes_to_check = nullptr;
+ temp_.cice.ending_classes_to_check_matrix = nullptr;
+ DCHECK(temp_.cice.indexes != nullptr);
+ temp_.cice.indexes = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
}
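How a later stage might consume the two per-MIR flags can be sketched as follows; the helper and the flag values are assumptions for illustration, not the real backend code. MIR_CLASS_IS_INITIALIZED lets the consumer drop the explicit initialization check, while MIR_CLASS_IS_IN_DEX_CACHE additionally lets it skip the slow path that resolves the type into the dex cache.

#include <cstdint>

// Hypothetical flag values; the real ones are defined with the other MIR optimization flags.
enum : uint16_t {
  MIR_CLASS_IS_INITIALIZED  = 1u << 10,
  MIR_CLASS_IS_IN_DEX_CACHE = 1u << 11,
};

struct StaticAccessPlan {
  bool emit_clinit_check;         // Still need the runtime class-initialization check.
  bool emit_dex_cache_slow_path;  // Still need the slow path that loads/resolves the type.
};

StaticAccessPlan PlanStaticAccess(uint16_t optimization_flags) {
  return StaticAccessPlan{
      (optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0,
      (optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0};
}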
@@ -1303,39 +1333,39 @@
DCHECK(temp_scoped_alloc_ == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- DCHECK(temp_gvn_ == nullptr);
- temp_gvn_.reset(
- new (temp_scoped_alloc_.get()) GlobalValueNumbering(cu_, temp_scoped_alloc_.get(),
- GlobalValueNumbering::kModeGvn));
+ DCHECK(temp_.gvn.gvn == nullptr);
+ temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
+ cu_, temp_scoped_alloc_.get(), GlobalValueNumbering::kModeGvn);
return true;
}
bool MIRGraph::ApplyGlobalValueNumbering(BasicBlock* bb) {
- DCHECK(temp_gvn_ != nullptr);
- LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb);
+ DCHECK(temp_.gvn.gvn != nullptr);
+ LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb);
if (lvn != nullptr) {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
lvn->GetValueNumber(mir);
}
}
- bool change = (lvn != nullptr) && temp_gvn_->FinishBasicBlock(bb);
+ bool change = (lvn != nullptr) && temp_.gvn.gvn->FinishBasicBlock(bb);
return change;
}
void MIRGraph::ApplyGlobalValueNumberingEnd() {
// Perform modifications.
- if (temp_gvn_->Good()) {
+ DCHECK(temp_.gvn.gvn != nullptr);
+ if (temp_.gvn.gvn->Good()) {
if (max_nested_loops_ != 0u) {
- temp_gvn_->StartPostProcessing();
+ temp_.gvn.gvn->StartPostProcessing();
TopologicalSortIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
ScopedArenaAllocator allocator(&cu_->arena_stack); // Reclaim memory after each LVN.
- LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
+ LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb, &allocator);
if (lvn != nullptr) {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
lvn->GetValueNumber(mir);
}
- bool change = temp_gvn_->FinishBasicBlock(bb);
+ bool change = temp_.gvn.gvn->FinishBasicBlock(bb);
DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
}
@@ -1346,16 +1376,16 @@
LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
- DCHECK(temp_gvn_ != nullptr);
- temp_gvn_.reset();
+ delete temp_.gvn.gvn;
+ temp_.gvn.gvn = nullptr;
DCHECK(temp_scoped_alloc_ != nullptr);
temp_scoped_alloc_.reset();
}
void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
uint32_t method_index = invoke->meta.method_lowering_info;
- if (temp_bit_vector_->IsBitSet(method_index)) {
- iget_or_iput->meta.ifield_lowering_info = temp_insn_data_[method_index];
+ if (temp_.smi.processed_indexes->IsBitSet(method_index)) {
+ iget_or_iput->meta.ifield_lowering_info = temp_.smi.lowering_infos[method_index];
DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
return;
}
@@ -1372,8 +1402,8 @@
uint32_t field_info_index = ifield_lowering_infos_.size();
ifield_lowering_infos_.push_back(inlined_field_info);
- temp_bit_vector_->SetBit(method_index);
- temp_insn_data_[method_index] = field_info_index;
+ temp_.smi.processed_indexes->SetBit(method_index);
+ temp_.smi.lowering_infos[method_index] = field_info_index;
iget_or_iput->meta.ifield_lowering_info = field_info_index;
}
@@ -1395,12 +1425,12 @@
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = method_lowering_infos_.size();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
- temp_bit_vector_->ClearAllBits();
- temp_insn_data_ = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
- temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
+ temp_.smi.num_indexes = method_lowering_infos_.size();
+ temp_.smi.processed_indexes = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.smi.num_indexes, false, kBitMapMisc);
+ temp_.smi.processed_indexes->ClearAllBits();
+ temp_.smi.lowering_infos = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
+ temp_.smi.num_indexes * sizeof(*temp_.smi.lowering_infos), kArenaAllocGrowableArray));
}
void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
@@ -1425,8 +1455,8 @@
}
if (sharp_type == kStatic) {
- bool needs_clinit = method_info.NeedsClassInitialization() &&
- ((mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0);
+ bool needs_clinit = !method_info.IsClassInitialized() &&
+ ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0);
if (needs_clinit) {
continue;
}
@@ -1447,10 +1477,12 @@
}
void MIRGraph::InlineSpecialMethodsEnd() {
- DCHECK(temp_insn_data_ != nullptr);
- temp_insn_data_ = nullptr;
- DCHECK(temp_bit_vector_ != nullptr);
- temp_bit_vector_ = nullptr;
+ // Clean up temporaries.
+ DCHECK(temp_.smi.lowering_infos != nullptr);
+ temp_.smi.lowering_infos = nullptr;
+ temp_.smi.num_indexes = 0u;
+ DCHECK(temp_.smi.processed_indexes != nullptr);
+ temp_.smi.processed_indexes = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
}
@@ -1479,7 +1511,7 @@
}
}
-bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
+bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (bb->visited) return false;
if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock))) {
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 337d4ef..8874faf 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -36,10 +36,21 @@
BasicBlockId predecessors[kMaxPredecessors];
};
+ struct MethodDef {
+ uint16_t method_idx;
+ uintptr_t declaring_dex_file;
+ uint16_t declaring_class_idx;
+ uint16_t declaring_method_idx;
+ InvokeType invoke_type;
+ InvokeType sharp_type;
+ bool is_referrers_class;
+ bool is_initialized;
+ };
+
struct MIRDef {
BasicBlockId bbid;
Instruction::Code opcode;
- uint32_t field_info;
+ uint32_t field_or_method_info;
uint32_t vA;
uint32_t vB;
uint32_t vC;
@@ -68,6 +79,19 @@
#define DEF_BB(type, succ, pred) \
{ type, succ, pred }
+#define DEF_SGET_SPUT(bb, opcode, vA, field_info) \
+ { bb, opcode, field_info, vA, 0u, 0u }
+#define DEF_IGET_IPUT(bb, opcode, vA, vB, field_info) \
+ { bb, opcode, field_info, vA, vB, 0u }
+#define DEF_AGET_APUT(bb, opcode, vA, vB, vC) \
+ { bb, opcode, 0u, vA, vB, vC }
+#define DEF_INVOKE(bb, opcode, vC, method_info) \
+ { bb, opcode, method_info, 0u, 0u, vC }
+#define DEF_OTHER1(bb, opcode, vA) \
+ { bb, opcode, 0u, vA, 0u, 0u }
+#define DEF_OTHER2(bb, opcode, vA, vB) \
+ { bb, opcode, 0u, vA, vB, 0u }
+
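For reference, each macro simply fills a MIRDef aggregate in declaration order {bbid, opcode, field_or_method_info, vA, vB, vC}; one expansion, derived from the definitions above:

// DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* vC */, 1u /* method_info */)
// expands to the aggregate
//   { 3u, Instruction::INVOKE_STATIC, 1u, 0u, 0u, 0u },
// i.e. the third macro argument becomes vC and the fourth becomes the index into
// method_lowering_infos_ stored in field_or_method_info.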
void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
cu_.mir_graph->block_id_map_.clear();
cu_.mir_graph->block_list_.clear();
@@ -172,6 +196,35 @@
check_bb->successor_blocks.push_back(successor_block_info);
}
+ void DoPrepareMethods(const MethodDef* defs, size_t count) {
+ cu_.mir_graph->method_lowering_infos_.clear();
+ cu_.mir_graph->method_lowering_infos_.reserve(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const MethodDef* def = &defs[i];
+ MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type);
+ if (def->declaring_dex_file != 0u) {
+ method_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ method_info.declaring_class_idx_ = def->declaring_class_idx;
+ method_info.declaring_method_idx_ = def->declaring_method_idx;
+ }
+ ASSERT_EQ(def->invoke_type != kStatic, def->sharp_type != kStatic);
+ method_info.flags_ =
+ ((def->invoke_type == kStatic) ? MirMethodLoweringInfo::kFlagIsStatic : 0u) |
+ MirMethodLoweringInfo::kFlagFastPath |
+ (static_cast<uint16_t>(def->invoke_type) << MirMethodLoweringInfo::kBitInvokeTypeBegin) |
+ (static_cast<uint16_t>(def->sharp_type) << MirMethodLoweringInfo::kBitSharpTypeBegin) |
+ ((def->is_referrers_class) ? MirMethodLoweringInfo::kFlagIsReferrersClass : 0u) |
+ ((def->is_initialized) ? MirMethodLoweringInfo::kFlagClassIsInitialized : 0u);
+ ASSERT_EQ(def->declaring_dex_file != 0u, method_info.IsResolved());
+ cu_.mir_graph->method_lowering_infos_.push_back(method_info);
+ }
+ }
+
+ template <size_t count>
+ void PrepareMethods(const MethodDef (&defs)[count]) {
+ DoPrepareMethods(defs, count);
+ }
+
void DoPrepareMIRs(const MIRDef* defs, size_t count) {
mir_count_ = count;
mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
@@ -184,11 +237,16 @@
BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
bb->AppendMIR(mir);
if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
- ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
- mir->meta.sfield_lowering_info = def->field_info;
+ ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.size());
+ mir->meta.sfield_lowering_info = def->field_or_method_info;
} else if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
- ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
- mir->meta.ifield_lowering_info = def->field_info;
+ ASSERT_LT(def->field_or_method_info, cu_.mir_graph->ifield_lowering_infos_.size());
+ mir->meta.ifield_lowering_info = def->field_or_method_info;
+ } else if (def->opcode >= Instruction::INVOKE_VIRTUAL &&
+ def->opcode < Instruction::INVOKE_INTERFACE_RANGE &&
+ def->opcode != Instruction::RETURN_VOID_BARRIER) {
+ ASSERT_LT(def->field_or_method_info, cu_.mir_graph->method_lowering_infos_.size());
+ mir->meta.method_lowering_info = def->field_or_method_info;
}
mir->dalvikInsn.vA = def->vA;
mir->dalvikInsn.vB = def->vB;
@@ -251,7 +309,7 @@
field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic;
}
ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
- ASSERT_FALSE(field_info.IsInitialized());
+ ASSERT_FALSE(field_info.IsClassInitialized());
cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
}
}
@@ -326,12 +384,13 @@
NullCheckEliminationTest()
: MirOptimizationTest() {
+ static const MethodDef methods[] = {
+ { 0u, 1u, 0u, 0u, kDirect, kDirect, false, false }, // Dummy.
+ };
+ PrepareMethods(methods);
}
};
-#define DEF_SGET_SPUT_V0(bb, opcode, field_info) \
- { bb, opcode, field_info, 0u, 0u, 0u }
-
TEST_F(ClassInitCheckEliminationTest, SingleBlock) {
static const SFieldDef sfields[] = {
{ 0u, 1u, 0u, 0u },
@@ -342,17 +401,17 @@
{ 5u, 0u, 0u, 0u }, // Unresolved.
};
static const MIRDef mirs[] = {
- DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 5u), // Unresolved.
- DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 0u),
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 1u),
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 2u),
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 5u), // Unresolved.
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 0u),
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 1u),
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 2u),
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 5u), // Unresolved.
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 3u),
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 4u),
+ DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 5u), // Unresolved.
+ DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 0u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 2u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 5u), // Unresolved.
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 2u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 5u), // Unresolved.
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 3u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 4u),
};
static const bool expected_ignore_clinit_check[] = {
false, false, false, false, true, true, true, true, true, false, true
@@ -365,7 +424,50 @@
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
for (size_t i = 0u; i != arraysize(mirs); ++i) {
EXPECT_EQ(expected_ignore_clinit_check[i],
- (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
+ }
+}
+
+TEST_F(ClassInitCheckEliminationTest, SingleBlockWithInvokes) {
+ static const SFieldDef sfields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ { 2u, 1u, 2u, 2u },
+ };
+ static const MethodDef methods[] = {
+ { 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
+ { 1u, 1u, 1u, 1u, kStatic, kStatic, false, false },
+ { 2u, 1u, 2u, 2u, kStatic, kStatic, false, false },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
+ };
+ static const bool expected_class_initialized[] = {
+ false, true, false, true, false, true
+ };
+ static const bool expected_class_in_dex_cache[] = {
+ false, false, false, false, false, false
+ };
+
+ PrepareSFields(sfields);
+ PrepareMethods(methods);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformClassInitCheckElimination();
+ ASSERT_EQ(arraysize(expected_class_initialized), mir_count_);
+ ASSERT_EQ(arraysize(expected_class_in_dex_cache), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_class_initialized[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
+ EXPECT_EQ(expected_class_in_dex_cache[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
}
}
@@ -385,32 +487,32 @@
};
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 10u), // Unresolved.
- DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 10u), // Unresolved.
- DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 0u),
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 0u), // Eliminated (BB #3 dominates #6).
- DEF_SGET_SPUT_V0(4u, Instruction::SPUT, 1u),
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 1u), // Not eliminated (BB #4 doesn't dominate #6).
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 2u),
- DEF_SGET_SPUT_V0(4u, Instruction::SGET, 2u), // Eliminated (BB #3 dominates #4).
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 3u),
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 3u), // Eliminated (BB #3 dominates #5).
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 4u),
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 4u), // Eliminated (BB #3 dominates #6).
- DEF_SGET_SPUT_V0(4u, Instruction::SGET, 5u),
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 5u), // Not eliminated (BB #4 doesn't dominate #6).
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 6u),
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 6u), // Not eliminated (BB #5 doesn't dominate #6).
- DEF_SGET_SPUT_V0(4u, Instruction::SGET, 7u),
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 7u),
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 7u), // Eliminated (initialized in both #3 and #4).
- DEF_SGET_SPUT_V0(4u, Instruction::SGET, 8u),
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 9u),
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 8u), // Eliminated (with sfield[9] in BB #5).
- DEF_SGET_SPUT_V0(6u, Instruction::SPUT, 9u), // Eliminated (with sfield[8] in BB #4).
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 10u), // Unresolved.
+ DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 10u), // Unresolved.
+ DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 0u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 0u), // Eliminated (BB #3 dominates #6).
+ DEF_SGET_SPUT(4u, Instruction::SPUT, 0u, 1u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 1u), // Not eliminated (BB #4 doesn't dominate #6).
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 2u),
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u), // Eliminated (BB #3 dominates #4).
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 3u),
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 3u), // Eliminated (BB #3 dominates #5).
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 4u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 4u), // Eliminated (BB #3 dominates #6).
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 5u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 5u), // Not eliminated (BB #4 doesn't dominate #6).
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 6u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 6u), // Not eliminated (BB #5 doesn't dominate #6).
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 7u),
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 7u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 7u), // Eliminated (initialized in both #3 and #4).
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 8u),
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 9u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 8u), // Eliminated (with sfield[9] in BB #5).
+ DEF_SGET_SPUT(6u, Instruction::SPUT, 0u, 9u), // Eliminated (with sfield[8] in BB #4).
};
static const bool expected_ignore_clinit_check[] = {
- false, true, // Unresolved: sfield[10], method[2]
+ false, true, // Unresolved: sfield[10]
false, true, // sfield[0]
false, false, // sfield[1]
false, true, // sfield[2]
@@ -429,7 +531,70 @@
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
for (size_t i = 0u; i != arraysize(mirs); ++i) {
EXPECT_EQ(expected_ignore_clinit_check[i],
- (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
+ }
+}
+
+TEST_F(ClassInitCheckEliminationTest, DiamondWithInvokes) {
+ static const SFieldDef sfields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ { 2u, 1u, 2u, 2u },
+ { 3u, 1u, 3u, 3u },
+ { 4u, 1u, 4u, 4u },
+ };
+ static const MethodDef methods[] = {
+ { 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
+ { 1u, 1u, 1u, 1u, kStatic, kStatic, false, false },
+ { 2u, 1u, 2u, 2u, kStatic, kStatic, false, false },
+ { 3u, 1u, 3u, 3u, kStatic, kStatic, false, false },
+ { 4u, 1u, 4u, 4u, kStatic, kStatic, false, false },
+ };
+ static const MIRDef mirs[] = {
+ // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
+ DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 0u),
+ DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
+ DEF_SGET_SPUT(6u, Instruction::SPUT, 0u, 1u),
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u),
+ DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
+ DEF_SGET_SPUT(6u, Instruction::SPUT, 0u, 2u),
+ DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u /* dummy */, 3u),
+ DEF_SGET_SPUT(5u, Instruction::SPUT, 0u, 3u),
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 3u),
+ DEF_SGET_SPUT(4u, Instruction::SPUT, 0u, 4u),
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 4u),
+ DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u /* dummy */, 4u),
+ };
+ static const bool expected_class_initialized[] = {
+ false, true, // BB #3 SPUT, BB #6 INVOKE_STATIC
+ false, true, // BB #3 INVOKE_STATIC, BB #6 SPUT
+ false, false, true, // BB #4 SGET, BB #5 INVOKE_STATIC, BB #6 SPUT
+ false, false, true, // BB #4 INVOKE_STATIC, BB #5 SPUT, BB #6 SGET
+ false, false, true, // BB #4 SPUT, BB #5 SGET, BB #6 INVOKE_STATIC
+ };
+ static const bool expected_class_in_dex_cache[] = {
+ false, false, // BB #3 SPUT, BB #6 INVOKE_STATIC
+ false, false, // BB #3 INVOKE_STATIC, BB #6 SPUT
+ false, false, false, // BB #4 SGET, BB #5 INVOKE_STATIC, BB #6 SPUT
+ false, false, false, // BB #4 INVOKE_STATIC, BB #5 SPUT, BB #6 SGET
+ false, false, false, // BB #4 SPUT, BB #5 SGET, BB #6 INVOKE_STATIC
+ };
+
+ PrepareSFields(sfields);
+ PrepareMethods(methods);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformClassInitCheckElimination();
+ ASSERT_EQ(arraysize(expected_class_initialized), mir_count_);
+ ASSERT_EQ(arraysize(expected_class_in_dex_cache), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_class_initialized[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
+ EXPECT_EQ(expected_class_in_dex_cache[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
}
}
@@ -437,15 +602,18 @@
static const SFieldDef sfields[] = {
{ 0u, 1u, 0u, 0u },
{ 1u, 1u, 1u, 1u },
+ { 2u, 1u, 2u, 2u },
};
static const MIRDef mirs[] = {
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 0u),
- DEF_SGET_SPUT_V0(4u, Instruction::SGET, 1u),
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 0u), // Eliminated.
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 1u), // Eliminated.
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 0u), // Eliminated.
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 1u), // Eliminated.
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u),
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 2u), // Eliminated.
};
static const bool expected_ignore_clinit_check[] = {
- false, false, true, true
+ false, true, false, true, false, true,
};
PrepareSFields(sfields);
@@ -455,7 +623,49 @@
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
for (size_t i = 0u; i != arraysize(mirs); ++i) {
EXPECT_EQ(expected_ignore_clinit_check[i],
- (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
+ }
+}
+
+TEST_F(ClassInitCheckEliminationTest, LoopWithInvokes) {
+ static const SFieldDef sfields[] = {
+ { 0u, 1u, 0u, 0u },
+ };
+ static const MethodDef methods[] = {
+ { 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
+ { 1u, 1u, 1u, 1u, kStatic, kStatic, false, false },
+ { 2u, 1u, 2u, 2u, kStatic, kStatic, false, false },
+ };
+ static const MIRDef mirs[] = {
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
+ DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
+ DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
+ DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
+ DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 0u),
+ };
+ static const bool expected_class_initialized[] = {
+ false, true, false, true, false, true, true,
+ };
+ static const bool expected_class_in_dex_cache[] = {
+ false, false, false, false, false, false, false,
+ };
+
+ PrepareSFields(sfields);
+ PrepareMethods(methods);
+ PrepareLoop();
+ PrepareMIRs(mirs);
+ PerformClassInitCheckElimination();
+ ASSERT_EQ(arraysize(expected_class_initialized), mir_count_);
+ ASSERT_EQ(arraysize(expected_class_in_dex_cache), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_class_initialized[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
+ EXPECT_EQ(expected_class_in_dex_cache[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
}
}
@@ -467,16 +677,16 @@
{ 3u, 1u, 3u, 3u },
};
static const MIRDef mirs[] = {
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 0u), // Before the exception edge.
- DEF_SGET_SPUT_V0(3u, Instruction::SGET, 1u), // Before the exception edge.
- DEF_SGET_SPUT_V0(4u, Instruction::SGET, 2u), // After the exception edge.
- DEF_SGET_SPUT_V0(4u, Instruction::SGET, 3u), // After the exception edge.
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 0u), // In catch handler; clinit check eliminated.
- DEF_SGET_SPUT_V0(5u, Instruction::SGET, 2u), // In catch handler; clinit check not eliminated.
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 0u), // Class init check eliminated.
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 1u), // Class init check eliminated.
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 2u), // Class init check eliminated.
- DEF_SGET_SPUT_V0(6u, Instruction::SGET, 3u), // Class init check not eliminated.
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u), // Before the exception edge.
+ DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u), // Before the exception edge.
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u), // After the exception edge.
+ DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 3u), // After the exception edge.
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 0u), // In catch handler; eliminated.
+ DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 2u), // In catch handler; not eliminated.
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 0u), // Class init check eliminated.
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 1u), // Class init check eliminated.
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 2u), // Class init check eliminated.
+ DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 3u), // Class init check not eliminated.
};
static const bool expected_ignore_clinit_check[] = {
false, false, false, false, true, false, true, true, true, false
@@ -489,21 +699,12 @@
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
for (size_t i = 0u; i != arraysize(mirs); ++i) {
EXPECT_EQ(expected_ignore_clinit_check[i],
- (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
}
}
-#define DEF_IGET_IPUT(bb, opcode, vA, vB, field_info) \
- { bb, opcode, field_info, vA, vB, 0u }
-#define DEF_AGET_APUT(bb, opcode, vA, vB, vC) \
- { bb, opcode, 0u, vA, vB, vC }
-#define DEF_INVOKE(bb, opcode, vC) \
- { bb, opcode, 0u, 0u, 0u, vC }
-#define DEF_OTHER1(bb, opcode, vA) \
- { bb, opcode, 0u, vA, 0u, 0u }
-#define DEF_OTHER2(bb, opcode, vA, vB) \
- { bb, opcode, 0u, vA, vB, 0u }
-
TEST_F(NullCheckEliminationTest, SingleBlock) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, 0u },
@@ -525,10 +726,10 @@
DEF_IGET_IPUT(3u, Instruction::IPUT, 11u, 105u, 1u),
DEF_IGET_IPUT(3u, Instruction::IPUT, 12u, 106u, 0u),
DEF_IGET_IPUT(3u, Instruction::IGET, 13u, 106u, 1u),
- DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 107),
+ DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 107, 0u /* dummy */),
DEF_IGET_IPUT(3u, Instruction::IGET, 15u, 107u, 1u),
DEF_IGET_IPUT(3u, Instruction::IGET, 16u, 108u, 0u),
- DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 108),
+ DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 108, 0u /* dummy */),
DEF_AGET_APUT(3u, Instruction::AGET, 18u, 109u, 110u),
DEF_AGET_APUT(3u, Instruction::APUT, 19u, 109u, 111u),
DEF_OTHER2(3u, Instruction::ARRAY_LENGTH, 20u, 112u),
@@ -583,7 +784,7 @@
DEF_IGET_IPUT(6u, Instruction::IPUT, 7u, 103u, 1u), // Not eliminated (going through BB #5).
DEF_IGET_IPUT(5u, Instruction::IGET, 8u, 104u, 1u),
DEF_IGET_IPUT(6u, Instruction::IGET, 9u, 104u, 0u), // Not eliminated (going through BB #4).
- DEF_INVOKE(4u, Instruction::INVOKE_DIRECT, 105u),
+ DEF_INVOKE(4u, Instruction::INVOKE_DIRECT, 105u, 0u /* dummy */),
DEF_IGET_IPUT(5u, Instruction::IGET, 11u, 105u, 1u),
DEF_IGET_IPUT(6u, Instruction::IPUT, 12u, 105u, 0u), // Eliminated.
DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 13u, 106u, 2u),
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index e349eed..d3e54a0 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -19,11 +19,13 @@
#include <string>
-#include "base/macros.h"
+#include "compiler_ir.h"
+#include "base/logging.h"
+
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -81,15 +83,10 @@
* @param data the object containing data necessary for the pass.
* @return whether or not there is a change when walking the BasicBlock
*/
- virtual bool Worker(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
-
+ virtual bool Worker(PassDataHolder* data ATTRIBUTE_UNUSED) const {
// Passes that do all their work in Start() or End() should not allow useless node iteration.
- DCHECK(false) << "Unsupported default Worker() used for " << GetName();
-
- // BasicBlock did not change.
- return false;
+ LOG(FATAL) << "Unsupported default Worker() used for " << GetName();
+ UNREACHABLE();
}
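A minimal sketch of the LOG(FATAL) + UNREACHABLE() pattern the new default uses, with a hypothetical [[noreturn]] logger standing in for the real macro:

#include <cstdio>
#include <cstdlib>

[[noreturn]] void FatalLog(const char* msg) {  // stand-in for LOG(FATAL) << ...
  std::fprintf(stderr, "%s\n", msg);
  std::abort();
}

bool DefaultWorker() {
  FatalLog("Unsupported default Worker() used");
  // UNREACHABLE() typically expands to a compiler hint such as __builtin_unreachable(),
  // so no dummy 'return false;' is needed after the fatal log.
  __builtin_unreachable();
}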
static void BasePrintMessage(CompilationUnit* c_unit, const char* pass_name, const char* message, ...) {
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index 2f3c8b2..d0b450a 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -23,7 +23,7 @@
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -32,10 +32,11 @@
* @details Each enum should be a power of 2 to be correctly used.
*/
enum OptimizationFlag {
- kOptimizationBasicBlockChange = 1, /**< @brief Has there been a change to a BasicBlock? */
- kOptimizationDefUsesChange = 2, /**< @brief Has there been a change to a def-use? */
- kLoopStructureChange = 4, /**< @brief Has there been a loop structural change? */
+ kOptimizationBasicBlockChange = 1, /// @brief Has there been a change to a BasicBlock?
+ kOptimizationDefUsesChange = 2, /// @brief Has there been a change to a def-use?
+ kLoopStructureChange = 4, /// @brief Has there been a loop structural change?
};
+std::ostream& operator<<(std::ostream& os, const OptimizationFlag& rhs);
// Data holder class.
class PassMEDataHolder: public PassDataHolder {
@@ -47,24 +48,25 @@
};
enum DataFlowAnalysisMode {
- kAllNodes = 0, /**< @brief All nodes. */
- kPreOrderDFSTraversal, /**< @brief Depth-First-Search / Pre-Order. */
- kRepeatingPreOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Pre-Order. */
- kReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Reverse Post-Order. */
- kRepeatingPostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Post-Order. */
- kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
- kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
- kTopologicalSortTraversal, /**< @brief Topological Order traversal. */
- kLoopRepeatingTopologicalSortTraversal, /**< @brief Loop-repeating Topological Order traversal. */
- kNoNodes, /**< @brief Skip BasicBlock traversal. */
+ kAllNodes = 0, /// @brief All nodes.
+ kPreOrderDFSTraversal, /// @brief Depth-First-Search / Pre-Order.
+ kRepeatingPreOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Pre-Order.
+ kReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Reverse Post-Order.
+ kRepeatingPostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Post-Order.
+ kRepeatingReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Reverse Post-Order.
+ kPostOrderDOMTraversal, /// @brief Dominator tree / Post-Order.
+ kTopologicalSortTraversal, /// @brief Topological Order traversal.
+ kLoopRepeatingTopologicalSortTraversal, /// @brief Loop-repeating Topological Order traversal.
+ kNoNodes, /// @brief Skip BasicBlock traversal.
};
+std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& rhs);
/**
* @class Pass
* @brief Pass is the Pass structure for the optimizations.
* @details The following structure has the different optimization passes that we are going to do.
*/
-class PassME: public Pass {
+class PassME : public Pass {
public:
explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
unsigned int flags = 0u, const char* dump = "")
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 94ae3f7..bc4f5c4 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -73,7 +73,7 @@
std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
};
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct MIR;
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index d935bc3..4c7f874 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -97,7 +97,7 @@
// First FP callee save.
#define ARM_FP_CALLEE_SAVE_BASE 16
// Flag for using R4 to do suspend check
-#define ARM_R4_SUSPEND_FLAG
+// #define ARM_R4_SUSPEND_FLAG
enum ArmResourceEncodingPos {
kArmGPReg0 = 0,
@@ -109,7 +109,7 @@
kArmRegEnd = 48,
};
-enum ArmNativeRegisterPool {
+enum ArmNativeRegisterPool {  // private marker to prevent generate-operator-out.py from processing this enum.
r0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
r1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
r2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -297,19 +297,20 @@
constexpr RegStorage rs_dr31(RegStorage::kValid | dr31);
#endif
-// RegisterLocation templates return values (r0, or r0/r1).
-const RegLocation arm_loc_c_return
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_wide
+// RegisterLocation templates return values (r0, r0/r1, s0, or d0).
+// Note: The return locations are shared between quick code and the quick helpers. This follows the
+// quick ABI; the quick helper assembly routines need to handle the ABI differences.
+const RegLocation arm_loc_c_return =
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r0, INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_wide =
{kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_float
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_double
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
+ RegStorage::MakeRegPair(rs_r0, rs_r1), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_float = kArm32QuickCodeUseSoftFloat
+ ? arm_loc_c_return
+ : RegLocation({kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, rs_fr0, INVALID_SREG, INVALID_SREG});
+const RegLocation arm_loc_c_return_double = kArm32QuickCodeUseSoftFloat
+ ? arm_loc_c_return_wide
+ : RegLocation({kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, rs_dr0, INVALID_SREG, INVALID_SREG});
enum ArmShiftEncodings {
kArmLsl = 0x0,
@@ -545,6 +546,7 @@
kThumb2StrdI8, // strd rt, rt2, [rn +-/1024].
kArmLast,
};
+std::ostream& operator<<(std::ostream& os, const ArmOpcode& rhs);
enum ArmOpDmbOptions {
kSY = 0xf,
@@ -576,6 +578,7 @@
kFmtOff24, // 24-bit Thumb2 unconditional branch encoding.
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const ArmEncodingKind& rhs);
// Struct used to define the snippet positions for each Thumb opcode.
struct ArmEncodingMap {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 13b9bf0..f15d707 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -288,18 +288,12 @@
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void ArmMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void ArmMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
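With the null check hoisted out of the target hook, the conditional wrapper presumably lives in shared Mir2Lir code; a hedged sketch of that split, reusing the calls from the removed lines (the wrapper name and its placement are assumptions):

// Sketch only: shared code is expected to branch around the unconditional card mark
// when the stored value is null, mirroring the lines removed above.
void MarkGCCardSketch(RegStorage val_reg, RegStorage tgt_addr_reg) {
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, nullptr);  // skip for null values
  UnconditionallyMarkGCCard(tgt_addr_reg);                          // target-specific hook above
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}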
@@ -475,9 +469,9 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx ATTRIBUTE_UNUSED,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
@@ -536,8 +530,8 @@
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 1c87a03..e8d0c32 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -25,6 +25,64 @@
namespace art {
class ArmMir2Lir FINAL : public Mir2Lir {
+ protected:
+ // TODO: Consolidate hard float target support.
+ // InToRegStorageMapper and InToRegStorageMapping can be shared with all backends.
+ // Base class used to get RegStorage for next argument.
+ class InToRegStorageMapper {
+ public:
+ virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide) = 0;
+ virtual ~InToRegStorageMapper() {
+ }
+ };
+
+ // Inherited class for ARM backend.
+ class InToRegStorageArmMapper FINAL : public InToRegStorageMapper {
+ public:
+ InToRegStorageArmMapper()
+ : cur_core_reg_(0), cur_fp_reg_(0), cur_fp_double_reg_(0) {
+ }
+
+ virtual ~InToRegStorageArmMapper() {
+ }
+
+ RegStorage GetNextReg(bool is_double_or_float, bool is_wide) OVERRIDE;
+
+ private:
+ uint32_t cur_core_reg_;
+ uint32_t cur_fp_reg_;
+ uint32_t cur_fp_double_reg_;
+ };
+
+ // Class to map argument to RegStorage. The mapping object is initialized by a mapper.
+ class InToRegStorageMapping FINAL {
+ public:
+ InToRegStorageMapping()
+ : max_mapped_in_(0), is_there_stack_mapped_(false), initialized_(false) {
+ }
+
+ int GetMaxMappedIn() const {
+ return max_mapped_in_;
+ }
+
+ bool IsThereStackMapped() const {
+ return is_there_stack_mapped_;
+ }
+
+ bool IsInitialized() const {
+ return initialized_;
+ }
+
+ void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
+ RegStorage Get(int in_position) const;
+
+ private:
+ std::map<int, RegStorage> mapping_;
+ int max_mapped_in_;
+ bool is_there_stack_mapped_;
+ bool initialized_;
+ };
+
public:
ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
@@ -32,6 +90,10 @@
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -44,18 +106,35 @@
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
// Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg);
- RegStorage GetArgMappingToPhysicalReg(int arg_num);
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
+ if (wide_kind == kWide) {
+ DCHECK((kArg0 <= reg && reg < kArg3) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
+ RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
+ TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
+ if (ret_reg.IsFloat()) {
+ // Treat the pair as a double, to stay consistent with register allocation.
+ ret_reg = As64BitFloatReg(ret_reg);
+ }
+ return ret_reg;
+ } else {
+ return TargetReg(reg);
+ }
+ }
+
+ RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+ RegLocation GetReturnAlt() OVERRIDE;
+ RegLocation GetReturnWideAlt() OVERRIDE;
+ RegLocation LocCReturn() OVERRIDE;
+ RegLocation LocCReturnRef() OVERRIDE;
+ RegLocation LocCReturnDouble() OVERRIDE;
+ RegLocation LocCReturnFloat() OVERRIDE;
+ RegLocation LocCReturnWide() OVERRIDE;
ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
void AdjustSpillMask();
void ClobberCallerSave();
@@ -87,15 +166,15 @@
// Required for target - Dalvik-level generators.
void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) OVERRIDE;
+ RegLocation rl_src2, int flags) OVERRIDE;
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
+ RegLocation rl_src1, RegLocation rl_src2, int flags);
void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale);
void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src, int scale, bool card_mark);
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift);
+ RegLocation rl_src1, RegLocation rl_shift, int flags);
void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -123,7 +202,7 @@
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMonitorEnter(int opt_flags, RegLocation rl_src);
void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -178,10 +257,10 @@
RegStorage AllocPreservedDouble(int s_reg);
RegStorage AllocPreservedSingle(int s_reg);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
@@ -210,6 +289,19 @@
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
size_t GetInstructionOffset(LIR* lir);
+ int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this) OVERRIDE;
+ int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this) OVERRIDE;
+
private:
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -224,12 +316,12 @@
void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
void AssignDataOffsets();
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, bool check_zero);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
- typedef struct {
+ bool is_div, int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
+ struct EasyMultiplyOp {
OpKind op;
uint32_t shift;
- } EasyMultiplyOp;
+ };
bool GetEasyMultiplyOp(int lit, EasyMultiplyOp* op);
bool GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops);
void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops);
@@ -239,6 +331,36 @@
static constexpr ResourceMask EncodeArmRegFpcsList(int reg_list);
ArenaVector<LIR*> call_method_insns_;
+
+ /**
+ * @brief Given float register pair, returns Solo64 float register.
+ * @param reg #RegStorage containing a float register pair (e.g. @c s2 and @c s3).
+ * @return A Solo64 float register mapping to the register pair (e.g. @c d1).
+ */
+ static RegStorage As64BitFloatReg(RegStorage reg) {
+ DCHECK(reg.IsFloat());
+
+ RegStorage low = reg.GetLow();
+ RegStorage high = reg.GetHigh();
+ DCHECK((low.GetRegNum() % 2 == 0) && (low.GetRegNum() + 1 == high.GetRegNum()));
+
+ return RegStorage::FloatSolo64(low.GetRegNum() / 2);
+ }
+
+ /**
+ * @brief Given Solo64 float register, returns float register pair.
+ * @param reg #RegStorage containing a Solo64 float register (e.g. @c d1).
+ * @return A float register pair aliasing the Solo64 float register (e.g. @c s2 and @c s3).
+ */
+ static RegStorage As64BitFloatRegPair(RegStorage reg) {
+ DCHECK(reg.IsDouble() && reg.Is64BitSolo());
+
+ int reg_num = reg.GetRegNum();
+ return RegStorage::MakeRegPair(RegStorage::FloatSolo32(reg_num * 2),
+ RegStorage::FloatSolo32(reg_num * 2 + 1));
+ }
+
+ InToRegStorageMapping in_to_reg_storage_mapping_;
};
} // namespace art
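The pairing arithmetic in As64BitFloatReg()/As64BitFloatRegPair() above encodes the VFP aliasing dN = (s2N, s2N+1); a tiny standalone check of that mapping, with plain ints standing in for RegStorage register numbers:

#include <cassert>

constexpr int DoubleFromLowSingle(int low_s) { return low_s / 2; }  // (s2, s3) -> d1
constexpr int LowSingleFromDouble(int d) { return d * 2; }          // d1 -> (s2, s3)

int main() {
  static_assert(DoubleFromLowSingle(2) == 1, "s2/s3 alias d1");
  static_assert(LowSingleFromDouble(1) == 2, "d1 aliases s2/s3");
  assert(DoubleFromLowSingle(LowSingleFromDouble(15)) == 15);  // round trip: d15 <-> s30/s31
  return 0;
}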
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 3eb7c83..2b2592d 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -113,6 +113,32 @@
StoreValueWide(rl_dest, rl_result);
}
+void ArmMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempSingle();
+ LoadConstantNoClobber(r_tmp, constant);
+ rl_src1 = LoadValue(rl_src1, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR3(kThumb2Vmuls, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void ArmMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempDouble();
+ DCHECK(r_tmp.IsDouble());
+ LoadConstantWide(r_tmp, constant);
+ rl_src1 = LoadValueWide(rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(kThumb2Vmuld, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) {
int op = kThumbBkpt;
int src_reg;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 018dc1c..cab039b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -16,6 +16,7 @@
/* This file contains codegen for the Thumb2 ISA. */
+#include "arch/instruction_set_features.h"
#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
@@ -209,7 +210,8 @@
void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// TODO: Generalize the IT below to accept more than one-instruction loads.
DCHECK(InexpensiveConstantInt(true_val));
DCHECK(InexpensiveConstantInt(false_val));
@@ -232,6 +234,7 @@
}
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -442,6 +445,15 @@
bool src_fp = r_src.IsFloat();
DCHECK(r_dest.Is64Bit());
DCHECK(r_src.Is64Bit());
+ // Note: If the register comes from the register allocator, it should never be a pair.
+ // But some functions in mir_2_lir assume 64-bit registers are 32-bit register pairs.
+ // TODO: Rework Mir2Lir::LoadArg() and Mir2Lir::LoadArgDirect().
+ if (dest_fp && r_dest.IsPair()) {
+ r_dest = As64BitFloatReg(r_dest);
+ }
+ if (src_fp && r_src.IsPair()) {
+ r_src = As64BitFloatReg(r_src);
+ }
if (dest_fp) {
if (src_fp) {
OpRegCopy(r_dest, r_src);
@@ -495,6 +507,7 @@
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
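SmallLiteralDivRem() above uses the reciprocal-multiply scheme from Hacker's Delight 10-4: take the high word of a 32x32->64 multiply by a per-divisor magic constant, then apply a sign correction (plus an extra shift for some divisors, both taken from magic_table). A hedged standalone illustration for the divisor 3, which needs no extra shift:

#include <cstdint>

int32_t DivideBy3(int32_t n) {
  const int32_t kMagic = 0x55555556;  // magic constant for 3: ceil(2^32 / 3)
  // High 32 bits of the 64-bit product, as smull/umull would produce.
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(kMagic) * n) >> 32);
  return q - (n >> 31);  // arithmetic shift; adds 1 back when n is negative
}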
@@ -678,14 +691,17 @@
}
RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero) {
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -1063,6 +1079,7 @@
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
@@ -1086,7 +1103,7 @@
#else
RegStorage t_reg = AllocTemp();
LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
- t_reg, kUnsignedHalf);
+ t_reg, kUnsignedHalf, kNotVolatile);
LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
0, target);
FreeTemp(t_reg);
@@ -1103,7 +1120,9 @@
}
bool ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP != 0
+ if (!cu_->GetInstructionSetFeatures()->IsSmp()) {
+ return false;
+ }
// Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
LIR* barrier = last_lir_insn_;
@@ -1133,9 +1152,6 @@
DCHECK(!barrier->flags.use_def_invalid);
barrier->u.m.def_mask = &kEncodeAll;
return ret;
-#else
- return false;
-#endif
}
void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -1159,112 +1175,113 @@
void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- /*
- * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
- * dest = src1.lo * src2.lo;
- * tmp1 += src1.lo * src2.hi;
- * dest.hi += tmp1;
- *
- * To pull off inline multiply, we have a worst-case requirement of 7 temporary
- * registers. Normally for Arm, we get 5. We can get to 6 by including
- * lr in the temp set. The only problematic case is all operands and result are
- * distinct, and none have been promoted. In that case, we can succeed by aggressively
- * freeing operand temp registers after they are no longer needed. All other cases
- * can proceed normally. We'll just punt on the case of the result having a misaligned
- * overlap with either operand and send that case to a runtime handler.
- */
- RegLocation rl_result;
- if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
- FlushAllRegs();
- CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
- rl_result = GetReturnWide(kCoreReg);
- StoreValueWide(rl_dest, rl_result);
- return;
- }
-
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
- int reg_status = 0;
- RegStorage res_lo;
- RegStorage res_hi;
- bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
- !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
- bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
- bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
- // Check if rl_dest is *not* either operand and we have enough temp registers.
- if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
- (dest_promoted || src1_promoted || src2_promoted)) {
- // In this case, we do not need to manually allocate temp registers for result.
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- res_lo = rl_result.reg.GetLow();
- res_hi = rl_result.reg.GetHigh();
- } else {
- res_lo = AllocTemp();
- if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
- // In this case, we have enough temp registers to be allocated for result.
- res_hi = AllocTemp();
- reg_status = 1;
- } else {
- // In this case, all temps are now allocated.
- // res_hi will be allocated after we can free src1_hi.
- reg_status = 2;
- }
- }
-
- // Temporarily add LR to the temp pool, and assign it to tmp1
- MarkTemp(rs_rARM_LR);
- FreeTemp(rs_rARM_LR);
- RegStorage tmp1 = rs_rARM_LR;
- LockTemp(rs_rARM_LR);
-
- if (rl_src1.reg == rl_src2.reg) {
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
- } else {
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- if (reg_status == 2) {
- DCHECK(!res_hi.Valid());
- DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
- DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- // Will force free src1_hi, so must clobber.
- Clobber(rl_src1.reg);
- FreeTemp(rl_src1.reg.GetHigh());
- res_hi = AllocTemp();
- }
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
- tmp1.GetReg());
- NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
- if (reg_status == 2) {
- FreeTemp(rl_src1.reg.GetLow());
- }
- }
-
- // Now, restore lr to its non-temp status.
- FreeTemp(tmp1);
- Clobber(rs_rARM_LR);
- UnmarkTemp(rs_rARM_LR);
-
- if (reg_status != 0) {
- // We had manually allocated registers for rl_result.
- // Now construct a RegLocation.
- rl_result = GetReturnWide(kCoreReg); // Just using as a template.
- rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
- }
-
+ UNUSED(opcode);
+ /*
+ * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
+ * dest = src1.lo * src2.lo;
+ * tmp1 += src1.lo * src2.hi;
+ * dest.hi += tmp1;
+ *
+ * To pull off inline multiply, we have a worst-case requirement of 7 temporary
+ * registers. Normally for Arm, we get 5. We can get to 6 by including
+ * lr in the temp set. The only problematic case is all operands and result are
+ * distinct, and none have been promoted. In that case, we can succeed by aggressively
+ * freeing operand temp registers after they are no longer needed. All other cases
+ * can proceed normally. We'll just punt on the case of the result having a misaligned
+ * overlap with either operand and send that case to a runtime handler.
+ */
+ RegLocation rl_result;
+ if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
+ FlushAllRegs();
+ CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(kCoreReg);
StoreValueWide(rl_dest, rl_result);
+ return;
+ }
+
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+ int reg_status = 0;
+ RegStorage res_lo;
+ RegStorage res_hi;
+ bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
+ !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
+ bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
+ bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
+ // Check if rl_dest is *not* either operand and we have enough temp registers.
+ if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
+ (dest_promoted || src1_promoted || src2_promoted)) {
+ // In this case, we do not need to manually allocate temp registers for result.
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ res_lo = rl_result.reg.GetLow();
+ res_hi = rl_result.reg.GetHigh();
+ } else {
+ res_lo = AllocTemp();
+ if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
+ // In this case, we have enough temp registers to be allocated for result.
+ res_hi = AllocTemp();
+ reg_status = 1;
+ } else {
+ // In this case, all temps are now allocated.
+ // res_hi will be allocated after we can free src1_hi.
+ reg_status = 2;
+ }
+ }
+
+ // Temporarily add LR to the temp pool, and assign it to tmp1
+ MarkTemp(rs_rARM_LR);
+ FreeTemp(rs_rARM_LR);
+ RegStorage tmp1 = rs_rARM_LR;
+ LockTemp(rs_rARM_LR);
+
+ if (rl_src1.reg == rl_src2.reg) {
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+ } else {
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ if (reg_status == 2) {
+ DCHECK(!res_hi.Valid());
+ DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ // Will force free src1_hi, so must clobber.
+ Clobber(rl_src1.reg);
+ FreeTemp(rl_src1.reg.GetHigh());
+ res_hi = AllocTemp();
+ }
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
+ tmp1.GetReg());
+ NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
+ if (reg_status == 2) {
+ FreeTemp(rl_src1.reg.GetLow());
+ }
+ }
+
+ // Now, restore lr to its non-temp status.
+ FreeTemp(tmp1);
+ Clobber(rs_rARM_LR);
+ UnmarkTemp(rs_rARM_LR);
+
+ if (reg_status != 0) {
+ // We had manually allocated registers for rl_result.
+ // Now construct a RegLocation.
+ rl_result = GetReturnWide(kCoreReg); // Just using as a template.
+ rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
+ }
+
+ StoreValueWide(rl_dest, rl_result);
}
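The block comment at the top of GenMulLong() describes the standard decomposition of a 64x64->64 multiply into 32-bit operations (the src1.hi * src2.hi term falls outside the low 64 bits). A small sketch of that arithmetic, mirroring the mul/umull/mla/add sequence emitted above (illustrative only):

#include <cstdint>

uint64_t Mul64From32(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi) {
  uint32_t tmp1 = a_hi * b_lo;                         // mul   (kThumb2MulRRR)
  uint64_t dest = static_cast<uint64_t>(a_lo) * b_lo;  // umull (kThumb2Umull)
  tmp1 += a_lo * b_hi;                                 // mla   (kThumb2Mla)
  dest += static_cast<uint64_t>(tmp1) << 32;           // add tmp1 into dest.hi
  return dest;
}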
void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
+ RegLocation rl_src2, int flags) {
switch (opcode) {
case Instruction::MUL_LONG:
case Instruction::MUL_LONG_2ADDR:
@@ -1279,7 +1296,7 @@
}
// Fallback for all other ops.
- Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
/*
@@ -1342,7 +1359,6 @@
FreeTemp(reg_len);
}
LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
- MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
}
@@ -1363,7 +1379,6 @@
FreeTemp(reg_len);
}
LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
- MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
}
@@ -1442,7 +1457,6 @@
}
StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
- MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
@@ -1452,7 +1466,6 @@
FreeTemp(reg_len);
}
StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
- MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
@@ -1464,7 +1477,9 @@
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
+ RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
+ int flags) {
+ UNUSED(flags);
rl_src = LoadValueWide(rl_src, kCoreReg);
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
@@ -1537,11 +1552,12 @@
}
void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ int flags) {
if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
if (!rl_src2.is_const) {
// Don't bother with special handling for subtract from immediate.
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
return;
}
} else {
@@ -1552,7 +1568,7 @@
}
}
if (PartiallyIntersects(rl_src1, rl_dest)) {
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
return;
}
DCHECK(rl_src2.is_const);
@@ -1569,7 +1585,7 @@
case Instruction::SUB_LONG:
case Instruction::SUB_LONG_2ADDR:
if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
return;
}
break;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index dd8f7fe..0e8f645 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -89,7 +89,7 @@
// Return a target-dependent special register.
RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
- RegStorage res_reg = RegStorage::InvalidReg();
+ RegStorage res_reg;
switch (reg) {
case kSelf: res_reg = rs_rARM_SELF; break;
#ifdef ARM_R4_SUSPEND_FLAG
@@ -104,10 +104,22 @@
case kArg1: res_reg = rs_r1; break;
case kArg2: res_reg = rs_r2; break;
case kArg3: res_reg = rs_r3; break;
- case kFArg0: res_reg = rs_r0; break;
- case kFArg1: res_reg = rs_r1; break;
- case kFArg2: res_reg = rs_r2; break;
- case kFArg3: res_reg = rs_r3; break;
+ case kFArg0: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r0 : rs_fr0; break;
+ case kFArg1: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r1 : rs_fr1; break;
+ case kFArg2: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r2 : rs_fr2; break;
+ case kFArg3: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r3 : rs_fr3; break;
+ case kFArg4: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr4; break;
+ case kFArg5: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr5; break;
+ case kFArg6: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr6; break;
+ case kFArg7: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr7; break;
+ case kFArg8: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr8; break;
+ case kFArg9: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr9; break;
+ case kFArg10: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr10; break;
+ case kFArg11: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr11; break;
+ case kFArg12: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr12; break;
+ case kFArg13: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr13; break;
+ case kFArg14: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr14; break;
+ case kFArg15: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr15; break;
case kRet0: res_reg = rs_r0; break;
case kRet1: res_reg = rs_r1; break;
case kInvokeTgt: res_reg = rs_rARM_LR; break;
@@ -119,20 +131,6 @@
return res_reg;
}
-RegStorage ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
- // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
- switch (arg_num) {
- case 0:
- return rs_r1;
- case 1:
- return rs_r2;
- case 2:
- return rs_r3;
- default:
- return RegStorage::InvalidReg();
- }
-}
-
/*
* Decode the register id.
*/
@@ -561,11 +559,10 @@
call_method_insns_.reserve(100);
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kArmLast; i++) {
- if (ArmMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(ArmMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
}
}
@@ -718,6 +715,32 @@
LockTemp(rs_r1);
LockTemp(rs_r2);
LockTemp(rs_r3);
+ if (!kArm32QuickCodeUseSoftFloat) {
+ LockTemp(rs_fr0);
+ LockTemp(rs_fr1);
+ LockTemp(rs_fr2);
+ LockTemp(rs_fr3);
+ LockTemp(rs_fr4);
+ LockTemp(rs_fr5);
+ LockTemp(rs_fr6);
+ LockTemp(rs_fr7);
+ LockTemp(rs_fr8);
+ LockTemp(rs_fr9);
+ LockTemp(rs_fr10);
+ LockTemp(rs_fr11);
+ LockTemp(rs_fr12);
+ LockTemp(rs_fr13);
+ LockTemp(rs_fr14);
+ LockTemp(rs_fr15);
+ LockTemp(rs_dr0);
+ LockTemp(rs_dr1);
+ LockTemp(rs_dr2);
+ LockTemp(rs_dr3);
+ LockTemp(rs_dr4);
+ LockTemp(rs_dr5);
+ LockTemp(rs_dr6);
+ LockTemp(rs_dr7);
+ }
}
/* To be used when explicitly managing register use */
@@ -726,6 +749,32 @@
FreeTemp(rs_r1);
FreeTemp(rs_r2);
FreeTemp(rs_r3);
+ if (!kArm32QuickCodeUseSoftFloat) {
+ FreeTemp(rs_fr0);
+ FreeTemp(rs_fr1);
+ FreeTemp(rs_fr2);
+ FreeTemp(rs_fr3);
+ FreeTemp(rs_fr4);
+ FreeTemp(rs_fr5);
+ FreeTemp(rs_fr6);
+ FreeTemp(rs_fr7);
+ FreeTemp(rs_fr8);
+ FreeTemp(rs_fr9);
+ FreeTemp(rs_fr10);
+ FreeTemp(rs_fr11);
+ FreeTemp(rs_fr12);
+ FreeTemp(rs_fr13);
+ FreeTemp(rs_fr14);
+ FreeTemp(rs_fr15);
+ FreeTemp(rs_dr0);
+ FreeTemp(rs_dr1);
+ FreeTemp(rs_dr2);
+ FreeTemp(rs_dr3);
+ FreeTemp(rs_dr4);
+ FreeTemp(rs_dr5);
+ FreeTemp(rs_dr6);
+ FreeTemp(rs_dr7);
+ }
}
RegStorage ArmMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
@@ -847,4 +896,313 @@
Mir2Lir::InstallLiteralPools();
}
+RegStorage ArmMir2Lir::InToRegStorageArmMapper::GetNextReg(bool is_double_or_float, bool is_wide) {
+ const RegStorage coreArgMappingToPhysicalReg[] =
+ {rs_r1, rs_r2, rs_r3};
+ const int coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+ const RegStorage fpArgMappingToPhysicalReg[] =
+ {rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
+ rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15};
+ constexpr uint32_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+ static_assert(fpArgMappingToPhysicalRegSize % 2 == 0, "Number of FP Arg regs is not even");
+
+ if (kArm32QuickCodeUseSoftFloat) {
+ is_double_or_float = false; // Regard double as long, float as int.
+ is_wide = false; // Map long separately.
+ }
+
+ RegStorage result = RegStorage::InvalidReg();
+ if (is_double_or_float) {
+ // TODO: Remove "cur_fp_double_reg_ % 2 != 0" when we return double as double.
+ if (is_wide || cur_fp_double_reg_ % 2 != 0) {
+ cur_fp_double_reg_ = std::max(cur_fp_double_reg_, RoundUp(cur_fp_reg_, 2));
+ if (cur_fp_double_reg_ < fpArgMappingToPhysicalRegSize) {
+ // TODO: Replace with the following code in this branch once FlushIns() supports 64-bit registers.
+ // result = RegStorage::MakeRegPair(fpArgMappingToPhysicalReg[cur_fp_double_reg_],
+ // fpArgMappingToPhysicalReg[cur_fp_double_reg_ + 1]);
+ // result = As64BitFloatReg(result);
+ // cur_fp_double_reg_ += 2;
+ result = fpArgMappingToPhysicalReg[cur_fp_double_reg_];
+ cur_fp_double_reg_++;
+ }
+ } else {
+ // TODO: Remove the check when we return double as double.
+ DCHECK_EQ(cur_fp_double_reg_ % 2, 0U);
+ if (cur_fp_reg_ % 2 == 0) {
+ cur_fp_reg_ = std::max(cur_fp_double_reg_, cur_fp_reg_);
+ }
+ if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
+ result = fpArgMappingToPhysicalReg[cur_fp_reg_];
+ cur_fp_reg_++;
+ }
+ }
+ } else {
+ if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
+ result = coreArgMappingToPhysicalReg[cur_core_reg_++];
+ // TODO: Enable the following code once FlushIns() supports 64-bit registers.
+ // if (is_wide && cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
+ // result = RegStorage::MakeRegPair(result, coreArgMappingToPhysicalReg[cur_core_reg_++]);
+ // }
+ }
+ }
+ return result;
+}
+
+RegStorage ArmMir2Lir::InToRegStorageMapping::Get(int in_position) const {
+ DCHECK(IsInitialized());
+ auto res = mapping_.find(in_position);
+ return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
+}
+
+void ArmMir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
+ InToRegStorageMapper* mapper) {
+ DCHECK(mapper != nullptr);
+ max_mapped_in_ = -1;
+ is_there_stack_mapped_ = false;
+ for (int in_position = 0; in_position < count; in_position++) {
+ RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
+ arg_locs[in_position].wide);
+ if (reg.Valid()) {
+ mapping_[in_position] = reg;
+ // TODO: Enable the following code once FlushIns() supports 64-bit argument registers.
+ // if (arg_locs[in_position].wide) {
+ // if (reg.Is32Bit()) {
+ // // As it is a split long, the hi-part is on stack.
+ // is_there_stack_mapped_ = true;
+ // }
+ // // We covered 2 v-registers, so skip the next one
+ // in_position++;
+ // }
+ max_mapped_in_ = std::max(max_mapped_in_, in_position);
+ } else {
+ is_there_stack_mapped_ = true;
+ }
+ }
+ initialized_ = true;
+}
+
+// TODO: Should be able to return long, double registers.
+// Doing so requires checking some common code first, as it will break some of its assumptions.
+RegStorage ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+ if (!in_to_reg_storage_mapping_.IsInitialized()) {
+ int start_vreg = mir_graph_->GetFirstInVR();
+ RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
+
+ InToRegStorageArmMapper mapper;
+ in_to_reg_storage_mapping_.Initialize(arg_locs, mir_graph_->GetNumOfInVRs(), &mapper);
+ }
+ return in_to_reg_storage_mapping_.Get(arg_num);
+}
+
+int ArmMir2Lir::GenDalvikArgsNoRange(CallInfo* info,
+ int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this) {
+ if (kArm32QuickCodeUseSoftFloat) {
+ return Mir2Lir::GenDalvikArgsNoRange(info, call_state, pcrLabel, next_call_insn, target_method,
+ vtable_idx, direct_code, direct_method, type, skip_this);
+ } else {
+ return GenDalvikArgsRange(info, call_state, pcrLabel, next_call_insn, target_method, vtable_idx,
+ direct_code, direct_method, type, skip_this);
+ }
+}
+
+int ArmMir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
+ LIR** pcrLabel, NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this) {
+ if (kArm32QuickCodeUseSoftFloat) {
+ return Mir2Lir::GenDalvikArgsRange(info, call_state, pcrLabel, next_call_insn, target_method,
+ vtable_idx, direct_code, direct_method, type, skip_this);
+ }
+
+ // TODO: Rework the implementation when argument register can be long or double.
+
+ /* If no arguments, just return */
+ if (info->num_arg_words == 0) {
+ return call_state;
+ }
+
+ const int start_index = skip_this ? 1 : 0;
+
+ InToRegStorageArmMapper mapper;
+ InToRegStorageMapping in_to_reg_storage_mapping;
+ in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
+ const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
+ int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + 1);
+
+ // First of all, check whether it makes sense to use bulk copying.
+ // Bulk copying is done only for the range case.
+ // TODO: Use a named constant instead of the literal 2.
+ if (info->is_range && regs_left_to_pass_via_stack >= 2) {
+ // Scan the rest of the args - if an arg lives in a physical register, flush it to memory.
+ for (int next_arg = last_mapped_in + 1; next_arg < info->num_arg_words;) {
+ RegLocation loc = info->args[next_arg];
+ if (loc.wide) {
+ // TODO: Only flush hi-part.
+ if (loc.high_word) {
+ loc = info->args[--next_arg];
+ }
+ loc = UpdateLocWide(loc);
+ if (loc.location == kLocPhysReg) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ }
+ next_arg += 2;
+ } else {
+ loc = UpdateLoc(loc);
+ if (loc.location == kLocPhysReg) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ if (loc.ref) {
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
+ } else {
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
+ kNotVolatile);
+ }
+ }
+ next_arg++;
+ }
+ }
+
+ // The rest can be copied together
+ int start_offset = SRegOffset(info->args[last_mapped_in + 1].s_reg_low);
+ int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + 1,
+ cu_->instruction_set);
+
+ int current_src_offset = start_offset;
+ int current_dest_offset = outs_offset;
+
+ // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ while (regs_left_to_pass_via_stack > 0) {
+ /*
+ * TODO: Improve by adding a block copy for large numbers of arguments. This
+ * should be done, if possible, as a target-dependent helper. For now, just
+ * copy a Dalvik vreg at a time.
+ */
+ // Moving 32-bits via general purpose register.
+ size_t bytes_to_move = sizeof(uint32_t);
+
+ // Instead of allocating a new temp, simply reuse one of the registers being used
+ // for argument passing.
+ RegStorage temp = TargetReg(kArg3, kNotWide);
+
+ // Now load the argument VR and store to the outs.
+ Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
+ Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
+
+ current_src_offset += bytes_to_move;
+ current_dest_offset += bytes_to_move;
+ regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
+ }
+ DCHECK_EQ(regs_left_to_pass_via_stack, 0);
+ }
+
+ // Now handle the remaining arguments that were not mapped to registers.
+ if (in_to_reg_storage_mapping.IsThereStackMapped()) {
+ RegStorage regWide = TargetReg(kArg2, kWide);
+ for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
+ RegLocation rl_arg = info->args[i];
+ rl_arg = UpdateRawLoc(rl_arg);
+ RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ // TODO: Only pass the hi-part of a split wide argument via the stack.
+ if (!reg.Valid() || rl_arg.wide) {
+ int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
+
+ {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ if (rl_arg.wide) {
+ if (rl_arg.location == kLocPhysReg) {
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ } else {
+ LoadValueDirectWideFixed(rl_arg, regWide);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ }
+ } else {
+ if (rl_arg.location == kLocPhysReg) {
+ if (rl_arg.ref) {
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
+ } else {
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ }
+ } else {
+ if (rl_arg.ref) {
+ RegStorage regSingle = TargetReg(kArg2, kRef);
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, regSingle, kNotVolatile);
+ } else {
+ RegStorage regSingle = TargetReg(kArg2, kNotWide);
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
+ }
+ }
+ }
+ }
+
+ call_state = next_call_insn(cu_, info, call_state, target_method,
+ vtable_idx, direct_code, direct_method, type);
+ }
+ if (rl_arg.wide) {
+ i++;
+ }
+ }
+ }
+
+ // Finish with mapped registers
+ for (int i = start_index; i <= last_mapped_in; i++) {
+ RegLocation rl_arg = info->args[i];
+ rl_arg = UpdateRawLoc(rl_arg);
+ RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ if (reg.Valid()) {
+ if (reg.Is64Bit()) {
+ LoadValueDirectWideFixed(rl_arg, reg);
+ } else {
+ // TODO: A split long should be the only case we need to care about here.
+ if (rl_arg.wide) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ int high_word = rl_arg.high_word ? 1 : 0;
+ rl_arg = high_word ? info->args[i - 1] : rl_arg;
+ if (rl_arg.location == kLocPhysReg) {
+ RegStorage rs_arg = rl_arg.reg;
+ if (rs_arg.IsDouble() && rs_arg.Is64BitSolo()) {
+ rs_arg = As64BitFloatRegPair(rs_arg);
+ }
+ RegStorage rs_arg_low = rs_arg.GetLow();
+ RegStorage rs_arg_high = rs_arg.GetHigh();
+ OpRegCopy(reg, high_word ? rs_arg_high : rs_arg_low);
+ } else {
+ Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + high_word), reg);
+ }
+ } else {
+ LoadValueDirectFixed(rl_arg, reg);
+ }
+ }
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
+ }
+ if (reg.Is64Bit()) {
+ i++;
+ }
+ }
+
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
+ if (pcrLabel) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
+ *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
+ } else {
+ *pcrLabel = nullptr;
+ // In lieu of generating a check for kArg1 being null, we need to
+ // perform a load when doing implicit checks.
+ RegStorage tmp = AllocTemp();
+ Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
+ MarkPossibleNullPointerException(info->opt_flags);
+ FreeTemp(tmp);
+ }
+ }
+ return call_state;
+}
+
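GenDalvikArgsRange() above is dense; as a reading aid, its hard-float path proceeds roughly in these phases (informal outline, not code from the patch):

// 1. Build the in-vreg -> argument-register mapping via InToRegStorageArmMapper.
// 2. For range calls with two or more stack-bound args, bulk-copy those vregs
//    from the caller frame to the outs area, one 32-bit word at a time.
// 3. Store any unmapped (or wide) arguments to their out offsets on the stack.
// 4. Load the register-mapped arguments, interleaving next_call_insn() to
//    advance the invoke sequence, and finally emit the receiver null check.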
} // namespace art
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 09acf4c..0c7812b 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -14,8 +14,10 @@
* limitations under the License.
*/
-#include "arm_lir.h"
#include "codegen_arm.h"
+
+#include "arch/arm/instruction_set_features_arm.h"
+#include "arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
@@ -373,18 +375,21 @@
}
LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -461,7 +466,6 @@
}
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
- LIR* res;
bool neg = (value < 0);
int32_t abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
@@ -587,6 +591,7 @@
} else {
RegStorage r_scratch = AllocTemp();
LoadConstant(r_scratch, value);
+ LIR* res;
if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
else
@@ -956,7 +961,7 @@
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rARM_SP);
+ DCHECK_EQ(r_base, rs_rARM_SP);
AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
@@ -971,7 +976,7 @@
LIR* load;
if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
!cu_->compiler_driver->GetInstructionSetFeatures()->
- AsArmInstructionSetFeatures()->HasLpae()) {
+ AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()) {
// Only 64-bit load needs special handling.
// If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
@@ -1007,6 +1012,12 @@
// Intentional fall-though.
case k64:
if (r_src.IsFloat()) {
+ // Note: If the register is retrieved from the register allocator, it should never be a pair.
+ // But some functions in mir2lir assume 64-bit registers are 32-bit register pairs.
+ // TODO: Rework Mir2Lir::LoadArg() and Mir2Lir::LoadArgDirect().
+ if (r_src.IsPair()) {
+ r_src = As64BitFloatReg(r_src);
+ }
DCHECK(!r_src.IsPair());
store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vstrd, r_base, displacement, r_src);
} else {
@@ -1079,7 +1090,7 @@
// TODO: In future, may need to differentiate Dalvik & spill accesses
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rARM_SP);
+ DCHECK_EQ(r_base, rs_rARM_SP);
AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;
@@ -1095,7 +1106,7 @@
LIR* store;
if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
!cu_->compiler_driver->GetInstructionSetFeatures()->
- AsArmInstructionSetFeatures()->HasLpae()) {
+ AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()) {
// Only 64-bit store needs special handling.
// If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
// Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
@@ -1161,11 +1172,13 @@
}
LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a87b06a..f8a7310 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -127,7 +127,7 @@
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
// Registers (integer) values.
-enum A64NativeRegisterPool {
+enum A64NativeRegisterPool { // private marker to keep generate-operator-out.py from processing this enum.
# define A64_DEFINE_REGISTERS(nr) \
rw##nr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | nr, \
rx##nr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | nr, \
@@ -320,6 +320,7 @@
kA64Mul3rrr, // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0].
kA64Msub4rrrr, // msub[s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0].
kA64Neg3rro, // neg alias of "sub arg0, rzr, arg1, arg2".
+ kA64Nop0, // nop alias of "hint #0" [11010101000000110010000000011111].
kA64Orr3Rrl, // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Orr4rrro, // orr [s0101010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
kA64Ret, // ret [11010110010111110000001111000000].
@@ -332,7 +333,7 @@
kA64Scvtf2fw, // scvtf [000111100s100010000000] rn[9-5] rd[4-0].
kA64Scvtf2fx, // scvtf [100111100s100010000000] rn[9-5] rd[4-0].
kA64Sdiv3rrr, // sdiv[s0011010110] rm[20-16] [000011] rn[9-5] rd[4-0].
- kA64Smaddl4xwwx, // smaddl [10011011001] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0].
+ kA64Smull3xww, // smull [10011011001] rm[20-16] [011111] rn[9-5] rd[4-0].
kA64Smulh3xxx, // smulh [10011011010] rm[20-16] [011111] rn[9-5] rd[4-0].
kA64Stp4ffXD, // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64Stp4rrXD, // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
@@ -362,9 +363,10 @@
kA64Tbz3rht, // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
kA64Ubfm4rrdd, // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Last,
- kA64NotWide = 0, // Flag used to select the first instruction variant.
- kA64Wide = 0x1000 // Flag used to select the second instruction variant.
+ kA64NotWide = kA64First, // 0 - Flag used to select the first instruction variant.
+ kA64Wide = 0x1000 // Flag used to select the second instruction variant.
};
+std::ostream& operator<<(std::ostream& os, const A64Opcode& rhs);
/*
* The A64 instruction set provides two variants for many instructions. For example, "mov wN, wM"
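kA64Wide above is a flag bit OR-ed onto an opcode to select its 64-bit ("x" register) variant; the WIDE()/UNWIDE()/IS_WIDE() macros used throughout this patch manipulate that bit. A plausible reconstruction of those helpers, stated as an assumption rather than copied from arm64_lir.h:

constexpr int kWideBit = 0x1000;  // kA64Wide
constexpr int Widen(int opcode) { return opcode | kWideBit; }
constexpr int Unwiden(int opcode) { return opcode & ~kWideBit; }
constexpr bool IsWidened(int opcode) { return (opcode & kWideBit) != 0; }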
@@ -414,6 +416,7 @@
kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const A64EncodingKind& rhs);
// Struct used to define the snippet positions for each A64 opcode.
struct A64EncodingMap {
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index e2ff090..cab11cc 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -14,8 +14,10 @@
* limitations under the License.
*/
-#include "arm64_lir.h"
#include "codegen_arm64.h"
+
+#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arm64_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
namespace art {
@@ -468,13 +470,17 @@
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"mul", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000),
- kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 14, 10,
- kFmtRegR, 20, 16, IS_QUAD_OP | REG_DEF0_USE123,
- "msub", "!0r, !1r, !3r, !2r", kFixupNone),
+ kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
+ kFmtRegR, 14, 10, IS_QUAD_OP | REG_DEF0_USE123 | NEEDS_FIXUP,
+ "msub", "!0r, !1r, !2r, !3r", kFixupA53Erratum835769),
ENCODING_MAP(WIDE(kA64Neg3rro), SF_VARIANTS(0x4b0003e0),
kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtShift, -1, -1,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
"neg", "!0r, !1r!2o", kFixupNone),
+ ENCODING_MAP(kA64Nop0, NO_VARIANTS(0xd503201f),
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "nop", "", kFixupNone),
ENCODING_MAP(WIDE(kA64Orr3Rrl), SF_VARIANTS(0x32000000),
kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
@@ -523,10 +529,10 @@
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"sdiv", "!0r, !1r, !2r", kFixupNone),
- ENCODING_MAP(WIDE(kA64Smaddl4xwwx), NO_VARIANTS(0x9b200000),
+ ENCODING_MAP(kA64Smull3xww, NO_VARIANTS(0x9b207c00),
kFmtRegX, 4, 0, kFmtRegW, 9, 5, kFmtRegW, 20, 16,
- kFmtRegX, 14, 10, IS_QUAD_OP | REG_DEF0_USE123,
- "smaddl", "!0x, !1w, !2w, !3x", kFixupNone),
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "smull", "!0x, !1w, !2w", kFixupNone),
ENCODING_MAP(kA64Smulh3xxx, NO_VARIANTS(0x9b407c00),
kFmtRegX, 4, 0, kFmtRegX, 9, 5, kFmtRegX, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -779,8 +785,8 @@
// and zr. This means that these two registers do not need any special treatment, as
// their bottom 5 bits are correctly set to 31 == 0b11111, which is the right
// value for encoding both sp and zr.
- COMPILE_ASSERT((rxzr & 0x1f) == 0x1f, rzr_register_number_must_be_31);
- COMPILE_ASSERT((rsp & 0x1f) == 0x1f, rsp_register_number_must_be_31);
+ static_assert((rxzr & 0x1f) == 0x1f, "rzr register number must be 31");
+ static_assert((rsp & 0x1f) == 0x1f, "rsp register number must be 31");
}
value = (operand << encoder->field_loc[i].start) &
@@ -881,6 +887,14 @@
LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
}
lir->operands[0] = delta >> 2;
+ if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 1) {
+ // Useless branch.
+ offset_adjustment -= lir->flags.size;
+ lir->flags.is_nop = true;
+ // Don't unlink - just set to do-nothing.
+ lir->flags.fixup = kFixupNone;
+ res = kRetryAll;
+ }
break;
}
case kFixupLoad:
@@ -928,14 +942,13 @@
// Check if branch offset can be encoded in tbz/tbnz.
if (!IS_SIGNED_IMM14(delta >> 2)) {
DexOffset dalvik_offset = lir->dalvik_offset;
- int16_t opcode = lir->opcode;
- LIR* target = lir->target;
+ LIR* targetLIR = lir->target;
// "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)".
offset_adjustment -= lir->flags.size;
- int32_t imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
- DCHECK_NE(imm, -1);
+ int32_t encodedImm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
+ DCHECK_NE(encodedImm, -1);
lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl;
- lir->operands[1] = imm;
+ lir->operands[1] = encodedImm;
lir->target = nullptr;
lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup;
lir->flags.size = EncodingMap[kA64Tst2rl].size;
@@ -944,7 +957,7 @@
opcode = UNWIDE(opcode);
DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht);
LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct,
- opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target);
+ opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, targetLIR);
InsertLIRAfter(lir, new_lir);
new_lir->offset = lir->offset + lir->flags.size;
new_lir->flags.generation = generation;
@@ -981,6 +994,30 @@
lir->operands[1] = delta;
break;
}
+ case kFixupA53Erratum835769:
+ // Avoid emitting code that could trigger Cortex A53's erratum 835769.
+ // This fixup should be carried out for all multiply-accumulate instructions: madd, msub,
+ // smaddl, smsubl, umaddl and umsubl.
+ if (cu_->GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()
+ ->NeedFixCortexA53_835769()) {
+ // Check that this is a 64-bit multiply-accumulate.
+ if (IS_WIDE(lir->opcode)) {
+ uint64_t prev_insn_flags = EncodingMap[UNWIDE(lir->prev->opcode)].flags;
+ // Check that the instruction preceding the multiply-accumulate is a load or store.
+ if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
+ // Insert a NOP between the load/store and the multiply-accumulate.
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+ new_lir->offset = lir->offset;
+ new_lir->flags.fixup = kFixupNone;
+ new_lir->flags.size = EncodingMap[kA64Nop0].size;
+ InsertLIRBefore(lir, new_lir);
+ lir->offset += new_lir->flags.size;
+ offset_adjustment += new_lir->flags.size;
+ res = kRetryAll;
+ }
+ }
+ }
+ break;
default:
LOG(FATAL) << "Unexpected case " << lir->flags.fixup;
}
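The kFixupA53Erratum835769 case added above works around Cortex-A53 erratum 835769 by ensuring a 64-bit multiply-accumulate never directly follows a load or store. An illustrative before/after sequence (mnemonics chosen for the example, not emitted verbatim by this code):

//   Before fixup:              After fixup:
//     ldr  x2, [sp, #16]         ldr  x2, [sp, #16]
//     msub x0, x1, x2, x3        nop
//                                msub x0, x1, x2, x3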
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index c898e2d..089e4b6 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -251,20 +251,14 @@
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void Arm64Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void Arm64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTempWide();
RegStorage reg_card_no = AllocTempWide(); // Needs to be wide as addr is ref=64b
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
LoadWordDisp(rs_xSELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
// TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base),
0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
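UnconditionallyMarkGCCard() above emits the usual card-table dirtying sequence: shift the target address right by kCardShift to obtain the card index, then store a byte at card_base + index, the value being the low byte of the (biased) card base itself. A standalone sketch of that computation, with illustrative names:

#include <cstdint>

void MarkCardSketch(uint8_t* card_base, uintptr_t target_addr, unsigned card_shift) {
  card_base[target_addr >> card_shift] =
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_base));
}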
@@ -296,7 +290,8 @@
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm64);
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
+ !FrameNeedsStackCheck(frame_size_, kArm64);
NewLIR0(kPseudoMethodEntry);
@@ -320,7 +315,7 @@
// TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
// so that we can avoid the following "sub sp" when spilling?
OpRegRegImm(kOpSub, rs_x8, rs_sp, GetStackOverflowReservedBytes(kArm64));
- LoadWordDisp(rs_x8, 0, rs_x8);
+ Load32Disp(rs_x8, 0, rs_wzr);
MarkPossibleStackOverflowException();
}
}
@@ -401,6 +396,7 @@
}
static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
+ UNUSED(cu, target_method);
// Always emit relative calls.
return true;
}
@@ -411,9 +407,10 @@
*/
static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, unused_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_code != 0 && direct_method != 0) {
switch (state) {
@@ -470,8 +467,8 @@
if (direct_code == 0) {
// kInvokeTgt := arg0_ref->entrypoint
cg->LoadWordDisp(arg0_ref,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
}
break;
default:
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 510bd4c..5e10f80 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -71,6 +71,10 @@
bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int64_t lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -90,7 +94,10 @@
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+
LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
@@ -141,13 +148,13 @@
void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation lr_shift) OVERRIDE;
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) OVERRIDE;
+ RegLocation rl_src2, int flags) OVERRIDE;
void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_dest, int scale) OVERRIDE;
void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_shift) OVERRIDE;
+ RegLocation rl_shift, int flags) OVERRIDE;
void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) OVERRIDE;
void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -173,7 +180,7 @@
bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) OVERRIDE;
+ RegLocation rl_src2, int flags) OVERRIDE;
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
OVERRIDE;
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div)
@@ -188,7 +195,7 @@
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
@@ -249,10 +256,10 @@
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
bool skip_this) OVERRIDE;
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
@@ -363,8 +370,8 @@
void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
void AssignDataOffsets();
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, bool check_zero);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+ bool is_div, int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
size_t GetLoadStoreSize(LIR* lir);
bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
@@ -413,7 +420,7 @@
void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div);
+ RegLocation rl_src2, bool is_div, int flags);
InToRegStorageMapping in_to_reg_storage_mapping_;
static const A64EncodingMap EncodingMap[kA64Last];
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index db24d12..ff692b7 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -116,6 +116,32 @@
StoreValueWide(rl_dest, rl_result);
}
+void Arm64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempSingle();
+ LoadConstantNoClobber(r_tmp, constant);
+ rl_src1 = LoadValue(rl_src1, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR3(kA64Fmul3fff, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ RegLocation rl_result;
+ RegStorage r_tmp = AllocTempDouble();
+ DCHECK(r_tmp.IsDouble());
+ LoadConstantWide(r_tmp, constant);
+ rl_src1 = LoadValueWide(rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(WIDE(kA64Fmul3fff), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
void Arm64Mir2Lir::GenConversion(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src) {
int op = kA64Brk1d;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index abcb30f..0e00698 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -16,6 +16,7 @@
/* This file contains codegen for the Thumb2 ISA. */
+#include "arch/instruction_set_features.h"
#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
@@ -32,11 +33,13 @@
}
LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
+ UNUSED(ccode, guide);
LOG(FATAL) << "Unexpected use of OpIT for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}
@@ -174,13 +177,14 @@
void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(rs_dest.Valid());
OpRegReg(kOpCmp, left_op, right_op);
GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}
void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -406,6 +410,7 @@
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -423,8 +428,7 @@
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
RegStorage r_long_mul = AllocTemp();
- NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
- r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
+ NewLIR3(kA64Smull3xww, As64BitReg(r_long_mul).GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
switch (pattern) {
case Divide3:
OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
@@ -450,6 +454,7 @@
bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -478,7 +483,6 @@
} else {
reconstructed_imm = base + 1;
}
- DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
}
// Load the magic constant in two instructions.
@@ -590,13 +594,16 @@
}
bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
- return false;
+ UNREACHABLE();
}
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -614,9 +621,10 @@
}
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero) {
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
@@ -640,7 +648,7 @@
}
OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
- r_src1.GetReg(), r_src2.GetReg());
+ r_src2.GetReg(), r_src1.GetReg());
FreeTemp(temp);
}
return rl_result;
@@ -929,25 +937,27 @@
}
LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
+ RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
+ int first_bit, int second_bit) {
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
if (first_bit != 0) {
OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
-void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}
@@ -969,7 +979,9 @@
}
bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP != 0
+ if (!cu_->GetInstructionSetFeatures()->IsSmp()) {
+ return false;
+ }
// Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
LIR* barrier = last_lir_insn_;
@@ -1005,9 +1017,6 @@
DCHECK(!barrier->flags.use_def_invalid);
barrier->u.m.def_mask = &kEncodeAll;
return ret;
-#else
- return false;
-#endif
}
void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -1020,7 +1029,7 @@
}
void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
+ RegLocation rl_src1, RegLocation rl_src2, bool is_div, int flags) {
if (rl_src2.is_const) {
DCHECK(rl_src2.wide);
int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
@@ -1032,7 +1041,9 @@
RegLocation rl_result;
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- GenDivZeroCheck(rl_src2.reg);
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ GenDivZeroCheck(rl_src2.reg);
+ }
rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
StoreValueWide(rl_dest, rl_result);
}
@@ -1067,7 +1078,7 @@
}
void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
switch (opcode) {
case Instruction::NOT_LONG:
GenNotLong(rl_dest, rl_src2);
@@ -1086,11 +1097,11 @@
return;
case Instruction::DIV_LONG:
case Instruction::DIV_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
return;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
return;
case Instruction::AND_LONG_2ADDR:
case Instruction::AND_LONG:
@@ -1135,11 +1146,6 @@
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
- // If index is constant, just fold it into the data offset
- if (constant_index) {
- data_offset += mir_graph_->ConstantValue(rl_index) << scale;
- }
-
/* null object? */
GenNullCheck(rl_array.reg, opt_flags);
@@ -1153,43 +1159,22 @@
} else {
ForceImplicitNullCheck(rl_array.reg, opt_flags);
}
- if (rl_dest.wide || rl_dest.fp || constant_index) {
- RegStorage reg_ptr;
- if (constant_index) {
- reg_ptr = rl_array.reg; // NOTE: must not alter reg_ptr in constant case.
- } else {
- // No special indexed operation, lea + load w/ displacement
- reg_ptr = AllocTempRef();
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
- EncodeShift(kA64Lsl, scale));
- FreeTemp(rl_index.reg);
- }
+ if (constant_index) {
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- if (constant_index) {
- GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
- } else {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- }
+ GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
FreeTemp(reg_len);
}
+ // Fold the constant index into the data offset.
+ data_offset += mir_graph_->ConstantValue(rl_index) << scale;
if (rl_result.ref) {
- LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
+ LoadRefDisp(rl_array.reg, data_offset, rl_result.reg, kNotVolatile);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
- }
- MarkPossibleNullPointerException(opt_flags);
- if (!constant_index) {
- FreeTemp(reg_ptr);
- }
- if (rl_dest.wide) {
- StoreValueWide(rl_dest, rl_result);
- } else {
- StoreValue(rl_dest, rl_result);
+ LoadBaseDisp(rl_array.reg, data_offset, rl_result.reg, size, kNotVolatile);
}
} else {
- // Offset base, then use indexed load
+ // Offset base, then use indexed load.
RegStorage reg_ptr = AllocTempRef();
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
FreeTemp(rl_array.reg);
@@ -1200,12 +1185,15 @@
FreeTemp(reg_len);
}
if (rl_result.ref) {
- LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
+ LoadRefIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale);
} else {
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
}
- MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
+ }
+ if (rl_dest.wide) {
+ StoreValueWide(rl_dest, rl_result);
+ } else {
StoreValue(rl_dest, rl_result);
}
}
@@ -1227,11 +1215,6 @@
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
- // If index is constant, just fold it into the data offset.
- if (constant_index) {
- data_offset += mir_graph_->ConstantValue(rl_index) << scale;
- }
-
rl_array = LoadValue(rl_array, kRefReg);
if (!constant_index) {
rl_index = LoadValue(rl_index, kCoreReg);
@@ -1264,44 +1247,35 @@
ForceImplicitNullCheck(rl_array.reg, opt_flags);
}
/* at this point, reg_ptr points to array, 2 live temps */
- if (rl_src.wide || rl_src.fp || constant_index) {
- if (rl_src.wide) {
- rl_src = LoadValueWide(rl_src, reg_class);
- } else {
- rl_src = LoadValue(rl_src, reg_class);
- }
- if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
- EncodeShift(kA64Lsl, scale));
- }
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(rl_src, reg_class);
+ } else {
+ rl_src = LoadValue(rl_src, reg_class);
+ }
+ if (constant_index) {
if (needs_range_check) {
- if (constant_index) {
- GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
- } else {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- }
+ GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
FreeTemp(reg_len);
}
+ // Fold the constant index into the data offset.
+ data_offset += mir_graph_->ConstantValue(rl_index) << scale;
if (rl_src.ref) {
StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
} else {
StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
}
- MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
- rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
if (rl_src.ref) {
- StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
+ StoreRefIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale);
} else {
- StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
+ StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
}
- MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
@@ -1312,7 +1286,8 @@
}
void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
+ RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
+ int flags ATTRIBUTE_UNUSED) {
OpKind op = kOpBkpt;
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
@@ -1344,7 +1319,7 @@
}
void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
OpKind op = kOpBkpt;
switch (opcode) {
case Instruction::ADD_LONG:
@@ -1373,7 +1348,7 @@
if (op == kOpSub) {
if (!rl_src2.is_const) {
- return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
} else {
// Associativity.
@@ -1468,8 +1443,8 @@
}
}
-static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
int core_count = POPCOUNT(core_reg_mask);
@@ -1491,7 +1466,7 @@
}
static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+ uint32_t fp_reg_mask) {
// Otherwise, spill both core and fp regs at the same time.
// The very first instruction will be an stp with pre-indexed address, moving the stack pointer
// down. From then on, we fill upwards. This will generate overall the same number of instructions
@@ -1614,9 +1589,9 @@
// This case is also optimal when we have an odd number of core spills, and an even (non-zero)
// number of fp spills.
if ((RoundUp(frame_size, 8) / 8 <= 63)) {
- return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
} else {
- return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
}
}
@@ -1654,6 +1629,7 @@
void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
int frame_size) {
+ DCHECK_EQ(base, rs_sp);
// Restore saves and drop stack frame.
// 2 versions:
//
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index ba47883..094ff51 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -589,11 +589,10 @@
call_method_insns_(arena->Adapter()) {
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kA64Last; i++) {
- if (UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode) != i) {
- LOG(FATAL) << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode), i)
+ << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 0883694..78a6df8 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -306,7 +306,7 @@
// algorithm will give it a low priority for promotion, even when it is referenced many times in
// the code.
-bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
// A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
// We therefore return true and give it a low priority for promotion.
return true;
@@ -345,7 +345,7 @@
case Instruction::SUB_INT_2ADDR:
// The code below is consistent with the implementation of OpRegRegImm().
{
- int32_t abs_value = std::abs(value);
+ uint32_t abs_value = (value == INT_MIN) ? value : std::abs(value);
if (abs_value < 0x1000) {
return true;
} else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
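For context on the abs_value test above: A64 ADD/SUB (immediate) encodes a 12-bit unsigned immediate, optionally shifted left by 12, which is what the two range checks mirror. A standalone sketch of that encodability test (names are illustrative, not ART's):

    #include <cstdint>

    // True if the magnitude fits an A64 ADD/SUB immediate: imm12, or imm12 << 12.
    bool FitsA64AddSubImmediate(uint32_t magnitude) {
      if (magnitude < 0x1000u) {
        return true;                                         // imm12, LSL #0
      }
      if ((magnitude & 0xfffu) == 0u && (magnitude >> 12) < 0x1000u) {
        return true;                                         // imm12, LSL #12
      }
      return false;                                          // needs a separate constant load
    }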
@@ -673,19 +673,24 @@
}
}
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
+ MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
return nullptr;
}
LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -768,8 +773,8 @@
opcode = kA64Sub4RRre;
break;
default:
- LOG(FATAL) << "Unimplemented opcode: " << op;
- break;
+ UNIMPLEMENTED(FATAL) << "Unimplemented opcode: " << op;
+ UNREACHABLE();
}
A64Opcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
@@ -805,7 +810,7 @@
LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value) {
LIR* res;
bool neg = (value < 0);
- int64_t abs_value = (neg) ? -value : value;
+ uint64_t abs_value = (neg & !(value == LLONG_MIN)) ? -value : value;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
bool is_logical = false;
@@ -938,7 +943,7 @@
A64Opcode neg_opcode = kA64Brk1d;
bool shift;
bool neg = (value < 0);
- uint64_t abs_value = (neg) ? -value : value;
+ uint64_t abs_value = (neg & !(value == LLONG_MIN)) ? -value : value;
if (LIKELY(abs_value < 0x1000)) {
// abs_value is a 12-bit immediate.
@@ -1261,7 +1266,7 @@
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_sp);
+ DCHECK_EQ(r_base, rs_sp);
AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
@@ -1352,7 +1357,7 @@
// TODO: In future, may need to differentiate Dalvik & spill accesses.
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_sp);
+ DCHECK_EQ(r_base, rs_sp);
AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;
@@ -1386,16 +1391,20 @@
}
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ UNUSED(r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
- return NULL;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt,
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
+ // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 80a1ac4..80cb535 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -105,17 +105,17 @@
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
first_lir_insn_ = lir->next;
- if (lir->next != NULL) {
- lir->next->prev = NULL;
+ if (lir->next != nullptr) {
+ lir->next->prev = nullptr;
} else {
- DCHECK(lir->next == NULL);
+ DCHECK(lir->next == nullptr);
DCHECK(lir == last_lir_insn_);
- last_lir_insn_ = NULL;
+ last_lir_insn_ = nullptr;
}
} else if (lir == last_lir_insn_) {
last_lir_insn_ = lir->prev;
- lir->prev->next = NULL;
- } else if ((lir->prev != NULL) && (lir->next != NULL)) {
+ lir->prev->next = nullptr;
+ } else if ((lir->prev != nullptr) && (lir->next != nullptr)) {
lir->prev->next = lir->next;
lir->next->prev = lir->prev;
}
@@ -314,6 +314,15 @@
}
}
+void Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+ DCHECK(val_reg.Valid());
+ DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
+ LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, nullptr);
+ UnconditionallyMarkGCCard(tgt_addr_reg);
+ LIR* target = NewLIR0(kPseudoTargetLabel);
+ branch_over->target = target;
+}
+
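The new MarkGCCard above only dirties the card when the stored value is non-null. A sketch of the write barrier it wraps, with assumed constants (kCardShift and kCardDirty are illustrative, not taken from this patch):

    #include <cstdint>

    constexpr uintptr_t kCardShift = 10;   // assumed card size of 1 KiB
    constexpr uint8_t kCardDirty = 0x70;   // assumed dirty-card marker

    // Equivalent of the OpCmpImmBranch(kCondEq, val, 0) guard around UnconditionallyMarkGCCard.
    void MarkCardIfNonNull(uint8_t* card_table_base, const void* stored_value, const void* holder) {
      if (stored_value == nullptr) {
        return;  // null stores cannot create references the GC needs to trace
      }
      card_table_base[reinterpret_cast<uintptr_t>(holder) >> kCardShift] = kCardDirty;
    }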
/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
LOG(INFO) << "Dumping LIR insns for "
@@ -334,10 +343,10 @@
<< static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
DumpPromotionMap();
UpdateLIROffsets();
- for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = first_lir_insn_; lir_insn != nullptr; lir_insn = lir_insn->next) {
DumpLIRInsn(lir_insn, 0);
}
- for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = literal_list_; lir_insn != nullptr; lir_insn = lir_insn->next) {
LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
lir_insn->operands[0]);
}
@@ -368,13 +377,13 @@
return data_target;
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
bool lo_match = false;
- LIR* lo_target = NULL;
+ LIR* lo_target = nullptr;
while (data_target) {
if (lo_match && (data_target->operands[0] == val_hi)) {
// Record high word in case we need to expand this later.
@@ -388,7 +397,7 @@
}
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact method match */
@@ -431,7 +440,7 @@
estimated_native_code_size_ += sizeof(value);
return new_value;
}
- return NULL;
+ return nullptr;
}
/* Add a 64-bit constant to the constant pool or mixed with code */
@@ -469,14 +478,14 @@
void Mir2Lir::InstallLiteralPools() {
AlignBuffer(code_buffer_, data_offset_);
LIR* data_lir = literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
Push32(code_buffer_, data_lir->operands[0]);
data_lir = NEXT_LIR(data_lir);
}
// TODO: patches_.reserve() as needed.
// Push code and method literals, record offsets for the compiler to patch.
data_lir = code_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -486,7 +495,7 @@
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -497,7 +506,7 @@
}
// Push class literals.
data_lir = class_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_type_idx = data_lir->operands[0];
const DexFile* class_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -577,7 +586,7 @@
}
static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += 4;
}
@@ -588,7 +597,7 @@
unsigned int element_size) {
// Align to natural pointer size.
offset = RoundUp(offset, element_size);
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += element_size;
}
@@ -642,7 +651,7 @@
uint32_t dex2pc_entries = 0u;
uint32_t dex2pc_offset = 0u;
uint32_t dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
pc2dex_src_entries++;
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
pc2dex_entries += 1;
@@ -682,7 +691,7 @@
pc2dex_dalvik_offset = 0u;
dex2pc_offset = 0u;
dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (generate_src_map && !tgt_lir->flags.is_nop) {
src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
static_cast<int32_t>(tgt_lir->dalvik_offset)}));
@@ -717,7 +726,7 @@
CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
auto it = table.PcToDexBegin();
auto it2 = table.DexToPcBegin();
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
@@ -758,7 +767,7 @@
uint32_t native_offset = it.NativePcOffset();
uint32_t dex_pc = it.DexPc();
const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
- CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
+ CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
native_gc_map_builder.AddEntry(native_offset, references);
}
@@ -768,8 +777,8 @@
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
offset = AssignLiteralOffsetCommon(literal_list_, offset);
constexpr unsigned int ptr_size = sizeof(uint32_t);
- COMPILE_ASSERT(ptr_size >= sizeof(mirror::HeapReference<mirror::Object>),
- ptr_size_cannot_hold_a_heap_reference);
+ static_assert(ptr_size >= sizeof(mirror::HeapReference<mirror::Object>),
+ "Pointer size cannot hold a heap reference");
offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size);
offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size);
offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size);
@@ -904,6 +913,7 @@
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+ UNUSED(offset);
// NOTE: only used for debug listings.
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
@@ -925,7 +935,7 @@
case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
default:
LOG(FATAL) << "Unexpected opcode " << opcode;
- is_taken = false;
+ UNREACHABLE();
}
return is_taken;
}
@@ -941,8 +951,8 @@
case kCondLe: res = kCondGe; break;
case kCondGe: res = kCondLe; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
@@ -957,8 +967,8 @@
case kCondLe: res = kCondGt; break;
case kCondGe: res = kCondLt; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
@@ -966,11 +976,11 @@
// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Backend(arena),
- literal_list_(NULL),
- method_literal_list_(NULL),
- class_literal_list_(NULL),
- code_literal_list_(NULL),
- first_fixup_(NULL),
+ literal_list_(nullptr),
+ method_literal_list_(nullptr),
+ class_literal_list_(nullptr),
+ code_literal_list_(nullptr),
+ first_fixup_(nullptr),
cu_(cu),
mir_graph_(mir_graph),
switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
@@ -980,8 +990,8 @@
pointer_storage_(arena->Adapter()),
data_offset_(0),
total_size_(0),
- block_label_list_(NULL),
- promotion_map_(NULL),
+ block_label_list_(nullptr),
+ promotion_map_(nullptr),
current_dalvik_offset_(0),
estimated_native_code_size_(0),
reg_pool_(nullptr),
@@ -994,8 +1004,8 @@
frame_size_(0),
core_spill_mask_(0),
fp_spill_mask_(0),
- first_lir_insn_(NULL),
- last_lir_insn_(NULL),
+ first_lir_insn_(nullptr),
+ last_lir_insn_(nullptr),
slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena) {
@@ -1005,8 +1015,8 @@
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for NULL.
- size_t null_idx = WrapPointer(NULL);
+ // Reserve pointer id 0 for nullptr.
+ size_t null_idx = WrapPointer(nullptr);
DCHECK_EQ(null_idx, 0U);
}
@@ -1126,14 +1136,14 @@
* unit
*/
void Mir2Lir::AppendLIR(LIR* lir) {
- if (first_lir_insn_ == NULL) {
- DCHECK(last_lir_insn_ == NULL);
+ if (first_lir_insn_ == nullptr) {
+ DCHECK(last_lir_insn_ == nullptr);
last_lir_insn_ = first_lir_insn_ = lir;
- lir->prev = lir->next = NULL;
+ lir->prev = lir->next = nullptr;
} else {
last_lir_insn_->next = lir;
lir->prev = last_lir_insn_;
- lir->next = NULL;
+ lir->next = nullptr;
last_lir_insn_ = lir;
}
}
@@ -1145,7 +1155,7 @@
* prev_lir <-> new_lir <-> current_lir
*/
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
- DCHECK(current_lir->prev != NULL);
+ DCHECK(current_lir->prev != nullptr);
LIR *prev_lir = current_lir->prev;
prev_lir->next = new_lir;
@@ -1216,7 +1226,7 @@
void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1233,7 +1243,7 @@
void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1291,7 +1301,9 @@
}
void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
+ UNREACHABLE();
}
} // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0f1d765..e12d305 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -68,40 +68,41 @@
false, // kIntrinsicUnsafePut
true, // kIntrinsicSystemArrayCopyCharArray
};
-COMPILE_ASSERT(arraysize(kIntrinsicIsStatic) == kInlineOpNop, check_arraysize_kIntrinsicIsStatic);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicDoubleCvt], DoubleCvt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloatCvt], FloatCvt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBits], ReverseBits_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBytes], ReverseBytes_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsInt], AbsInt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsLong], AbsLong_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsFloat], AbsFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsDouble], AbsDouble_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxInt], MinMaxInt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxLong], MinMaxLong_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCeil], Ceil_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], Get_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIndexOf], IndexOf_must_not_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCurrentThread], CurrentThread_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPeek], Peek_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPoke], Poke_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCas], Cas_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], UnsafeGet_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafePut], UnsafePut_must_not_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
- SystemArrayCopyCharArray_must_be_static);
+static_assert(arraysize(kIntrinsicIsStatic) == kInlineOpNop,
+ "arraysize of kIntrinsicIsStatic unexpected");
+static_assert(kIntrinsicIsStatic[kIntrinsicDoubleCvt], "DoubleCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloatCvt], "FloatCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicReverseBits], "ReverseBits must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsDouble], "AbsDouble must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxInt], "MinMaxInt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxLong], "MinMaxLong must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], "MinMaxFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], "MinMaxDouble must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSqrt], "Sqrt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicCeil], "Ceil must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloor], "Floor must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRint], "Rint must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRoundFloat], "RoundFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRoundDouble], "RoundDouble must be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicCurrentThread], "CurrentThread must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicPeek], "Peek must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicPoke], "Poke must be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCas], "Cas must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], "UnsafeGet must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafePut], "UnsafePut must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
+ "SystemArrayCopyCharArray must be static");
-MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
+MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
MIR* insn = mir_graph->NewMIR();
insn->offset = invoke->offset;
insn->optimization_flags = MIR_CALLEE;
@@ -395,12 +396,15 @@
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
dex_file_(NULL) {
- COMPILE_ASSERT(kClassCacheFirst == 0, kClassCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kClassCacheNames) == kClassCacheLast, bad_arraysize_kClassCacheNames);
- COMPILE_ASSERT(kNameCacheFirst == 0, kNameCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kNameCacheNames) == kNameCacheLast, bad_arraysize_kNameCacheNames);
- COMPILE_ASSERT(kProtoCacheFirst == 0, kProtoCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kProtoCacheDefs) == kProtoCacheLast, bad_arraysize_kProtoCacheNames);
+ static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
+ static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
+ "bad arraysize for kClassCacheNames");
+ static_assert(kNameCacheFirst == 0, "kNameCacheFirst not 0");
+ static_assert(arraysize(kNameCacheNames) == kNameCacheLast,
+ "bad arraysize for kNameCacheNames");
+ static_assert(kProtoCacheFirst == 0, "kProtoCacheFirst not 0");
+ static_assert(arraysize(kProtoCacheDefs) == kProtoCacheLast,
+ "bad arraysize kProtoCacheNames");
}
DexFileMethodInliner::~DexFileMethodInliner() {
@@ -555,11 +559,11 @@
break;
case kInlineOpIGet:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIGet(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIGet(mir_graph, bb, invoke, move_result, method);
break;
case kInlineOpIPut:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIPut(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
break;
default:
LOG(FATAL) << "Unexpected inline op: " << method.opcode;
@@ -737,7 +741,7 @@
method.d.data == 0u));
// Insert the CONST instruction.
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = Instruction::CONST;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = method.d.data;
@@ -775,7 +779,7 @@
}
// Insert the move instruction
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = arg;
@@ -784,8 +788,7 @@
}
bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -819,7 +822,7 @@
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->offset = invoke->offset;
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
@@ -836,8 +839,7 @@
}
bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -881,7 +883,7 @@
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = src_reg;
insn->dalvikInsn.vB = object_reg;
@@ -895,7 +897,7 @@
bb->InsertMIRAfter(invoke, insn);
if (move_result != nullptr) {
- MIR* move = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* move = AllocReplacementMIR(mir_graph, invoke);
move->offset = move_result->offset;
if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 30a2d90..cb521da 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,9 +31,9 @@
class MethodVerifier;
} // namespace verifier
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
-struct MIR;
+class MIR;
class MIRGraph;
class Mir2Lir;
@@ -315,9 +315,9 @@
static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
MIR* move_result, const InlineMethod& method);
static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
ReaderWriterMutex lock_;
/*
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index a33d15f..c00f90b 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "arch/arm/instruction_set_features_arm.h"
#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
@@ -66,8 +67,8 @@
void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
+ DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in) {
}
void Compile() OVERRIDE {
@@ -84,9 +85,10 @@
void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), length_(length) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in,
+ RegStorage length_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), length_(length_in) {
}
void Compile() OVERRIDE {
@@ -108,9 +110,9 @@
void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), length_(length) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), length_(length_in) {
}
void Compile() OVERRIDE {
@@ -160,6 +162,10 @@
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
return GenExplicitNullCheck(m_reg, opt_flags);
}
+ // If null check has not been eliminated, reset redundant store tracking.
+ if ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0) {
+ ResetDefTracking();
+ }
return nullptr;
}
@@ -212,8 +218,7 @@
}
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken,
- LIR* fall_through) {
+ RegLocation rl_src2, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = (rl_src1.ref || rl_src2.ref) ? kRefReg : kCoreReg;
switch (opcode) {
@@ -276,8 +281,7 @@
OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}
-void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
- LIR* fall_through) {
+void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = rl_src.ref ? kRefReg : kCoreReg;
rl_src = LoadValue(rl_src, reg_class);
@@ -412,8 +416,8 @@
// share array alignment with ints (see comment at head of function)
size_t component_size = sizeof(int32_t);
- // Having a range of 0 is legal
- if (info->is_range && (elems > 0)) {
+ if (elems > 5) {
+ DCHECK(info->is_range); // Non-range insn can't encode more than 5 elems.
/*
* Bit of ugliness here. We're going to generate a mem copy loop
* on the register range, but it is possible that some regs
@@ -426,7 +430,11 @@
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+ if (loc.ref) {
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
+ } else {
+ Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+ }
}
}
/*
@@ -463,7 +471,7 @@
// Set up the loop counter (known to be > 0)
LoadConstant(r_idx, elems - 1);
// Generate the copy loop. Going backwards for convenience
- LIR* target = NewLIR0(kPseudoTargetLabel);
+ LIR* loop_head_target = NewLIR0(kPseudoTargetLabel);
// Copy next element
{
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -473,24 +481,45 @@
}
StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
FreeTemp(r_val);
- OpDecAndBranch(kCondGe, r_idx, target);
+ OpDecAndBranch(kCondGe, r_idx, loop_head_target);
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
// Restore the target pointer
OpRegRegImm(kOpAdd, ref_reg, r_dst,
-mirror::Array::DataOffset(component_size).Int32Value());
}
- } else if (!info->is_range) {
+ FreeTemp(r_idx);
+ FreeTemp(r_dst);
+ FreeTemp(r_src);
+ } else {
+ DCHECK_LE(elems, 5); // Usually but not necessarily non-range.
// TUNING: interleave
for (int i = 0; i < elems; i++) {
- RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
- Store32Disp(ref_reg,
- mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ RegLocation rl_arg;
+ if (info->args[i].ref) {
+ rl_arg = LoadValue(info->args[i], kRefReg);
+ StoreRefDisp(ref_reg,
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg,
+ kNotVolatile);
+ } else {
+ rl_arg = LoadValue(info->args[i], kCoreReg);
+ Store32Disp(ref_reg,
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ }
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(rl_arg.reg)) {
FreeTemp(rl_arg.reg);
}
}
}
+ if (elems != 0 && info->args[0].ref) {
+ // If there is at least one potentially non-null value, unconditionally mark the GC card.
+ for (int i = 0; i < elems; i++) {
+ if (!mir_graph_->IsConstantNullRef(info->args[i])) {
+ UnconditionallyMarkGCCard(ref_reg);
+ break;
+ }
+ }
+ }
if (info->result.location != kLocInvalid) {
StoreValue(info->result, GetReturn(kRefReg));
}
@@ -522,15 +551,21 @@
//
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
+ // There are up to two branches to the static field slow path, the "unresolved" when the type
+ // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+ // At least one will be non-null here, otherwise we wouldn't generate the slow path.
StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
- RegStorage r_base) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit),
- storage_index_(storage_index), r_base_(r_base) {
+ RegStorage r_base)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved != nullptr ? unresolved : uninit, cont),
+ second_branch_(unresolved != nullptr ? uninit : nullptr),
+ storage_index_(storage_index), r_base_(r_base) {
}
void Compile() {
- LIR* unresolved_target = GenerateTargetLabel();
- uninit_->target = unresolved_target;
+ LIR* target = GenerateTargetLabel();
+ if (second_branch_ != nullptr) {
+ second_branch_->target = target;
+ }
m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);
// Copy helper's result into r_base, a no-op on all but MIPS.
m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef));
@@ -539,7 +574,9 @@
}
private:
- LIR* const uninit_;
+ // Second branch to the slow path, or null if there's only one branch.
+ LIR* const second_branch_;
+
const int storage_index_;
const RegStorage r_base_;
};
@@ -577,30 +614,38 @@
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!field_info.IsInitialized() &&
- (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
- // Check if r_base is NULL or a not yet initialized class.
-
- // The slow path is invoked if the r_base is NULL or the class pointed
- // to by it is not initialized.
- LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
+ LIR* unresolved_branch = nullptr;
+ if (!field_info.IsClassInDexCache() &&
+ (mir->optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
+ // Check if r_base is NULL.
+ unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
+ }
+ LIR* uninit_branch = nullptr;
+ if (!field_info.IsClassInitialized() &&
+ (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
+ // Check if r_base is not yet initialized class.
RegStorage r_tmp = TargetReg(kArg2, kNotWide);
LockTemp(r_tmp);
- LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
+ uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
mirror::Class::kStatusInitialized, nullptr, nullptr);
+ FreeTemp(r_tmp);
+ }
+ if (unresolved_branch != nullptr || uninit_branch != nullptr) {
+ // The slow path is invoked if the r_base is NULL or the class pointed
+ // to by it is not initialized.
LIR* cont = NewLIR0(kPseudoTargetLabel);
-
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
field_info.StorageIndex(), r_base));
- FreeTemp(r_tmp);
- // Ensure load of status and store of value don't re-order.
- // TODO: Presumably the actual value store is control-dependent on the status load,
- // and will thus not be reordered in any case, since stores are never speculated.
- // Does later code "know" that the class is now initialized? If so, we still
- // need the barrier to guard later static loads.
- GenMemBarrier(kLoadAny);
+ if (uninit_branch != nullptr) {
+ // Ensure load of status and store of value don't re-order.
+ // TODO: Presumably the actual value store is control-dependent on the status load,
+ // and will thus not be reordered in any case, since stores are never speculated.
+ // Does later code "know" that the class is now initialized? If so, we still
+ // need the barrier to guard later static loads.
+ GenMemBarrier(kLoadAny);
+ }
}
FreeTemp(r_method);
}
@@ -684,26 +729,34 @@
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!field_info.IsInitialized() &&
- (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
- // Check if r_base is NULL or a not yet initialized class.
-
- // The slow path is invoked if the r_base is NULL or the class pointed
- // to by it is not initialized.
- LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
+ LIR* unresolved_branch = nullptr;
+ if (!field_info.IsClassInDexCache() &&
+ (mir->optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
+ // Check if r_base is NULL.
+ unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
+ }
+ LIR* uninit_branch = nullptr;
+ if (!field_info.IsClassInitialized() &&
+ (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
+ // Check if r_base is not yet initialized class.
RegStorage r_tmp = TargetReg(kArg2, kNotWide);
LockTemp(r_tmp);
- LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
+ uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
mirror::Class::kStatusInitialized, nullptr, nullptr);
+ FreeTemp(r_tmp);
+ }
+ if (unresolved_branch != nullptr || uninit_branch != nullptr) {
+ // The slow path is invoked if the r_base is NULL or the class pointed
+ // to by it is not initialized.
LIR* cont = NewLIR0(kPseudoTargetLabel);
-
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
field_info.StorageIndex(), r_base));
- FreeTemp(r_tmp);
- // Ensure load of status and load of value don't re-order.
- GenMemBarrier(kLoadAny);
+ if (uninit_branch != nullptr) {
+ // Ensure load of status and load of value don't re-order.
+ GenMemBarrier(kLoadAny);
+ }
}
FreeTemp(r_method);
}
@@ -933,7 +986,6 @@
RegLocation rl_method = LoadCurrMethod();
CheckRegLocation(rl_method);
RegStorage res_reg = AllocTempRef();
- RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
*cu_->dex_file,
type_idx)) {
@@ -943,6 +995,7 @@
RegLocation rl_result = GetReturn(kRefReg);
StoreValue(rl_dest, rl_result);
} else {
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
// We don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
@@ -959,10 +1012,10 @@
// Object to generate the slow path for class resolution.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
- const RegLocation& rl_method, const RegLocation& rl_result) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
- rl_method_(rl_method), rl_result_(rl_result) {
+ SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
+ const RegLocation& rl_method_in, const RegLocation& rl_result_in) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
+ type_idx_(type_idx_in), rl_method_(rl_method_in), rl_result_(rl_result_in) {
}
void Compile() {
@@ -1023,9 +1076,10 @@
// Object to generate the slow path for string resolution.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
- r_method_(r_method), string_idx_(string_idx) {
+ SlowPath(Mir2Lir* m2l, LIR* fromfast_in, LIR* cont_in, RegStorage r_method_in,
+ int32_t string_idx_in) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast_in, cont_in),
+ r_method_(r_method_in), string_idx_(string_idx_in) {
}
void Compile() {
@@ -1203,10 +1257,10 @@
class InitTypeSlowPath : public Mir2Lir::LIRSlowPath {
public:
- InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx,
- RegLocation rl_src)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx),
- rl_src_(rl_src) {
+ InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx_in,
+ RegLocation rl_src_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx_in),
+ rl_src_(rl_src_in) {
}
void Compile() OVERRIDE {
@@ -1348,10 +1402,10 @@
// Slow path to initialize the type. Executed if the type is NULL.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
- const RegStorage class_reg) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
- class_reg_(class_reg) {
+ SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
+ const RegStorage class_reg_in) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
+ type_idx_(type_idx_in), class_reg_(class_reg_in) {
}
void Compile() {
@@ -1501,7 +1555,7 @@
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
OpKind op = kOpBkpt;
bool is_div_rem = false;
@@ -1600,7 +1654,7 @@
if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
- if (check_zero) {
+ if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
GenDivZeroCheck(rl_src2.reg);
}
rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
@@ -1612,7 +1666,7 @@
// calculate using a MUL and subtract.
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
- if (check_zero) {
+ if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
GenDivZeroCheck(rl_src2.reg);
}
rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
@@ -1626,7 +1680,7 @@
LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide));
RegStorage r_tgt = CallHelperSetup(kQuickIdivmod);
LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide));
- if (check_zero) {
+ if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
GenDivZeroCheck(TargetReg(kArg1, kNotWide));
}
// NOTE: callout here is not a safepoint.
@@ -1761,6 +1815,34 @@
return true;
}
+// Returns true if it generates instructions.
+bool Mir2Lir::HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ if (!rl_src2.is_const ||
+ ((cu_->instruction_set != kThumb2) && (cu_->instruction_set != kArm64))) {
+ return false;
+ }
+
+ if (!rl_src2.wide) {
+ int32_t divisor = mir_graph_->ConstantValue(rl_src2);
+ if (CanDivideByReciprocalMultiplyFloat(divisor)) {
+ // Generate multiply by reciprocal instead of div.
+ float recip = 1.0f/bit_cast<int32_t, float>(divisor);
+ GenMultiplyByConstantFloat(rl_dest, rl_src1, bit_cast<float, int32_t>(recip));
+ return true;
+ }
+ } else {
+ int64_t divisor = mir_graph_->ConstantValueWide(rl_src2);
+ if (CanDivideByReciprocalMultiplyDouble(divisor)) {
+ // Generate multiply by reciprocal instead of div.
+ double recip = 1.0/bit_cast<double, int64_t>(divisor);
+ GenMultiplyByConstantDouble(rl_dest, rl_src1, bit_cast<double, int64_t>(recip));
+ return true;
+ }
+ }
+ return false;
+}
+
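HandleEasyFloatingPointDiv above rewrites x / c as x * (1/c) only when the reciprocal is exact, which in practice means c is a power of two whose reciprocal stays in range. A hedged sketch of that precondition, assuming CanDivideByReciprocalMultiplyFloat checks something along these lines:

    #include <cmath>

    bool ReciprocalIsExact(float divisor) {
      if (divisor == 0.0f || !std::isfinite(divisor)) {
        return false;
      }
      int exp = 0;
      float mantissa = std::frexp(std::fabs(divisor), &exp);
      // Powers of two have mantissa exactly 0.5; their reciprocal is exact as long
      // as it neither overflows nor drops into the subnormal range.
      return mantissa == 0.5f && std::isnormal(1.0f / divisor);
    }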
void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
int lit) {
RegLocation rl_result;
@@ -1914,7 +1996,7 @@
}
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
RegLocation rl_result;
OpKind first_op = kOpBkpt;
OpKind second_op = kOpBkpt;
@@ -1999,7 +2081,9 @@
RegStorage r_tmp2 = TargetReg(kArg2, kWide);
LoadValueDirectWideFixed(rl_src2, r_tmp2);
RegStorage r_tgt = CallHelperSetup(target);
- GenDivZeroCheckWide(r_tmp2);
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ GenDivZeroCheckWide(r_tmp2);
+ }
LoadValueDirectWideFixed(rl_src1, r_tmp1);
// NOTE: callout here is not a safepoint
CallHelper(r_tgt, target, false /* not safepoint */);
@@ -2108,12 +2192,14 @@
/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
@@ -2130,25 +2216,25 @@
const uint16_t entries = table[1];
// Chained cmp-and-branch.
const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
- int32_t current_key = as_int32[0];
+ int32_t starting_key = as_int32[0];
const int32_t* targets = &as_int32[1];
rl_src = LoadValue(rl_src, kCoreReg);
int i = 0;
- for (; i < entries; i++, current_key++) {
- if (!InexpensiveConstantInt(current_key, Instruction::Code::IF_EQ)) {
+ for (; i < entries; i++) {
+ if (!InexpensiveConstantInt(starting_key + i, Instruction::Code::IF_EQ)) {
// Switch to using a temp and add.
break;
}
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block->id]);
}
if (i < entries) {
// The rest do not seem to be inexpensive. Try to allocate a temp and use add.
RegStorage key_temp = AllocTypedTemp(false, kCoreReg, false);
if (key_temp.Valid()) {
- LoadConstantNoClobber(key_temp, current_key);
- for (; i < entries - 1; i++, current_key++) {
+ LoadConstantNoClobber(key_temp, starting_key + i);
+ for (; i < entries - 1; i++) {
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
@@ -2159,10 +2245,10 @@
OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
} else {
// No free temp, just finish the old loop.
- for (; i < entries; i++, current_key++) {
+ for (; i < entries; i++) {
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block->id]);
}
}
}
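The HandleEasyFloatingPointDiv path added above relies on the divisor's reciprocal being exactly representable, which holds only for power-of-two floating-point constants. A minimal standalone sketch of that property check (assumed shape; the real predicate is CanDivideByReciprocalMultiplyFloat, whose body is not shown in this diff):

#include <cstdint>

// A normal IEEE-754 float is an exact power of two when its significand bits are all zero
// and its exponent field is neither all-zeros (zero/subnormal) nor all-ones (inf/NaN).
bool IsPowerOfTwoFloatBits(int32_t bits) {
  const uint32_t exponent = (static_cast<uint32_t>(bits) >> 23) & 0xffu;
  const uint32_t significand = static_cast<uint32_t>(bits) & 0x007fffffu;
  return exponent != 0u && exponent != 0xffu && significand == 0u;
}

For such a divisor d = ±2^k, recip = 1.0f / d is exact, so multiplying by recip produces the same result as dividing by d, without the cost of a hardware divide.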
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 2bef7c5..a7900ae 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -44,8 +44,8 @@
void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
public:
- IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
- : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
+ IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
+ : LIRSlowPath(m2l, info_in->offset, branch_in, resume_in), info_(info_in) {
}
void Compile() {
@@ -248,13 +248,13 @@
if (cu_->instruction_set == kMips) {
LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
} else {
- LoadValueDirectFixed(arg1, TargetReg(kArg1, kNotWide));
+ LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
}
} else {
if (cu_->instruction_set == kMips) {
LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
} else {
- LoadValueDirectWideFixed(arg1, TargetReg(kArg1, kWide));
+ LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
}
}
} else {
@@ -365,6 +365,7 @@
* ArgLocs is an array of location records describing the incoming arguments
* with one location record per word of argument.
*/
+// TODO: Support 64-bit argument registers.
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
/*
* Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
@@ -472,13 +473,13 @@
cg->MarkPossibleNullPointerException(info->opt_flags);
}
-static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
- const RegStorage* alt_from,
+static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
- cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
+ cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
cg->TargetPtrReg(kInvokeTgt));
return true;
}
@@ -491,9 +492,10 @@
*/
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info);
DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
cu->instruction_set != kArm64);
@@ -546,7 +548,7 @@
break;
case 3: // Grab the code from the method*
if (direct_code == 0) {
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
break; // kInvokeTgt := arg0_ref->entrypoint
}
} else {
@@ -570,8 +572,9 @@
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
- InvokeType unused3) {
+ uint32_t method_idx, uintptr_t, uintptr_t,
+ InvokeType) {
+ UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -594,7 +597,7 @@
break;
}
case 3:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -613,8 +616,7 @@
*/
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t direct_method, InvokeType unused2) {
+ uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
switch (state) {
@@ -640,7 +642,7 @@
break;
}
case 4:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -654,9 +656,9 @@
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
QuickEntrypointEnum trampoline, int state,
const MethodReference& target_method, uint32_t method_idx) {
+ UNUSED(info, method_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-
/*
* This handles the case in which the base method is not fully
* resolved at compile time, we bail to a runtime helper.
@@ -683,32 +685,28 @@
static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -716,8 +714,7 @@
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -794,13 +791,13 @@
if (rl_arg.reg.IsPair()) {
reg = rl_arg.reg.GetHigh();
} else {
- RegisterInfo* info = GetRegInfo(rl_arg.reg);
- info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
- if (info == nullptr) {
+ RegisterInfo* reg_info = GetRegInfo(rl_arg.reg);
+ reg_info = reg_info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
+ if (reg_info == nullptr) {
// NOTE: For hard float convention we won't split arguments across reg/mem.
UNIMPLEMENTED(FATAL) << "Needs hard float api.";
}
- reg = info->GetReg();
+ reg = reg_info->GetReg();
}
} else {
// kArg2 & rArg3 can safely be used here
@@ -1399,28 +1396,34 @@
}
bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, size);
return false;
}
bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, is_min, is_double);
return false;
}
bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ UNUSED(info, is_double);
return false;
}
@@ -1447,6 +1450,7 @@
}
bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ UNUSED(info);
return false;
}
@@ -1689,7 +1693,6 @@
const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
- BeginInvoke(info);
InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
@@ -1733,7 +1736,6 @@
method_info.DirectCode(), method_info.DirectMethod(), original_type);
}
LIR* call_insn = GenCallInsn(method_info);
- EndInvoke(info);
MarkSafepointPC(call_insn);
ClobberCallerSave();
@@ -1754,6 +1756,7 @@
}
LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
+ UNUSED(method_info);
DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
cu_->instruction_set != kArm64);
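Many of the hunks above silence -Wunused-parameter either by dropping the parameter name entirely or by mentioning the parameter in UNUSED(...). A self-contained sketch of the variadic idiom such a helper can be built on (illustrative only; ART's actual UNUSED definition lives in base/macros.h and may differ):

template <typename... Args>
inline void MarkUnused(const Args&...) {}

int Example(int state, const char* target, void* unused_context) {
  MarkUnused(unused_context);  // "Uses" the parameter, so the warning goes away; the call optimizes out.
  return state + (target != nullptr ? 1 : 0);
}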
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 39b40a0..d314601 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -149,8 +149,9 @@
// Wrong register class, realloc, copy and transfer ownership.
RegStorage new_reg = AllocTypedTemp(rl_src.fp, op_kind);
OpRegCopy(new_reg, rl_src.reg);
- // Clobber the old reg.
+      // Clobber the old reg and free it.
Clobber(rl_src.reg);
+ FreeTemp(rl_src.reg);
// ...and mark the new one live.
rl_src.reg = new_reg;
MarkLive(rl_src);
@@ -232,8 +233,9 @@
// Wrong register class, realloc, copy and transfer ownership.
RegStorage new_regs = AllocTypedTempWide(rl_src.fp, op_kind);
OpRegCopyWide(new_regs, rl_src.reg);
- // Clobber the old regs.
+      // Clobber the old regs and free them.
Clobber(rl_src.reg);
+ FreeTemp(rl_src.reg);
// ...and mark the new ones live.
rl_src.reg = new_regs;
MarkLive(rl_src);
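The extra FreeTemp(rl_src.reg) calls matter because, roughly, Clobber() only invalidates the tracked contents of a register; it does not return the temp to the pool. A toy model (not the ART register pool) of the leak the fix prevents:

#include <bitset>
#include <cassert>

struct TempPool {
  std::bitset<8> in_use;
  int Alloc() {
    for (int i = 0; i < 8; ++i) {
      if (!in_use[i]) { in_use[i] = true; return i; }
    }
    return -1;  // Exhausted: a real allocator would have to spill or fail.
  }
  void Free(int r) { in_use[r] = false; }
};

int main() {
  TempPool pool;
  int old_reg = pool.Alloc();   // Temp holding the value in the wrong register class.
  int new_reg = pool.Alloc();   // Copy into a freshly allocated, correct-class temp...
  pool.Free(old_reg);           // ...and release the old one, or the slot stays in use forever.
  assert(old_reg != -1 && new_reg != -1);
  return 0;
}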
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 01d1a1e..0d1d9bf 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"
@@ -146,12 +147,10 @@
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
"div", "!0r,!1r", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsExt, 0x7c000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
"ext", "!0r,!1r,!2d,!3D", 4),
-#endif
ENCODING_MAP(kMipsJal, 0x0c000000,
kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -240,7 +239,6 @@
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
"sb", "!0r,!1d(!2r)", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsSeb, 0x7c000420,
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -249,7 +247,6 @@
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"seh", "!0r,!1r", 4),
-#endif
ENCODING_MAP(kMipsSh, 0xA4000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
@@ -700,12 +697,12 @@
// TUNING: replace with proper delay slot handling
if (encoder->size == 8) {
DCHECK(!IsPseudoLirOp(lir->opcode));
- const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
- uint32_t bits = encoder->skeleton;
- code_buffer_.push_back(bits & 0xff);
- code_buffer_.push_back((bits >> 8) & 0xff);
- code_buffer_.push_back((bits >> 16) & 0xff);
- code_buffer_.push_back((bits >> 24) & 0xff);
+ const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop];
+ uint32_t bits2 = encoder2->skeleton;
+ code_buffer_.push_back(bits2 & 0xff);
+ code_buffer_.push_back((bits2 >> 8) & 0xff);
+ code_buffer_.push_back((bits2 >> 16) & 0xff);
+ code_buffer_.push_back((bits2 >> 24) & 0xff);
}
}
return res;
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8b5bc45..3bb81bf 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the Mips ISA */
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
@@ -24,9 +25,9 @@
namespace art {
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
- const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
// TODO
+ UNUSED(bb, mir, special);
return false;
}
@@ -221,19 +222,13 @@
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void MipsMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void MipsMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
// NOTE: native pointer.
LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
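The rewritten routine above is the unconditional tail of the card-marking write barrier: shift the destination address down by the card-size shift and dirty the corresponding card-table byte. The value-null check that used to live here moves to the shared Mir2Lir::MarkGCCard caller declared later in this patch in mir_to_lir.h. A stripped-down sketch of the arithmetic (constants and names are placeholders, not ART's actual card geometry):

#include <cstdint>

constexpr unsigned kCardShiftSketch = 7;  // Placeholder: one card byte per 128-byte heap span.

inline void MarkCardSketch(uint8_t* card_table_base, uintptr_t stored_into_address) {
  // Dirty the byte covering the object that was just written into; any non-zero value
  // works for this sketch.
  card_table_base[stored_into_address >> kCardShiftSketch] = 0x70;
}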
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index bd709f3..e08846c 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -31,6 +31,10 @@
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -45,7 +49,9 @@
OpSize size) OVERRIDE;
LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
// Required for target - register utilities.
RegStorage Solo64ToPair64(RegStorage reg);
@@ -86,13 +92,13 @@
// Required for target - Dalvik-level generators.
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
+ RegLocation rl_src1, RegLocation rl_src2, int flags);
void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale);
void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_shift);
+ RegLocation rl_shift, int flags);
void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -108,7 +114,7 @@
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) OVERRIDE;
+ RegLocation rl_src2, int flags) OVERRIDE;
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -121,7 +127,7 @@
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -172,10 +178,10 @@
bool InexpensiveConstantLong(int64_t value);
bool InexpensiveConstantDouble(int64_t value);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
@@ -190,8 +196,8 @@
void ConvertShortToLongBranch(LIR* lir);
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+ RegLocation rl_src2, bool is_div, int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
};
} // namespace art
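Two of the changes above add const to WideGPRsAreAliases and WideFPRsAreAliases while keeping OVERRIDE; the point of pairing the two is that a const-qualifier mismatch silently declares a brand-new virtual instead of overriding the base method. A tiny illustration using the standard keyword (assumption: the OVERRIDE macro maps to C++11 override):

struct Codegen {
  virtual ~Codegen() {}
  virtual bool WideGPRsAreAliases() const { return true; }
};

struct MipsCodegen : Codegen {
  // Without 'const' this would declare a new, unrelated virtual; 'override' turns that
  // silent mistake into a compile error.
  bool WideGPRsAreAliases() const override { return false; }
};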
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 3a4128a..495d85e 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mips_lir.h"
@@ -113,6 +114,20 @@
StoreValueWide(rl_dest, rl_result);
}
+void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ // TODO: need mips implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips";
+}
+
+void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ // TODO: need mips implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips";
+}
+
void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src) {
int op = kMipsNop;
@@ -207,8 +222,8 @@
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
- bool gt_bias, bool is_double) {
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+ UNUSED(bb, mir, gt_bias, is_double);
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
@@ -230,7 +245,8 @@
}
bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
- // TODO: need Mips implementation
+ // TODO: need Mips implementation.
+ UNUSED(info, is_min, is_long);
return false;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 30aa611..fb47238 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the Mips ISA */
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -217,7 +218,8 @@
void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
@@ -228,10 +230,12 @@
}
void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
@@ -262,34 +266,39 @@
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero) {
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
- return rl_dest;
+ UNREACHABLE();
}
bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info, is_long, is_object);
return false;
}
bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info);
return false;
}
@@ -325,23 +334,27 @@
}
LIR* MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ UNUSED(reg, target);
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -373,27 +386,31 @@
bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
- return false;
+ UNREACHABLE();
}
bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
- return false;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
-
void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -416,6 +433,7 @@
void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -437,7 +455,7 @@
}
void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
+ RegLocation rl_src2, int flags) {
switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
@@ -456,7 +474,7 @@
}
// Fallback for all other ops.
- Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -628,15 +646,17 @@
}
void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift) {
+ RegLocation rl_src1, RegLocation rl_shift, int flags) {
+ UNUSED(flags);
// Default implementation is just to ignore the constant case.
GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
}
void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ int flags) {
// Default - bail to non-const handler.
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
} // namespace art
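A recurring pattern in this file is replacing a dead return after LOG(FATAL) with UNREACHABLE(). The compiler only accepts dropping the return if it can prove control never reaches the end of the function, which is exactly what an unreachable marker asserts. A minimal sketch, with a plain function standing in for LOG(FATAL) and the assumption that UNREACHABLE expands to a builtin along these lines:

#include <cstdio>
#include <cstdlib>

// Stand-in for LOG(FATAL): it aborts at runtime but is not annotated [[noreturn]], so on
// its own the compiler would still require a return value from the caller below.
void FatalLogSketch(const char* msg) {
  std::fprintf(stderr, "FATAL: %s\n", msg);
  std::abort();
}

int MustNotBeCalled() {
  FatalLogSketch("Unexpected use on this target");
  __builtin_unreachable();  // GCC/Clang builtin; allows omitting a bogus 'return 0;'.
}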
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 495eb16..3df8f2e 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -142,7 +142,7 @@
// This bit determines how the CPU access FP registers.
#define FR_BIT 0
-enum MipsNativeRegisterPool {
+enum MipsNativeRegisterPool {  // private marker to keep generate-operator-out.py from processing.
rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -214,44 +214,43 @@
rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
#endif
-#if (FR_BIT == 0)
- rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
- rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
- rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
- rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
- rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
- rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
- rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
- rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ // Double precision registers where the FPU is in 32-bit mode.
+ rD0_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD2_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD3_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD4_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD5_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD6_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD7_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
- rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
- rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
- rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
- rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
- rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
- rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
- rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
- rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+ rD8_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+ rD9_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+ rD10_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+ rD11_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+ rD12_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+ rD13_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+ rD14_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+ rD15_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
#endif
-#else
- rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
- rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
- rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
- rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
- rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
- rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
- rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
- rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+ // Double precision registers where the FPU is in 64-bit mode.
+ rD0_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
+ rD2_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD3_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
+ rD4_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD5_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
+ rD6_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD7_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
- rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
- rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
- rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
- rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
- rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
- rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
- rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
- rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
-#endif
+ rD8_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD9_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
+ rD10_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD11_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+ rD12_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD13_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+ rD14_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ rD15_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
#endif
};
@@ -309,14 +308,23 @@
constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
-constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
-constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
-constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
-constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
-constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
-constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
-constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
-constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD0_fr0(RegStorage::kValid | rD0_fr0);
+constexpr RegStorage rs_rD1_fr0(RegStorage::kValid | rD1_fr0);
+constexpr RegStorage rs_rD2_fr0(RegStorage::kValid | rD2_fr0);
+constexpr RegStorage rs_rD3_fr0(RegStorage::kValid | rD3_fr0);
+constexpr RegStorage rs_rD4_fr0(RegStorage::kValid | rD4_fr0);
+constexpr RegStorage rs_rD5_fr0(RegStorage::kValid | rD5_fr0);
+constexpr RegStorage rs_rD6_fr0(RegStorage::kValid | rD6_fr0);
+constexpr RegStorage rs_rD7_fr0(RegStorage::kValid | rD7_fr0);
+
+constexpr RegStorage rs_rD0_fr1(RegStorage::kValid | rD0_fr1);
+constexpr RegStorage rs_rD1_fr1(RegStorage::kValid | rD1_fr1);
+constexpr RegStorage rs_rD2_fr1(RegStorage::kValid | rD2_fr1);
+constexpr RegStorage rs_rD3_fr1(RegStorage::kValid | rD3_fr1);
+constexpr RegStorage rs_rD4_fr1(RegStorage::kValid | rD4_fr1);
+constexpr RegStorage rs_rD5_fr1(RegStorage::kValid | rD5_fr1);
+constexpr RegStorage rs_rD6_fr1(RegStorage::kValid | rD6_fr1);
+constexpr RegStorage rs_rD7_fr1(RegStorage::kValid | rD7_fr1);
// TODO: reduce/eliminate use of these.
#define rMIPS_SUSPEND rS0
@@ -408,9 +416,7 @@
kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
kMipsDiv, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
-#if __mips_isa_rev >= 2
kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
-#endif
kMipsJal, // jal t [000011] t[25..0].
kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
@@ -433,10 +439,8 @@
kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
kMipsPref, // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
-#if __mips_isa_rev >= 2
kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
-#endif
kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
@@ -481,15 +485,17 @@
kMipsUndefined, // undefined [011001xxxxxxxxxxxxxxxx].
kMipsLast
};
+std::ostream& operator<<(std::ostream& os, const MipsOpCode& rhs);
// Instruction assembly field_loc kind.
enum MipsEncodingKind {
kFmtUnused,
- kFmtBitBlt, /* Bit string using end/start */
- kFmtDfp, /* Double FP reg */
- kFmtSfp, /* Single FP reg */
- kFmtBlt5_2, /* Same 5-bit field to 2 locations */
+ kFmtBitBlt, // Bit string using end/start.
+ kFmtDfp, // Double FP reg.
+  kFmtSfp,     // Single FP reg.
+ kFmtBlt5_2, // Same 5-bit field to 2 locations.
};
+std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
// Struct used to define the snippet positions for each MIPS opcode.
struct MipsEncodingMap {
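The rD*_fr0 / rD*_fr1 split introduced above encodes the two MIPS FPU modes: with FR=0 (32-bit FPU) a double occupies an even/odd pair of single-precision registers, so double n carries register number 2n; with FR=1 (64-bit FPU) each double is its own register n. The enum values follow directly from that rule:

// Illustrative helper, not ART code: the same arithmetic the enum values above encode.
inline int DoubleRegNum(int double_index, bool fpu_is_32bit) {
  return fpu_is_32bit ? double_index * 2   // FR=0: rD1_fr0 -> 2, rD2_fr0 -> 4, ...
                      : double_index;      // FR=1: rD1_fr1 -> 1, rD2_fr1 -> 2, ...
}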
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index d3719ab..185112d 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -20,6 +20,7 @@
#include <string>
+#include "arch/mips/instruction_set_features_mips.h"
#include "backend_mips.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
@@ -34,8 +35,12 @@
static constexpr RegStorage sp_regs_arr[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_regs_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7};
+static constexpr RegStorage dp_fr0_regs_arr[] =
+ {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
+ rs_rD7_fr0};
+static constexpr RegStorage dp_fr1_regs_arr[] =
+ {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
+ rs_rD7_fr1};
static constexpr RegStorage reserved_regs_arr[] =
{rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
static constexpr RegStorage core_temps_arr[] =
@@ -44,17 +49,23 @@
static constexpr RegStorage sp_temps_arr[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_temps_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7};
+static constexpr RegStorage dp_fr0_temps_arr[] =
+ {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
+ rs_rD7_fr0};
+static constexpr RegStorage dp_fr1_temps_arr[] =
+ {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
+ rs_rD7_fr1};
static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr0_regs(dp_fr0_regs_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr1_regs(dp_fr1_regs_arr);
static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr0_temps(dp_fr0_temps_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr1_temps(dp_fr1_temps_arr);
RegLocation MipsMir2Lir::LocCReturn() {
return mips_loc_c_return;
@@ -129,14 +140,17 @@
* Decode the register id.
*/
ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
- return reg.IsDouble()
- /* Each double register is equal to a pair of single-precision FP registers */
-#if (FR_BIT == 0)
- ? ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0)
-#else
- ? ResourceMask::TwoBits(reg.GetRegNum() * 2 + kMipsFPReg0)
-#endif
- : ResourceMask::Bit(reg.IsSingle() ? reg.GetRegNum() + kMipsFPReg0 : reg.GetRegNum());
+ if (reg.IsDouble()) {
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
+ return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
+ } else {
+ return ResourceMask::TwoBits(reg.GetRegNum() * 2 + kMipsFPReg0);
+ }
+ } else if (reg.IsSingle()) {
+ return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+ } else {
+ return ResourceMask::Bit(reg.GetRegNum());
+ }
}
ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
@@ -382,14 +396,25 @@
Clobber(rs_rF13);
Clobber(rs_rF14);
Clobber(rs_rF15);
- Clobber(rs_rD0);
- Clobber(rs_rD1);
- Clobber(rs_rD2);
- Clobber(rs_rD3);
- Clobber(rs_rD4);
- Clobber(rs_rD5);
- Clobber(rs_rD6);
- Clobber(rs_rD7);
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
+ Clobber(rs_rD0_fr0);
+ Clobber(rs_rD1_fr0);
+ Clobber(rs_rD2_fr0);
+ Clobber(rs_rD3_fr0);
+ Clobber(rs_rD4_fr0);
+ Clobber(rs_rD5_fr0);
+ Clobber(rs_rD6_fr0);
+ Clobber(rs_rD7_fr0);
+ } else {
+ Clobber(rs_rD0_fr1);
+ Clobber(rs_rD1_fr1);
+ Clobber(rs_rD2_fr1);
+ Clobber(rs_rD3_fr1);
+ Clobber(rs_rD4_fr1);
+ Clobber(rs_rD5_fr1);
+ Clobber(rs_rD6_fr1);
+ Clobber(rs_rD7_fr1);
+ }
}
RegLocation MipsMir2Lir::GetReturnWideAlt() {
@@ -420,32 +445,37 @@
FreeTemp(rs_rMIPS_ARG3);
}
-bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP != 0
- NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
- return true;
-#else
- return false;
-#endif
+bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
+ if (cu_->GetInstructionSetFeatures()->IsSmp()) {
+ NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
+ return true;
+ } else {
+ return false;
+ }
}
void MipsMir2Lir::CompilerInitializeRegAlloc() {
+ const bool fpu_is_32bit =
+ cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint();
reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
- sp_regs, dp_regs,
+ sp_regs,
+ fpu_is_32bit ? dp_fr0_regs : dp_fr1_regs,
reserved_regs, empty_pool /* reserved64 */,
core_temps, empty_pool /* core64_temps */,
- sp_temps, dp_temps));
+ sp_temps,
+ fpu_is_32bit ? dp_fr0_temps : dp_fr1_temps));
// Target-specific adjustments.
// Alias single precision floats to appropriate half of overlapping double.
for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
-#if (FR_BIT == 0)
- int dp_reg_num = sp_reg_num & ~1;
-#else
- int dp_reg_num = sp_reg_num >> 1;
-#endif
+ int dp_reg_num;
+ if (fpu_is_32bit) {
+ dp_reg_num = sp_reg_num & ~1;
+ } else {
+ dp_reg_num = sp_reg_num >> 1;
+ }
RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
// Double precision register's master storage should refer to itself.
@@ -464,11 +494,11 @@
// TODO: adjust when we roll to hard float calling convention.
reg_pool_->next_core_reg_ = 2;
reg_pool_->next_sp_reg_ = 2;
-#if (FR_BIT == 0)
- reg_pool_->next_dp_reg_ = 2;
-#else
- reg_pool_->next_dp_reg_ = 1;
-#endif
+ if (fpu_is_32bit) {
+ reg_pool_->next_dp_reg_ = 2;
+ } else {
+ reg_pool_->next_dp_reg_ = 1;
+ }
}
/*
@@ -574,11 +604,10 @@
MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena) {
for (int i = 0; i < kMipsLast; i++) {
- if (MipsMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
}
}
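GetRegMaskCommon above turns that register numbering into def/use resource bits: a double claims two adjacent single-precision bits, and which two depends on the FPU mode, exactly as in mips_lir.h. A toy version of the same computation, with a plain uint64_t standing in for ResourceMask and a placeholder FP bias:

#include <cstdint>

inline uint64_t OneBitSketch(int bit) { return UINT64_C(1) << bit; }
inline uint64_t TwoBitsSketch(int bit) { return UINT64_C(3) << bit; }

// Core registers (omitted here) would just use OneBitSketch(reg_num) with no bias.
inline uint64_t FpRegMaskSketch(int reg_num, bool is_double, bool fpu_is_32bit,
                                int fp_reg0_bit) {
  if (is_double) {
    // FR=0: claim the even/odd single pair; FR=1: doubles are densely numbered, so scale by 2.
    return fpu_is_32bit ? TwoBitsSketch((reg_num & ~1) + fp_reg0_bit)
                        : TwoBitsSketch(reg_num * 2 + fp_reg0_bit);
  }
  return OneBitSketch(reg_num + fp_reg0_bit);
}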
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 7178ede..18f1cde 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -15,6 +15,8 @@
*/
#include "codegen_mips.h"
+
+#include "arch/mips/instruction_set_features_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mips_lir.h"
@@ -56,14 +58,17 @@
}
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
@@ -301,44 +306,49 @@
case kOpXor:
return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
case kOp2Byte:
-#if __mips_isa_rev >= 2
- res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
-#else
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
-#endif
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+ }
return res;
case kOp2Short:
-#if __mips_isa_rev >= 2
- res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
-#else
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
-#endif
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+ }
return res;
case kOp2Char:
return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
default:
LOG(FATAL) << "Bad case in OpRegReg";
- break;
+ UNREACHABLE();
}
return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
@@ -538,7 +548,7 @@
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rMIPS_SP);
+ DCHECK_EQ(r_base, rs_rMIPS_SP);
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, pair /* is64bit */);
if (pair) {
@@ -640,7 +650,7 @@
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rMIPS_SP);
+ DCHECK_EQ(r_base, rs_rMIPS_SP);
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, pair /* is64bit */);
if (pair) {
@@ -681,16 +691,19 @@
}
LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+ UNUSED(cc, target);
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
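OpRegReg above now chooses at runtime between the single-instruction seb/seh forms (MIPS32r2 and later) and a two-instruction shift pair. The pair works because shifting the byte or halfword to the top of the word and arithmetic-shifting it back replicates the sign bit, which is all sign extension is. A small self-checking example of the byte case, standing in for the emitted lsl/asr sequence (relies on the usual two's-complement, arithmetic-shift behavior):

#include <cassert>
#include <cstdint>

int32_t SignExtendByteSketch(uint32_t word) {
  // Mirror of the kOpLsl 24 / kOpAsr 24 fallback: bit 7 of the low byte becomes the sign.
  return static_cast<int32_t>(word << 24) >> 24;
}

int main() {
  assert(SignExtendByteSketch(0x7fu) == 127);
  assert(SignExtendByteSketch(0x80u) == -128);
  assert(SignExtendByteSketch(0xabcd01ffu) == -1);  // Upper bits are discarded.
  return 0;
}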
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 4399981..70ef991 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -201,6 +201,16 @@
RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
RegStorage reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
+ if (cu_->instruction_set == kX86) {
+ // Can't handle double split between reg & memory. Flush reg half to memory.
+ if (rl_dest.reg.IsDouble() && (reg_arg_low.Valid() != reg_arg_high.Valid())) {
+ DCHECK(reg_arg_low.Valid());
+ DCHECK(!reg_arg_high.Valid());
+ Store32Disp(TargetPtrReg(kSp), offset, reg_arg_low);
+ reg_arg_low = RegStorage::InvalidReg();
+ }
+ }
+
if (reg_arg_low.Valid() && reg_arg_high.Valid()) {
OpRegCopyWide(rl_dest.reg, RegStorage::MakeRegPair(reg_arg_low, reg_arg_high));
} else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
@@ -417,10 +427,10 @@
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
RegLocation rl_result = mir_graph_->GetBadLoc();
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int opt_flags = mir->optimization_flags;
- uint32_t vB = mir->dalvikInsn.vB;
- uint32_t vC = mir->dalvikInsn.vC;
+ const Instruction::Code opcode = mir->dalvikInsn.opcode;
+ const int opt_flags = mir->optimization_flags;
+ const uint32_t vB = mir->dalvikInsn.vB;
+ const uint32_t vC = mir->dalvikInsn.vC;
DCHECK(CheckCorePoolSanity()) << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " @ 0x:"
<< std::hex << current_dalvik_offset_;
@@ -572,7 +582,7 @@
GenThrow(rl_src[0]);
break;
- case Instruction::ARRAY_LENGTH:
+ case Instruction::ARRAY_LENGTH: {
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kRefReg);
@@ -582,7 +592,7 @@
MarkPossibleNullPointerException(opt_flags);
StoreValue(rl_dest, rl_result);
break;
-
+ }
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO:
GenConstString(vB, rl_dest);
@@ -647,7 +657,6 @@
case Instruction::IF_GT:
case Instruction::IF_LE: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const && rl_src[1].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
@@ -664,11 +673,10 @@
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
+ GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
}
break;
- }
-
+ }
case Instruction::IF_EQZ:
case Instruction::IF_NEZ:
case Instruction::IF_LTZ:
@@ -676,7 +684,6 @@
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
@@ -692,10 +699,10 @@
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
+ GenCompareZeroAndBranch(opcode, rl_src[0], taken);
}
break;
- }
+ }
case Instruction::AGET_WIDE:
GenArrayGet(opt_flags, k64, rl_src[0], rl_src[1], rl_dest, 3);
@@ -928,12 +935,12 @@
case Instruction::NEG_INT:
case Instruction::NOT_INT:
- GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0]);
+ GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0], opt_flags);
break;
case Instruction::NEG_LONG:
case Instruction::NOT_LONG:
- GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0]);
+ GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0], opt_flags);
break;
case Instruction::NEG_FLOAT:
@@ -993,7 +1000,7 @@
GenArithOpIntLit(opcode, rl_dest, rl_src[0],
mir_graph_->ConstantValue(rl_src[1].orig_sreg));
} else {
- GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
+ GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
}
break;
@@ -1013,7 +1020,7 @@
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
} else {
- GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
+ GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
}
break;
@@ -1028,7 +1035,7 @@
case Instruction::OR_LONG_2ADDR:
case Instruction::XOR_LONG_2ADDR:
if (rl_src[0].is_const || rl_src[1].is_const) {
- GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
+ GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
break;
}
FALLTHROUGH_INTENDED;
@@ -1038,7 +1045,7 @@
case Instruction::MUL_LONG_2ADDR:
case Instruction::DIV_LONG_2ADDR:
case Instruction::REM_LONG_2ADDR:
- GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
+ GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
break;
case Instruction::SHL_LONG:
@@ -1048,34 +1055,42 @@
case Instruction::SHR_LONG_2ADDR:
case Instruction::USHR_LONG_2ADDR:
if (rl_src[1].is_const) {
- GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
+ GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
} else {
GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
}
break;
+ case Instruction::DIV_FLOAT:
+ case Instruction::DIV_FLOAT_2ADDR:
+ if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_FLOAT:
case Instruction::SUB_FLOAT:
case Instruction::MUL_FLOAT:
- case Instruction::DIV_FLOAT:
case Instruction::REM_FLOAT:
case Instruction::ADD_FLOAT_2ADDR:
case Instruction::SUB_FLOAT_2ADDR:
case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT_2ADDR:
case Instruction::REM_FLOAT_2ADDR:
GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
break;
+ case Instruction::DIV_DOUBLE:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_DOUBLE:
case Instruction::SUB_DOUBLE:
case Instruction::MUL_DOUBLE:
- case Instruction::DIV_DOUBLE:
case Instruction::REM_DOUBLE:
case Instruction::ADD_DOUBLE_2ADDR:
case Instruction::SUB_DOUBLE_2ADDR:
case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE_2ADDR:
GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
break;
@@ -1233,7 +1248,7 @@
if (opcode == kMirOpCheck) {
// Combine check and work halves of throwing instruction.
MIR* work_half = mir->meta.throw_insn;
- mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
+ mir->dalvikInsn = work_half->dalvikInsn;
mir->optimization_flags = work_half->optimization_flags;
mir->meta = work_half->meta; // Whatever the work_half had, we need to copy it.
opcode = work_half->dalvikInsn.opcode;
@@ -1377,8 +1392,9 @@
}
size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
- UNIMPLEMENTED(FATAL) << "Unsuppored GetInstructionOffset()";
- return 0;
+ UNUSED(lir);
+ UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+ UNREACHABLE();
}
} // namespace art
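A large share of this file's churn is threading the MIR optimization flags (opt_flags) into the integer and long arithmetic generators. The payoff is visible back in gen_common.cc: when the flags carry MIR_IGNORE_DIV_ZERO_CHECK, the runtime zero check before a long division is skipped. A condensed sketch of the consumer side (simplified signature; the flag's actual bit value is defined elsewhere in the MIR headers, not in this diff):

#include <cstdint>

// Placeholder flag bit for illustration only.
constexpr uint32_t kIgnoreDivZeroCheckSketch = 1u << 1;

// The generators emit the explicit zero test (and its throw path) only when no earlier
// pass has proven the divisor non-zero.
bool NeedsDivZeroCheck(uint32_t opt_flags) {
  return (opt_flags & kIgnoreDivZeroCheckSketch) == 0;
}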
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index ea93bbe..886b238 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
-#include "invoke_type.h"
+#include "arch/instruction_set.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
@@ -26,13 +26,14 @@
#include "dex/backend.h"
#include "dex/quick/resource_mask.h"
#include "driver/compiler_driver.h"
-#include "instruction_set.h"
-#include "leb128.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "invoke_type.h"
+#include "leb128.h"
#include "safe_map.h"
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/arena_containers.h"
+#include "utils/arena_object.h"
#include "utils/stack_checks.h"
namespace art {
@@ -129,11 +130,11 @@
#define INVALID_SREG (-1)
#endif
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
-struct MIR;
+class MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
@@ -318,13 +319,10 @@
* Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
* mechanism 2 for aliased float registers and x86 vector registers.
*/
- class RegisterInfo {
+ class RegisterInfo : public ArenaObject<kArenaAllocRegAlloc> {
public:
RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
~RegisterInfo() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocRegAlloc);
- }
static const uint32_t k32SoloStorageMask = 0x00000001;
static const uint32_t kLowSingleStorageMask = 0x00000001;
@@ -420,7 +418,7 @@
RegisterInfo* alias_chain_; // Chain of aliased registers.
};
- class RegisterPool {
+ class RegisterPool : public DeletableArenaObject<kArenaAllocRegAlloc> {
public:
RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
const ArrayRef<const RegStorage>& core_regs,
@@ -434,10 +432,6 @@
const ArrayRef<const RegStorage>& sp_temps,
const ArrayRef<const RegStorage>& dp_temps);
~RegisterPool() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocRegAlloc);
- }
- static void operator delete(void* ptr) { UNUSED(ptr); }
void ResetNextTemp() {
next_core_reg_ = 0;
next_sp_reg_ = 0;
@@ -501,20 +495,15 @@
// has completed.
//
- class LIRSlowPath {
+ class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
public:
LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
LIR* cont = nullptr) :
m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
- m2l->StartSlowPath(this);
}
virtual ~LIRSlowPath() {}
virtual void Compile() = 0;
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocData);
- }
-
LIR *GetContinuationLabel() {
return cont_;
}
@@ -694,11 +683,6 @@
void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
- virtual void StartSlowPath(LIRSlowPath* slowpath) {}
- virtual void BeginInvoke(CallInfo* info) {}
- virtual void EndInvoke(CallInfo* info) {}
-
-
// Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
virtual RegLocation NarrowRegLoc(RegLocation loc);
@@ -805,6 +789,7 @@
virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit);
bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
+ bool HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
virtual void HandleSlowPaths();
void GenBarrier();
void GenDivZeroException();
@@ -822,10 +807,9 @@
LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
- void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken, LIR* fall_through);
- void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
- LIR* taken, LIR* fall_through);
+ void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
+ LIR* taken);
+ void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src);
@@ -857,7 +841,7 @@
void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src, int lit);
virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
+ RegLocation rl_src1, RegLocation rl_src2, int flags);
void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
virtual void GenSuspendTest(int opt_flags);
virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);
@@ -865,7 +849,7 @@
// This will be overridden by x86 implementation.
virtual void GenConstWide(RegLocation rl_dest, int64_t value);
virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
+ RegLocation rl_src1, RegLocation rl_src2, int flags);
// Shared by all targets - implemented in gen_invoke.cc.
LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
@@ -1087,6 +1071,13 @@
// Update LIR for verbose listings.
void UpdateLIROffsets();
+ /**
+ * @brief Mark a garbage collection card. Skip if the stored value is null.
+ * @param val_reg the register holding the stored value to check against null.
+ * @param tgt_addr_reg the register holding the address of the object or array where the value
+ * was stored.
+ */
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+
/*
* @brief Load the address of the dex method into the register.
* @param target_method The MethodReference of the method to be invoked.
@@ -1137,6 +1128,10 @@
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
+ virtual void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) = 0;
+ virtual void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) = 0;
virtual LIR* CheckSuspendUsingLoad() = 0;
virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;
@@ -1151,7 +1146,12 @@
OpSize size, VolatileKind is_volatile) = 0;
virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) = 0;
- virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;
+
+ /**
+ * @brief Unconditionally mark a garbage collection card.
+ * @param tgt_addr_reg the register holding the address of the object or array where the value
+ * was stored.
+ */
+ virtual void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) = 0;
// Required for target - register utilities.
@@ -1191,14 +1191,18 @@
*/
virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
if (wide_kind == kWide) {
- DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg7) || (kRet0 == reg));
- COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
- (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
- (kArg7 == kArg6 + 1), kargs_range_unexpected);
- COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
- (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
- (kFArg7 == kFArg6 + 1), kfargs_range_unexpected);
- COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
+ DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
+ static_assert((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
+ (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
+ (kArg7 == kArg6 + 1), "kargs range unexpected");
+ static_assert((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
+ (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
+ (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
+ (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
+ (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
+ (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
+ "kfargs range unexpected");
+ static_assert(kRet1 == kRet0 + 1, "kret range unexpected");
return RegStorage::MakeRegPair(TargetReg(reg),
TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
} else {
@@ -1259,7 +1263,7 @@
// Required for target - Dalvik-level generators.
virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) = 0;
+ RegLocation rl_src1, RegLocation rl_src2, int flags) = 0;
virtual void GenArithOpDouble(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
@@ -1297,10 +1301,11 @@
* @param rl_src1 Numerator Location.
* @param rl_src2 Divisor Location.
* @param is_div 'true' if this is a division, 'false' for a remainder.
- * @param check_zero 'true' if an exception should be generated if the divisor is 0.
+ * @param flags The instruction optimization flags. They can include information about whether
+ * the exception check can be elided.
*/
virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero) = 0;
+ RegLocation rl_src2, bool is_div, int flags) = 0;
/*
* @brief Generate an integer div or rem operation by a literal.
* @param rl_dest Destination Location.
@@ -1345,7 +1350,7 @@
*/
virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) = 0;
+ RegisterClass dest_reg_class) = 0;
/**
* @brief Used to generate a memory barrier in an architecture specific way.
@@ -1382,7 +1387,7 @@
RegLocation rl_index, RegLocation rl_src, int scale,
bool card_mark) = 0;
virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift) = 0;
+ RegLocation rl_src1, RegLocation rl_shift, int flags) = 0;
// Required for target - single operation generators.
virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
@@ -1447,9 +1452,30 @@
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+ UNUSED(opcode);
return InexpensiveConstantInt(value);
}
+ /**
+ * @brief Whether division by the given divisor can be converted to multiply by its reciprocal.
+ * @param divisor The bit pattern of a constant float divisor.
+ * @return Returns true iff x/divisor == x*(1.0f/divisor) for every float x.
+ */
+ bool CanDivideByReciprocalMultiplyFloat(int32_t divisor) {
+ // True if the float value's significand bits are 0.
+ return ((divisor & 0x7fffff) == 0);
+ }
+
+ /**
+ * @brief Whether division by the given divisor can be converted to multiply by its reciprocal.
+ * @param divisor The bit pattern of a constant double divisor.
+ * @return Returns true iff x/divisor == x*(1.0/divisor) for every double x.
+ */
+ bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) {
+ // True if the double value's significand bits are 0.
+ return ((divisor & ((UINT64_C(1) << 52) - 1)) == 0);
+ }
+
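+ // Editorial note (illustration only, not part of the original change): a finite divisor whose
+ // significand bits are all zero is a power of two, so its reciprocal is exactly representable
+ // and x / divisor == x * (1.0 / divisor) for every x. For example, 8.0f has the bit pattern
+ // 0x41000000 (significand == 0), so x / 8.0f may be lowered to x * 0.125f, whereas 3.0f
+ // (bit pattern 0x40400000) has non-zero significand bits and does not qualify.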
// May be optimized by targets.
virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -1637,12 +1663,12 @@
/**
* Returns true iff wide GPRs are just different views on the same physical register.
*/
- virtual bool WideGPRsAreAliases() = 0;
+ virtual bool WideGPRsAreAliases() const = 0;
/**
* Returns true iff wide FPRs are just different views on the same physical register.
*/
- virtual bool WideFPRsAreAliases() = 0;
+ virtual bool WideFPRsAreAliases() const = 0;
enum class WidenessCheck { // private
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 6f2a647..8d4cb3c 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -78,14 +78,14 @@
DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
};
-COMPILE_ASSERT(0U == static_cast<size_t>(kNone), kNone_not_0);
-COMPILE_ASSERT(1U == static_cast<size_t>(kArm), kArm_not_1);
-COMPILE_ASSERT(2U == static_cast<size_t>(kArm64), kArm64_not_2);
-COMPILE_ASSERT(3U == static_cast<size_t>(kThumb2), kThumb2_not_3);
-COMPILE_ASSERT(4U == static_cast<size_t>(kX86), kX86_not_4);
-COMPILE_ASSERT(5U == static_cast<size_t>(kX86_64), kX86_64_not_5);
-COMPILE_ASSERT(6U == static_cast<size_t>(kMips), kMips_not_6);
-COMPILE_ASSERT(7U == static_cast<size_t>(kMips64), kMips64_not_7);
+static_assert(0U == static_cast<size_t>(kNone), "kNone not 0");
+static_assert(1U == static_cast<size_t>(kArm), "kArm not 1");
+static_assert(2U == static_cast<size_t>(kArm64), "kArm64 not 2");
+static_assert(3U == static_cast<size_t>(kThumb2), "kThumb2 not 3");
+static_assert(4U == static_cast<size_t>(kX86), "kX86 not 4");
+static_assert(5U == static_cast<size_t>(kX86_64), "kX86_64 not 5");
+static_assert(6U == static_cast<size_t>(kMips), "kMips not 6");
+static_assert(7U == static_cast<size_t>(kMips64), "kMips64 not 7");
// Additional disabled optimizations (over generally disabled) per instruction set.
static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
@@ -118,7 +118,8 @@
// 7 = kMips64.
~0U
};
-COMPILE_ASSERT(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t), kDisabledOpts_unexp);
+static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
+ "kDisabledOpts unexpected");
// Supported shorty types per instruction set. nullptr means that all are available.
// Z : boolean
@@ -149,7 +150,7 @@
// 7 = kMips64.
""
};
-COMPILE_ASSERT(sizeof(kSupportedTypes) == 8 * sizeof(char*), kSupportedTypes_unexp);
+static_assert(sizeof(kSupportedTypes) == 8 * sizeof(char*), "kSupportedTypes unexpected");
static int kAllOpcodes[] = {
Instruction::NOP,
@@ -425,6 +426,21 @@
kMirOpSelect,
};
+static int kInvokeOpcodes[] = {
+ Instruction::INVOKE_VIRTUAL,
+ Instruction::INVOKE_SUPER,
+ Instruction::INVOKE_DIRECT,
+ Instruction::INVOKE_STATIC,
+ Instruction::INVOKE_INTERFACE,
+ Instruction::INVOKE_VIRTUAL_RANGE,
+ Instruction::INVOKE_SUPER_RANGE,
+ Instruction::INVOKE_DIRECT_RANGE,
+ Instruction::INVOKE_STATIC_RANGE,
+ Instruction::INVOKE_INTERFACE_RANGE,
+ Instruction::INVOKE_VIRTUAL_QUICK,
+ Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
+};
+
// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
// recorded below.
static const int* kUnsupportedOpcodes[] = {
@@ -445,7 +461,7 @@
// 7 = kMips64.
kAllOpcodes
};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), kUnsupportedOpcodes_unexp);
+static_assert(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), "kUnsupportedOpcodes unexpected");
// Size of the arrays stored above.
static const size_t kUnsupportedOpcodesSize[] = {
@@ -466,8 +482,8 @@
// 7 = kMips64.
arraysize(kAllOpcodes),
};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
- kUnsupportedOpcodesSize_unexp);
+static_assert(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
+ "kUnsupportedOpcodesSize unexpected");
// The maximum number of Dalvik registers in a method for which we will start compiling. Tries to
// avoid an abort when we need to manage more SSA registers than we can.
@@ -523,8 +539,8 @@
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
// Check if we support the byte code.
- if (std::find(unsupport_list, unsupport_list + unsupport_list_size,
- opcode) != unsupport_list + unsupport_list_size) {
+ if (std::find(unsupport_list, unsupport_list + unsupport_list_size, opcode)
+ != unsupport_list + unsupport_list_size) {
if (!MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
VLOG(compiler) << "Unsupported dalvik byte code : "
<< mir->dalvikInsn.opcode;
@@ -535,11 +551,8 @@
return false;
}
// Check if it invokes a prototype that we cannot support.
- if (Instruction::INVOKE_VIRTUAL == opcode ||
- Instruction::INVOKE_SUPER == opcode ||
- Instruction::INVOKE_DIRECT == opcode ||
- Instruction::INVOKE_STATIC == opcode ||
- Instruction::INVOKE_INTERFACE == opcode) {
+ if (std::find(kInvokeOpcodes, kInvokeOpcodes + arraysize(kInvokeOpcodes), opcode)
+ != kInvokeOpcodes + arraysize(kInvokeOpcodes)) {
uint32_t invoke_method_idx = mir->dalvikInsn.vB;
const char* invoke_method_shorty = dex_file.GetMethodShorty(
dex_file.GetMethodId(invoke_method_idx));
@@ -600,7 +613,8 @@
}
uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
bool QuickCompiler::WriteElf(art::File* file,
@@ -613,6 +627,7 @@
}
Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ UNUSED(compilation_unit);
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
case kThumb2:
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 6305b22..0a98c80 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -316,16 +316,16 @@
// TODO: this is Thumb2 only. Remove when DoPromotion is refactored.
RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
- return res;
+ UNREACHABLE();
}
// TODO: this is Thumb2 only. Remove when DoPromotion is refactored.
RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
- return res;
+ UNREACHABLE();
}
@@ -1392,6 +1392,7 @@
}
bool Mir2Lir::LiveOut(int s_reg) {
+ UNUSED(s_reg);
// For now.
return true;
}
diff --git a/compiler/dex/quick/resource_mask.cc b/compiler/dex/quick/resource_mask.cc
index 17995fb..088bec8 100644
--- a/compiler/dex/quick/resource_mask.cc
+++ b/compiler/dex/quick/resource_mask.cc
@@ -33,16 +33,16 @@
ResourceMask::Bit(ResourceMask::kCCode),
};
// The 127-bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
- kEncodeHeapRef), check_kNoRegMasks_heap_ref_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
- kEncodeLiteral), check_kNoRegMasks_literal_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
- kEncodeDalvikReg), check_kNoRegMasks_dalvik_reg_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
- ResourceMask::Bit(ResourceMask::kFPStatus)), check_kNoRegMasks_fp_status_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kCCode].Equals(
- ResourceMask::Bit(ResourceMask::kCCode)), check_kNoRegMasks_ccode_index);
+static_assert(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
+ kEncodeHeapRef), "kNoRegMasks heap ref index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
+ kEncodeLiteral), "kNoRegMasks literal index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
+ kEncodeDalvikReg), "kNoRegMasks dalvik reg index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
+ ResourceMask::Bit(ResourceMask::kFPStatus)), "kNoRegMasks fp status index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kCCode].Equals(
+ ResourceMask::Bit(ResourceMask::kCCode)), "kNoRegMasks ccode index unexpected");
template <size_t special_bit>
constexpr ResourceMask OneRegOneSpecial(size_t reg) {
@@ -74,19 +74,19 @@
}
// The 127-bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kHeapRef>(0)), check_kSingleRegMasks_heap_ref_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kLiteral>(0)), check_kSingleRegMasks_literal_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), check_kSingleRegMasks_dalvik_reg_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kFPStatus>(0)), check_kSingleRegMasks_fp_status_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kCCode>(0)), check_kSingleRegMasks_ccode_index);
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kHeapRef>(0)), "kSingleRegMasks heap ref index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kLiteral>(0)), "kSingleRegMasks literal index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), "kSingleRegMasks dalvik reg index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kFPStatus>(0)), "kSingleRegMasks fp status index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kCCode>(0)), "kSingleRegMasks ccode index unexpected");
// NOTE: arraysize(kNoRegMasks) multiplied by 32 due to the gcc bug workaround, see above.
-COMPILE_ASSERT(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, check_arraysizes);
+static_assert(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, "arraysizes unexpected");
constexpr ResourceMask kTwoRegsMasks[] = {
#define TWO(a, b) ResourceMask::Bit(a).Union(ResourceMask::Bit(b))
@@ -115,7 +115,7 @@
TWO(8, 15), TWO(9, 15), TWO(10, 15), TWO(11, 15), TWO(12, 15), TWO(13, 15), TWO(14, 15),
#undef TWO
};
-COMPILE_ASSERT(arraysize(kTwoRegsMasks) == 16 * 15 / 2, check_arraysize_kTwoRegsMasks);
+static_assert(arraysize(kTwoRegsMasks) == 16 * 15 / 2, "arraysize of kTwoRegsMasks unexpected");
constexpr size_t TwoRegsIndex(size_t higher, size_t lower) {
return (higher * (higher - 1)) / 2u + lower;
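// Editorial note (illustration only, not part of the original change): the pairs in
// kTwoRegsMasks are laid out by ascending 'higher' register, so the run for a given 'higher'
// starts at the triangular number higher*(higher-1)/2. E.g. TwoRegsIndex(3, 1) == 3*2/2 + 1 == 4,
// the position of TWO(1, 3) in the table, which is what CheckTwoRegsMaskTable verifies below.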
@@ -136,7 +136,7 @@
(CheckTwoRegsMaskLine(lines - 1) && CheckTwoRegsMaskTable(lines - 1u));
}
-COMPILE_ASSERT(CheckTwoRegsMaskTable(16), check_two_regs_masks_table);
+static_assert(CheckTwoRegsMaskTable(16), "two regs masks table check failed");
} // anonymous namespace
diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h
index 436cdb5..78e81b2 100644
--- a/compiler/dex/quick/resource_mask.h
+++ b/compiler/dex/quick/resource_mask.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include "base/logging.h"
+#include "base/value_object.h"
#include "dex/reg_storage.h"
namespace art {
@@ -113,10 +114,7 @@
return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
}
- void SetBit(size_t bit) {
- DCHECK_LE(bit, kHighestCommonResource);
- masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
- }
+ void SetBit(size_t bit);
constexpr bool HasBit(size_t bit) const {
return (masks_[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
@@ -139,6 +137,12 @@
friend class ResourceMaskCache;
};
+std::ostream& operator<<(std::ostream& os, const ResourceMask::ResourceBit& rhs);
+
+inline void ResourceMask::SetBit(size_t bit) {
+ DCHECK_LE(bit, kHighestCommonResource);
+ masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
+}
constexpr ResourceMask kEncodeNone = ResourceMask::NoBits();
constexpr ResourceMask kEncodeAll = ResourceMask::AllBits();
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index dce2b73..84d68d2 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -424,15 +424,15 @@
{ kX86PextrbRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x14, 0, 0, 1, false }, "PextbRRI", "!0r,!1r,!2d" },
{ kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1, false }, "PextwRRI", "!0r,!1r,!2d" },
{ kX86PextrdRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextdRRI", "!0r,!1r,!2d" },
- { kX86PextrbMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "kX86PextrbMRI", "[!0r+!1d],!2r,!3d" },
- { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "kX86PextrwMRI", "[!0r+!1d],!2r,!3d" },
- { kX86PextrdMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "kX86PextrdMRI", "[!0r+!1d],!2r,!3d" },
+ { kX86PextrbMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrbMRI", "[!0r+!1d],!2r,!3d" },
+ { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
+ { kX86PextrdMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrdMRI", "[!0r+!1d],!2r,!3d" },
{ kX86PshuflwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0xF2, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuflwRRI", "!0r,!1r,!2d" },
{ kX86PshufdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuffRRI", "!0r,!1r,!2d" },
- { kX86ShufpsRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x00, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "kX86ShufpsRRI", "!0r,!1r,!2d" },
- { kX86ShufpdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "kX86ShufpdRRI", "!0r,!1r,!2d" },
+ { kX86ShufpsRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE0 | REG_USE1, { 0x00, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "ShufpsRRI", "!0r,!1r,!2d" },
+ { kX86ShufpdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE0 | REG_USE1, { 0x66, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "ShufpdRRI", "!0r,!1r,!2d" },
{ kX86PsrawRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 4, 0, 1, false }, "PsrawRI", "!0r,!1d" },
{ kX86PsradRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 4, 0, 1, false }, "PsradRI", "!0r,!1d" },
@@ -541,12 +541,17 @@
{ kX86CallI, kCall, IS_UNARY_OP | IS_BRANCH, { 0, 0, 0xE8, 0, 0, 0, 0, 4, false }, "CallI", "!0d" },
{ kX86Ret, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xC3, 0, 0, 0, 0, 0, false }, "Ret", "" },
- { kX86StartOfMethod, kMacro, IS_UNARY_OP | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0, false }, "StartOfMethod", "!0r" },
+ { kX86StartOfMethod, kMacro, IS_UNARY_OP | REG_DEF0 | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0, false }, "StartOfMethod", "!0r" },
{ kX86PcRelLoadRA, kPcRel, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "PcRelLoadRA", "!0r,[!1r+!2r<<!3d+!4p]" },
{ kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4, false }, "PcRelAdr", "!0r,!1p" },
{ kX86RepneScasw, kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs) {
+ os << X86Mir2Lir::EncodingMap[rhs].name;
+ return os;
+}
+
static bool NeedsRex(int32_t raw_reg) {
return RegStorage::RegNum(raw_reg) > 7;
}
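// Editorial note (illustration only, not part of the original change): on x86-64 the extended
// registers (r8-r15, xmm8-xmm15) carry register numbers 8..15 and can only be encoded when a
// REX prefix supplies the extra register-number bit, which is what this predicate detects.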
@@ -672,7 +677,7 @@
++size; // modrm
}
if (!modrm_is_reg_reg) {
- if (has_sib || LowRegisterBits(raw_base) == rs_rX86_SP.GetRegNum()
+ if (has_sib || (LowRegisterBits(raw_base) == rs_rX86_SP_32.GetRegNum())
|| (cu_->target64 && entry->skeleton.prefix1 == THREAD_PREFIX)) {
// SP requires a SIB byte.
// GS access also needs a SIB byte for absolute addressing in 64-bit mode.
@@ -1005,9 +1010,9 @@
void X86Mir2Lir::EmitModrmThread(uint8_t reg_or_opcode) {
if (cu_->target64) {
// Absolute addressing for GS access.
- uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP.GetRegNum();
+ uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
- uint8_t sib = (0/*TIMES_1*/ << 6) | (rs_rX86_SP.GetRegNum() << 3) | rs_rBP.GetRegNum();
+ uint8_t sib = (0/*TIMES_1*/ << 6) | (rs_rX86_SP_32.GetRegNum() << 3) | rs_rBP.GetRegNum();
code_buffer_.push_back(sib);
} else {
uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rBP.GetRegNum();
@@ -1020,9 +1025,9 @@
DCHECK_LT(base, 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
code_buffer_.push_back(modrm);
- if (base == rs_rX86_SP.GetRegNum()) {
+ if (base == rs_rX86_SP_32.GetRegNum()) {
// Special SIB for SP base
- code_buffer_.push_back(0 << 6 | rs_rX86_SP.GetRegNum() << 3 | rs_rX86_SP.GetRegNum());
+ code_buffer_.push_back(0 << 6 | rs_rX86_SP_32.GetRegNum() << 3 | rs_rX86_SP_32.GetRegNum());
}
EmitDisp(base, disp);
}
@@ -1031,7 +1036,7 @@
int scale, int32_t disp) {
DCHECK_LT(RegStorage::RegNum(reg_or_opcode), 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | RegStorage::RegNum(reg_or_opcode) << 3 |
- rs_rX86_SP.GetRegNum();
+ rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
DCHECK_LT(scale, 4);
DCHECK_LT(RegStorage::RegNum(index), 8);
@@ -1579,7 +1584,7 @@
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
uint8_t low_reg = LowRegisterBits(raw_reg);
- uint8_t modrm = (2 << 6) | (low_reg << 3) | rs_rX86_SP.GetRegNum();
+ uint8_t modrm = (2 << 6) | (low_reg << 3) | rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
DCHECK_LT(scale, 4);
uint8_t low_base_or_table = LowRegisterBits(raw_base_or_table);
@@ -1631,6 +1636,7 @@
* sequence or request that the trace be shortened and retried.
*/
AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+ UNUSED(start_addr);
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 86efc1e..a808459 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -136,23 +136,16 @@
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void X86Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
- DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
RegStorage reg_card_base = AllocTempRef();
RegStorage reg_card_no = AllocTempRef();
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
int ct_offset = cu_->target64 ?
Thread::CardTableOffset<8>().Int32Value() :
Thread::CardTableOffset<4>().Int32Value();
NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
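// Editorial note (illustration only, not part of the original change): the card for an address
// is card_table_base[addr >> kCardShift]; the StoreBaseIndexed above reuses the card-table base
// register as the byte value written to that slot, marking the card dirty without needing an
// extra constant.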
@@ -164,16 +157,20 @@
* expanding the frame or flushing. This leaves the utility
* code with no spare temps.
*/
- LockTemp(rs_rX86_ARG0);
- LockTemp(rs_rX86_ARG1);
- LockTemp(rs_rX86_ARG2);
+ const RegStorage arg0 = TargetReg32(kArg0);
+ const RegStorage arg1 = TargetReg32(kArg1);
+ const RegStorage arg2 = TargetReg32(kArg2);
+ LockTemp(arg0);
+ LockTemp(arg1);
+ LockTemp(arg2);
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
+ const InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
// If we are doing an implicit stack overflow check, perform the load immediately
// before the stack pointer is decremented and anything is saved.
@@ -182,12 +179,12 @@
// Implicit stack overflow check.
// test eax,[esp + -overflow]
int overflow = GetStackOverflowReservedBytes(isa);
- NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
+ NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rSP.GetReg(), -overflow);
MarkPossibleStackOverflowException();
}
/* Build frame, return address already on stack */
- stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
+ stack_decrement_ = OpRegImm(kOpSub, rs_rSP, frame_size_ -
GetInstructionSetPointerSize(cu_->instruction_set));
NewLIR0(kPseudoMethodEntry);
@@ -204,7 +201,8 @@
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
+ const RegStorage local_rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ m2l_->OpRegImm(kOpAdd, local_rs_rSP, sp_displace_);
m2l_->ClobberCallerSave();
// Assumes codegen and target are in thumb2 mode.
m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
@@ -225,9 +223,9 @@
// may have moved us outside of the reserved area at the end of the stack.
// cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
if (cu_->target64) {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
+ OpRegThreadMem(kOpCmp, rs_rX86_SP_64, Thread::StackEndOffset<8>());
} else {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
+ OpRegThreadMem(kOpCmp, rs_rX86_SP_32, Thread::StackEndOffset<4>());
}
LIR* branch = OpCondBranch(kCondUlt, nullptr);
AddSlowPath(
@@ -245,13 +243,13 @@
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
+ setup_method_address_[1] = StoreBaseDisp(rs_rSP, displacement, method_start,
cu_->target64 ? k64 : k32, kNotVolatile);
}
- FreeTemp(rs_rX86_ARG0);
- FreeTemp(rs_rX86_ARG1);
- FreeTemp(rs_rX86_ARG2);
+ FreeTemp(arg0);
+ FreeTemp(arg1);
+ FreeTemp(arg2);
}
void X86Mir2Lir::GenExitSequence() {
@@ -266,7 +264,9 @@
UnSpillCoreRegs();
UnSpillFPRegs();
/* Remove frame except for return address */
- stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ stack_increment_ = OpRegImm(kOpAdd, rs_rSP,
+ frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
NewLIR0(kX86Ret);
}
@@ -290,9 +290,10 @@
*/
static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, direct_code);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_method != 0) {
switch (state) {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index b3544da..26641f8 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -78,6 +78,10 @@
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit) OVERRIDE;
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -90,7 +94,10 @@
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+
void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
// Required for target - register utilities.
@@ -180,11 +187,11 @@
// Long instructions.
void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) OVERRIDE;
+ RegLocation rl_src2, int flags) OVERRIDE;
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) OVERRIDE;
+ RegLocation rl_src2, int flags) OVERRIDE;
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
+ RegLocation rl_src1, RegLocation rl_shift, int flags) OVERRIDE;
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
@@ -250,7 +257,7 @@
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMoveException(RegLocation rl_dest) OVERRIDE;
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -314,9 +321,10 @@
* @param rl_dest Destination for the result.
* @param rl_lhs Left hand operand.
* @param rl_rhs Right hand operand.
+ * @param flags The instruction optimization flags.
*/
void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
- RegLocation rl_rhs) OVERRIDE;
+ RegLocation rl_rhs, int flags) OVERRIDE;
/*
* @brief Load the Method* of a dex method into the register.
@@ -384,7 +392,7 @@
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
protected:
- RegStorage TargetReg32(SpecialTargetRegister reg);
+ RegStorage TargetReg32(SpecialTargetRegister reg) const;
// Casting of RegStorage
RegStorage As32BitReg(RegStorage reg) {
DCHECK(!reg.IsPair());
@@ -427,7 +435,7 @@
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size, int opt_flags = 0);
- RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
+ RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num) const;
int AssignInsnOffsets();
void AssignOffsets();
@@ -498,7 +506,7 @@
void GenConstWide(RegLocation rl_dest, int64_t value);
void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
- void GenShiftByteVector(BasicBlock *bb, MIR *mir);
+ void GenShiftByteVector(MIR* mir);
void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
uint32_t m4);
void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
@@ -525,7 +533,7 @@
* @brief Check if a register is byte addressable.
* @returns true if a register is byte addressable.
*/
- bool IsByteRegister(RegStorage reg);
+ bool IsByteRegister(RegStorage reg) const;
void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
@@ -556,88 +564,80 @@
/*
* @brief Load 128 bit constant into vector register.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector
* @note vA is the TypeSize for the register.
* @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
*/
- void GenConst128(BasicBlock* bb, MIR* mir);
+ void GenConst128(MIR* mir);
/*
* @brief MIR to move a vectorized register to another.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination
* @note vC: source
*/
- void GenMoveVector(BasicBlock *bb, MIR *mir);
+ void GenMoveVector(MIR* mir);
/*
* @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
* the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenMultiplyVector(BasicBlock *bb, MIR *mir);
+ void GenMultiplyVector(MIR* mir);
/*
* @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenAddVector(BasicBlock *bb, MIR *mir);
+ void GenAddVector(MIR* mir);
/*
* @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenSubtractVector(BasicBlock *bb, MIR *mir);
+ void GenSubtractVector(MIR* mir);
/*
* @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
+ void GenShiftLeftVector(MIR* mir);
/*
* @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
* know the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenSignedShiftRightVector(MIR* mir);
/*
* @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
* to know the type of the vector.
- * @param bb The basic block in which the MIR is from..
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenUnsignedShiftRightVector(MIR* mir);
/*
* @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
@@ -646,51 +646,47 @@
* @note vB: destination and source
* @note vC: source
*/
- void GenAndVector(BasicBlock *bb, MIR *mir);
+ void GenAndVector(MIR* mir);
/*
* @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenOrVector(BasicBlock *bb, MIR *mir);
+ void GenOrVector(MIR* mir);
/*
* @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenXorVector(BasicBlock *bb, MIR *mir);
+ void GenXorVector(MIR* mir);
/*
* @brief Reduce a 128-bit packed element into a single VR by taking lower bits
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
* @note vA: TypeSize
* @note vB: destination and source VR (not vector register)
* @note vC: source (vector register)
*/
- void GenAddReduceVector(BasicBlock *bb, MIR *mir);
+ void GenAddReduceVector(MIR* mir);
/*
* @brief Extract a packed element into a single VR.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination VR (not vector register)
* @note vC: source (vector register)
* @note arg[0]: The index to use for extraction from vector register (which packed element).
*/
- void GenReduceVector(BasicBlock *bb, MIR *mir);
+ void GenReduceVector(MIR* mir);
/*
* @brief Create a vector value, with all TypeSize values equal to vC
@@ -700,21 +696,21 @@
* @note vB: destination vector register.
* @note vC: source VR (not vector register).
*/
- void GenSetVector(BasicBlock *bb, MIR *mir);
+ void GenSetVector(MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayGet.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayGet.
*/
- void GenPackedArrayGet(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayPut.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayPut.
*/
- void GenPackedArrayPut(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
/*
* @brief Generate code for a vector opcode.
@@ -768,10 +764,11 @@
* @param rl_src1 Numerator Location.
* @param rl_src2 Divisor Location.
* @param is_div 'true' if this is a division, 'false' for a remainder.
- * @param check_zero 'true' if an exception should be generated if the divisor is 0.
+ * @param flags The instruction optimization flags. They can include information about whether
+ * the exception check can be elided.
*/
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, bool check_zero);
+ bool is_div, int flags);
/*
* @brief Generate an integer div or rem operation by a literal.
@@ -788,10 +785,11 @@
* @param rl_dest The destination.
* @param rl_src The value to be shifted.
* @param shift_amount How much to shift.
+ * @param flags The instruction optimization flags.
* @returns the RegLocation of the result.
*/
RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src, int shift_amount);
+ RegLocation rl_src, int shift_amount, int flags);
/*
* Generate an imul of a register by a constant or a better sequence.
* @param dest Destination Register.
@@ -858,13 +856,13 @@
// Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
// in which case false will be returned.
- bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val);
+ bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val, int flags);
void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
+ RegLocation rl_src2, int flags);
void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div);
+ RegLocation rl_src2, bool is_div, int flags);
void SpillCoreRegs();
void UnSpillCoreRegs();
@@ -887,8 +885,8 @@
* the value is live in a temp register of the correct class. Additionally, if the value is in
* a temp register of the wrong register class, it will be clobbered.
*/
- RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
- RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
+ RegLocation UpdateLocTyped(RegLocation loc);
+ RegLocation UpdateLocWideTyped(RegLocation loc);
/*
* @brief Analyze MIR before generating code, to prepare for the code generation.
@@ -899,7 +897,7 @@
* @brief Analyze one basic block.
* @param bb Basic block to analyze.
*/
- void AnalyzeBB(BasicBlock * bb);
+ void AnalyzeBB(BasicBlock* bb);
/*
* @brief Analyze one extended MIR instruction
@@ -907,7 +905,7 @@
* @param bb Basic block containing instruction.
* @param mir Extended instruction to analyze.
*/
- void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR instruction
@@ -915,7 +913,7 @@
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR float/double instruction
@@ -923,7 +921,7 @@
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one use of a double operand.
@@ -937,7 +935,7 @@
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir);
// Information derived from analysis of MIR
@@ -984,12 +982,11 @@
*/
LIR* AddVectorLiteral(int32_t* constants);
- InToRegStorageMapping in_to_reg_storage_mapping_;
-
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return cu_->target64; // On 64b, we have 64b GPRs.
}
- bool WideFPRsAreAliases() OVERRIDE {
+
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // xmm registers have 64b views even on x86.
}
@@ -999,11 +996,17 @@
*/
static void DumpRegLocation(RegLocation loc);
- static const X86EncodingMap EncodingMap[kX86Last];
+ InToRegStorageMapping in_to_reg_storage_mapping_;
private:
void SwapBits(RegStorage result_reg, int shift, int32_t value);
void SwapBits64(RegStorage result_reg, int shift, int64_t value);
+
+ static const X86EncodingMap EncodingMap[kX86Last];
+
+ friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
};
} // namespace art
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 21d1a5c..4825db6 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -122,6 +122,20 @@
StoreValueWide(rl_dest, rl_result);
}
+void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ // TODO: need x86 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in x86";
+}
+
+void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ // TODO: need x86 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in x86";
+}
+
void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double) {
// Compute offsets to the source and destination VRs on stack
int src_v_reg_offset = SRegOffset(rl_src.s_reg_low);
@@ -145,12 +159,13 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(rs_rX86_SP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ StoreBaseDisp(rs_rSP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
}
}
// Push the source virtual register onto the x87 stack.
- LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP.GetReg(),
+ LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP_32.GetReg(),
src_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
@@ -158,7 +173,7 @@
// Now pop off x87 stack and store it in the destination VR's stack location.
int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
- LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
+ LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP_32.GetReg(), displacement);
AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);
/*
@@ -169,8 +184,7 @@
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
/*
* We already know that the result is in a physical register but do not know if it is the
@@ -178,12 +192,13 @@
* correct register class.
*/
rl_result = EvalLoc(rl_dest, kFPReg, true);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (is_double) {
- LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
@@ -353,6 +368,7 @@
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
// If the source is in physical register, then put it in its location on stack.
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (rl_src1.location == kLocPhysReg) {
RegisterInfo* reg_info = GetRegInfo(rl_src1.reg);
@@ -364,7 +380,7 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(rs_rX86_SP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rSP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -375,7 +391,7 @@
FlushSpecificReg(reg_info);
ResetDef(rl_src2.reg);
} else {
- StoreBaseDisp(rs_rX86_SP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rSP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -383,12 +399,12 @@
int fld_opcode = is_double ? kX86Fld64M : kX86Fld32M;
// Push the source virtual registers onto the x87 stack.
- LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
+ LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
src2_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_2, (src2_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
- LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
+ LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
src1_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_1, (src1_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
@@ -417,7 +433,7 @@
// Now store result in the destination VR's stack location.
int displacement = dest_v_reg_offset + LOWORD_OFFSET;
int opcode = is_double ? kX86Fst64M : kX86Fst32M;
- LIR *fst = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
+ LIR *fst = NewLIR2NoDest(opcode, rs_rSP.GetReg(), displacement);
AnnotateDalvikRegAccess(fst, displacement >> 2, false /* is_load */, is_double /* is64bit */);
// Pop ST(1) and ST(0).
@@ -431,15 +447,14 @@
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
}
@@ -569,16 +584,16 @@
void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
- rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
OpRegCopy(rl_result.reg, rl_src.reg);
// Flip sign bit.
NewLIR2(kX86Rol64RI, rl_result.reg.GetReg(), 1);
NewLIR2(kX86Xor64RI, rl_result.reg.GetReg(), 1);
NewLIR2(kX86Ror64RI, rl_result.reg.GetReg(), 1);
} else {
- OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
- OpRegCopy(rl_result.reg, rl_src.reg);
+ rl_result = ForceTempWide(rl_src);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), 0x80000000);
}
StoreValueWide(rl_dest, rl_result);
}
@@ -627,7 +642,7 @@
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement, 0x7fffffff);
AnnotateDalvikRegAccess(lir, displacement >> 2, false /*is_load */, false /* is_64bit */);
AnnotateDalvikRegAccess(lir, displacement >> 2, true /* is_load */, false /* is_64bit*/);
return true;
@@ -691,7 +706,7 @@
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, true /* is_load */, true /* is_64bit*/);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, false /*is_load */, true /* is_64bit */);
return true;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 07034cb..3f501b4 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -208,7 +208,7 @@
void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
@@ -268,6 +268,7 @@
}
void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -594,8 +595,9 @@
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
+ UNUSED(rl_dest, reg_lo, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
@@ -763,12 +765,14 @@
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) {
+ UNUSED(rl_dest, reg_lo, reg_hi, is_div);
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, bool check_zero) {
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest);
// We have to use fixed registers, so flush all the temps.
// Prepare for explicit register usage.
@@ -783,7 +787,7 @@
// Copy LHS sign bit into EDX.
NewLIR0(kx86Cdq32Da);
- if (check_zero) {
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
// Handle division by zero case.
GenDivZeroCheck(rs_r1);
}
@@ -858,6 +862,11 @@
RegLocation rl_dest = InlineTargetWide(info);
int res_vreg, src1_vreg, src2_vreg;
+ if (rl_dest.s_reg_low == INVALID_SREG) {
+ // Result is unused and the code is dead. Inlining is successful; no code is generated.
+ return true;
+ }
+
/*
* If the result register is the same as the second element, then we
* need to be careful. The reason is that the first copy will
@@ -1017,7 +1026,7 @@
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// In 32-bit mode only the EAX..EDX registers can be used with Mov8MR.
if (!cu_->target64 && size == kSignedByte) {
- rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
+ rl_src_value = UpdateLocTyped(rl_src_value);
if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
RegStorage temp = AllocateByteRegister();
OpRegCopy(temp, rl_src_value.reg);
@@ -1115,15 +1124,16 @@
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (!obj_in_si && !obj_in_di) {
- LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
+ LoadWordDisp(rs_rSP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
}
if (!off_in_si && !off_in_di) {
- LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
+ LoadWordDisp(rs_rSP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
@@ -1304,18 +1314,21 @@
}
LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for x86";
- return NULL;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1350,10 +1363,10 @@
int len_offset) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
- RegStorage index, RegStorage array_base, int32_t len_offset)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), array_base_(array_base), len_offset_(len_offset) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
+ RegStorage index_in, RegStorage array_base_in, int32_t len_offset_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
}
void Compile() OVERRIDE {
@@ -1398,10 +1411,10 @@
int32_t len_offset) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
- int32_t index, RegStorage array_base, int32_t len_offset)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), array_base_(array_base), len_offset_(len_offset) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
+ int32_t index_in, RegStorage array_base_in, int32_t len_offset_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
}
void Compile() OVERRIDE {
@@ -1448,22 +1461,27 @@
bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
- return false;
+ UNREACHABLE();
}
bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in x86";
- return false;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in x86";
+ UNREACHABLE();
}
void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
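
Several of the hunks above replace dead return statements after LOG(FATAL) with the UNUSED(...) / UNREACHABLE() pair: the arguments are explicitly swallowed to silence unused-parameter warnings, and the compiler is told control never falls out of the function, so no dummy value has to be fabricated. A hedged sketch of the idiom with stand-in helpers (not ART's actual macros):

    #include <cstdio>
    #include <cstdlib>

    template <typename... T> static void Unused(const T&...) {}  // swallow arguments

    [[noreturn]] static void Fatal(const char* msg) {
      std::fprintf(stderr, "%s\n", msg);
      std::abort();  // never returns, so callers need no dummy return value
    }

    // A backend hook that must exist to satisfy the interface but is never valid here.
    static int DivRemLiteral(int dividend, int literal, bool is_div) {
      Unused(dividend, literal, is_div);  // keep -Wunused-parameter quiet
      Fatal("Unexpected use of DivRemLiteral on this target");
      // No "return 0;" needed: Fatal is [[noreturn]], mirroring LOG(FATAL) + UNREACHABLE().
    }
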
@@ -1481,6 +1499,7 @@
}
void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+ UNUSED(sreg);
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1489,19 +1508,21 @@
case 0:
NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
- case 1:
- LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, kNotVolatile);
+ case 1: {
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ LoadBaseDisp(rs_rSP, displacement, dest, k32, kNotVolatile);
break;
+ }
default:
m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
- rs_rX86_SP.GetReg(), displacement, val);
+ rs_rX86_SP_32.GetReg(), displacement, val);
AnnotateDalvikRegAccess(m, displacement >> 2, true /* is_load */, true /* is_64bit */);
break;
}
}
void X86Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
+ RegLocation rl_src2, int flags) {
if (!cu_->target64) {
// Some x86 32b ops are fallback.
switch (opcode) {
@@ -1510,7 +1531,7 @@
case Instruction::DIV_LONG_2ADDR:
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
- Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
return;
default:
@@ -1536,17 +1557,17 @@
case Instruction::MUL_LONG:
case Instruction::MUL_LONG_2ADDR:
- GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenMulLong(opcode, rl_dest, rl_src1, rl_src2, flags);
return;
case Instruction::DIV_LONG:
case Instruction::DIV_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
return;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
return;
case Instruction::AND_LONG_2ADDR:
@@ -1574,7 +1595,7 @@
}
}
-bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val) {
+bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val, int flags) {
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1592,14 +1613,14 @@
StoreValueWide(rl_dest, rl_src1);
return true;
} else if (val == 2) {
- GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
+ GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1, flags);
return true;
} else if (IsPowerOfTwo(val)) {
int shift_amount = LowestSetBit(val);
if (!PartiallyIntersects(rl_src1, rl_dest)) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest, rl_src1,
- shift_amount);
+ shift_amount, flags);
StoreValueWide(rl_dest, rl_result);
return true;
}
@@ -1611,7 +1632,7 @@
int32_t val_hi = High32Bits(val);
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
bool src1_in_reg = rl_src1.location == kLocPhysReg;
int displacement = SRegOffset(rl_src1.s_reg_low);
@@ -1635,7 +1656,7 @@
if (src1_in_reg) {
NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
- LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
+ LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
}
@@ -1653,13 +1674,13 @@
}
void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
+ RegLocation rl_src2, int flags) {
if (rl_src1.is_const) {
std::swap(rl_src1, rl_src2);
}
if (rl_src2.is_const) {
- if (GenMulLongConst(rl_dest, rl_src1, mir_graph_->ConstantValueWide(rl_src2))) {
+ if (GenMulLongConst(rl_dest, rl_src1, mir_graph_->ConstantValueWide(rl_src2), flags)) {
return;
}
}
@@ -1695,18 +1716,19 @@
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// At this point, the VRs are in their home locations.
bool src1_in_reg = rl_src1.location == kLocPhysReg;
bool src2_in_reg = rl_src2.location == kLocPhysReg;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
// ECX <- 1H
if (src1_in_reg) {
NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
kNotVolatile);
}
@@ -1717,7 +1739,7 @@
NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
- LIR* m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+ LIR* m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1730,7 +1752,7 @@
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
kNotVolatile);
}
@@ -1739,7 +1761,7 @@
NewLIR2(kX86Imul32RR, rs_r0.GetReg(), rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
- LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1750,7 +1772,7 @@
NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
- LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1764,7 +1786,7 @@
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
kNotVolatile);
}
@@ -1773,7 +1795,7 @@
NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
- LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
+ LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
}
@@ -1815,7 +1837,7 @@
// RHS is in memory.
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1832,12 +1854,12 @@
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if (rl_dest.location == kLocPhysReg) {
// Ensure we are in a register pair
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1845,7 +1867,7 @@
// Handle the case when src and dest are intersect.
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1858,7 +1880,7 @@
// Operate directly into memory.
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1905,7 +1927,7 @@
rl_result = ForceTempWide(rl_result);
// Perform the operation using the RHS.
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
GenLongRegOrMemOp(rl_result, rl_src2, op);
// And now record that the result is in the temp.
@@ -1914,10 +1936,9 @@
}
// It wasn't in registers, so it better be in memory.
- DCHECK((rl_dest.location == kLocDalvikFrame) ||
- (rl_dest.location == kLocCompilerTemp));
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ DCHECK((rl_dest.location == kLocDalvikFrame) || (rl_dest.location == kLocCompilerTemp));
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -2083,13 +2104,13 @@
NewLIR1(kX86Imul64DaR, numerator_reg.GetReg());
} else {
// Only need this once. Multiply directly from the value.
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
if (rl_src.location != kLocPhysReg) {
// Okay, we can do this from memory.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
int displacement = SRegOffset(rl_src.s_reg_low);
// RDX:RAX = magic * numerator.
- LIR *m = NewLIR2(kX86Imul64DaM, rs_rX86_SP.GetReg(), displacement);
+ LIR *m = NewLIR2(kX86Imul64DaM, rs_rX86_SP_32.GetReg(), displacement);
AnnotateDalvikRegAccess(m, displacement >> 2,
true /* is_load */, true /* is_64bit */);
} else {
@@ -2159,7 +2180,7 @@
}
void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div) {
+ RegLocation rl_src2, bool is_div, int flags) {
if (!cu_->target64) {
LOG(FATAL) << "Unexpected use GenDivRemLong()";
return;
@@ -2186,7 +2207,9 @@
NewLIR0(kx86Cqo64Da);
// Handle division by zero case.
- GenDivZeroCheckWide(rs_r1q);
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ GenDivZeroCheckWide(rs_r1q);
+ }
// Have to catch 0x8000000000000000/-1 case, or we will get an exception!
NewLIR2(kX86Cmp64RI8, rs_r1q.GetReg(), -1);
@@ -2227,13 +2250,6 @@
OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
} else {
rl_result = ForceTempWide(rl_src);
- if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
- ((rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()))) {
- // The registers are the same, so we would clobber it before the use.
- RegStorage temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_result.reg);
- rl_result.reg.SetHighReg(temp_reg.GetReg());
- }
OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_result.reg.GetLow()); // rLow = -rLow
OpRegImm(kOpAdc, rl_result.reg.GetHigh(), 0); // rHigh = rHigh + CF
OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_result.reg.GetHigh()); // rHigh = -rHigh
@@ -2387,7 +2403,8 @@
}
RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src, int shift_amount) {
+ RegLocation rl_src, int shift_amount, int flags) {
+ UNUSED(flags);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
@@ -2472,7 +2489,7 @@
}
void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src, RegLocation rl_shift) {
+ RegLocation rl_src, RegLocation rl_shift, int flags) {
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
if (shift_amount == 0) {
@@ -2482,7 +2499,7 @@
} else if (shift_amount == 1 &&
(opcode == Instruction::SHL_LONG || opcode == Instruction::SHL_LONG_2ADDR)) {
// Need to handle this here to avoid calling StoreValueWide twice.
- GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
+ GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src, flags);
return;
}
if (PartiallyIntersects(rl_src, rl_dest)) {
@@ -2490,12 +2507,13 @@
return;
}
rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = GenShiftImmOpLong(opcode, rl_dest, rl_src, shift_amount);
+ RegLocation rl_result = GenShiftImmOpLong(opcode, rl_dest, rl_src, shift_amount, flags);
StoreValueWide(rl_dest, rl_result);
}
void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ int flags) {
bool isConstSuccess = false;
switch (opcode) {
case Instruction::ADD_LONG:
@@ -2514,7 +2532,7 @@
if (rl_src2.is_const) {
isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
} else {
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
isConstSuccess = true;
}
break;
@@ -2540,7 +2558,7 @@
if (!isConstSuccess) {
// Default - bail to non-const handler.
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
}
@@ -2684,7 +2702,7 @@
return in_mem ? kX86Xor32MI : kX86Xor32RI;
default:
LOG(FATAL) << "Unexpected opcode: " << op;
- return kX86Add32MI;
+ UNREACHABLE();
}
}
@@ -2698,11 +2716,11 @@
return false;
}
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2728,12 +2746,12 @@
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2804,8 +2822,8 @@
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
@@ -2912,7 +2930,7 @@
}
void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_lhs, RegLocation rl_rhs) {
+ RegLocation rl_lhs, RegLocation rl_rhs, int flags) {
OpKind op = kOpBkpt;
bool is_div_rem = false;
bool unary = false;
@@ -3017,7 +3035,7 @@
// Get the div/rem stuff out of the way.
if (is_div_rem) {
- rl_result = GenDivRem(rl_dest, rl_lhs, rl_rhs, op == kOpDiv, true);
+ rl_result = GenDivRem(rl_dest, rl_lhs, rl_rhs, op == kOpDiv, flags);
StoreValue(rl_dest, rl_result);
return;
}
@@ -3027,7 +3045,7 @@
if (unary) {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegReg(op, rl_result.reg, rl_lhs.reg);
} else {
@@ -3037,7 +3055,7 @@
LoadValueDirectFixed(rl_rhs, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
OpMemReg(op, rl_result, t_reg.GetReg());
@@ -3060,12 +3078,12 @@
// Multiply is 3 operand only (sort of).
if (is_two_addr && op != kOpMul) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Can we do this from memory directly?
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_rhs = UpdateLocTyped(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
OpRegMem(op, rl_result.reg, rl_rhs);
StoreFinalValue(rl_dest, rl_result);
@@ -3080,7 +3098,7 @@
// It might happen rl_rhs and rl_dest are the same VR
// in this case rl_dest is in reg after LoadValue while
// rl_result is not updated yet, so do this
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory.
OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
@@ -3097,8 +3115,8 @@
}
} else {
// Try to use reg/memory instructions.
- rl_lhs = UpdateLocTyped(rl_lhs, kCoreReg);
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_lhs = UpdateLocTyped(rl_lhs);
+ rl_rhs = UpdateLocTyped(rl_rhs);
// We can't optimize with FP registers.
if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
// Something is difficult, so fall back to the standard case.
@@ -3170,14 +3188,14 @@
Mir2Lir::GenIntToLong(rl_dest, rl_src);
return;
}
- rl_src = UpdateLocTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocTyped(rl_src);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
} else {
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -3270,7 +3288,7 @@
LoadValueDirectFixed(rl_shift, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocWideTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
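
Throughout int_x86.cc the former bool check_zero parameter is replaced by an int flags bitmask threaded down from the MIR, so helpers such as GenDivRem and GenDivRemLong emit the divide-by-zero check only when MIR_IGNORE_DIV_ZERO_CHECK is not set. A small sketch of that plumbing pattern, with illustrative names and a thrown exception standing in for the generated runtime check:

    #include <cstdint>
    #include <stdexcept>

    enum : int { kIgnoreDivZeroCheck = 1 << 0 };  // stand-in for MIR_IGNORE_DIV_ZERO_CHECK

    static int64_t DivRem64(int64_t dividend, int64_t divisor, bool is_div, int flags) {
      if ((flags & kIgnoreDivZeroCheck) == 0) {
        if (divisor == 0) {
          throw std::domain_error("divide by zero");  // models GenDivZeroCheck[Wide]()
        }
      }
      return is_div ? dividend / divisor : dividend % divisor;
    }
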
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 2ef4c21..998aeff 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -18,6 +18,7 @@
#include <inttypes.h>
#include <string>
+#include "arch/instruction_set_features.h"
#include "backend_x86.h"
#include "codegen_x86.h"
#include "dex/compiler_internals.h"
@@ -141,46 +142,6 @@
static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
-RegStorage rs_rX86_SP;
-
-X86NativeRegisterPool rX86_ARG0;
-X86NativeRegisterPool rX86_ARG1;
-X86NativeRegisterPool rX86_ARG2;
-X86NativeRegisterPool rX86_ARG3;
-X86NativeRegisterPool rX86_ARG4;
-X86NativeRegisterPool rX86_ARG5;
-X86NativeRegisterPool rX86_FARG0;
-X86NativeRegisterPool rX86_FARG1;
-X86NativeRegisterPool rX86_FARG2;
-X86NativeRegisterPool rX86_FARG3;
-X86NativeRegisterPool rX86_FARG4;
-X86NativeRegisterPool rX86_FARG5;
-X86NativeRegisterPool rX86_FARG6;
-X86NativeRegisterPool rX86_FARG7;
-X86NativeRegisterPool rX86_RET0;
-X86NativeRegisterPool rX86_RET1;
-X86NativeRegisterPool rX86_INVOKE_TGT;
-X86NativeRegisterPool rX86_COUNT;
-
-RegStorage rs_rX86_ARG0;
-RegStorage rs_rX86_ARG1;
-RegStorage rs_rX86_ARG2;
-RegStorage rs_rX86_ARG3;
-RegStorage rs_rX86_ARG4;
-RegStorage rs_rX86_ARG5;
-RegStorage rs_rX86_FARG0;
-RegStorage rs_rX86_FARG1;
-RegStorage rs_rX86_FARG2;
-RegStorage rs_rX86_FARG3;
-RegStorage rs_rX86_FARG4;
-RegStorage rs_rX86_FARG5;
-RegStorage rs_rX86_FARG6;
-RegStorage rs_rX86_FARG7;
-RegStorage rs_rX86_RET0;
-RegStorage rs_rX86_RET1;
-RegStorage rs_rX86_INVOKE_TGT;
-RegStorage rs_rX86_COUNT;
-
RegLocation X86Mir2Lir::LocCReturn() {
return x86_loc_c_return;
}
@@ -201,44 +162,100 @@
return x86_loc_c_return_double;
}
+// 32-bit reg storage locations for 32-bit targets.
+static const RegStorage RegStorage32FromSpecialTargetRegister_Target32[] {
+ RegStorage::InvalidReg(), // kSelf - Thread pointer.
+ RegStorage::InvalidReg(), // kSuspend - Used to reduce suspend checks for some targets.
+ RegStorage::InvalidReg(), // kLr - no register as the return address is pushed on entry.
+ RegStorage::InvalidReg(), // kPc - not exposed on X86 see kX86StartOfMethod.
+ rs_rX86_SP_32, // kSp
+ rs_rAX, // kArg0
+ rs_rCX, // kArg1
+ rs_rDX, // kArg2
+ rs_rBX, // kArg3
+ RegStorage::InvalidReg(), // kArg4
+ RegStorage::InvalidReg(), // kArg5
+ RegStorage::InvalidReg(), // kArg6
+ RegStorage::InvalidReg(), // kArg7
+ rs_rAX, // kFArg0
+ rs_rCX, // kFArg1
+ rs_rDX, // kFArg2
+ rs_rBX, // kFArg3
+ RegStorage::InvalidReg(), // kFArg4
+ RegStorage::InvalidReg(), // kFArg5
+ RegStorage::InvalidReg(), // kFArg6
+ RegStorage::InvalidReg(), // kFArg7
+ RegStorage::InvalidReg(), // kFArg8
+ RegStorage::InvalidReg(), // kFArg9
+ RegStorage::InvalidReg(), // kFArg10
+ RegStorage::InvalidReg(), // kFArg11
+ RegStorage::InvalidReg(), // kFArg12
+ RegStorage::InvalidReg(), // kFArg13
+ RegStorage::InvalidReg(), // kFArg14
+ RegStorage::InvalidReg(), // kFArg15
+ rs_rAX, // kRet0
+ rs_rDX, // kRet1
+ rs_rAX, // kInvokeTgt
+ rs_rAX, // kHiddenArg - used to hold the method index before copying to fr0.
+ rs_fr0, // kHiddenFpArg
+ rs_rCX, // kCount
+};
+
+// 32-bit reg storage locations for 64-bit targets.
+static const RegStorage RegStorage32FromSpecialTargetRegister_Target64[] {
+ RegStorage::InvalidReg(), // kSelf - Thread pointer.
+ RegStorage::InvalidReg(), // kSuspend - Used to reduce suspend checks for some targets.
+ RegStorage::InvalidReg(), // kLr - no register as the return address is pushed on entry.
+ RegStorage::InvalidReg(), // kPc - TODO: RIP based addressing.
+ rs_rX86_SP_32, // kSp
+ rs_rDI, // kArg0
+ rs_rSI, // kArg1
+ rs_rDX, // kArg2
+ rs_rCX, // kArg3
+ rs_r8, // kArg4
+ rs_r9, // kArg5
+ RegStorage::InvalidReg(), // kArg6
+ RegStorage::InvalidReg(), // kArg7
+ rs_fr0, // kFArg0
+ rs_fr1, // kFArg1
+ rs_fr2, // kFArg2
+ rs_fr3, // kFArg3
+ rs_fr4, // kFArg4
+ rs_fr5, // kFArg5
+ rs_fr6, // kFArg6
+ rs_fr7, // kFArg7
+ RegStorage::InvalidReg(), // kFArg8
+ RegStorage::InvalidReg(), // kFArg9
+ RegStorage::InvalidReg(), // kFArg10
+ RegStorage::InvalidReg(), // kFArg11
+ RegStorage::InvalidReg(), // kFArg12
+ RegStorage::InvalidReg(), // kFArg13
+ RegStorage::InvalidReg(), // kFArg14
+ RegStorage::InvalidReg(), // kFArg15
+ rs_rAX, // kRet0
+ rs_rDX, // kRet1
+ rs_rAX, // kInvokeTgt
+ rs_rAX, // kHiddenArg
+ RegStorage::InvalidReg(), // kHiddenFpArg
+ rs_rCX, // kCount
+};
+static_assert(arraysize(RegStorage32FromSpecialTargetRegister_Target32) ==
+ arraysize(RegStorage32FromSpecialTargetRegister_Target64),
+ "Mismatch in RegStorage array sizes");
+
// Return a target-dependent special register for 32-bit.
-RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
- RegStorage res_reg = RegStorage::InvalidReg();
- switch (reg) {
- case kSelf: res_reg = RegStorage::InvalidReg(); break;
- case kSuspend: res_reg = RegStorage::InvalidReg(); break;
- case kLr: res_reg = RegStorage::InvalidReg(); break;
- case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_rX86_SP_32; break; // This must be the concrete one, as _SP is target-
- // specific size.
- case kArg0: res_reg = rs_rX86_ARG0; break;
- case kArg1: res_reg = rs_rX86_ARG1; break;
- case kArg2: res_reg = rs_rX86_ARG2; break;
- case kArg3: res_reg = rs_rX86_ARG3; break;
- case kArg4: res_reg = rs_rX86_ARG4; break;
- case kArg5: res_reg = rs_rX86_ARG5; break;
- case kFArg0: res_reg = rs_rX86_FARG0; break;
- case kFArg1: res_reg = rs_rX86_FARG1; break;
- case kFArg2: res_reg = rs_rX86_FARG2; break;
- case kFArg3: res_reg = rs_rX86_FARG3; break;
- case kFArg4: res_reg = rs_rX86_FARG4; break;
- case kFArg5: res_reg = rs_rX86_FARG5; break;
- case kFArg6: res_reg = rs_rX86_FARG6; break;
- case kFArg7: res_reg = rs_rX86_FARG7; break;
- case kRet0: res_reg = rs_rX86_RET0; break;
- case kRet1: res_reg = rs_rX86_RET1; break;
- case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
- case kHiddenArg: res_reg = rs_rAX; break;
- case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
- case kCount: res_reg = rs_rX86_COUNT; break;
- default: res_reg = RegStorage::InvalidReg();
- }
- return res_reg;
+RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const {
+ DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target32[kCount], rs_rCX);
+ DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target64[kCount], rs_rCX);
+ DCHECK_LT(reg, arraysize(RegStorage32FromSpecialTargetRegister_Target32));
+ return cu_->target64 ? RegStorage32FromSpecialTargetRegister_Target64[reg]
+ : RegStorage32FromSpecialTargetRegister_Target32[reg];
}
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ UNUSED(reg);
LOG(FATAL) << "Do not use this function!!!";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
/*
@@ -451,7 +468,7 @@
RegStorage X86Mir2Lir::AllocateByteRegister() {
RegStorage reg = AllocTypedTemp(false, kCoreReg);
if (!cu_->target64) {
- DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
+ DCHECK_LT(reg.GetRegNum(), rs_rX86_SP_32.GetRegNum());
}
return reg;
}
@@ -460,8 +477,8 @@
return GetRegInfo(reg)->Master()->GetReg();
}
-bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
- return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
+bool X86Mir2Lir::IsByteRegister(RegStorage reg) const {
+ return cu_->target64 || reg.GetRegNum() < rs_rX86_SP_32.GetRegNum();
}
/* Clobber all regs that might be used by an external C call */
@@ -501,8 +518,8 @@
RegLocation X86Mir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
- DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
+ DCHECK_EQ(res.reg.GetLowReg(), rs_rAX.GetReg());
+ DCHECK_EQ(res.reg.GetHighReg(), rs_rDX.GetReg());
Clobber(rs_rAX);
Clobber(rs_rDX);
MarkInUse(rs_rAX);
@@ -521,41 +538,41 @@
/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
- LockTemp(rs_rX86_ARG0);
- LockTemp(rs_rX86_ARG1);
- LockTemp(rs_rX86_ARG2);
- LockTemp(rs_rX86_ARG3);
+ LockTemp(TargetReg32(kArg0));
+ LockTemp(TargetReg32(kArg1));
+ LockTemp(TargetReg32(kArg2));
+ LockTemp(TargetReg32(kArg3));
if (cu_->target64) {
- LockTemp(rs_rX86_ARG4);
- LockTemp(rs_rX86_ARG5);
- LockTemp(rs_rX86_FARG0);
- LockTemp(rs_rX86_FARG1);
- LockTemp(rs_rX86_FARG2);
- LockTemp(rs_rX86_FARG3);
- LockTemp(rs_rX86_FARG4);
- LockTemp(rs_rX86_FARG5);
- LockTemp(rs_rX86_FARG6);
- LockTemp(rs_rX86_FARG7);
+ LockTemp(TargetReg32(kArg4));
+ LockTemp(TargetReg32(kArg5));
+ LockTemp(TargetReg32(kFArg0));
+ LockTemp(TargetReg32(kFArg1));
+ LockTemp(TargetReg32(kFArg2));
+ LockTemp(TargetReg32(kFArg3));
+ LockTemp(TargetReg32(kFArg4));
+ LockTemp(TargetReg32(kFArg5));
+ LockTemp(TargetReg32(kFArg6));
+ LockTemp(TargetReg32(kFArg7));
}
}
/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
- FreeTemp(rs_rX86_ARG0);
- FreeTemp(rs_rX86_ARG1);
- FreeTemp(rs_rX86_ARG2);
- FreeTemp(rs_rX86_ARG3);
+ FreeTemp(TargetReg32(kArg0));
+ FreeTemp(TargetReg32(kArg1));
+ FreeTemp(TargetReg32(kArg2));
+ FreeTemp(TargetReg32(kArg3));
if (cu_->target64) {
- FreeTemp(rs_rX86_ARG4);
- FreeTemp(rs_rX86_ARG5);
- FreeTemp(rs_rX86_FARG0);
- FreeTemp(rs_rX86_FARG1);
- FreeTemp(rs_rX86_FARG2);
- FreeTemp(rs_rX86_FARG3);
- FreeTemp(rs_rX86_FARG4);
- FreeTemp(rs_rX86_FARG5);
- FreeTemp(rs_rX86_FARG6);
- FreeTemp(rs_rX86_FARG7);
+ FreeTemp(TargetReg32(kArg4));
+ FreeTemp(TargetReg32(kArg5));
+ FreeTemp(TargetReg32(kFArg0));
+ FreeTemp(TargetReg32(kFArg1));
+ FreeTemp(TargetReg32(kFArg2));
+ FreeTemp(TargetReg32(kFArg3));
+ FreeTemp(TargetReg32(kFArg4));
+ FreeTemp(TargetReg32(kFArg5));
+ FreeTemp(TargetReg32(kFArg6));
+ FreeTemp(TargetReg32(kFArg7));
}
}
@@ -578,7 +595,9 @@
}
bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP != 0
+ if (!cu_->GetInstructionSetFeatures()->IsSmp()) {
+ return false;
+ }
// Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
LIR* mem_barrier = last_lir_insn_;
@@ -614,9 +633,6 @@
mem_barrier->u.m.def_mask = &kEncodeAll;
}
return ret;
-#else
- return false;
-#endif
}
void X86Mir2Lir::CompilerInitializeRegAlloc() {
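
GenMemBarrier() now decides per compilation, by querying the target's InstructionSetFeatures, whether a fence is needed, instead of being compiled out with #if ANDROID_SMP. Roughly the shape of that change, with hypothetical stand-in types:

    struct InstructionSetFeatures {        // hypothetical stand-in for ART's class
      bool smp;
      bool IsSmp() const { return smp; }
    };

    class BarrierEmitter {
     public:
      explicit BarrierEmitter(const InstructionSetFeatures* features) : features_(features) {}

      // Returns true when a barrier instruction is actually required.
      bool GenMemBarrier() {
        if (!features_->IsSmp()) {
          return false;  // uniprocessor target: no fence needed
        }
        // ... emit mfence or mark the last instruction as a scheduling barrier ...
        return true;
      }

     private:
      const InstructionSetFeatures* const features_;
    };
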
@@ -705,11 +721,14 @@
}
// Spill mask not including fake return address register
uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
+ int offset =
+ frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
OpSize size = cu_->target64 ? k64 : k32;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+ StoreBaseDisp(rs_rSP, offset,
+ cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
size, kNotVolatile);
offset += GetInstructionSetPointerSize(cu_->instruction_set);
}
@@ -724,9 +743,10 @@
uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
OpSize size = cu_->target64 ? k64 : k32;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+ LoadBaseDisp(rs_rSP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
size, kNotVolatile);
offset += GetInstructionSetPointerSize(cu_->instruction_set);
}
@@ -738,11 +758,12 @@
return;
}
uint32_t mask = fp_spill_mask_;
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ int offset = frame_size_ -
+ (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
- k64, kNotVolatile);
+ StoreBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg), k64, kNotVolatile);
offset += sizeof(double);
}
}
@@ -752,10 +773,12 @@
return;
}
uint32_t mask = fp_spill_mask_;
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ int offset = frame_size_ -
+ (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
+ LoadBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg),
k64, kNotVolatile);
offset += sizeof(double);
}
@@ -795,84 +818,12 @@
class_type_address_insns_.reserve(100);
call_method_insns_.reserve(100);
store_method_addr_used_ = false;
- if (kIsDebugBuild) {
for (int i = 0; i < kX86Last; i++) {
- if (X86Mir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
- }
- }
+ DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
}
- if (cu_->target64) {
- rs_rX86_SP = rs_rX86_SP_64;
-
- rs_rX86_ARG0 = rs_rDI;
- rs_rX86_ARG1 = rs_rSI;
- rs_rX86_ARG2 = rs_rDX;
- rs_rX86_ARG3 = rs_rCX;
- rs_rX86_ARG4 = rs_r8;
- rs_rX86_ARG5 = rs_r9;
- rs_rX86_FARG0 = rs_fr0;
- rs_rX86_FARG1 = rs_fr1;
- rs_rX86_FARG2 = rs_fr2;
- rs_rX86_FARG3 = rs_fr3;
- rs_rX86_FARG4 = rs_fr4;
- rs_rX86_FARG5 = rs_fr5;
- rs_rX86_FARG6 = rs_fr6;
- rs_rX86_FARG7 = rs_fr7;
- rX86_ARG0 = rDI;
- rX86_ARG1 = rSI;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rCX;
- rX86_ARG4 = r8;
- rX86_ARG5 = r9;
- rX86_FARG0 = fr0;
- rX86_FARG1 = fr1;
- rX86_FARG2 = fr2;
- rX86_FARG3 = fr3;
- rX86_FARG4 = fr4;
- rX86_FARG5 = fr5;
- rX86_FARG6 = fr6;
- rX86_FARG7 = fr7;
- rs_rX86_INVOKE_TGT = rs_rDI;
- } else {
- rs_rX86_SP = rs_rX86_SP_32;
-
- rs_rX86_ARG0 = rs_rAX;
- rs_rX86_ARG1 = rs_rCX;
- rs_rX86_ARG2 = rs_rDX;
- rs_rX86_ARG3 = rs_rBX;
- rs_rX86_ARG4 = RegStorage::InvalidReg();
- rs_rX86_ARG5 = RegStorage::InvalidReg();
- rs_rX86_FARG0 = rs_rAX;
- rs_rX86_FARG1 = rs_rCX;
- rs_rX86_FARG2 = rs_rDX;
- rs_rX86_FARG3 = rs_rBX;
- rs_rX86_FARG4 = RegStorage::InvalidReg();
- rs_rX86_FARG5 = RegStorage::InvalidReg();
- rs_rX86_FARG6 = RegStorage::InvalidReg();
- rs_rX86_FARG7 = RegStorage::InvalidReg();
- rX86_ARG0 = rAX;
- rX86_ARG1 = rCX;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rBX;
- rX86_FARG0 = rAX;
- rX86_FARG1 = rCX;
- rX86_FARG2 = rDX;
- rX86_FARG3 = rBX;
- rs_rX86_INVOKE_TGT = rs_rAX;
- // TODO(64): Initialize with invalid reg
-// rX86_ARG4 = RegStorage::InvalidReg();
-// rX86_ARG5 = RegStorage::InvalidReg();
- }
- rs_rX86_RET0 = rs_rAX;
- rs_rX86_RET1 = rs_rDX;
- rs_rX86_COUNT = rs_rCX;
- rX86_RET0 = rAX;
- rX86_RET1 = rDX;
- rX86_INVOKE_TGT = rAX;
- rX86_COUNT = rCX;
}
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -882,8 +833,9 @@
// Not used in x86(-64)
RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline);
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -921,7 +873,7 @@
(rl_dest.location == kLocCompilerTemp)) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1054,7 +1006,8 @@
call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
} else {
call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef),
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ cu_->target64 ? 8 : 4).Int32Value());
}
} else {
call_insn = GenInvokeNoInlineCall(this, method_info.GetSharpType());
@@ -1373,7 +1326,7 @@
// Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Load32Disp(rs_rX86_SP, displacement, rs_rDI);
+ Load32Disp(rs_rX86_SP_32, displacement, rs_rDI);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
@@ -1548,46 +1501,46 @@
ReturnVectorRegisters(mir);
break;
case kMirOpConstVector:
- GenConst128(bb, mir);
+ GenConst128(mir);
break;
case kMirOpMoveVector:
- GenMoveVector(bb, mir);
+ GenMoveVector(mir);
break;
case kMirOpPackedMultiply:
- GenMultiplyVector(bb, mir);
+ GenMultiplyVector(mir);
break;
case kMirOpPackedAddition:
- GenAddVector(bb, mir);
+ GenAddVector(mir);
break;
case kMirOpPackedSubtract:
- GenSubtractVector(bb, mir);
+ GenSubtractVector(mir);
break;
case kMirOpPackedShiftLeft:
- GenShiftLeftVector(bb, mir);
+ GenShiftLeftVector(mir);
break;
case kMirOpPackedSignedShiftRight:
- GenSignedShiftRightVector(bb, mir);
+ GenSignedShiftRightVector(mir);
break;
case kMirOpPackedUnsignedShiftRight:
- GenUnsignedShiftRightVector(bb, mir);
+ GenUnsignedShiftRightVector(mir);
break;
case kMirOpPackedAnd:
- GenAndVector(bb, mir);
+ GenAndVector(mir);
break;
case kMirOpPackedOr:
- GenOrVector(bb, mir);
+ GenOrVector(mir);
break;
case kMirOpPackedXor:
- GenXorVector(bb, mir);
+ GenXorVector(mir);
break;
case kMirOpPackedAddReduce:
- GenAddReduceVector(bb, mir);
+ GenAddReduceVector(mir);
break;
case kMirOpPackedReduce:
- GenReduceVector(bb, mir);
+ GenReduceVector(mir);
break;
case kMirOpPackedSet:
- GenSetVector(bb, mir);
+ GenSetVector(mir);
break;
case kMirOpMemBarrier:
GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
@@ -1638,7 +1591,7 @@
}
}
-void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
+void X86Mir2Lir::GenConst128(MIR* mir) {
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
Clobber(rs_dest);
@@ -1689,7 +1642,7 @@
load->target = data_target;
}
-void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMoveVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1805,7 +1758,7 @@
NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
}
-void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1839,7 +1792,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1874,7 +1827,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSubtractVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1909,7 +1862,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
// Destination does not need clobbered because it has already been as part
// of the general packed shift handler (caller of this method).
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1953,7 +1906,7 @@
AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
}
-void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1973,7 +1926,7 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector shift left " << opsize;
@@ -1982,7 +1935,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1999,18 +1952,18 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
case k64:
// TODO Implement emulated shift algorithm.
default:
LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
- break;
+ UNREACHABLE();
}
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2030,7 +1983,7 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
@@ -2039,7 +1992,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAndVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2048,7 +2001,7 @@
NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenOrVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2057,7 +2010,7 @@
NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenXorVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2084,7 +2037,7 @@
AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
}
-void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
bool is_wide = opsize == k64 || opsize == kDouble;
@@ -2166,7 +2119,7 @@
NewLIR2(kX86MovdrxRR, temp_loc.reg.GetHighReg(), vector_src.GetReg());
}
- GenArithOpLong(Instruction::ADD_LONG_2ADDR, rl_dest, temp_loc, temp_loc);
+ GenArithOpLong(Instruction::ADD_LONG_2ADDR, rl_dest, temp_loc, temp_loc, mir->optimization_flags);
} else if (opsize == kSignedByte || opsize == kUnsignedByte) {
RegStorage rs_tmp = Get128BitRegister(AllocTempDouble());
NewLIR2(kX86PxorRR, rs_tmp.GetReg(), rs_tmp.GetReg());
@@ -2219,7 +2172,7 @@
// except the rhs is not a VR but a physical register allocated above.
// No load of source VR is done because it assumes that rl_result will
// share physical register / memory location.
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -2232,7 +2185,7 @@
}
}
-void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegLocation rl_dest = mir_graph_->GetDest(mir);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
@@ -2286,7 +2239,7 @@
} else {
int extract_index = mir->dalvikInsn.arg[0];
int extr_opcode = 0;
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
// Handle the rest of integral types now.
switch (opsize) {
@@ -2302,7 +2255,7 @@
break;
default:
LOG(FATAL) << "Unsupported vector reduce " << opsize;
- return;
+ UNREACHABLE();
}
if (rl_result.location == kLocPhysReg) {
@@ -2310,7 +2263,8 @@
StoreFinalValue(rl_dest, rl_result);
} else {
int displacement = SRegOffset(rl_result.s_reg_low);
- LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, vector_src.GetReg());
+ LIR *l = NewLIR4(extr_opcode, rs_rX86_SP_32.GetReg(), displacement, vector_src.GetReg(),
+ extract_index);
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
}
@@ -2331,7 +2285,7 @@
}
}
-void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSetVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2406,11 +2360,13 @@
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}
@@ -2506,18 +2462,14 @@
return in_to_reg_storage_mapping_.Get(arg_num);
}
-RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
+RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) const {
// For the 32-bit internal ABI, the first 3 arguments are passed in registers.
// Not used for 64-bit, TODO: Move X86_32 to the same framework
switch (core_arg_num) {
- case 0:
- return rs_rX86_ARG1;
- case 1:
- return rs_rX86_ARG2;
- case 2:
- return rs_rX86_ARG3;
- default:
- return RegStorage::InvalidReg();
+ case 0: return TargetReg32(kArg1);
+ case 1: return TargetReg32(kArg2);
+ case 2: return TargetReg32(kArg3);
+ default: return RegStorage::InvalidReg();
}
}
@@ -2547,7 +2499,8 @@
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ StoreRefDisp(rs_rSP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
}
if (mir_graph_->GetNumOfInVRs() == 0) {
@@ -2584,9 +2537,9 @@
} else {
// Needs flush.
if (t_loc->ref) {
- StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
+ StoreRefDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), reg, kNotVolatile);
} else {
- StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
kNotVolatile);
}
}
@@ -2594,9 +2547,9 @@
// If arriving in frame & promoted.
if (t_loc->location == kLocPhysReg) {
if (t_loc->ref) {
- LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
+ LoadRefDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
+ LoadBaseDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), t_loc->reg,
t_loc->wide ? k64 : k32, kNotVolatile);
}
}
@@ -2622,16 +2575,16 @@
uintptr_t direct_method, InvokeType type, bool skip_this) {
if (!cu_->target64) {
return Mir2Lir::GenDalvikArgsNoRange(info,
- call_state, pcrLabel, next_call_insn,
- target_method,
- vtable_idx, direct_code,
- direct_method, type, skip_this);
+ call_state, pcrLabel, next_call_insn,
+ target_method,
+ vtable_idx, direct_code,
+ direct_method, type, skip_this);
}
return GenDalvikArgsRange(info,
- call_state, pcrLabel, next_call_insn,
- target_method,
- vtable_idx, direct_code,
- direct_method, type, skip_this);
+ call_state, pcrLabel, next_call_insn,
+ target_method,
+ vtable_idx, direct_code,
+ direct_method, type, skip_this);
}
/*
@@ -2687,14 +2640,14 @@
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
}
next_arg++;
}
@@ -2747,25 +2700,25 @@
bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
- ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ ScopedMemRefType mem_ref_type2(this, ResourceMask::kDalvikReg);
if (src_is_16b_aligned) {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovA128FP);
} else if (src_is_8b_aligned) {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
- ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovLo128FP);
+ ld2 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset + (bytes_to_move >> 1),
kMovHi128FP);
} else {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovU128FP);
}
if (dest_is_16b_aligned) {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovA128FP);
} else if (dest_is_8b_aligned) {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
- st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovLo128FP);
+ st2 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset + (bytes_to_move >> 1),
temp, kMovHi128FP);
} else {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovU128FP);
}
// TODO If we could keep track of aliasing information for memory accesses that are wider
@@ -2802,8 +2755,8 @@
RegStorage temp = TargetReg(kArg3, kNotWide);
// Now load the argument VR and store to the outs.
- Load32Disp(rs_rX86_SP, current_src_offset, temp);
- Store32Disp(rs_rX86_SP, current_dest_offset, temp);
+ Load32Disp(rs_rX86_SP_64, current_src_offset, temp);
+ Store32Disp(rs_rX86_SP_64, current_dest_offset, temp);
}
current_src_offset += bytes_to_move;
@@ -2829,17 +2782,17 @@
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, regWide, k64, kNotVolatile);
}
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, rl_arg.reg, k32, kNotVolatile);
} else {
LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, regSingle, k32, kNotVolatile);
}
}
}
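
The TargetReg32() rewrite in target_x86.cc replaces a long per-register switch with two parallel lookup tables indexed by the SpecialTargetRegister enum, one for 32-bit and one for 64-bit targets, selected by cu_->target64. A compact sketch of the same table-driven scheme (enum values and register names abbreviated, not the full ART mapping):

    #include <cassert>
    #include <cstddef>

    enum SpecialReg { kSp, kArg0, kArg1, kArg2, kArg3, kRet0, kRet1, kSpecialRegCount };

    static const char* const kRegs32[kSpecialRegCount] = {
      "esp", "eax", "ecx", "edx", "ebx", "eax", "edx",  // 32-bit mapping
    };
    static const char* const kRegs64[kSpecialRegCount] = {
      "rsp", "rdi", "rsi", "rdx", "rcx", "rax", "rdx",  // 64-bit calling-convention mapping
    };
    static_assert(sizeof(kRegs32) == sizeof(kRegs64), "mismatched mapping tables");

    static const char* TargetReg32(SpecialReg reg, bool target64) {
      assert(static_cast<size_t>(reg) < kSpecialRegCount);
      return target64 ? kRegs64[reg] : kRegs32[reg];
    }
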
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 8d5dabc..ad3222c 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -54,14 +54,16 @@
}
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
+ UNUSED(value);
return true;
}
bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
- return false;
+ return value == 0;
}
bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return true;
}
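
The change to InexpensiveConstantFloat() treats only a zero bit pattern as cheap: +0.0f can be materialized with a register self-XOR, while any other float constant forces a constant-pool load. An illustrative check along those lines (ART passes the raw int32 bit pattern; this sketch starts from a float for clarity):

    #include <cstdint>
    #include <cstring>

    static bool IsInexpensiveFloatConstant(float constant) {
      uint32_t bits;
      std::memcpy(&bits, &constant, sizeof(bits));
      return bits == 0u;  // only +0.0f qualifies; -0.0f and everything else need a load
    }
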
@@ -228,7 +230,7 @@
// TODO: there are several instances of this check. A utility function perhaps?
// TODO: Similar to Arm's reg < 8 check. Perhaps add attribute checks to RegStorage?
// Use shifts instead of a byte operand if the source can't be byte accessed.
- if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
+ if (r_src2.GetRegNum() >= rs_rX86_SP_32.GetRegNum()) {
NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
@@ -383,7 +385,7 @@
}
LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
}
return l;
@@ -409,7 +411,7 @@
LOG(FATAL) << "Bad case in OpMemReg " << op;
break;
}
- LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
+ LIR *l = NewLIR3(opcode, rs_rX86_SP_32.GetReg(), displacement, r_value);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
@@ -435,7 +437,7 @@
LOG(FATAL) << "Bad case in OpRegMem " << op;
break;
}
- LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
+ LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP_32.GetReg(), displacement);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
}
@@ -486,6 +488,7 @@
case kOpAdc:
case kOpAnd:
case kOpXor:
+ case kOpMul:
break;
default:
LOG(FATAL) << "Bad case in OpRegRegReg " << op;
@@ -512,7 +515,7 @@
r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
} else if (op == kOpAdd) { // lea add special case
return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
- r_src.GetReg() /* base */, rs_rX86_SP.GetReg()/*r4sib_no_index*/ /* index */,
+ r_src.GetReg() /* base */, rs_rX86_SP_32.GetReg()/*r4sib_no_index*/ /* index */,
0 /* scale */, value /* disp */);
}
OpRegCopy(r_dest, r_src);
@@ -703,7 +706,7 @@
}
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, is64bit);
if (pair) {
@@ -868,7 +871,7 @@
store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, is64bit);
if (pair) {
@@ -934,13 +937,14 @@
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) {
- LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
- offset, check_value);
- if (compare != nullptr) {
- *compare = inst;
- }
- LIR* branch = OpCondBranch(cond, target);
- return branch;
+ UNUSED(temp_reg); // Comparison performed directly with memory.
+ LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+ offset, check_value);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
+ LIR* branch = OpCondBranch(cond, target);
+ return branch;
}
void X86Mir2Lir::AnalyzeMIR() {
@@ -965,13 +969,13 @@
}
}
-void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
+void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
if (bb->block_type == kDead) {
// Ignore dead blocks
return;
}
- for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
AnalyzeExtendedMIR(opcode, bb, mir);
@@ -982,7 +986,7 @@
}
-void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
switch (opcode) {
// Instructions referencing doubles.
case kMirOpFusedCmplDouble:
@@ -1009,7 +1013,7 @@
}
}
-void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
// Looking for
// - Do we need a pointer to the code (used for packed switches and double lits)?
@@ -1046,7 +1050,8 @@
}
}
-void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
// Look at all the uses, and see if they are double constants.
uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
int next_sreg = 0;
@@ -1080,7 +1085,7 @@
}
}
-RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
loc = UpdateLoc(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1094,7 +1099,7 @@
return loc;
}
-RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
loc = UpdateLocWide(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1108,7 +1113,8 @@
return loc;
}
-void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(opcode, bb);
// For now this is only actual for x86-32.
if (cu_->target64) {
return;
@@ -1132,6 +1138,7 @@
}
LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register.
if (cu_->target64) {
return OpThreadMem(op, GetThreadOffset<8>(trampoline));
} else {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 22a2f30..76a67c4 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -234,7 +234,7 @@
constexpr RegStorage rs_rBX = rs_r3;
constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
-extern RegStorage rs_rX86_SP;
+static_assert(rs_rX86_SP_64.GetRegNum() == rs_rX86_SP_32.GetRegNum(), "Unexpected mismatch");
constexpr RegStorage rs_r5(RegStorage::kValid | r5);
constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
constexpr RegStorage rs_rBP = rs_r5;
@@ -313,43 +313,8 @@
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
-extern X86NativeRegisterPool rX86_ARG0;
-extern X86NativeRegisterPool rX86_ARG1;
-extern X86NativeRegisterPool rX86_ARG2;
-extern X86NativeRegisterPool rX86_ARG3;
-extern X86NativeRegisterPool rX86_ARG4;
-extern X86NativeRegisterPool rX86_ARG5;
-extern X86NativeRegisterPool rX86_FARG0;
-extern X86NativeRegisterPool rX86_FARG1;
-extern X86NativeRegisterPool rX86_FARG2;
-extern X86NativeRegisterPool rX86_FARG3;
-extern X86NativeRegisterPool rX86_FARG4;
-extern X86NativeRegisterPool rX86_FARG5;
-extern X86NativeRegisterPool rX86_FARG6;
-extern X86NativeRegisterPool rX86_FARG7;
-extern X86NativeRegisterPool rX86_RET0;
-extern X86NativeRegisterPool rX86_RET1;
-extern X86NativeRegisterPool rX86_INVOKE_TGT;
-extern X86NativeRegisterPool rX86_COUNT;
-
-extern RegStorage rs_rX86_ARG0;
-extern RegStorage rs_rX86_ARG1;
-extern RegStorage rs_rX86_ARG2;
-extern RegStorage rs_rX86_ARG3;
-extern RegStorage rs_rX86_ARG4;
-extern RegStorage rs_rX86_ARG5;
-extern RegStorage rs_rX86_FARG0;
-extern RegStorage rs_rX86_FARG1;
-extern RegStorage rs_rX86_FARG2;
-extern RegStorage rs_rX86_FARG3;
-extern RegStorage rs_rX86_FARG4;
-extern RegStorage rs_rX86_FARG5;
-extern RegStorage rs_rX86_FARG6;
-extern RegStorage rs_rX86_FARG7;
-extern RegStorage rs_rX86_RET0;
-extern RegStorage rs_rX86_RET1;
-extern RegStorage rs_rX86_INVOKE_TGT;
-extern RegStorage rs_rX86_COUNT;
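+// The return registers are the same for x86 and x86-64, so these can be compile-time constants.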
+constexpr RegStorage rs_rX86_RET0 = rs_rAX;
+constexpr RegStorage rs_rX86_RET1 = rs_rDX;
// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation x86_loc_c_return
@@ -674,6 +639,7 @@
kX86RepneScasw, // repne scasw
kX86Last
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
/* Instruction assembly field_loc kind */
enum X86EncodingKind {
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 706933a..46ed011 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_REG_STORAGE_H_
#include "base/logging.h"
+#include "base/value_object.h"
#include "compiler_enums.h" // For WideKind
namespace art {
@@ -72,7 +73,7 @@
* records.
*/
-class RegStorage {
+class RegStorage : public ValueObject {
public:
enum RegStorageKind {
kValidMask = 0x8000,
@@ -112,7 +113,7 @@
}
constexpr RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg)
: reg_(
- DCHECK_CONSTEXPR(rs_kind == k64BitPair, << rs_kind, 0u)
+ DCHECK_CONSTEXPR(rs_kind == k64BitPair, << static_cast<int>(rs_kind), 0u)
DCHECK_CONSTEXPR((low_reg & kFloatingPoint) == (high_reg & kFloatingPoint),
<< low_reg << ", " << high_reg, 0u)
DCHECK_CONSTEXPR((high_reg & kRegNumMask) <= kHighRegNumMask,
@@ -331,14 +332,16 @@
case k256BitSolo: return 32;
case k512BitSolo: return 64;
case k1024BitSolo: return 128;
- default: LOG(FATAL) << "Unexpected shape";
+ default: LOG(FATAL) << "Unexpected shape"; UNREACHABLE();
}
- return 0;
}
private:
uint16_t reg_;
};
+static inline std::ostream& operator<<(std::ostream& o, const RegStorage& rhs) {
+ return o << rhs.GetRawBits(); // TODO: better output.
+}
} // namespace art
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 412f85d..ed33882 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -126,7 +126,7 @@
for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
/* Block bb defines register idx */
- temp_bit_matrix_[idx]->SetBit(bb->id);
+ temp_.ssa.def_block_matrix[idx]->SetBit(bb->id);
}
return true;
}
@@ -135,16 +135,16 @@
int num_registers = GetNumOfCodeAndTempVRs();
/* Allocate num_registers bit vector pointers */
DCHECK(temp_scoped_alloc_ != nullptr);
- DCHECK(temp_bit_matrix_ == nullptr);
- temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ DCHECK(temp_.ssa.def_block_matrix == nullptr);
+ temp_.ssa.def_block_matrix = static_cast<ArenaBitVector**>(
temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * num_registers, kArenaAllocDFInfo));
int i;
/* Initialize num_register vectors with num_blocks bits each */
for (i = 0; i < num_registers; i++) {
- temp_bit_matrix_[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(arena_, GetNumBlocks(),
- false, kBitMapBMatrix);
- temp_bit_matrix_[i]->ClearAllBits();
+ temp_.ssa.def_block_matrix[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ arena_, GetNumBlocks(), false, kBitMapBMatrix);
+ temp_.ssa.def_block_matrix[i]->ClearAllBits();
}
AllNodesIterator iter(this);
@@ -163,7 +163,7 @@
int num_regs = GetNumOfCodeVRs();
int in_reg = GetFirstInVR();
for (; in_reg < num_regs; in_reg++) {
- temp_bit_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
+ temp_.ssa.def_block_matrix[in_reg]->SetBit(GetEntryBlock()->id);
}
}
@@ -435,32 +435,32 @@
* insert a phi node if the variable is live-in to the block.
*/
bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
- DCHECK_EQ(temp_bit_vector_size_, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
- ArenaBitVector* temp_dalvik_register_v = temp_bit_vector_;
+ DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
+ ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
if (bb->data_flow_info == NULL) {
return false;
}
- temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
+ temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
BasicBlock* bb_taken = GetBasicBlock(bb->taken);
BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
if (bb_taken && bb_taken->data_flow_info)
- ComputeSuccLineIn(temp_dalvik_register_v, bb_taken->data_flow_info->live_in_v,
+ ComputeSuccLineIn(temp_live_vregs, bb_taken->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
if (bb_fall_through && bb_fall_through->data_flow_info)
- ComputeSuccLineIn(temp_dalvik_register_v, bb_fall_through->data_flow_info->live_in_v,
+ ComputeSuccLineIn(temp_live_vregs, bb_fall_through->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
if (bb->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
if (succ_bb->data_flow_info) {
- ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
+ ComputeSuccLineIn(temp_live_vregs, succ_bb->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
}
}
}
- if (!temp_dalvik_register_v->Equal(bb->data_flow_info->live_in_v)) {
- bb->data_flow_info->live_in_v->Copy(temp_dalvik_register_v);
+ if (!temp_live_vregs->Equal(bb->data_flow_info->live_in_v)) {
+ bb->data_flow_info->live_in_v->Copy(temp_live_vregs);
return true;
}
return false;
@@ -482,7 +482,7 @@
/* Iterate through each Dalvik register */
for (dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
- input_blocks->Copy(temp_bit_matrix_[dalvik_reg]);
+ input_blocks->Copy(temp_.ssa.def_block_matrix[dalvik_reg]);
phi_blocks->ClearAllBits();
do {
// TUNING: When we repeat this, we could skip indexes from the previous pass.
@@ -539,8 +539,7 @@
for (BasicBlockId pred_id : bb->predecessors) {
BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
- int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
- uses[idx] = ssa_reg;
+ uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
incoming[idx] = pred_id;
idx++;
}
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a8e6b3c..4929b5b 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -106,6 +106,8 @@
if (use_sea) {
return true;
}
+#else
+ UNUSED(method_ref);
#endif
if (!compiler_options_->IsCompilationEnabled()) {
return false;
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 9f0a696..17328c4 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -282,6 +282,10 @@
Instruction::Code code = inst->Opcode();
if ((code == Instruction::CHECK_CAST) || (code == Instruction::APUT_OBJECT)) {
uint32_t dex_pc = inst->GetDexPc(code_item->insns_);
+ if (!method_verifier->GetInstructionFlags(dex_pc).IsVisited()) {
+ // Do not attempt to quicken this instruction; it's unreachable anyway.
+ continue;
+ }
const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
bool is_safe_cast = false;
if (code == Instruction::CHECK_CAST) {
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index bdfab13..f6c7d52 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -378,7 +378,20 @@
changed |= SetWide(defs[1]);
changed |= SetHigh(defs[1]);
}
+
+ bool has_ins = (GetNumOfInVRs() > 0);
+
for (int i = 0; i < ssa_rep->num_uses; i++) {
+ if (has_ins && IsInVReg(uses[i])) {
+ // NB: The SSA name for the first def of an in-reg will be the same as
+ // the reg's actual name.
+ if (!reg_location_[uses[i]].fp && defined_fp) {
+ // If we were about to infer that this first def of an in-reg is a float
+ // when it wasn't previously (because float/int is set during SSA initialization),
+ // do not allow this to happen.
+ continue;
+ }
+ }
changed |= SetFp(uses[i], defined_fp);
changed |= SetCore(uses[i], defined_core);
changed |= SetRef(uses[i], defined_ref);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 3325568..ebf7874 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -93,6 +93,10 @@
return field->IsVolatile();
}
+inline MemberOffset CompilerDriver::GetFieldOffset(mirror::ArtField* field) {
+ return field->GetOffset();
+}
+
inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
mirror::ArtField* resolved_field, uint16_t field_idx) {
@@ -107,16 +111,12 @@
inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
- uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized) {
+ mirror::ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) {
DCHECK(resolved_field->IsStatic());
if (LIKELY(referrer_class != nullptr)) {
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
if (fields_class == referrer_class) {
- *field_offset = resolved_field->GetOffset();
*storage_index = fields_class->GetDexTypeIndex();
- *is_referrers_class = true; // implies no worrying about class initialization
- *is_initialized = true;
return std::make_pair(true, true);
}
if (referrer_class->CanAccessResolvedField(fields_class, resolved_field,
@@ -148,23 +148,30 @@
}
}
if (storage_idx != DexFile::kDexNoIndex) {
- *field_offset = resolved_field->GetOffset();
*storage_index = storage_idx;
- *is_referrers_class = false;
- *is_initialized = fields_class->IsInitialized() &&
- CanAssumeTypeIsPresentInDexCache(*dex_file, storage_idx);
return std::make_pair(true, !resolved_field->IsFinal());
}
}
}
// Conservative defaults.
- *field_offset = MemberOffset(0u);
*storage_index = DexFile::kDexNoIndex;
- *is_referrers_class = false;
- *is_initialized = false;
return std::make_pair(false, false);
}
+inline bool CompilerDriver::IsStaticFieldInReferrerClass(mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field) {
+ DCHECK(resolved_field->IsStatic());
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ return referrer_class == fields_class;
+}
+
+inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field) {
+ DCHECK(resolved_field->IsStatic());
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
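+ // The referrer's own class counts as initialized: by the time the referrer's code runs, that
+ // class's initialization has at least been started by the current thread.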
+ return fields_class == referrer_class || fields_class->IsInitialized();
+}
+
inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -233,7 +240,8 @@
bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
- (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method);
+ (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method) &&
+ !resolved_method->IsAbstract();
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
@@ -312,14 +320,13 @@
return stats_flags;
}
-inline bool CompilerDriver::NeedsClassInitialization(mirror::Class* referrer_class,
- mirror::ArtMethod* resolved_method) {
+inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_class,
+ mirror::ArtMethod* resolved_method) {
if (!resolved_method->IsStatic()) {
- return false;
+ return true;
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- // NOTE: Unlike in IsFastStaticField(), we don't check CanAssumeTypeIsPresentInDexCache() here.
- return methods_class != referrer_class && !methods_class->IsInitialized();
+ return methods_class == referrer_class || methods_class->IsInitialized();
}
} // namespace art
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e1b5984..2e9f835 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -330,7 +330,8 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- bool image, std::set<std::string>* image_classes, size_t thread_count,
+ bool image, std::set<std::string>* image_classes,
+ std::set<std::string>* compiled_classes, size_t thread_count,
bool dump_stats, bool dump_passes, CumulativeLogger* timer,
const std::string& profile_file)
: profile_present_(false), compiler_options_(compiler_options),
@@ -346,16 +347,13 @@
non_relative_linker_patch_count_(0u),
image_(image),
image_classes_(image_classes),
+ classes_to_compile_(compiled_classes),
thread_count_(thread_count),
- start_ns_(0),
stats_(new AOTCompilationStats),
dump_stats_(dump_stats),
dump_passes_(dump_passes),
timings_logger_(timer),
- compiler_library_(nullptr),
compiler_context_(nullptr),
- compiler_enable_auto_elf_loading_(nullptr),
- compiler_get_method_code_addr_(nullptr),
support_boot_image_fixup_(instruction_set != kMips),
dedupe_code_("dedupe code"),
dedupe_src_mapping_table_("dedupe source mapping table"),
@@ -574,7 +572,7 @@
class_def);
}
CompileMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx, jclass_loader,
- *dex_file, dex_to_dex_compilation_level);
+ *dex_file, dex_to_dex_compilation_level, true);
self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
@@ -617,6 +615,17 @@
}
}
+bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
+ if (!IsImage()) {
+ return true;
+ } else {
+ if (classes_to_compile_ == nullptr) {
+ return true;
+ }
+ return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+ }
+}
+
static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -722,9 +731,9 @@
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(*dex_file)));
- Handle<mirror::Class> klass(hs.NewHandle(
+ StackHandleScope<2> hs2(self);
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->FindDexCache(*dex_file)));
+ Handle<mirror::Class> klass(hs2.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
if (klass.Get() == nullptr) {
@@ -761,13 +770,13 @@
}
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- MaybeAddToImageClasses(hs.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
+ StackHandleScope<1> hs2(self);
+ MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
image_classes);
}
if (klass->IsArrayClass()) {
- StackHandleScope<1> hs(self);
- MaybeAddToImageClasses(hs.NewHandle(klass->GetComponentType()), image_classes);
+ StackHandleScope<1> hs2(self);
+ MaybeAddToImageClasses(hs2.NewHandle(klass->GetComponentType()), image_classes);
}
klass.Assign(klass->GetSuperClass());
}
@@ -1075,7 +1084,8 @@
bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
bool is_put, MemberOffset* field_offset,
uint32_t* storage_index, bool* is_referrers_class,
- bool* is_volatile, bool* is_initialized) {
+ bool* is_volatile, bool* is_initialized,
+ Primitive::Type* type) {
ScopedObjectAccess soa(Thread::Current());
// Try to resolve the field and compiling method's class.
mirror::ArtField* resolved_field;
@@ -1098,17 +1108,25 @@
if (resolved_field != nullptr && referrer_class != nullptr) {
*is_volatile = IsFieldVolatile(resolved_field);
std::pair<bool, bool> fast_path = IsFastStaticField(
- dex_cache, referrer_class, resolved_field, field_idx, field_offset,
- storage_index, is_referrers_class, is_initialized);
+ dex_cache, referrer_class, resolved_field, field_idx, storage_index);
result = is_put ? fast_path.second : fast_path.first;
}
- if (!result) {
+ if (result) {
+ *field_offset = GetFieldOffset(resolved_field);
+ *is_referrers_class = IsStaticFieldInReferrerClass(referrer_class, resolved_field);
+ // *is_referrers_class == true implies that we do not need to worry about class initialization.
+ *is_initialized = (*is_referrers_class) ||
+ (IsStaticFieldsClassInitialized(referrer_class, resolved_field) &&
+ CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index));
+ *type = resolved_field->GetTypeAsPrimitiveType();
+ } else {
// Conservative defaults.
*is_volatile = true;
*field_offset = MemberOffset(static_cast<size_t>(-1));
*storage_index = -1;
*is_referrers_class = false;
*is_initialized = false;
+ *type = Primitive::kPrimVoid;
}
ProcessedStaticField(result, *is_referrers_class);
return result;
@@ -1911,6 +1929,10 @@
it.Next();
}
CompilerDriver* driver = manager->GetCompiler();
+
+ bool compilation_enabled = driver->IsClassToCompile(
+ dex_file.StringByTypeIdx(class_def.class_idx_));
+
// Compile direct methods
int64_t previous_direct_method_idx = -1;
while (it.HasNextDirectMethod()) {
@@ -1924,7 +1946,8 @@
previous_direct_method_idx = method_idx;
driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
}
// Compile virtual methods
@@ -1940,7 +1963,8 @@
previous_virtual_method_idx = method_idx;
driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
}
DCHECK(!it.HasNext());
@@ -1955,27 +1979,44 @@
context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
}
+// Does the runtime for the InstructionSet provide an implementation returned by
+// GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler?
+static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kArm64:
+ case kThumb2:
+ case kMips:
+ case kX86:
+ case kX86_64: return true;
+ default: return false;
+ }
+}
+
void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx,
uint32_t method_idx, jobject class_loader,
const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ DexToDexCompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled) {
CompiledMethod* compiled_method = nullptr;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
if ((access_flags & kAccNative) != 0) {
// Are we interpreting only and have support for generic JNI down calls?
if (!compiler_options_->IsCompilationEnabled() &&
- (instruction_set_ == kX86_64 || instruction_set_ == kArm64)) {
+ InstructionSetHasGenericJniStub(instruction_set_)) {
// Leaving this empty will trigger the generic JNI version
} else {
compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
CHECK(compiled_method != nullptr);
}
} else if ((access_flags & kAccAbstract) != 0) {
+ // Abstract methods don't have code.
} else {
MethodReference method_ref(&dex_file, method_idx);
- bool compile = verification_results_->IsCandidateForCompilation(method_ref, access_flags);
+ bool compile = compilation_enabled &&
+ verification_results_->IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
// NOTE: if compiler declines to compile this method, it will return nullptr.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 3d59ef1..437a1a9 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -21,6 +21,7 @@
#include <string>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "class_reference.h"
@@ -28,7 +29,6 @@
#include "compiler.h"
#include "dex_file.h"
#include "driver/compiler_options.h"
-#include "instruction_set.h"
#include "invoke_type.h"
#include "method_reference.h"
#include "mirror/class.h" // For mirror::Class::Status.
@@ -51,6 +51,7 @@
class DexCompilationUnit;
class DexFileToMethodInlinerMap;
struct InlineIGetIPutData;
+class InstructionSetFeatures;
class OatWriter;
class ParallelCompilationManager;
class ScopedObjectAccess;
@@ -75,6 +76,7 @@
kRequired, // Dex-to-dex compilation required for correctness.
kOptimize // Perform required transformation and peep-hole optimizations.
};
+std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
class CompilerDriver {
public:
@@ -90,6 +92,7 @@
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool image, std::set<std::string>* image_classes,
+ std::set<std::string>* compiled_classes,
size_t thread_count, bool dump_stats, bool dump_passes,
CumulativeLogger* timer, const std::string& profile_file);
@@ -233,6 +236,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MemberOffset GetFieldOffset(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
@@ -240,13 +244,20 @@
mirror::ArtField* resolved_field, uint16_t field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the field offset,
- // the type index of the declaring class in the referrer's dex file and whether the declaring
- // class is the referrer's class or at least can be assumed to be initialized.
+ // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
+ // of the declaring class in the referrer's dex file.
std::pair<bool, bool> IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
- uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized)
+ mirror::ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Is the static field in the referrer's class?
+ bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, mirror::ArtField* resolved_field)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Is the static field's class initialized?
+ bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a method. Returns nullptr on failure, including incompatible class change.
@@ -277,8 +288,10 @@
uintptr_t* direct_code, uintptr_t* direct_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Does invokation of the resolved method need class initialization?
- bool NeedsClassInitialization(mirror::Class* referrer_class, mirror::ArtMethod* resolved_method)
+ // Is the method's class initialized for an invoke?
+ // For static invokes, this determines whether we need to consider a potential call to <clinit>().
+ // For non-static invokes, assuming a non-null receiver, the class is always initialized.
+ bool IsMethodsClassInitialized(mirror::Class* referrer_class, mirror::ArtMethod* resolved_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ProcessedInstanceField(bool resolved);
@@ -301,7 +314,8 @@
// field is within the referrer (which can avoid checking class initialization).
bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
MemberOffset* field_offset, uint32_t* storage_index,
- bool* is_referrers_class, bool* is_volatile, bool* is_initialized)
+ bool* is_referrers_class, bool* is_volatile, bool* is_initialized,
+ Primitive::Type* type)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fastpath a interface, super class or virtual method call? Computes method's vtable
@@ -362,6 +376,9 @@
// Checks if class specified by type_idx is one of the image_classes_
bool IsImageClass(const char* descriptor) const;
+ // Checks if the provided class should be compiled, i.e., is in classes_to_compile_.
+ bool IsClassToCompile(const char* descriptor) const;
+
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
LOCKS_EXCLUDED(compiled_classes_lock_);
@@ -463,7 +480,8 @@
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level)
+ DexToDexCompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled)
LOCKS_EXCLUDED(compiled_methods_lock_);
static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
@@ -502,8 +520,12 @@
// included in the image.
std::unique_ptr<std::set<std::string>> image_classes_;
+ // If image_ is true, specifies the classes that will be compiled in
+ // the image. Note that if classes_to_compile_ is nullptr, all classes
+ // are compiled.
+ std::unique_ptr<std::set<std::string>> classes_to_compile_;
+
size_t thread_count_;
- uint64_t start_ns_;
class AOTCompilationStats;
std::unique_ptr<AOTCompilationStats> stats_;
@@ -516,8 +538,6 @@
typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
- void* compiler_library_;
-
typedef void (*DexToDexCompilerFn)(CompilerDriver& driver,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
@@ -533,13 +553,6 @@
// Arena pool used by the compiler.
ArenaPool arena_pool_;
- typedef void (*CompilerEnableAutoElfLoadingFn)(CompilerDriver& driver);
- CompilerEnableAutoElfLoadingFn compiler_enable_auto_elf_loading_;
-
- typedef const void* (*CompilerGetMethodCodeAddrFn)
- (const CompilerDriver& driver, const CompiledMethod* cm, const mirror::ArtMethod* method);
- CompilerGetMethodCodeAddrFn compiler_get_method_code_addr_;
-
bool support_boot_image_fixup_;
// DeDuplication data structures, these own the corresponding byte arrays.
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 9ae9bd4..5a0ec2f 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -86,11 +86,11 @@
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
CHECK(c != NULL);
- for (size_t i = 0; i < c->NumDirectMethods(); i++) {
- MakeExecutable(c->GetDirectMethod(i));
+ for (size_t j = 0; j < c->NumDirectMethods(); j++) {
+ MakeExecutable(c->GetDirectMethod(j));
}
- for (size_t i = 0; i < c->NumVirtualMethods(); i++) {
- MakeExecutable(c->GetVirtualMethod(i));
+ for (size_t j = 0; j < c->NumVirtualMethods(); j++) {
+ MakeExecutable(c->GetVirtualMethod(j));
}
}
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 3a50bfd..0592f0c 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -17,9 +17,15 @@
#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_
#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "globals.h"
+
namespace art {
-class CompilerOptions {
+class CompilerOptions FINAL {
public:
enum CompilerFilter {
kVerifyNone, // Skip verification and compile nothing except JNI stubs.
@@ -60,11 +66,12 @@
implicit_null_checks_(false),
implicit_so_checks_(false),
implicit_suspend_checks_(false),
- compile_pic_(false)
+ compile_pic_(false),
#ifdef ART_SEA_IR_MODE
- , sea_ir_mode_(false)
+ sea_ir_mode_(false),
#endif
- {}
+ verbose_methods_(nullptr) {
+ }
CompilerOptions(CompilerFilter compiler_filter,
size_t huge_method_threshold,
@@ -79,10 +86,11 @@
bool implicit_null_checks,
bool implicit_so_checks,
bool implicit_suspend_checks,
- bool compile_pic
+ bool compile_pic,
#ifdef ART_SEA_IR_MODE
- , bool sea_ir_mode
+ bool sea_ir_mode,
#endif
+ const std::vector<std::string>* verbose_methods
) : // NOLINT(whitespace/parens)
compiler_filter_(compiler_filter),
huge_method_threshold_(huge_method_threshold),
@@ -97,11 +105,12 @@
implicit_null_checks_(implicit_null_checks),
implicit_so_checks_(implicit_so_checks),
implicit_suspend_checks_(implicit_suspend_checks),
- compile_pic_(compile_pic)
+ compile_pic_(compile_pic),
#ifdef ART_SEA_IR_MODE
- , sea_ir_mode_(sea_ir_mode)
+ sea_ir_mode_(sea_ir_mode),
#endif
- {}
+ verbose_methods_(verbose_methods) {
+ }
CompilerFilter GetCompilerFilter() const {
return compiler_filter_;
@@ -168,28 +177,18 @@
return implicit_null_checks_;
}
- void SetImplicitNullChecks(bool new_val) {
- implicit_null_checks_ = new_val;
- }
-
bool GetImplicitStackOverflowChecks() const {
return implicit_so_checks_;
}
- void SetImplicitStackOverflowChecks(bool new_val) {
- implicit_so_checks_ = new_val;
- }
-
bool GetImplicitSuspendChecks() const {
return implicit_suspend_checks_;
}
- void SetImplicitSuspendChecks(bool new_val) {
- implicit_suspend_checks_ = new_val;
- }
-
#ifdef ART_SEA_IR_MODE
- bool GetSeaIrMode();
+ bool GetSeaIrMode() const {
+ return sea_ir_mode_;
+ }
#endif
bool GetGenerateGDBInformation() const {
@@ -205,26 +204,46 @@
return compile_pic_;
}
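+ // Entries in verbose_methods_ are matched by substring against a method's pretty name,
+ // so a single entry can enable verbose output for a whole class or package.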
+ bool HasVerboseMethods() const {
+ return verbose_methods_ != nullptr && !verbose_methods_->empty();
+ }
+
+ bool IsVerboseMethod(const std::string& pretty_method) const {
+ for (const std::string& cur_method : *verbose_methods_) {
+ if (pretty_method.find(cur_method) != std::string::npos) {
+ return true;
+ }
+ }
+ return false;
+ }
+
private:
CompilerFilter compiler_filter_;
- size_t huge_method_threshold_;
- size_t large_method_threshold_;
- size_t small_method_threshold_;
- size_t tiny_method_threshold_;
- size_t num_dex_methods_threshold_;
- bool generate_gdb_information_;
- bool include_patch_information_;
+ const size_t huge_method_threshold_;
+ const size_t large_method_threshold_;
+ const size_t small_method_threshold_;
+ const size_t tiny_method_threshold_;
+ const size_t num_dex_methods_threshold_;
+ const bool generate_gdb_information_;
+ const bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
- double top_k_profile_threshold_;
- bool include_debug_symbols_;
- bool implicit_null_checks_;
- bool implicit_so_checks_;
- bool implicit_suspend_checks_;
- bool compile_pic_;
+ const double top_k_profile_threshold_;
+ const bool include_debug_symbols_;
+ const bool implicit_null_checks_;
+ const bool implicit_so_checks_;
+ const bool implicit_suspend_checks_;
+ const bool compile_pic_;
+
#ifdef ART_SEA_IR_MODE
- bool sea_ir_mode_;
+ const bool sea_ir_mode_;
#endif
+
+ // Vector of methods to have verbose output enabled for.
+ const std::vector<std::string>* const verbose_methods_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
+std::ostream& operator<<(std::ostream& os, const CompilerOptions::CompilerFilter& rhs);
} // namespace art
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index c32bdb4..273b62d 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -17,12 +17,12 @@
#ifndef ART_COMPILER_ELF_BUILDER_H_
#define ART_COMPILER_ELF_BUILDER_H_
+#include "arch/instruction_set.h"
#include "base/stl_util.h"
#include "base/value_object.h"
#include "buffered_output_stream.h"
#include "elf_utils.h"
#include "file_output_stream.h"
-#include "instruction_set.h"
namespace art {
@@ -494,7 +494,7 @@
output_(output) {}
protected:
- bool DoActualWrite(File* elf_file) OVERRIDE {
+ bool DoActualWrite(File* elf_file ATTRIBUTE_UNUSED) OVERRIDE {
// All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
// piece. This is for future flexibility.
UNUSED(output_);
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index c75d8f8..25cf086 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -222,9 +222,9 @@
typename Elf_Phdr, typename Elf_Shdr>
bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files_unused,
- const std::string& android_root_unused,
- bool is_host_unused) {
+ const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
+ const std::string& android_root_unused ATTRIBUTE_UNUSED,
+ bool is_host_unused ATTRIBUTE_UNUSED) {
constexpr bool debug = false;
const OatHeader& oat_header = oat_writer->GetOatHeader();
Elf_Word oat_data_size = oat_header.GetExecutableOffset();
@@ -686,6 +686,13 @@
symtab->AddSymbol(it->method_name_, &builder->GetTextBuilder(), it->low_pc_, true,
it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
+ // Conforming to the ARM AAELF specification, add a $t mapping symbol to indicate the start of
+ // a sequence of Thumb2 instructions, so that disassembler tools can disassemble them correctly.
+ if (it->compiled_method_->GetInstructionSet() == kThumb2) {
+ symtab->AddSymbol("$t", &builder->GetTextBuilder(), it->low_pc_ & ~1, true,
+ 0, STB_LOCAL, STT_NOTYPE);
+ }
+
// Include CFI for compiled method, if possible.
if (cfi_info.get() != nullptr) {
DCHECK(it->compiled_method_ != nullptr);
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index d5d487f..dac1ef4 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -63,7 +63,9 @@
ScratchFile oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
const uintptr_t requested_image_base = ART_BASE_ADDRESS;
- std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_, requested_image_base));
+ std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_, requested_image_base,
+ /*compile_pic*/false));
+ // TODO: compile_pic should be a test argument.
{
{
jobject class_loader = NULL;
@@ -103,13 +105,16 @@
ASSERT_TRUE(success_image);
bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin());
ASSERT_TRUE(success_fixup);
+
+ ASSERT_EQ(dup_oat->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
+ << oat_file.GetFilename();
}
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
ASSERT_TRUE(file.get() != NULL);
ImageHeader image_header;
- file->ReadFully(&image_header, sizeof(image_header));
+ ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
ASSERT_TRUE(image_header.IsValid());
ASSERT_GE(image_header.GetImageBitmapOffset(), sizeof(image_header));
ASSERT_NE(0U, image_header.GetImageBitmapSize());
@@ -212,7 +217,8 @@
oat_file_begin,
oat_data_begin,
oat_data_end,
- oat_file_end);
+ oat_file_end,
+ /*compile_pic*/false);
ASSERT_TRUE(image_header.IsValid());
char* magic = const_cast<char*>(image_header.GetMagic());
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 35a3d4b..4f5026d 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -54,7 +54,8 @@
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
-#include "utils.h"
+
+#include <numeric>
using ::art::mirror::ArtField;
using ::art::mirror::ArtMethod;
@@ -67,12 +68,15 @@
namespace art {
+// Separate objects into multiple bins to optimize dirty memory use.
+static constexpr bool kBinObjects = true;
+
bool ImageWriter::PrepareImageAddressSpace() {
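+ // Cache the target's pointer size; ArtMethod objects are sized with it when assigning bin slots.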
+ target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
{
Thread::Current()->TransitionFromSuspendedToRunnable();
PruneNonImageClasses(); // Remove junk
ComputeLazyFieldsForImageClasses(); // Add useful information
- ComputeEagerResolvedStrings();
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
}
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -103,13 +107,13 @@
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
if (oat_file.get() == NULL) {
- LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
+ PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
return false;
}
std::string error_msg;
oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, &error_msg);
if (oat_file_ == nullptr) {
- LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
+ PLOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
<< ": " << error_msg;
return false;
}
@@ -149,6 +153,11 @@
SetOatChecksumFromElfFile(oat_file.get());
+ if (oat_file->FlushCloseOrErase() != 0) {
+ LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location;
+ return false;
+ }
+
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
if (image_file.get() == NULL) {
@@ -157,6 +166,7 @@
}
if (fchmod(image_file->Fd(), 0644) != 0) {
PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
+ image_file->Erase();
return EXIT_FAILURE;
}
@@ -164,6 +174,7 @@
CHECK_EQ(image_end_, image_header->GetImageSize());
if (!image_file->WriteFully(image_->Begin(), image_end_)) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
+ image_file->Erase();
return false;
}
@@ -173,53 +184,54 @@
image_header->GetImageBitmapSize(),
image_header->GetImageBitmapOffset())) {
PLOG(ERROR) << "Failed to write image file " << image_filename;
+ image_file->Erase();
return false;
}
+ if (image_file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
+ return false;
+ }
return true;
}
-void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
+void ImageWriter::SetImageOffset(mirror::Object* object,
+ ImageWriter::BinSlot bin_slot,
+ size_t offset) {
DCHECK(object != nullptr);
DCHECK_NE(offset, 0U);
- DCHECK(!IsImageOffsetAssigned(object));
mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
DCHECK_ALIGNED(obj, kObjectAlignment);
- image_bitmap_->Set(obj);
- // Before we stomp over the lock word, save the hash code for later.
- Monitor::Deflate(Thread::Current(), object);;
- LockWord lw(object->GetLockWord(false));
- switch (lw.GetState()) {
- case LockWord::kFatLocked: {
- LOG(FATAL) << "Fat locked object " << obj << " found during object copy";
- break;
+
+ image_bitmap_->Set(obj); // Mark the obj as mutated, since we will end up changing it.
+ {
+ // Remember the object-inside-of-the-image's hash code so we can restore it after the copy.
+ auto hash_it = saved_hashes_map_.find(bin_slot);
+ if (hash_it != saved_hashes_map_.end()) {
+ std::pair<BinSlot, uint32_t> slot_hash = *hash_it;
+ saved_hashes_.push_back(std::make_pair(obj, slot_hash.second));
+ saved_hashes_map_.erase(hash_it);
}
- case LockWord::kThinLocked: {
- LOG(FATAL) << "Thin locked object " << obj << " found during object copy";
- break;
- }
- case LockWord::kUnlocked:
- // No hash, don't need to save it.
- break;
- case LockWord::kHashCode:
- saved_hashes_.push_back(std::make_pair(obj, lw.GetHashCode()));
- break;
- default:
- LOG(FATAL) << "Unreachable.";
- break;
}
+ // The object is already deflated from when we set the bin slot. Just overwrite the lock word.
object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
DCHECK(IsImageOffsetAssigned(object));
}
-void ImageWriter::AssignImageOffset(mirror::Object* object) {
+void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) {
DCHECK(object != nullptr);
- SetImageOffset(object, image_end_);
- image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
- DCHECK_LT(image_end_, image_->Size());
+ DCHECK_NE(image_objects_offset_begin_, 0u);
+
+ size_t previous_bin_sizes = GetBinSizeSum(bin_slot.GetBin()); // sum sizes in [0..bin#)
+ size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
+ DCHECK_ALIGNED(new_offset, kObjectAlignment);
+
+ SetImageOffset(object, bin_slot, new_offset);
+ DCHECK_LT(new_offset, image_end_);
}
bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
+ // Will also return true if the bin slot was assigned since we are reusing the lock word.
DCHECK(object != nullptr);
return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress;
}
@@ -233,6 +245,178 @@
return offset;
}
+void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
+ DCHECK(object != nullptr);
+ DCHECK(!IsImageOffsetAssigned(object));
+ DCHECK(!IsImageBinSlotAssigned(object));
+
+ // Before we stomp over the lock word, save the hash code for later.
+ Monitor::Deflate(Thread::Current(), object);
+ LockWord lw(object->GetLockWord(false));
+ switch (lw.GetState()) {
+ case LockWord::kFatLocked: {
+ LOG(FATAL) << "Fat locked object " << object << " found during object copy";
+ break;
+ }
+ case LockWord::kThinLocked: {
+ LOG(FATAL) << "Thin locked object " << object << " found during object copy";
+ break;
+ }
+ case LockWord::kUnlocked:
+ // No hash, don't need to save it.
+ break;
+ case LockWord::kHashCode:
+ saved_hashes_map_[bin_slot] = lw.GetHashCode();
+ break;
+ default:
+ LOG(FATAL) << "Unreachable.";
+ UNREACHABLE();
+ }
+ object->SetLockWord(LockWord::FromForwardingAddress(static_cast<uint32_t>(bin_slot)),
+ false);
+ DCHECK(IsImageBinSlotAssigned(object));
+}
+
+void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
+ DCHECK(object != nullptr);
+ size_t object_size;
+ if (object->IsArtMethod()) {
+ // Methods are sized based on the target pointer size.
+ object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ } else {
+ object_size = object->SizeOf();
+ }
+
+ // The magic happens here. We segregate objects into different bins based
+ // on how likely they are to get dirty at runtime.
+ //
+ // Likely-to-dirty objects get packed together into the same bin so that
+ // at runtime their page dirtiness ratio (how many dirty objects a page has) is
+ // maximized.
+ //
+ // This means more pages will stay either clean or shared dirty (with zygote) and
+ // the app will use less of its own (private) memory.
+ Bin bin = kBinRegular;
+
+ if (kBinObjects) {
+ //
+ // Changing the bin of an object is purely a memory-use tuning decision.
+ // It has no effect on runtime correctness.
+ //
+ // Memory analysis has determined that the following types of objects get dirtied
+ // the most:
+ //
+ // * Classes which are verified [their clinit runs only at runtime]
+ // - classes in general [because their static fields get overwritten]
+ // - initialized classes with all-final statics are unlikely to be ever dirty,
+ // so bin them separately
+ // * Art Methods that are:
+ // - native [their native entry point is not looked up until runtime]
+ // - have declaring classes that aren't initialized
+ // [their interpreter/quick entry points are trampolines until the class
+ // becomes initialized]
+ //
+ // We also assume the following objects get dirtied either never or extremely rarely:
+ // * Strings (they are immutable)
+ // * Art methods that aren't native and have initialized declared classes
+ //
+ // We assume that "regular" bin objects are highly unlikely to become dirtied,
+ // so packing them together will not result in a noticeably tighter dirty-to-clean ratio.
+ //
+ if (object->IsClass()) {
+ bin = kBinClassVerified;
+ mirror::Class* klass = object->AsClass();
+
+ if (klass->GetStatus() == Class::kStatusInitialized) {
+ bin = kBinClassInitialized;
+
+ // If the class's static fields are all final, put it into a separate bin
+ // since it's very likely it will stay clean.
+ uint32_t num_static_fields = klass->NumStaticFields();
+ if (num_static_fields == 0) {
+ bin = kBinClassInitializedFinalStatics;
+ } else {
+ // Maybe all the statics are final?
+ bool all_final = true;
+ for (uint32_t i = 0; i < num_static_fields; ++i) {
+ ArtField* field = klass->GetStaticField(i);
+ if (!field->IsFinal()) {
+ all_final = false;
+ break;
+ }
+ }
+
+ if (all_final) {
+ bin = kBinClassInitializedFinalStatics;
+ }
+ }
+ }
+ } else if (object->IsArtMethod<kVerifyNone>()) {
+ mirror::ArtMethod* art_method = down_cast<ArtMethod*>(object);
+ if (art_method->IsNative()) {
+ bin = kBinArtMethodNative;
+ } else {
+ mirror::Class* declaring_class = art_method->GetDeclaringClass();
+ if (declaring_class->GetStatus() != Class::kStatusInitialized) {
+ bin = kBinArtMethodNotInitialized;
+ } else {
+ // This is highly unlikely to dirty since there's no entry points to mutate.
+ bin = kBinArtMethodsManagedInitialized;
+ }
+ }
+ } else if (object->GetClass<kVerifyNone>()->IsStringClass()) {
+ bin = kBinString; // Strings are almost always immutable (except for object header).
+ } // else bin = kBinRegular
+ }
+
+ size_t current_offset = bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned).
+ // Move the current bin size up to accommodate the object we just assigned a bin slot.
+ size_t offset_delta = RoundUp(object_size, kObjectAlignment); // 64-bit alignment
+ bin_slot_sizes_[bin] += offset_delta;
+
+ BinSlot new_bin_slot(bin, current_offset);
+ SetImageBinSlot(object, new_bin_slot);
+
+ ++bin_slot_count_[bin];
+
+ DCHECK_LT(GetBinSizeSum(), image_->Size());
+
+ // Grow the image closer to the end by the object we just assigned.
+ image_end_ += offset_delta;
+ DCHECK_LT(image_end_, image_->Size());
+}
+
+bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const {
+ DCHECK(object != nullptr);
+
+ // We always stash the bin slot into a lockword, in the 'forwarding address' state.
+ // If it's in some other state, then we haven't yet assigned an image bin slot.
+ if (object->GetLockWord(false).GetState() != LockWord::kForwardingAddress) {
+ return false;
+ } else if (kIsDebugBuild) {
+ LockWord lock_word = object->GetLockWord(false);
+ size_t offset = lock_word.ForwardingAddress();
+ BinSlot bin_slot(offset);
+ DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()])
+ << "bin slot offset should not exceed the size of that bin";
+ }
+ return true;
+}
+
+ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const {
+ DCHECK(object != nullptr);
+ DCHECK(IsImageBinSlotAssigned(object));
+
+ LockWord lock_word = object->GetLockWord(false);
+ size_t offset = lock_word.ForwardingAddress(); // TODO: ForwardingAddress should be uint32_t
+ DCHECK_LE(offset, std::numeric_limits<uint32_t>::max());
+
+ BinSlot bin_slot(static_cast<uint32_t>(offset));
+ DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()]);
+
+ return bin_slot;
+}
+
bool ImageWriter::AllocMemory() {
size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
std::string error_msg;
@@ -265,7 +449,150 @@
return true;
}
-void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
+// Count the number of strings in the heap and put the result in arg as a size_t pointer.
+static void CountStringsCallback(Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (obj->GetClass()->IsStringClass()) {
+ ++*reinterpret_cast<size_t*>(arg);
+ }
+}
+
+// Collect all the java.lang.String in the heap and put them in the output strings_ array.
+class StringCollector {
+ public:
+ StringCollector(Handle<mirror::ObjectArray<mirror::String>> strings, size_t index)
+ : strings_(strings), index_(index) {
+ }
+ static void Callback(Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ auto* collector = reinterpret_cast<StringCollector*>(arg);
+ if (obj->GetClass()->IsStringClass()) {
+ collector->strings_->SetWithoutChecks<false>(collector->index_++, obj->AsString());
+ }
+ }
+ size_t GetIndex() const {
+ return index_;
+ }
+
+ private:
+ Handle<mirror::ObjectArray<mirror::String>> strings_;
+ size_t index_;
+};
+
+// Compare strings based on length, used for sorting strings by length / reverse length.
+class StringLengthComparator {
+ public:
+ explicit StringLengthComparator(Handle<mirror::ObjectArray<mirror::String>> strings)
+ : strings_(strings) {
+ }
+ bool operator()(size_t a, size_t b) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return strings_->GetWithoutChecks(a)->GetLength() < strings_->GetWithoutChecks(b)->GetLength();
+ }
+
+ private:
+ Handle<mirror::ObjectArray<mirror::String>> strings_;
+};
+
+// Normal string < comparison through the chars_ array.
+class SubstringComparator {
+ public:
+ explicit SubstringComparator(const std::vector<uint16_t>* const chars) : chars_(chars) {
+ }
+ bool operator()(const std::pair<size_t, size_t>& a, const std::pair<size_t, size_t>& b) {
+ return std::lexicographical_compare(chars_->begin() + a.first,
+ chars_->begin() + a.first + a.second,
+ chars_->begin() + b.first,
+ chars_->begin() + b.first + b.second);
+ }
+
+ private:
+ const std::vector<uint16_t>* const chars_;
+};
+
+void ImageWriter::ProcessStrings() {
+ size_t total_strings = 0;
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ {
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ heap->VisitObjects(CountStringsCallback, &total_strings); // Count the strings.
+ }
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ auto strings = hs.NewHandle(cl->AllocStringArray(self, total_strings));
+ StringCollector string_collector(strings, 0U);
+ {
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ // Read strings into the array.
+ heap->VisitObjects(StringCollector::Callback, &string_collector);
+ }
+ // Some strings could have gotten freed if AllocStringArray caused a GC.
+ CHECK_LE(string_collector.GetIndex(), total_strings);
+ total_strings = string_collector.GetIndex();
+ size_t total_length = 0;
+ std::vector<size_t> reverse_sorted_strings;
+ for (size_t i = 0; i < total_strings; ++i) {
+ mirror::String* s = strings->GetWithoutChecks(i);
+ // Accumulate the total character length and record the index for length sorting.
+ total_length += s->GetLength();
+ reverse_sorted_strings.push_back(i);
+ }
+ // Sort by reverse length.
+ StringLengthComparator comparator(strings);
+ std::sort(reverse_sorted_strings.rbegin(), reverse_sorted_strings.rend(), comparator);
+ // Deduplicate prefixes and add strings to the char array.
+ std::vector<uint16_t> combined_chars(total_length, 0U);
+ size_t num_chars = 0;
+ // Count of characters in strings that are a proper (non-equal) prefix of another string.
+ // We don't count savings from fully equal strings since those get interned later anyway.
+ size_t prefix_saved_chars = 0;
+ std::set<std::pair<size_t, size_t>, SubstringComparator> existing_strings((
+ SubstringComparator(&combined_chars)));
+ for (size_t i = 0; i < total_strings; ++i) {
+ mirror::String* s = strings->GetWithoutChecks(reverse_sorted_strings[i]);
+ // Add the string to the end of the char array.
+ size_t length = s->GetLength();
+ for (size_t j = 0; j < length; ++j) {
+ combined_chars[num_chars++] = s->CharAt(j);
+ }
+ // Try to see if the string exists as a prefix of an existing string.
+ size_t new_offset = 0;
+ std::pair<size_t, size_t> new_string(num_chars - length, length);
+ auto it = existing_strings.lower_bound(new_string);
+ bool is_prefix = false;
+ if (it != existing_strings.end()) {
+ CHECK_LE(length, it->second);
+ is_prefix = std::equal(combined_chars.begin() + it->first,
+ combined_chars.begin() + it->first + it->second,
+ combined_chars.begin() + new_string.first);
+ }
+ if (is_prefix) {
+ // The new string is a prefix of an existing one; reuse that string's offset.
+ new_offset = it->first;
+ // Remove the added chars.
+ num_chars -= length;
+ if (it->second != length) {
+ prefix_saved_chars += length;
+ }
+ } else {
+ new_offset = new_string.first;
+ existing_strings.insert(new_string);
+ }
+ s->SetOffset(new_offset);
+ }
+ // Allocate the combined char array and point every string at it.
+ auto* array = mirror::CharArray::Alloc(self, num_chars);
+ for (size_t i = 0; i < num_chars; ++i) {
+ array->SetWithoutChecks<false>(i, combined_chars[i]);
+ }
+ for (size_t i = 0; i < total_strings; ++i) {
+ strings->GetWithoutChecks(i)->SetArray(array);
+ }
+ LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
+ << total_length << " prefix saved chars=" << prefix_saved_chars;
+ ComputeEagerResolvedStrings();
+}
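
A standalone sketch of the prefix-sharing scheme ProcessStrings() implements above (plain std types instead of mirror::String; the class name and u16string interface are illustrative, not ART APIs). Strings are added longest-first; each new string is appended to a shared character buffer, and if it turns out to be a prefix of a string already present, the appended characters are dropped and the earlier offset is reused:

#include <algorithm>
#include <cstdint>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Each string is an (offset, length) pair into a shared char buffer, ordered by content.
class PrefixDedup {
 public:
  PrefixDedup() : existing_(Cmp{&chars_}) {}

  // Appends s to the buffer and returns the offset it should use; if s is a prefix of a
  // string added earlier, the freshly appended characters are dropped and the earlier
  // string's offset is reused. Adding longest strings first maximizes the sharing.
  size_t Add(const std::u16string& s) {
    size_t start = chars_.size();
    chars_.insert(chars_.end(), s.begin(), s.end());
    std::pair<size_t, size_t> candidate(start, s.size());
    auto it = existing_.lower_bound(candidate);
    if (it != existing_.end() && s.size() <= it->second &&
        std::equal(chars_.begin() + start, chars_.begin() + start + s.size(),
                   chars_.begin() + it->first)) {
      chars_.resize(start);  // drop the duplicate characters
      return it->first;      // reuse the existing occurrence
    }
    existing_.insert(candidate);
    return start;
  }

  const std::vector<uint16_t>& chars() const { return chars_; }

 private:
  // Compares two (offset, length) entries by the characters they denote in chars_.
  struct Cmp {
    const std::vector<uint16_t>* chars;
    bool operator()(const std::pair<size_t, size_t>& a,
                    const std::pair<size_t, size_t>& b) const {
      return std::lexicographical_compare(chars->begin() + a.first,
                                          chars->begin() + a.first + a.second,
                                          chars->begin() + b.first,
                                          chars->begin() + b.first + b.second);
    }
  };

  std::vector<uint16_t> chars_;
  std::set<std::pair<size_t, size_t>, Cmp> existing_;
};

// Example: Add(u"abcdef") then Add(u"abc") returns the same offset (0), storing 6 chars total.
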
+
+void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
}
@@ -293,7 +620,7 @@
}
}
-void ImageWriter::ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void ImageWriter::ComputeEagerResolvedStrings() {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
}
@@ -324,7 +651,8 @@
// Remove the undesired classes from the class roots.
for (const std::string& it : non_image_classes) {
- class_linker->RemoveClass(it.c_str(), NULL);
+ bool result = class_linker->RemoveClass(it.c_str(), NULL);
+ DCHECK(result);
}
// Clear references to removed classes from the DexCaches.
@@ -363,8 +691,7 @@
return true;
}
-void ImageWriter::CheckNonImageClassesRemoved()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void ImageWriter::CheckNonImageClassesRemoved() {
if (compiler_driver_.GetImageClasses() != nullptr) {
gc::Heap* heap = Runtime::Current()->GetHeap();
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -393,29 +720,29 @@
}
}
-void ImageWriter::CalculateObjectOffsets(Object* obj) {
+void ImageWriter::CalculateObjectBinSlots(Object* obj) {
DCHECK(obj != NULL);
// if it is a string, we want to intern it if it's not interned.
if (obj->GetClass()->IsStringClass()) {
// we must be an interned string that was forward referenced and already assigned
- if (IsImageOffsetAssigned(obj)) {
+ if (IsImageBinSlotAssigned(obj)) {
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
mirror::String* const interned = obj->AsString()->Intern();
if (obj != interned) {
- if (!IsImageOffsetAssigned(interned)) {
+ if (!IsImageBinSlotAssigned(interned)) {
// interned obj is after us, allocate its location early
- AssignImageOffset(interned);
+ AssignImageBinSlot(interned);
}
// point those looking for this object to the interned version.
- SetImageOffset(obj, GetImageOffset(interned));
+ SetImageBinSlot(obj, GetImageBinSlot(interned));
return;
}
// else (obj == interned), nothing to do but fall through to the normal case
}
- AssignImageOffset(obj);
+ AssignImageBinSlot(obj);
}
ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
@@ -454,6 +781,8 @@
ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
+ image_roots->Set<false>(ImageHeader::kImtUnimplementedMethod,
+ runtime->GetImtUnimplementedMethod());
image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
image_roots->Set<false>(ImageHeader::kCalleeSaveMethod,
runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
@@ -481,36 +810,40 @@
}
//
size_t num_reference_fields = h_class->NumReferenceInstanceFields();
+ MemberOffset field_offset = h_class->GetFirstReferenceInstanceFieldOffset();
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = h_class->GetInstanceField(i);
- MemberOffset field_offset = field->GetOffset();
mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
}
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
}
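
The loops above now step a raw field offset instead of fetching each ArtField, relying on reference fields being laid out contiguously from the first reference field, each slot being sizeof(mirror::HeapReference<mirror::Object>) wide. A minimal standalone model of that iteration pattern (plain structs standing in for mirror types, not ART API):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

// Stand-in for mirror::HeapReference<mirror::Object>: a compressed 32-bit reference.
struct HeapReference { uint32_t ref; };

// Visit 'count' reference slots laid out back-to-back starting at 'first_offset'
// inside the raw object memory, advancing by sizeof(HeapReference) each step.
template <typename Visitor>
void VisitReferenceFields(const uint8_t* object, size_t first_offset, size_t count,
                          Visitor&& visit) {
  size_t offset = first_offset;
  for (size_t i = 0; i < count; ++i) {
    HeapReference slot;
    std::memcpy(&slot, object + offset, sizeof(slot));
    visit(offset, slot.ref);
    offset += sizeof(HeapReference);  // next reference field
  }
}

int main() {
  // A fake object: an 8-byte header followed by three reference fields.
  uint8_t object[8 + 3 * sizeof(HeapReference)] = {};
  for (uint32_t i = 0; i < 3; ++i) {
    HeapReference r{0x1000u + i};
    std::memcpy(object + 8 + i * sizeof(HeapReference), &r, sizeof(r));
  }
  VisitReferenceFields(object, /*first_offset=*/8, /*count=*/3,
                       [](size_t off, uint32_t ref) {
                         std::cout << "field @" << off << " -> 0x" << std::hex << ref
                                   << std::dec << "\n";
                       });
  return 0;
}
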
// For an unvisited object, visit it then all its children found via fields.
void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
- if (!IsImageOffsetAssigned(obj)) {
+ // Use our own visitor routine (instead of GC visitor) to get better locality between
+ // an object and its fields
+ if (!IsImageBinSlotAssigned(obj)) {
// Walk instance fields of all objects
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
// visit the object itself.
- CalculateObjectOffsets(h_obj.Get());
+ CalculateObjectBinSlots(h_obj.Get());
WalkInstanceFields(h_obj.Get(), klass.Get());
// Walk static fields of a Class.
if (h_obj->IsClass()) {
size_t num_static_fields = klass->NumReferenceStaticFields();
+ MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset();
for (size_t i = 0; i < num_static_fields; ++i) {
- mirror::ArtField* field = klass->GetStaticField(i);
- MemberOffset field_offset = field->GetOffset();
mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
}
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
@@ -532,6 +865,24 @@
writer->WalkFieldsInOrder(obj);
}
+void ImageWriter::UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) {
+ ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
+ DCHECK(writer != nullptr);
+ writer->UnbinObjectsIntoOffset(obj);
+}
+
+void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
+ CHECK(obj != nullptr);
+
+ // We know the bin slot, and the total bin sizes for all objects by now,
+ // so calculate the object's final image offset.
+
+ DCHECK(IsImageBinSlotAssigned(obj));
+ BinSlot bin_slot = GetImageBinSlot(obj);
+ // Change the lockword from a bin slot into an offset
+ AssignImageOffset(obj, bin_slot);
+}
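
Once every bin's total size is known, turning a bin slot into a final offset reduces to a prefix sum over the bins plus the slot's byte index. A sketch of that arithmetic, assuming AssignImageOffset() lays the bins out back-to-back after the objects' start offset (names and the three-bin enum are illustrative):

#include <array>
#include <cstddef>
#include <cstdint>
#include <numeric>

enum Bin : uint32_t { kBinString, kBinRegular, kBinClassInitialized, kBinCount };

struct Layout {
  size_t objects_begin;                    // offset of the first object in the image
  std::array<size_t, kBinCount> bin_size;  // total bytes assigned to each bin

  // Sum of all bins strictly before 'bin' (the bin's base inside the object area).
  size_t BinBase(Bin bin) const {
    return std::accumulate(bin_size.begin(), bin_size.begin() + bin, size_t{0});
  }

  // Final image offset for an object whose bin slot is (bin, byte index within the bin).
  size_t ImageOffset(Bin bin, uint32_t index) const {
    return objects_begin + BinBase(bin) + index;
  }
};

int main() {
  Layout layout{/*objects_begin=*/4096, {{1024, 8192, 2048}}};
  // An object assigned byte index 64 in the "regular" bin lands after the header
  // area (4096) plus the whole string bin (1024).
  return layout.ImageOffset(kBinRegular, 64) == 4096 + 1024 + 64 ? 0 : 1;
}
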
+
void ImageWriter::CalculateNewObjectOffsets() {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -542,16 +893,22 @@
// Leave space for the header, but do not write it yet, we need to
// know where image_roots is going to end up
- image_end_ += RoundUp(sizeof(ImageHeader), 8); // 64-bit-alignment
+ image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment); // Object-align the first object.
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// TODO: Image spaces only?
DCHECK_LT(image_end_, image_->Size());
- // Clear any pre-existing monitors which may have been in the monitor words.
+ image_objects_offset_begin_ = image_end_;
+ // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
heap->VisitObjects(WalkFieldsCallback, this);
+ // Transform each object's bin slot into an offset which will be used to do the final copy.
+ heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
+ DCHECK(saved_hashes_map_.empty()); // All bin slot hashes should have been moved into the vector by now.
}
+ DCHECK_GT(image_end_, GetBinSizeSum());
+
image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots.Get()));
// Note that image_end_ is left at end of used space
@@ -561,6 +918,7 @@
CHECK_NE(0U, oat_loaded_size);
const uint8_t* oat_file_begin = GetOatFileBegin();
const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
+
oat_data_begin_ = oat_file_begin + oat_data_offset;
const uint8_t* oat_data_end = oat_data_begin_ + oat_file_->Size();
@@ -578,12 +936,11 @@
PointerToLowMemUInt32(oat_file_begin),
PointerToLowMemUInt32(oat_data_begin_),
PointerToLowMemUInt32(oat_data_end),
- PointerToLowMemUInt32(oat_file_end));
+ PointerToLowMemUInt32(oat_file_end),
+ compile_pic_);
}
-
-void ImageWriter::CopyAndFixupObjects()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void ImageWriter::CopyAndFixupObjects() {
ScopedAssertNoThreadSuspension ants(Thread::Current(), "ImageWriter");
gc::Heap* heap = Runtime::Current()->GetHeap();
// TODO: heap validation can't handle this fix up pass
@@ -606,7 +963,14 @@
size_t offset = image_writer->GetImageOffset(obj);
uint8_t* dst = image_writer->image_->Begin() + offset;
const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
- size_t n = obj->SizeOf();
+ size_t n;
+ if (obj->IsArtMethod()) {
+ // Size without pointer fields since we don't want to overrun the buffer if target art method
+ // is 32 bits but source is 64 bits.
+ n = mirror::ArtMethod::SizeWithoutPointerFields();
+ } else {
+ n = obj->SizeOf();
+ }
DCHECK_LT(offset + n, image_writer->image_->Size());
memcpy(dst, src, n);
Object* copy = reinterpret_cast<Object*>(dst);
@@ -616,6 +980,7 @@
image_writer->FixupObject(obj, copy);
}
+// Rewrite all the references in the copied object to point to their image address equivalent
class FixupVisitor {
public:
FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
@@ -651,14 +1016,16 @@
void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj->IsClass());
- FixupVisitor::operator()(obj, offset, false);
+ FixupVisitor::operator()(obj, offset, /*is_static*/false);
+ // TODO: Remove dead code
if (offset.Uint32Value() < mirror::Class::EmbeddedVTableOffset().Uint32Value()) {
return;
}
}
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
+ mirror::Reference* ref ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
@@ -685,12 +1052,16 @@
}
if (orig->IsArtMethod<kVerifyNone>()) {
FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
+ } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
+ // Set the right size for the target.
+ size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+ down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
}
}
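
The ArtMethod class object gets its object size recomputed for the target because the bytes an ArtMethod occupies depend on the target's pointer width, not the host's. A toy model of that size computation (field counts and sizes are made up for illustration, not ART's real layout):

#include <cstddef>

// Hypothetical layout: a fixed-size, pointer-free portion plus N pointer-sized
// entry point fields whose width depends on the *target*, not the host.
constexpr size_t kPointerFields = 5;              // illustrative count only
constexpr size_t kSizeWithoutPointerFields = 32;  // illustrative fixed portion only

constexpr size_t InstanceSize(size_t target_ptr_size) {
  return kSizeWithoutPointerFields + kPointerFields * target_ptr_size;
}

// Copy only the pointer-free prefix byte-for-byte, then record the target-sized total,
// so a 64-bit host writing a 32-bit image never overruns the smaller target object.
static_assert(InstanceSize(4) == 32 + 5 * 4, "32-bit target");
static_assert(InstanceSize(8) == 32 + 5 * 8, "64-bit target");
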
const uint8_t* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
- !method->IsAbstract()) << PrettyMethod(method);
+ !method->IsImtUnimplementedMethod() && !method->IsAbstract()) << PrettyMethod(method);
// Use original code if it exists. Otherwise, set the code pointer to the resolution
// trampoline.
@@ -721,9 +1092,11 @@
const uint8_t* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
// Calculate the quick entry point following the same logic as FixupMethod() below.
// The resolution method has a special trampoline to call.
- if (UNLIKELY(method == Runtime::Current()->GetResolutionMethod())) {
+ Runtime* runtime = Runtime::Current();
+ if (UNLIKELY(method == runtime->GetResolutionMethod())) {
return GetOatAddress(quick_resolution_trampoline_offset_);
- } else if (UNLIKELY(method == Runtime::Current()->GetImtConflictMethod())) {
+ } else if (UNLIKELY(method == runtime->GetImtConflictMethod() ||
+ method == runtime->GetImtUnimplementedMethod())) {
return GetOatAddress(quick_imt_conflict_trampoline_offset_);
} else {
// We assume all methods have code. If they don't currently then we set them to use the
@@ -741,27 +1114,48 @@
void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
+ // For 64 bit targets we need to repack the current runtime pointer sized fields to the right
+ // locations.
+ // Copy all of the fields from the runtime methods to the target methods first since we did a
+ // bytewise copy earlier.
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
+ target_ptr_size_);
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(orig->GetNativeGcMap(), target_ptr_size_);
// The resolution method has a special trampoline to call.
- if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
- } else if (UNLIKELY(orig == Runtime::Current()->GetImtConflictMethod())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+ Runtime* runtime = Runtime::Current();
+ if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
+ } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
+ orig == runtime->GetImtUnimplementedMethod())) {
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
} else {
// We assume all methods have code. If they don't currently then we set them to use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) {
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
- (const_cast<uint8_t*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+ GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>(
+ GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
} else {
bool quick_is_interpreted;
const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
- copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
+ copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);
// Portable entrypoint:
const uint8_t* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
@@ -784,18 +1178,19 @@
// initialization.
portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
}
- copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);
-
+ copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+ portable_code, target_ptr_size_);
// JNI entrypoint:
if (orig->IsNative()) {
// The native method's pointer is set to a stub to lookup via dlsym.
// Note this is not the code_ pointer, that is handled above.
- copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
+ copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
+ target_ptr_size_);
} else {
// Normal (non-abstract non-native) methods have various tables to relocate.
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
- copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
+ copy->SetNativeGcMapPtrSize<kVerifyNone>(native_gc_map, target_ptr_size_);
}
// Interpreter entrypoint:
@@ -803,9 +1198,11 @@
uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
? interpreter_to_interpreter_bridge_offset_
: interpreter_to_compiled_code_bridge_offset_;
- copy->SetEntryPointFromInterpreter<kVerifyNone>(
+ EntryPointFromInterpreter* interpreter_entrypoint =
reinterpret_cast<EntryPointFromInterpreter*>(
- const_cast<uint8_t*>(GetOatAddress(interpreter_code))));
+ const_cast<uint8_t*>(GetOatAddress(interpreter_code)));
+ copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+ interpreter_entrypoint, target_ptr_size_);
}
}
}
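
All of the *PtrSize setters above exist so entry points are written at the target's pointer width. A minimal sketch of what such a width-aware store amounts to (illustrative helper, not the ART setter; assumes host and target share endianness):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Store a target address into an image buffer field using the target's pointer width.
// Writing the host's width instead would clobber the following field whenever the
// target is narrower than the host.
inline void StorePtrSized(uint8_t* field, uint64_t value, size_t target_ptr_size) {
  if (target_ptr_size == 8u) {
    std::memcpy(field, &value, 8u);
  } else {
    uint32_t narrow = static_cast<uint32_t>(value);  // truncate for a 32-bit target
    std::memcpy(field, &narrow, 4u);
  }
}
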
@@ -835,4 +1232,32 @@
image_header->SetOatChecksum(oat_header->GetChecksum());
}
+size_t ImageWriter::GetBinSizeSum(ImageWriter::Bin up_to) const {
+ DCHECK_LE(up_to, kBinSize);
+ return std::accumulate(&bin_slot_sizes_[0], &bin_slot_sizes_[up_to], /*init*/0);
+}
+
+ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) {
+ // These values may need to get updated if more bins are added to the enum Bin
+ static_assert(kBinBits == 3, "wrong number of bin bits");
+ static_assert(kBinShift == 29, "wrong bin shift");
+ static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes");
+
+ DCHECK_LT(GetBin(), kBinSize);
+ DCHECK_ALIGNED(GetIndex(), kObjectAlignment);
+}
+
+ImageWriter::BinSlot::BinSlot(Bin bin, uint32_t index)
+ : BinSlot(index | (static_cast<uint32_t>(bin) << kBinShift)) {
+ DCHECK_EQ(index, GetIndex());
+}
+
+ImageWriter::Bin ImageWriter::BinSlot::GetBin() const {
+ return static_cast<Bin>((lockword_ & kBinMask) >> kBinShift);
+}
+
+uint32_t ImageWriter::BinSlot::GetIndex() const {
+ return lockword_ & ~kBinMask;
+}
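
With kBinBits == 3 and a 32-bit lock word, the bin occupies the top three bits and the index the low 29. The same packing arithmetic as a standalone sketch:

#include <cstdint>

constexpr uint32_t kBits = 3;                              // bits reserved for the bin
constexpr uint32_t kShift = 32 - kBits;                    // == 29
constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;  // 0xE0000000

constexpr uint32_t Pack(uint32_t bin, uint32_t index) {
  return index | (bin << kShift);  // index must fit in the low 29 bits
}
constexpr uint32_t BinOf(uint32_t word) { return (word & kMask) >> kShift; }
constexpr uint32_t IndexOf(uint32_t word) { return word & ~kMask; }

static_assert(BinOf(Pack(5, 0x40u)) == 5, "bin round-trips");
static_assert(IndexOf(Pack(5, 0x40u)) == 0x40u, "index round-trips");
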
+
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index e6a98d1..8c84b68 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -23,29 +23,36 @@
#include <memory>
#include <set>
#include <string>
+#include <ostream>
+#include "base/macros.h"
#include "driver/compiler_driver.h"
+#include "gc/space/space.h"
#include "mem_map.h"
#include "oat_file.h"
#include "mirror/dex_cache.h"
#include "os.h"
#include "safe_map.h"
#include "gc/space/space.h"
+#include "utils.h"
namespace art {
// Write a Space built during compilation for use during execution.
-class ImageWriter {
+class ImageWriter FINAL {
public:
- ImageWriter(const CompilerDriver& compiler_driver, uintptr_t image_begin)
+ ImageWriter(const CompilerDriver& compiler_driver, uintptr_t image_begin,
+ bool compile_pic)
: compiler_driver_(compiler_driver), image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
- image_end_(0), image_roots_address_(0), oat_file_(NULL),
- oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
+ image_end_(0), image_objects_offset_begin_(0), image_roots_address_(0), oat_file_(nullptr),
+ oat_data_begin_(nullptr), interpreter_to_interpreter_bridge_offset_(0),
interpreter_to_compiled_code_bridge_offset_(0), jni_dlsym_lookup_offset_(0),
portable_imt_conflict_trampoline_offset_(0), portable_resolution_trampoline_offset_(0),
portable_to_interpreter_bridge_offset_(0), quick_generic_jni_trampoline_offset_(0),
quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
- quick_to_interpreter_bridge_offset_(0) {
+ quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
+ target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+ bin_slot_sizes_(), bin_slot_count_() {
CHECK_NE(image_begin, 0U);
}
@@ -59,8 +66,8 @@
mirror::Object* GetImageAddress(mirror::Object* object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (object == NULL) {
- return NULL;
+ if (object == nullptr) {
+ return nullptr;
}
return reinterpret_cast<mirror::Object*>(image_begin_ + GetImageOffset(object));
}
@@ -84,14 +91,71 @@
// Mark the objects defined in this space in the given live bitmap.
void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Classify different kinds of bins that objects end up getting packed into during image writing.
+ enum Bin {
+ // Likely-clean:
+ kBinString, // [String] Almost always immutable (except for obj header).
+ kBinArtMethodsManagedInitialized, // [ArtMethod] Not-native, and initialized. Unlikely to dirty
+ // Unknown mix of clean/dirty:
+ kBinRegular,
+ // Likely-dirty:
+ // All classes get their own bins since their fields often get dirtied
+ kBinClassInitializedFinalStatics, // Class initializers have been run, no non-final statics
+ kBinClassInitialized, // Class initializers have been run
+ kBinClassVerified, // Class verified, but initializers haven't been run
+ kBinArtMethodNative, // Art method that is actually native
+ kBinArtMethodNotInitialized, // Art method with a declaring class that wasn't initialized
+ // Don't care about other ArtMethods since they don't get dirtied
+ // Add more bins here if we add more segregation code.
+ kBinSize,
+ };
+
+ friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
+
+ static constexpr size_t kBinBits = MinimumBitsToStore(kBinSize - 1);
+ // uint32 = typeof(lockword_)
+ static constexpr size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits;
+ // 111000.....0
+ static constexpr size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
+
+ // We use the lock word to store the bin # and bin index of the object in the image.
+ //
+ // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
+ // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
+ struct BinSlot {
+ explicit BinSlot(uint32_t lockword);
+ BinSlot(Bin bin, uint32_t index);
+
+ // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
+ Bin GetBin() const;
+ // The offset in bytes from the beginning of the bin. Aligned to kObjectAlignment.
+ uint32_t GetIndex() const;
+ // Pack into a single uint32_t, for storing into a lock word.
+ explicit operator uint32_t() const { return lockword_; }
+ // Comparison operator for map support
+ bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }
+
+ private:
+ // Must be the same size as LockWord, any larger and we would truncate the data.
+ const uint32_t lockword_;
+ };
+
// We use the lock word to store the offset of the object in the image.
- void AssignImageOffset(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetImageOffset(mirror::Object* object, size_t offset)
+ void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetImageOffset(mirror::Object* object, BinSlot bin_slot, size_t offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsImageOffsetAssigned(mirror::Object* object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImageBinSlotAssigned(mirror::Object* object) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
@@ -111,8 +175,8 @@
// different .o ELF objects.
DCHECK_LT(offset, oat_file_->Size());
#endif
- if (offset == 0) {
- return NULL;
+ if (offset == 0u) {
+ return nullptr;
}
return oat_data_begin_ + offset;
}
@@ -134,13 +198,16 @@
static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Combine string char arrays.
+ void ProcessStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Remove unwanted classes from various roots.
void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Verify unwanted classes removed.
- void CheckNonImageClassesRemoved();
+ void CheckNonImageClassesRemoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -151,7 +218,9 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CalculateObjectOffsets(mirror::Object* obj)
+ void CalculateObjectBinSlots(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UnbinObjectsIntoOffset(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
@@ -160,9 +229,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void WalkFieldsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Creates the contiguous image in memory and adjusts pointers.
- void CopyAndFixupObjects();
+ void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupMethod(mirror::ArtMethod* orig, mirror::ArtMethod* copy)
@@ -180,6 +251,9 @@
// Patches references in OatFile to expect runtime addresses.
void SetOatChecksumFromElfFile(File* elf_file);
+ // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
+ size_t GetBinSizeSum(Bin up_to = kBinSize) const;
+
const CompilerDriver& compiler_driver_;
// Beginning target image address for the output image.
@@ -188,6 +262,9 @@
// Offset to the free space in image_.
size_t image_end_;
+ // Offset from image_begin_ to where the first object is in image_.
+ size_t image_objects_offset_begin_;
+
// The image roots address in the image.
uint32_t image_roots_address_;
@@ -200,6 +277,9 @@
// Saved hashes (objects are inside of the image so that they don't move).
std::vector<std::pair<mirror::Object*, uint32_t>> saved_hashes_;
+ // Saved hashes, keyed by bin slot within the image (no final address assigned yet).
+ std::map<BinSlot, uint32_t> saved_hashes_map_;
+
// Beginning target oat address for the pointers from the output image to its oat file.
const uint8_t* oat_data_begin_;
@@ -217,6 +297,14 @@
uint32_t quick_imt_conflict_trampoline_offset_;
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
+ const bool compile_pic_;
+
+ // Size of pointers on the target architecture.
+ size_t target_ptr_size_;
+
+ // Bin slot tracking for dirty object packing
+ size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin
+ size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
friend class FixupVisitor;
friend class FixupClassVisitor;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 0fea2a7..2755442 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -530,7 +530,7 @@
// point return value would be in xmm0. We use log, to somehow ensure
// the compiler will use the floating point stack.
-jdouble Java_MyClassNatives_logD(JNIEnv* env, jclass klass, jdouble x) {
+jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) {
return log(x);
}
@@ -544,7 +544,7 @@
JNI_TEST(RunStaticLogDoubleMethod)
-jfloat Java_MyClassNatives_logF(JNIEnv* env, jclass klass, jfloat x) {
+jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) {
return logf(x);
}
@@ -558,15 +558,15 @@
JNI_TEST(RunStaticLogFloatMethod)
-jboolean Java_MyClassNatives_returnTrue(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnTrue(JNIEnv*, jclass) {
return JNI_TRUE;
}
-jboolean Java_MyClassNatives_returnFalse(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnFalse(JNIEnv*, jclass) {
return JNI_FALSE;
}
-jint Java_MyClassNatives_returnInt(JNIEnv* env, jclass klass) {
+jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) {
return 42;
}
@@ -785,9 +785,9 @@
EXPECT_EQ(11, trace_array->GetLength());
// Check stack trace entries have expected values
- for (int32_t i = 0; i < trace_array->GetLength(); ++i) {
- EXPECT_EQ(-2, trace_array->Get(i)->GetLineNumber());
- mirror::StackTraceElement* ste = trace_array->Get(i);
+ for (int32_t j = 0; j < trace_array->GetLength(); ++j) {
+ EXPECT_EQ(-2, trace_array->Get(j)->GetLineNumber());
+ mirror::StackTraceElement* ste = trace_array->Get(j);
EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str());
EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str());
EXPECT_STREQ("fooI", ste->GetMethodName()->ToModifiedUtf8().c_str());
@@ -1056,7 +1056,10 @@
JNI_TEST(CompileAndRunFloatFloatMethod)
-void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
+void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED,
+ jobject thisObj ATTRIBUTE_UNUSED,
+ jint i1 ATTRIBUTE_UNUSED,
+ jlong l1 ATTRIBUTE_UNUSED) {
// EXPECT_EQ(kNative, Thread::Current()->GetState());
// EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
// EXPECT_TRUE(thisObj != nullptr);
@@ -1520,7 +1523,7 @@
JNI_TEST(WithoutImplementation)
-void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1, jint i2, jint i3,
+void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3,
jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
jfloat f5, jfloat f6, jfloat f7, jfloat f8, jfloat f9,
@@ -1591,7 +1594,7 @@
JNI_TEST(StackArgsIntsFirst)
-void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv* env, jclass klass, jfloat f1, jfloat f2,
+void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat f2,
jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7,
jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2,
jint i3, jint i4, jint i5, jint i6, jint i7, jint i8,
@@ -1662,7 +1665,7 @@
JNI_TEST(StackArgsFloatsFirst)
-void Java_MyClassNatives_stackArgsMixed(JNIEnv* env, jclass klass, jint i1, jfloat f1, jint i2,
+void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass, jint i1, jfloat f1, jint i2,
jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5,
jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8,
jfloat f8, jint i9, jfloat f9, jint i10, jfloat f10) {
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index d2f54f8..ff37d85 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -298,6 +298,7 @@
case 'D': ret_type = irb_.getJDoubleTy(); break;
case 'L': ret_type = irb_.getJObjectTy(); break;
default: LOG(FATAL) << "Unreachable: unexpected return type in shorty " << shorty;
+ UNREACHABLE();
}
// Get argument type
std::vector< ::llvm::Type*> args_type;
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index f0c0ed7..769cd4c 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -21,6 +21,22 @@
namespace art {
namespace arm {
+// Used by hard float.
+static const Register kHFCoreArgumentRegisters[] = {
+ R0, R1, R2, R3
+};
+
+static const SRegister kHFSArgumentRegisters[] = {
+ S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15
+};
+
+static const DRegister kHFDArgumentRegisters[] = {
+ D0, D1, D2, D3, D4, D5, D6, D7
+};
+
+static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters),
+ "ks d argument registers mismatch");
+
// Calling convention
ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
@@ -31,26 +47,43 @@
return ArmManagedRegister::FromCoreRegister(IP); // R12
}
-static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
- if (shorty[0] == 'F') {
- return ArmManagedRegister::FromCoreRegister(R0);
- } else if (shorty[0] == 'D') {
- return ArmManagedRegister::FromRegisterPair(R0_R1);
- } else if (shorty[0] == 'J') {
- return ArmManagedRegister::FromRegisterPair(R0_R1);
- } else if (shorty[0] == 'V') {
- return ArmManagedRegister::NoRegister();
+ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
+ if (kArm32QuickCodeUseSoftFloat) {
+ switch (GetShorty()[0]) {
+ case 'V':
+ return ArmManagedRegister::NoRegister();
+ case 'D':
+ case 'J':
+ return ArmManagedRegister::FromRegisterPair(R0_R1);
+ default:
+ return ArmManagedRegister::FromCoreRegister(R0);
+ }
} else {
- return ArmManagedRegister::FromCoreRegister(R0);
+ switch (GetShorty()[0]) {
+ case 'V':
+ return ArmManagedRegister::NoRegister();
+ case 'D':
+ return ArmManagedRegister::FromDRegister(D0);
+ case 'F':
+ return ArmManagedRegister::FromSRegister(S0);
+ case 'J':
+ return ArmManagedRegister::FromRegisterPair(R0_R1);
+ default:
+ return ArmManagedRegister::FromCoreRegister(R0);
+ }
}
}
-ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
- return ReturnRegisterForShorty(GetShorty());
-}
-
ManagedRegister ArmJniCallingConvention::ReturnRegister() {
- return ReturnRegisterForShorty(GetShorty());
+ switch (GetShorty()[0]) {
+ case 'V':
+ return ArmManagedRegister::NoRegister();
+ case 'D':
+ case 'J':
+ return ArmManagedRegister::FromRegisterPair(R0_R1);
+ default:
+ return ArmManagedRegister::FromCoreRegister(R0);
+ }
}
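
Both return-register choices key purely off the first character of the shorty: the managed hard-float convention returns floating point in s0/d0, while the JNI boundary keeps the soft-float convention (doubles and longs in the r0/r1 pair). A small standalone mapping of those rules (string labels are only for illustration):

#include <string>

// Where a result comes back, keyed by the shorty's return character.
std::string ManagedReturnLocation(char shorty0, bool hard_float) {
  switch (shorty0) {
    case 'V': return "none";
    case 'J': return "r0/r1 pair";
    case 'D': return hard_float ? "d0" : "r0/r1 pair";
    case 'F': return hard_float ? "s0" : "r0";
    default:  return "r0";
  }
}

std::string JniReturnLocation(char shorty0) {
  switch (shorty0) {
    case 'V': return "none";
    case 'D':
    case 'J': return "r0/r1 pair";
    default:  return "r0";
  }
}
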
ManagedRegister ArmJniCallingConvention::IntReturnRegister() {
@@ -88,17 +121,70 @@
const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM to free them up for scratch use, we then assume
// all arguments are on the stack.
- if (entry_spills_.size() == 0) {
- size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
- if (num_spills > 0) {
- entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1));
- if (num_spills > 1) {
- entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2));
- if (num_spills > 2) {
- entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3));
+ if (kArm32QuickCodeUseSoftFloat) {
+ if (entry_spills_.size() == 0) {
+ size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+ if (num_spills > 0) {
+ entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1));
+ if (num_spills > 1) {
+ entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2));
+ if (num_spills > 2) {
+ entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3));
+ }
}
}
}
+ } else {
+ if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+ uint32_t gpr_index = 1; // R0 ~ R3. Reserve r0 for ArtMethod*.
+ uint32_t fpr_index = 0; // S0 ~ S15.
+ uint32_t fpr_double_index = 0; // D0 ~ D7.
+
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ if (IsCurrentParamAFloatOrDouble()) {
+ if (IsCurrentParamADouble()) { // Double.
+ // Double should not overlap with float.
+ fpr_double_index = (std::max(fpr_double_index * 2, RoundUp(fpr_index, 2))) / 2;
+ if (fpr_double_index < arraysize(kHFDArgumentRegisters)) {
+ entry_spills_.push_back(
+ ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[fpr_double_index++]));
+ } else {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
+ }
+ } else { // Float.
+ // Float should not overlap with double.
+ if (fpr_index % 2 == 0) {
+ fpr_index = std::max(fpr_double_index * 2, fpr_index);
+ }
+ if (fpr_index < arraysize(kHFSArgumentRegisters)) {
+ entry_spills_.push_back(
+ ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[fpr_index++]));
+ } else {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+ }
+ }
+ } else {
+ // FIXME: a pointer-sized parameter is reported here as both a reference and a long.
+ if (IsCurrentParamALong() && !IsCurrentParamAReference()) { // Long.
+ if (gpr_index < arraysize(kHFCoreArgumentRegisters)) {
+ entry_spills_.push_back(
+ ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++]));
+ } else {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+ }
+ }
+ // High part of long or 32-bit argument.
+ if (gpr_index < arraysize(kHFCoreArgumentRegisters)) {
+ entry_spills_.push_back(
+ ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++]));
+ } else {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+ }
+ }
+ Next();
+ }
+ }
}
return entry_spills_;
}
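
The hard-float branch above hands out r1-r3 for core arguments (r0 stays reserved for the ArtMethod*), s0-s15 for floats and d0-d7 for doubles, and keeps float and double allocation from overlapping. A simplified standalone model of that allocation, ignoring long splitting and spill sizes (illustrative, not the ART code):

#include <algorithm>
#include <cstdint>
#include <string>

// Simplified ARM hard-float argument assignment: r0 is reserved, integer args start
// at r1; floats take s-registers, doubles take d-registers, and the two never overlap.
class ArmHardFloatArgs {
 public:
  std::string Next(char kind) {  // 'I' int, 'F' float, 'D' double
    if (kind == 'D') {
      // Align past any s-register already used, then take the next d-register.
      d_index_ = std::max(d_index_ * 2, (s_index_ + 1) / 2 * 2) / 2;
      return d_index_ < 8 ? "d" + std::to_string(d_index_++) : "stack";
    }
    if (kind == 'F') {
      if (s_index_ % 2 == 0) {
        s_index_ = std::max(d_index_ * 2, s_index_);  // skip s-regs shadowed by doubles
      }
      return s_index_ < 16 ? "s" + std::to_string(s_index_++) : "stack";
    }
    return r_index_ < 4 ? "r" + std::to_string(r_index_++) : "stack";
  }

 private:
  uint32_t r_index_ = 1;  // r0 reserved
  uint32_t s_index_ = 0;
  uint32_t d_index_ = 0;
};

// Example: arguments (float, double, float, int) are assigned s0, d1, s1, r1.
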
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index f6795ea..c3fe75b 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -176,12 +176,8 @@
// 4. Write out the end of the quick frames.
if (is_64_bit_target) {
__ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
- __ StoreImmediateToThread64(Thread::TopOfManagedStackPcOffset<8>(), 0,
- mr_conv->InterproceduralScratchRegister());
} else {
__ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
- __ StoreImmediateToThread32(Thread::TopOfManagedStackPcOffset<4>(), 0,
- mr_conv->InterproceduralScratchRegister());
}
// 5. Move frame down to allow space for out going args.
@@ -311,7 +307,9 @@
}
// 9. Plant call to native code associated with method.
- __ Call(main_jni_conv->MethodStackOffset(), mirror::ArtMethod::NativeMethodOffset(),
+ MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset(
+ InstructionSetPointerSize(instruction_set));
+ __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 525f05c..a100552 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -38,6 +38,7 @@
}
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+ UNUSED(jni);
if (shorty[0] == 'F' || shorty[0] == 'D') {
return X86_64ManagedRegister::FromXmmRegister(XMM0);
} else if (shorty[0] == 'J') {
diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h
index 03498ef..990ba02 100644
--- a/compiler/llvm/ir_builder.h
+++ b/compiler/llvm/ir_builder.h
@@ -101,10 +101,8 @@
// Extend memory barrier
//--------------------------------------------------------------------------
void CreateMemoryBarrier(MemBarrierKind barrier_kind) {
-#if ANDROID_SMP
// TODO: select atomic ordering according to given barrier kind.
CreateFence(::llvm::SequentiallyConsistent);
-#endif
}
//--------------------------------------------------------------------------
diff --git a/compiler/llvm/llvm_compiler.cc b/compiler/llvm/llvm_compiler.cc
index 55af614..fa93e00 100644
--- a/compiler/llvm/llvm_compiler.cc
+++ b/compiler/llvm/llvm_compiler.cc
@@ -16,6 +16,7 @@
#include "llvm_compiler.h"
+#include "base/macros.h"
#ifdef ART_USE_PORTABLE_COMPILER
#include "compiler.h"
#include "compiler_llvm.h"
@@ -152,9 +153,10 @@
Compiler* CreateLLVMCompiler(CompilerDriver* driver) {
#ifdef ART_USE_PORTABLE_COMPILER
- return new llvm::LLVMCompiler(driver);
+ return new llvm::LLVMCompiler(driver);
#else
- return nullptr;
+ UNUSED(driver);
+ return nullptr;
#endif
}
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 847fa0d..ce4ed6d 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "arch/instruction_set_features.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "compiler.h"
@@ -97,7 +98,7 @@
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
- InstructionSetFeatures::FromFeatureString(insn_set, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
compiler_options_.reset(new CompilerOptions);
verification_results_.reset(new VerificationResults(compiler_options_.get()));
@@ -109,12 +110,12 @@
verification_results_.get(),
method_inliner_map_.get(),
compiler_kind, insn_set,
- insn_features.get(), false, nullptr, 2, true, true,
- timer_.get(), ""));
+ insn_features.get(), false, nullptr, nullptr, 2, true,
+ true, timer_.get(), ""));
jobject class_loader = nullptr;
if (kCompile) {
- TimingLogger timings("OatTest::WriteRead", false, false);
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
+ TimingLogger timings2("OatTest::WriteRead", false, false);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
}
ScratchFile tmp;
@@ -139,7 +140,7 @@
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), nullptr,
- false, &error_msg));
+ nullptr, false, &error_msg));
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
@@ -174,12 +175,12 @@
oat_class.GetType()) << descriptor;
size_t method_index = 0;
- for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) {
- CheckMethod(klass->GetDirectMethod(i),
+ for (size_t j = 0; j < klass->NumDirectMethods(); j++, method_index++) {
+ CheckMethod(klass->GetDirectMethod(j),
oat_class.GetOatMethod(method_index), dex_file);
}
- for (size_t i = 0; i < num_virtual_methods; i++, method_index++) {
- CheckMethod(klass->GetVirtualMethod(i),
+ for (size_t j = 0; j < num_virtual_methods; j++, method_index++) {
+ CheckMethod(klass->GetVirtualMethod(j),
oat_class.GetOatMethod(method_index), dex_file);
}
}
@@ -198,7 +199,7 @@
InstructionSet insn_set = kX86;
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
- InstructionSetFeatures::FromFeatureString(insn_set, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
std::vector<const DexFile*> dex_files;
uint32_t image_file_location_oat_checksum = 0;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index be52f40..c6beb36 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -70,16 +70,18 @@
public:
NoRelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no patches expected.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no patches expected.
}
- void Patch(std::vector<uint8_t>* code, uint32_t literal_offset, uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ void Patch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, uint32_t literal_offset ATTRIBUTE_UNUSED,
+ uint32_t patch_offset ATTRIBUTE_UNUSED,
+ uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
LOG(FATAL) << "Unexpected relative patch.";
}
@@ -91,11 +93,12 @@
public:
X86RelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no limit on relative call distance.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no limit on relative call distance.
}
@@ -306,7 +309,7 @@
arm::Thumb2Assembler assembler;
assembler.LoadFromOffset(
arm::kLoadWord, arm::PC, arm::R0,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
assembler.bkpt(0);
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
@@ -360,7 +363,8 @@
// The thunk just uses the entry point in the ArtMethod. This works even for calls
// to the generic JNI and interpreter trampolines.
arm64::Arm64Assembler assembler;
- Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ Offset offset(mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value());
assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
std::vector<uint8_t> thunk_code(assembler.CodeSize());
MemoryRegion code(thunk_code.data(), thunk_code.size());
@@ -648,7 +652,7 @@
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it) {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// num_non_null_compiled_methods_ since we only want to allocate
@@ -860,7 +864,7 @@
: OatDexMethodVisitor(writer, offset) {
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1291,9 +1295,9 @@
// Update oat_dex_files_.
auto oat_class_it = oat_classes_.begin();
for (OatDexFile* oat_dex_file : oat_dex_files_) {
- for (uint32_t& offset : oat_dex_file->methods_offsets_) {
+ for (uint32_t& method_offset : oat_dex_file->methods_offsets_) {
DCHECK(oat_class_it != oat_classes_.end());
- offset = (*oat_class_it)->offset_;
+ method_offset = (*oat_class_it)->offset_;
++oat_class_it;
}
oat_dex_file->UpdateChecksum(oat_header_);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index a1e61b9..5b61f21 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -231,10 +231,10 @@
// data to write
- COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (2 ^ 16), class_status_wont_fit_in_16bits);
+ static_assert(mirror::Class::Status::kStatusMax < (2 ^ 16), "class status won't fit in 16bits");
int16_t status_;
- COMPILE_ASSERT(OatClassType::kOatClassMax < (2 ^ 16), oat_class_type_wont_fit_in_16bits);
+ static_assert(OatClassType::kOatClassMax < (2 ^ 16), "oat_class type won't fit in 16bits");
uint16_t type_;
uint32_t method_bitmap_size_;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 2648d4d..be8631a 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -42,33 +41,33 @@
*/
class Temporaries : public ValueObject {
public:
- Temporaries(HGraph* graph, size_t count) : graph_(graph), count_(count), index_(0) {
- graph_->UpdateNumberOfTemporaries(count_);
- }
+ explicit Temporaries(HGraph* graph) : graph_(graph), index_(0) {}
void Add(HInstruction* instruction) {
- // We currently only support vreg size temps.
- DCHECK(instruction->GetType() != Primitive::kPrimLong
- && instruction->GetType() != Primitive::kPrimDouble);
- HInstruction* temp = new (graph_->GetArena()) HTemporary(index_++);
+ HInstruction* temp = new (graph_->GetArena()) HTemporary(index_);
instruction->GetBlock()->AddInstruction(temp);
+
DCHECK(temp->GetPrevious() == instruction);
+
+ size_t offset;
+ if (instruction->GetType() == Primitive::kPrimLong
+ || instruction->GetType() == Primitive::kPrimDouble) {
+ offset = 2;
+ } else {
+ offset = 1;
+ }
+ index_ += offset;
+
+ graph_->UpdateTemporariesVRegSlots(index_);
}
private:
HGraph* const graph_;
- // The total number of temporaries that will be used.
- const size_t count_;
-
// Current index in the temporary stack, updated by `Add`.
size_t index_;
};
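
The rewritten Temporaries helper no longer takes an up-front count: it grows the temporary area on demand and reserves two vreg slots for wide (long/double) values. The bookkeeping reduces to roughly this (standalone sketch, illustrative names):

#include <cstddef>

enum class Type { kInt, kRef, kFloat, kLong, kDouble };

class TempSlots {
 public:
  // Returns the slot index assigned to the new temporary and grows the high-water mark.
  size_t Add(Type type) {
    size_t slot = index_;
    index_ += (type == Type::kLong || type == Type::kDouble) ? 2 : 1;  // wide values take 2 vregs
    if (index_ > max_slots_) max_slots_ = index_;
    return slot;
  }
  size_t MaxSlots() const { return max_slots_; }  // analogous to the graph's vreg slot update

 private:
  size_t index_ = 0;
  size_t max_slots_ = 0;
};
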
-static bool IsTypeSupported(Primitive::Type type) {
- return type != Primitive::kPrimFloat && type != Primitive::kPrimDouble;
-}
-
void HGraphBuilder::InitializeLocals(uint16_t count) {
graph_->SetNumberOfVRegs(count);
locals_.SetSize(count);
@@ -79,10 +78,10 @@
}
}
-bool HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
+void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
// dex_compilation_unit_ is null only when unit testing.
if (dex_compilation_unit_ == nullptr) {
- return true;
+ return;
}
graph_->SetNumberOfInVRegs(number_of_parameters);
@@ -117,60 +116,49 @@
parameter_index++;
}
}
- return true;
-}
-
-static bool CanHandleCodeItem(const DexFile::CodeItem& code_item) {
- if (code_item.tries_size_ > 0) {
- return false;
- }
- return true;
}
template<typename T>
-void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
T* comparison = new (arena_) T(first, second);
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
}
template<typename T>
-void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
T* comparison = new (arena_) T(value, GetIntConstant(0));
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
}
HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
- if (!CanHandleCodeItem(code_item)) {
- return nullptr;
- }
-
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
+ code_start_ = code_ptr;
// Setup the graph with the entry block and exit block.
graph_ = new (arena_) HGraph(arena_);
@@ -187,17 +175,34 @@
// start a new block, and create these blocks.
ComputeBranchTargets(code_ptr, code_end);
- if (!InitializeParameters(code_item.ins_size_)) {
- return nullptr;
+ // Also create blocks for catch handlers.
+ if (code_item.tries_size_ != 0) {
+ const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(code_item, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+ for (uint32_t idx = 0; idx < handlers_size; ++idx) {
+ CatchHandlerIterator iterator(handlers_ptr);
+ for (; iterator.HasNext(); iterator.Next()) {
+ uint32_t address = iterator.GetHandlerAddress();
+ HBasicBlock* block = FindBlockStartingAt(address);
+ if (block == nullptr) {
+ block = new (arena_) HBasicBlock(graph_, address);
+ branch_targets_.Put(address, block);
+ }
+ block->SetIsCatchBlock();
+ }
+ handlers_ptr = iterator.EndDataPointer();
+ }
}
- size_t dex_offset = 0;
+ InitializeParameters(code_item.ins_size_);
+
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
- // Update the current block if dex_offset starts a new block.
- MaybeUpdateCurrentBlock(dex_offset);
+ // Update the current block if dex_pc starts a new block.
+ MaybeUpdateCurrentBlock(dex_pc);
const Instruction& instruction = *Instruction::At(code_ptr);
- if (!AnalyzeDexInstruction(instruction, dex_offset)) return nullptr;
- dex_offset += instruction.SizeInCodeUnits();
+ if (!AnalyzeDexInstruction(instruction, dex_pc)) return nullptr;
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
}
@@ -238,25 +243,25 @@
// Iterate over all instructions and find branching instructions. Create blocks for
// the locations these instructions branch to.
- size_t dex_offset = 0;
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
const Instruction& instruction = *Instruction::At(code_ptr);
if (instruction.IsBranch()) {
- int32_t target = instruction.GetTargetOffset() + dex_offset;
+ int32_t target = instruction.GetTargetOffset() + dex_pc;
// Create a block for the target instruction.
if (FindBlockStartingAt(target) == nullptr) {
block = new (arena_) HBasicBlock(graph_, target);
branch_targets_.Put(target, block);
}
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
- if ((code_ptr < code_end) && (FindBlockStartingAt(dex_offset) == nullptr)) {
- block = new (arena_) HBasicBlock(graph_, dex_offset);
- branch_targets_.Put(dex_offset, block);
+ if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
+ block = new (arena_) HBasicBlock(graph_, dex_pc);
+ branch_targets_.Put(dex_pc, block);
}
} else {
code_ptr += instruction.SizeInCodeUnits();
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
}
}
}
@@ -267,6 +272,21 @@
}
template<typename T>
+void HGraphBuilder::Unop_12x(const Instruction& instruction, Primitive::Type type) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+void HGraphBuilder::Conversion_12x(const Instruction& instruction,
+ Primitive::Type input_type,
+ Primitive::Type result_type) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
+ current_block_->AddInstruction(new (arena_) HTypeConversion(result_type, first));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
@@ -275,6 +295,16 @@
}
template<typename T>
+void HGraphBuilder::Binop_23x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), type);
@@ -283,6 +313,16 @@
}
template<typename T>
+void HGraphBuilder::Binop_12x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegA(), type);
+ HInstruction* second = LoadLocal(instruction.VRegB(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
HInstruction* second = GetIntConstant(instruction.VRegC_22s());
@@ -316,7 +356,7 @@
}
bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
@@ -358,31 +398,44 @@
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
HInvoke* invoke = nullptr;
- if (invoke_type == kVirtual) {
+ if (invoke_type == kVirtual || invoke_type == kInterface || invoke_type == kSuper) {
MethodReference target_method(dex_file_, method_idx);
uintptr_t direct_code;
uintptr_t direct_method;
- int vtable_index;
- // TODO: Add devirtualization support.
- compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
- &invoke_type, &target_method, &vtable_index,
+ int table_index;
+ InvokeType optimized_invoke_type = invoke_type;
+ compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
+ &optimized_invoke_type, &target_method, &table_index,
&direct_code, &direct_method);
- if (vtable_index == -1) {
+ if (table_index == -1) {
return false;
}
- invoke = new (arena_) HInvokeVirtual(
- arena_, number_of_arguments, return_type, dex_offset, vtable_index);
+
+ if (optimized_invoke_type == kVirtual) {
+ invoke = new (arena_) HInvokeVirtual(
+ arena_, number_of_arguments, return_type, dex_pc, table_index);
+ } else if (optimized_invoke_type == kInterface) {
+ invoke = new (arena_) HInvokeInterface(
+ arena_, number_of_arguments, return_type, dex_pc, method_idx, table_index);
+ } else if (optimized_invoke_type == kDirect) {
+ // For this compiler, sharpening only works if we compile PIC.
+ DCHECK(compiler_driver_->GetCompilerOptions().GetCompilePic());
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index);
+ }
} else {
+ DCHECK(invoke_type == kDirect || invoke_type == kStatic);
// Treat invoke-direct like static calls for now.
invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ arena_, number_of_arguments, return_type, dex_pc, method_idx);
}
size_t start_index = 0;
- Temporaries temps(graph_, is_instance_call ? 1 : 0);
+ Temporaries temps(graph_);
if (is_instance_call) {
HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_offset);
+ HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
invoke->SetArgumentAt(0, null_check);
@@ -396,7 +449,7 @@
bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
if (!is_range && is_wide && args[i] + 1 != args[i + 1]) {
LOG(WARNING) << "Non sequential register pair in " << dex_compilation_unit_->GetSymbol()
- << " at " << dex_offset;
+ << " at " << dex_pc;
// We do not implement non sequential register pair.
return false;
}
@@ -409,12 +462,13 @@
DCHECK_EQ(argument_index, number_of_arguments);
current_block_->AddInstruction(invoke);
+ latest_result_ = invoke;
return true;
}
-bool HGraphBuilder::BuildFieldAccess(const Instruction& instruction,
- uint32_t dex_offset,
- bool is_put) {
+bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
+ uint32_t dex_pc,
+ bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_22c();
uint32_t obj_reg = instruction.VRegB_22c();
uint16_t field_index = instruction.VRegC_22c();
@@ -432,14 +486,11 @@
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- if (!IsTypeSupported(field_type)) {
- return false;
- }
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot);
- current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_offset));
+ current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
if (is_put) {
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
@@ -460,21 +511,114 @@
return true;
}
+
+
+bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
+ uint32_t dex_pc,
+ bool is_put) {
+ uint32_t source_or_dest_reg = instruction.VRegA_21c();
+ uint16_t field_index = instruction.VRegB_21c();
+
+ uint32_t storage_index;
+ bool is_referrers_class;
+ bool is_initialized;
+ bool is_volatile;
+ MemberOffset field_offset(0u);
+ Primitive::Type field_type;
+
+ bool fast_path = compiler_driver_->ComputeStaticFieldInfo(field_index,
+ dex_compilation_unit_,
+ is_put,
+ &field_offset,
+ &storage_index,
+ &is_referrers_class,
+ &is_volatile,
+ &is_initialized,
+ &field_type);
+ if (!fast_path) {
+ return false;
+ }
+
+ if (is_volatile) {
+ return false;
+ }
+
+ HLoadClass* constant = new (arena_) HLoadClass(
+ storage_index, is_referrers_class, dex_pc);
+ current_block_->AddInstruction(constant);
+
+ HInstruction* cls = constant;
+ if (!is_initialized) {
+ cls = new (arena_) HClinitCheck(constant, dex_pc);
+ current_block_->AddInstruction(cls);
+ }
+
+ if (is_put) {
+ // We need to keep the class alive before loading the value.
+ Temporaries temps(graph_);
+ temps.Add(cls);
+ HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
+ DCHECK_EQ(value->GetType(), field_type);
+ current_block_->AddInstruction(
+ new (arena_) HStaticFieldSet(cls, value, field_type, field_offset));
+ } else {
+ current_block_->AddInstruction(new (arena_) HStaticFieldGet(cls, field_type, field_offset));
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
+ }
+ return true;
+}
+
+void HGraphBuilder::BuildCheckedDivRem(uint16_t out_vreg,
+ uint16_t first_vreg,
+ int64_t second_vreg_or_constant,
+ uint32_t dex_pc,
+ Primitive::Type type,
+ bool second_is_constant,
+ bool isDiv) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ HInstruction* first = LoadLocal(first_vreg, type);
+ HInstruction* second = nullptr;
+ if (second_is_constant) {
+ if (type == Primitive::kPrimInt) {
+ second = GetIntConstant(second_vreg_or_constant);
+ } else {
+ second = GetLongConstant(second_vreg_or_constant);
+ }
+ } else {
+ second = LoadLocal(second_vreg_or_constant, type);
+ }
+
+ if (!second_is_constant
+ || (type == Primitive::kPrimInt && second->AsIntConstant()->GetValue() == 0)
+ || (type == Primitive::kPrimLong && second->AsLongConstant()->GetValue() == 0)) {
+ second = new (arena_) HDivZeroCheck(second, dex_pc);
+ Temporaries temps(graph_);
+ current_block_->AddInstruction(second);
+ temps.Add(current_block_->GetLastInstruction());
+ }
+
+ if (isDiv) {
+ current_block_->AddInstruction(new (arena_) HDiv(type, first, second, dex_pc));
+ } else {
+ current_block_->AddInstruction(new (arena_) HRem(type, first, second, dex_pc));
+ }
+ UpdateLocal(out_vreg, current_block_->GetLastInstruction());
+}
+
void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put,
Primitive::Type anticipated_type) {
uint8_t source_or_dest_reg = instruction.VRegA_23x();
uint8_t array_reg = instruction.VRegB_23x();
uint8_t index_reg = instruction.VRegC_23x();
- DCHECK(IsTypeSupported(anticipated_type));
-
// We need one temporary for the null check, one for the index, and one for the length.
- Temporaries temps(graph_, 3);
+ Temporaries temps(graph_);
HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot);
- object = new (arena_) HNullCheck(object, dex_offset);
+ object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
temps.Add(object);
@@ -482,29 +626,171 @@
current_block_->AddInstruction(length);
temps.Add(length);
HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt);
- index = new (arena_) HBoundsCheck(index, length, dex_offset);
+ index = new (arena_) HBoundsCheck(index, length, dex_pc);
current_block_->AddInstruction(index);
temps.Add(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
// TODO: Insert a type check node if the type is Object.
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, anticipated_type, dex_offset));
+ object, index, value, anticipated_type, dex_pc));
} else {
current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
}
-void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
- if (target_offset <= 0) {
- // Unconditionnally add a suspend check to backward branches. We can remove
- // them after we recognize loops in the graph.
- current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_offset));
+void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
+ uint32_t type_index,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index) {
+ HInstruction* length = GetIntConstant(number_of_vreg_arguments);
+ HInstruction* object = new (arena_) HNewArray(length, dex_pc, type_index);
+ current_block_->AddInstruction(object);
+
+ const char* descriptor = dex_file_->StringByTypeIdx(type_index);
+ DCHECK_EQ(descriptor[0], '[') << descriptor;
+ char primitive = descriptor[1];
+ DCHECK(primitive == 'I'
+ || primitive == 'L'
+ || primitive == '[') << descriptor;
+ bool is_reference_array = (primitive == 'L') || (primitive == '[');
+ Primitive::Type type = is_reference_array ? Primitive::kPrimNot : Primitive::kPrimInt;
+
+ Temporaries temps(graph_);
+ temps.Add(object);
+ for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
+ HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
+ HInstruction* index = GetIntConstant(i);
+ current_block_->AddInstruction(
+ new (arena_) HArraySet(object, index, value, type, dex_pc));
+ }
+ latest_result_ = object;
+}
+
+template <typename T>
+void HGraphBuilder::BuildFillArrayData(HInstruction* object,
+ const T* data,
+ uint32_t element_count,
+ Primitive::Type anticipated_type,
+ uint32_t dex_pc) {
+ for (uint32_t i = 0; i < element_count; ++i) {
+ HInstruction* index = GetIntConstant(i);
+ HInstruction* value = GetIntConstant(data[i]);
+ current_block_->AddInstruction(new (arena_) HArraySet(
+ object, index, value, anticipated_type, dex_pc));
}
}
-bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc) {
+ Temporaries temps(graph_);
+ HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
+ HNullCheck* null_check = new (arena_) HNullCheck(array, dex_pc);
+ current_block_->AddInstruction(null_check);
+ temps.Add(null_check);
+
+ HInstruction* length = new (arena_) HArrayLength(null_check);
+ current_block_->AddInstruction(length);
+
+ int32_t payload_offset = instruction.VRegB_31t() + dex_pc;
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
+ const uint8_t* data = payload->data;
+ uint32_t element_count = payload->element_count;
+
+ // Implementation of this DEX instruction seems to be that the bounds check is
+ // done before doing any stores.
+ HInstruction* last_index = GetIntConstant(payload->element_count - 1);
+ current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
+
+ switch (payload->element_width) {
+ case 1:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int8_t*>(data),
+ element_count,
+ Primitive::kPrimByte,
+ dex_pc);
+ break;
+ case 2:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int16_t*>(data),
+ element_count,
+ Primitive::kPrimShort,
+ dex_pc);
+ break;
+ case 4:
+ BuildFillArrayData(null_check,
+ reinterpret_cast<const int32_t*>(data),
+ element_count,
+ Primitive::kPrimInt,
+ dex_pc);
+ break;
+ case 8:
+ BuildFillWideArrayData(null_check,
+ reinterpret_cast<const int64_t*>(data),
+ element_count,
+ dex_pc);
+ break;
+ default:
+ LOG(FATAL) << "Unknown element width for " << payload->element_width;
+ }
+}
+
+void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
+ const int64_t* data,
+ uint32_t element_count,
+ uint32_t dex_pc) {
+ for (uint32_t i = 0; i < element_count; ++i) {
+ HInstruction* index = GetIntConstant(i);
+ HInstruction* value = GetLongConstant(data[i]);
+ current_block_->AddInstruction(new (arena_) HArraySet(
+ object, index, value, Primitive::kPrimLong, dex_pc));
+ }
+}
+
+bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc) {
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc);
+ current_block_->AddInstruction(cls);
+ // The class needs a temporary before being used by the type check.
+ Temporaries temps(graph_);
+ temps.Add(cls);
+ if (instruction.Opcode() == Instruction::INSTANCE_OF) {
+ current_block_->AddInstruction(
+ new (arena_) HInstanceOf(object, cls, type_known_final, dex_pc));
+ UpdateLocal(destination, current_block_->GetLastInstruction());
+ } else {
+ DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
+ current_block_->AddInstruction(
+ new (arena_) HCheckCast(object, cls, type_known_final, dex_pc));
+ }
+ return true;
+}
+
+void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc) {
+ if (target_offset <= 0) {
+ // Unconditionally add a suspend check to backward branches. We can remove
+ // them after we recognize loops in the graph.
+ current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_pc));
+ }
+}
+
+bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
if (current_block_ == nullptr) {
return true; // Dead code
}
@@ -575,8 +861,7 @@
break;
}
- // TODO: these instructions are also used to move floating point values, so what is
- // the type (int or float)?
+ // Note that the SSA building will refine the types.
case Instruction::MOVE:
case Instruction::MOVE_FROM16:
case Instruction::MOVE_16: {
@@ -585,8 +870,7 @@
break;
}
- // TODO: these instructions are also used to move floating point values, so what is
- // the type (long or double)?
+ // Note that the SSA building will refine the types.
case Instruction::MOVE_WIDE:
case Instruction::MOVE_WIDE_FROM16:
case Instruction::MOVE_WIDE_16: {
@@ -609,8 +893,8 @@
}
#define IF_XX(comparison, cond) \
- case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_offset); break; \
- case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_offset); break
+ case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_pc); break; \
+ case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_pc); break
IF_XX(HEqual, EQ);
IF_XX(HNotEqual, NE);
@@ -623,8 +907,8 @@
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
int32_t offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(offset, dex_offset);
- HBasicBlock* target = FindBlockStartingAt(offset + dex_offset);
+ PotentiallyAddSuspendCheck(offset, dex_pc);
+ HBasicBlock* target = FindBlockStartingAt(offset + dex_pc);
DCHECK(target != nullptr);
current_block_->AddInstruction(new (arena_) HGoto());
current_block_->AddSuccessor(target);
@@ -652,32 +936,102 @@
break;
}
- case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_SUPER:
case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- if (!BuildInvoke(instruction, dex_offset, method_idx, number_of_vreg_arguments, false, args, -1)) {
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
+ number_of_vreg_arguments, false, args, -1)) {
return false;
}
break;
}
- case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
- if (!BuildInvoke(instruction, dex_offset, method_idx,
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
number_of_vreg_arguments, true, nullptr, register_index)) {
return false;
}
break;
}
+ case Instruction::NEG_INT: {
+ Unop_12x<HNeg>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::NEG_LONG: {
+ Unop_12x<HNeg>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::NEG_FLOAT: {
+ Unop_12x<HNeg>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::NEG_DOUBLE: {
+ Unop_12x<HNeg>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
+ case Instruction::NOT_INT: {
+ Unop_12x<HNot>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::NOT_LONG: {
+ Unop_12x<HNot>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::INT_TO_LONG: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::INT_TO_FLOAT: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::INT_TO_DOUBLE: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimDouble);
+ break;
+ }
+
+ case Instruction::LONG_TO_INT: {
+ Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::INT_TO_BYTE: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte);
+ break;
+ }
+
+ case Instruction::INT_TO_SHORT: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimShort);
+ break;
+ }
+
+ case Instruction::INT_TO_CHAR: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimChar);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -708,6 +1062,16 @@
break;
}
+ case Instruction::SUB_FLOAT: {
+ Binop_23x<HSub>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::SUB_DOUBLE: {
+ Binop_23x<HSub>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
case Instruction::ADD_INT_2ADDR: {
Binop_12x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -723,6 +1087,80 @@
break;
}
+ case Instruction::MUL_FLOAT: {
+ Binop_23x<HMul>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::MUL_DOUBLE: {
+ Binop_23x<HMul>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
+ case Instruction::DIV_INT: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimInt, false, true);
+ break;
+ }
+
+ case Instruction::DIV_LONG: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimLong, false, true);
+ break;
+ }
+
+ case Instruction::DIV_FLOAT: {
+ Binop_23x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
+ break;
+ }
+
+ case Instruction::DIV_DOUBLE: {
+ Binop_23x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::REM_INT: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimInt, false, false);
+ break;
+ }
+
+ case Instruction::REM_LONG: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimLong, false, false);
+ break;
+ }
+
+ case Instruction::AND_INT: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
case Instruction::ADD_LONG_2ADDR: {
Binop_12x<HAdd>(instruction, Primitive::kPrimLong);
break;
@@ -748,6 +1186,16 @@
break;
}
+ case Instruction::SUB_FLOAT_2ADDR: {
+ Binop_12x<HSub>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::SUB_DOUBLE_2ADDR: {
+ Binop_12x<HSub>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
case Instruction::MUL_INT_2ADDR: {
Binop_12x<HMul>(instruction, Primitive::kPrimInt);
break;
@@ -758,11 +1206,100 @@
break;
}
+ case Instruction::MUL_FLOAT_2ADDR: {
+ Binop_12x<HMul>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::MUL_DOUBLE_2ADDR: {
+ Binop_12x<HMul>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
+ case Instruction::DIV_INT_2ADDR: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_pc, Primitive::kPrimInt, false, true);
+ break;
+ }
+
+ case Instruction::DIV_LONG_2ADDR: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_pc, Primitive::kPrimLong, false, true);
+ break;
+ }
+
+ case Instruction::REM_INT_2ADDR: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_pc, Primitive::kPrimInt, false, false);
+ break;
+ }
+
+ case Instruction::REM_LONG_2ADDR: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_pc, Primitive::kPrimLong, false, false);
+ break;
+ }
+
+ case Instruction::DIV_FLOAT_2ADDR: {
+ Binop_12x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
+ break;
+ }
+
+ case Instruction::DIV_DOUBLE_2ADDR: {
+ Binop_12x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::AND_INT_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
case Instruction::ADD_INT_LIT16: {
Binop_22s<HAdd>(instruction, false);
break;
}
+ case Instruction::AND_INT_LIT16: {
+ Binop_22s<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT16: {
+ Binop_22s<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT16: {
+ Binop_22s<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT: {
Binop_22s<HSub>(instruction, true);
break;
@@ -778,6 +1315,21 @@
break;
}
+ case Instruction::AND_INT_LIT8: {
+ Binop_22b<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT8: {
+ Binop_22b<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT8: {
+ Binop_22b<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT_LIT8: {
Binop_22b<HSub>(instruction, true);
break;
@@ -788,17 +1340,63 @@
break;
}
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::DIV_INT_LIT8: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimInt, true, true);
+ break;
+ }
+
+ case Instruction::REM_INT_LIT16:
+ case Instruction::REM_INT_LIT8: {
+ BuildCheckedDivRem(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimInt, true, false);
+ break;
+ }
+
case Instruction::NEW_INSTANCE: {
current_block_->AddInstruction(
- new (arena_) HNewInstance(dex_offset, instruction.VRegB_21c()));
+ new (arena_) HNewInstance(dex_pc, instruction.VRegB_21c()));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
break;
}
+ case Instruction::NEW_ARRAY: {
+ HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
+ current_block_->AddInstruction(
+ new (arena_) HNewArray(length, dex_pc, instruction.VRegC_22c()));
+ UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::FILLED_NEW_ARRAY: {
+ uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
+ uint32_t type_index = instruction.VRegB_35c();
+ uint32_t args[5];
+ instruction.GetVarArgs(args);
+ BuildFilledNewArray(dex_pc, type_index, number_of_vreg_arguments, false, args, 0);
+ break;
+ }
+
+ case Instruction::FILLED_NEW_ARRAY_RANGE: {
+ uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
+ uint32_t type_index = instruction.VRegB_3rc();
+ uint32_t register_index = instruction.VRegC_3rc();
+ BuildFilledNewArray(
+ dex_pc, type_index, number_of_vreg_arguments, true, nullptr, register_index);
+ break;
+ }
+
+ case Instruction::FILL_ARRAY_DATA: {
+ BuildFillArrayData(instruction, dex_pc);
+ break;
+ }
+
case Instruction::MOVE_RESULT:
case Instruction::MOVE_RESULT_WIDE:
case Instruction::MOVE_RESULT_OBJECT:
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ UpdateLocal(instruction.VRegA(), latest_result_);
+ latest_result_ = nullptr;
break;
case Instruction::CMP_LONG: {
@@ -816,7 +1414,7 @@
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
- if (!BuildFieldAccess(instruction, dex_offset, false)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, false)) {
return false;
}
break;
@@ -829,7 +1427,33 @@
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
- if (!BuildFieldAccess(instruction, dex_offset, true)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, true)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::SGET:
+ case Instruction::SGET_WIDE:
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT: {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, false)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::SPUT:
+ case Instruction::SPUT_WIDE:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT: {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, true)) {
return false;
}
break;
@@ -837,11 +1461,11 @@
#define ARRAY_XX(kind, anticipated_type) \
case Instruction::AGET##kind: { \
- BuildArrayAccess(instruction, dex_offset, false, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, false, anticipated_type); \
break; \
} \
case Instruction::APUT##kind: { \
- BuildArrayAccess(instruction, dex_offset, true, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, true, anticipated_type); \
break; \
}
@@ -855,16 +1479,101 @@
case Instruction::ARRAY_LENGTH: {
HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot);
+ // No need for a temporary for the null check; it is the only input of the following
+ // instruction.
+ object = new (arena_) HNullCheck(object, dex_pc);
+ current_block_->AddInstruction(object);
current_block_->AddInstruction(new (arena_) HArrayLength(object));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
break;
}
+ case Instruction::CONST_STRING: {
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_pc));
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CONST_STRING_JUMBO: {
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_pc));
+ UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CONST_CLASS: {
+ uint16_t type_index = instruction.VRegB_21c();
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ current_block_->AddInstruction(
+ new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc));
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::MOVE_EXCEPTION: {
+ current_block_->AddInstruction(new (arena_) HLoadException());
+ UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::THROW: {
+ HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot);
+ current_block_->AddInstruction(new (arena_) HThrow(exception, dex_pc));
+ // A throw instruction must branch to the exit block.
+ current_block_->AddSuccessor(exit_block_);
+ // We finished building this block. Set the current block to null to avoid
+ // adding dead instructions to it.
+ current_block_ = nullptr;
+ break;
+ }
+
+ case Instruction::INSTANCE_OF: {
+ uint8_t destination = instruction.VRegA_22c();
+ uint8_t reference = instruction.VRegB_22c();
+ uint16_t type_index = instruction.VRegC_22c();
+ if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_pc)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::CHECK_CAST: {
+ uint8_t reference = instruction.VRegA_21c();
+ uint16_t type_index = instruction.VRegB_21c();
+ if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_pc)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::MONITOR_ENTER: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kEnter,
+ dex_pc));
+ break;
+ }
+
+ case Instruction::MONITOR_EXIT: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kExit,
+ dex_pc));
+ break;
+ }
+
default:
return false;
}
return true;
-}
+} // NOLINT(readability/fn_size)
HIntConstant* HGraphBuilder::GetIntConstant0() {
if (constant0_ != nullptr) {
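
Note on the builder.cc hunks above: BuildCheckedDivRem only emits an HDivZeroCheck when the divisor is not a compile-time constant, or when it is the constant zero. Below is a minimal standalone sketch of that guard decision with hypothetical helper names; it is not ART code, just an illustration of the condition used in the diff.

#include <cstdint>
#include <iostream>

// Hypothetical sketch: a divide-by-zero check is needed unless the divisor is
// a constant known to be non-zero.
bool NeedsDivZeroCheck(bool divisor_is_constant, int64_t divisor_value) {
  return !divisor_is_constant || divisor_value == 0;
}

int main() {
  std::cout << NeedsDivZeroCheck(false, 0) << "\n";  // 1: register divisor, check needed
  std::cout << NeedsDivZeroCheck(true, 4) << "\n";   // 0: non-zero constant, no check
  std::cout << NeedsDivZeroCheck(true, 0) << "\n";   // 1: constant zero, check (and throw) needed
  return 0;
}
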
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index e68cdb0..897bcec 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -48,7 +48,9 @@
dex_file_(dex_file),
dex_compilation_unit_(dex_compilation_unit),
compiler_driver_(driver),
- return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])) {}
+ return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
+ code_start_(nullptr),
+ latest_result_(nullptr) {}
// Only for unit testing.
HGraphBuilder(ArenaAllocator* arena, Primitive::Type return_type = Primitive::kPrimInt)
@@ -64,7 +66,9 @@
dex_file_(nullptr),
dex_compilation_unit_(nullptr),
compiler_driver_(nullptr),
- return_type_(return_type) {}
+ return_type_(return_type),
+ code_start_(nullptr),
+ latest_result_(nullptr) {}
HGraph* BuildGraph(const DexFile::CodeItem& code);
@@ -72,7 +76,7 @@
// Analyzes the dex instruction and adds HInstruction to the graph
// to execute that instruction. Returns whether the instruction can
// be handled.
- bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset);
+ bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc);
// Finds all instructions that start a new block, and populates branch_targets_ with
// the newly created blocks.
@@ -88,44 +92,102 @@
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
- void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset);
+ void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc);
+ void InitializeParameters(uint16_t number_of_parameters);
- // Temporarily returns whether the compiler supports the parameters
- // of the method.
- bool InitializeParameters(uint16_t number_of_parameters);
+ template<typename T>
+ void Unop_12x(const Instruction& instruction, Primitive::Type type);
template<typename T>
void Binop_23x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_23x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_12x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_22b(const Instruction& instruction, bool reverse);
template<typename T>
void Binop_22s(const Instruction& instruction, bool reverse);
- template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_offset);
- template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_offset);
+ template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_pc);
+ template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_pc);
+
+ void Conversion_12x(const Instruction& instruction,
+ Primitive::Type input_type,
+ Primitive::Type result_type);
+
+ void BuildCheckedDivRem(uint16_t out_reg,
+ uint16_t first_reg,
+ int64_t second_reg_or_constant,
+ uint32_t dex_pc,
+ Primitive::Type type,
+ bool second_is_lit,
+ bool is_div);
void BuildReturn(const Instruction& instruction, Primitive::Type type);
- bool BuildFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_get);
+ // Builds an instance field access node and returns whether the instruction is supported.
+ bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
+
+ // Builds a static field access node and returns whether the instruction is supported.
+ bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
+
void BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_get,
Primitive::Type anticipated_type);
// Builds an invocation node and returns whether the instruction is supported.
bool BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
uint32_t* args,
uint32_t register_index);
+ // Builds a new array node and the instructions that fill it.
+ void BuildFilledNewArray(uint32_t dex_pc,
+ uint32_t type_index,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index);
+
+ void BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc);
+
+ // Fills the given object with data as specified in the fill-array-data
+ // instruction. Currently only used for non-reference and non-floating point
+ // arrays.
+ template <typename T>
+ void BuildFillArrayData(HInstruction* object,
+ const T* data,
+ uint32_t element_count,
+ Primitive::Type anticipated_type,
+ uint32_t dex_pc);
+
+ // Fills the given object with data as specified in the fill-array-data
+ // instruction. The data must be for long and double arrays.
+ void BuildFillWideArrayData(HInstruction* object,
+ const int64_t* data,
+ uint32_t element_count,
+ uint32_t dex_pc);
+
+ // Builds an `HInstanceOf` or `HCheckCast` instruction.
+ // Returns whether we succeeded in building the instruction.
+ bool BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc);
+
ArenaAllocator* const arena_;
// A list of the size of the dex code holding block information for
@@ -148,6 +210,14 @@
CompilerDriver* const compiler_driver_;
const Primitive::Type return_type_;
+ // The pointer in the dex file where the instructions of the code item
+ // being currently compiled start.
+ const uint16_t* code_start_;
+
+ // The last invoke or fill-new-array being built. Only to be
+ // used by move-result instructions.
+ HInstruction* latest_result_;
+
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
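
The latest_result_ field declared above pairs each invoke or filled-new-array with the move-result that follows it. A minimal standalone sketch of that handshake, using hypothetical types rather than the real HGraphBuilder API:

#include <cassert>

struct HInstruction { int id; };

// Hypothetical sketch: the value-producing instruction registers itself, and
// the next move-result consumes it exactly once.
struct ResultTracker {
  HInstruction* latest_result = nullptr;

  void OnInvoke(HInstruction* invoke) {
    latest_result = invoke;  // remember the instruction that produced a value
  }

  HInstruction* OnMoveResult() {
    HInstruction* result = latest_result;
    latest_result = nullptr;  // a second move-result would see nothing
    return result;
  }
};

int main() {
  ResultTracker tracker;
  HInstruction invoke{1};
  tracker.OnInvoke(&invoke);
  assert(tracker.OnMoveResult() == &invoke);
  assert(tracker.OnMoveResult() == nullptr);
  return 0;
}
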
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 408e13e..0b59327 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -17,6 +17,7 @@
#include "code_generator.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
@@ -25,6 +26,9 @@
#include "gc_map_builder.h"
#include "leb128.h"
#include "mapping_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object_reference.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
@@ -32,6 +36,10 @@
namespace art {
+size_t CodeGenerator::GetCacheOffset(uint32_t index) {
+ return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
+}
+
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
@@ -43,7 +51,7 @@
MarkNotLeaf();
}
ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
- + GetGraph()->GetNumberOfTemporaries()
+ + GetGraph()->GetTemporariesVRegSlots()
+ 1 /* filler */,
0, /* the baseline compiler does not have live registers at slow path */
GetGraph()->GetMaximumNumberOfOutVRegs()
@@ -110,6 +118,20 @@
}
}
LOG(FATAL) << "Could not find a register in baseline register allocator";
+ UNREACHABLE();
+ return -1;
+}
+
+size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
+ for (size_t i = 0; i < length - 1; i += 2) {
+ if (!array[i] && !array[i + 1]) {
+ array[i] = true;
+ array[i + 1] = true;
+ return i;
+ }
+ }
+ LOG(FATAL) << "Could not find a register in baseline register allocator";
+ UNREACHABLE();
return -1;
}
@@ -128,12 +150,15 @@
Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
+ // The type of the previous instruction tells us if we need a single or double stack slot.
+ Primitive::Type type = temp->GetType();
+ int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
// Use the temporary region (right below the dex registers).
int32_t slot = GetFrameSize() - FrameEntrySpillSize()
- kVRegSize // filler
- (number_of_locals * kVRegSize)
- - ((1 + temp->GetIndex()) * kVRegSize);
- return Location::StackSlot(slot);
+ - ((temp_size + temp->GetIndex()) * kVRegSize);
+ return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}
int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
@@ -179,6 +204,11 @@
} else if (loc.IsFpuRegister()) {
DCHECK(!blocked_fpu_registers_[loc.reg()]);
blocked_fpu_registers_[loc.reg()] = true;
+ } else if (loc.IsFpuRegisterPair()) {
+ DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()]);
+ blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()] = true;
+ DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()]);
+ blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()] = true;
} else if (loc.IsRegisterPair()) {
DCHECK(!blocked_core_registers_[loc.AsRegisterPairLow<int>()]);
blocked_core_registers_[loc.AsRegisterPairLow<int>()] = true;
@@ -189,10 +219,14 @@
for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
Location loc = locations->GetTemp(i);
+ // The DCHECKS below check that a register is not specified twice in
+ // the summary.
if (loc.IsRegister()) {
- // Check that a register is not specified twice in the summary.
DCHECK(!blocked_core_registers_[loc.reg()]);
blocked_core_registers_[loc.reg()] = true;
+ } else if (loc.IsFpuRegister()) {
+ DCHECK(!blocked_fpu_registers_[loc.reg()]);
+ blocked_fpu_registers_[loc.reg()] = true;
} else {
DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
}
@@ -254,16 +288,22 @@
HInstruction* previous = instruction->GetPrevious();
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
Move(previous, temp_location, instruction);
- previous->GetLocations()->SetOut(temp_location);
}
return;
}
AllocateRegistersLocally(instruction);
for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
Location location = instruction->GetLocations()->InAt(i);
+ HInstruction* input = instruction->InputAt(i);
if (location.IsValid()) {
// Move the input to the desired location.
- Move(instruction->InputAt(i), location, instruction);
+ if (input->GetNext()->IsTemporary()) {
+ // If the input was stored in a temporary, use that temporary to
+ // perform the move.
+ Move(input->GetNext(), location, instruction);
+ } else {
+ Move(input, location, instruction);
+ }
}
}
}
@@ -281,6 +321,9 @@
case kThumb2: {
return new (allocator) arm::CodeGeneratorARM(graph);
}
+ case kArm64: {
+ return new (allocator) arm64::CodeGeneratorARM64(graph);
+ }
case kMips:
return nullptr;
case kX86: {
@@ -326,12 +369,13 @@
int32_t pc2dex_dalvik_offset = 0;
uint32_t dex2pc_data_size = 0u;
uint32_t dex2pc_entries = 0u;
+ uint32_t dex2pc_offset = 0u;
+ int32_t dex2pc_dalvik_offset = 0;
if (src_map != nullptr) {
src_map->reserve(pc2dex_entries);
}
- // We currently only have pc2dex entries.
for (size_t i = 0; i < pc2dex_entries; i++) {
struct PcInfo pc_info = pc_infos_.Get(i);
pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
@@ -343,6 +387,19 @@
}
}
+ // Walk over the blocks and find which ones correspond to catch block entries.
+ for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+ HBasicBlock* block = graph_->GetBlocks().Get(i);
+ if (block->IsCatchBlock()) {
+ intptr_t native_pc = GetAddressOf(block);
+ ++dex2pc_entries;
+ dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
+ dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
+ dex2pc_offset = native_pc;
+ dex2pc_dalvik_offset = block->GetDexPc();
+ }
+ }
+
uint32_t total_entries = pc2dex_entries + dex2pc_entries;
uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
@@ -350,6 +407,7 @@
uint8_t* data_ptr = &(*data)[0];
uint8_t* write_pos = data_ptr;
+
write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
@@ -357,6 +415,9 @@
pc2dex_offset = 0u;
pc2dex_dalvik_offset = 0u;
+ dex2pc_offset = 0u;
+ dex2pc_dalvik_offset = 0u;
+
for (size_t i = 0; i < pc2dex_entries; i++) {
struct PcInfo pc_info = pc_infos_.Get(i);
DCHECK(pc2dex_offset <= pc_info.native_pc);
@@ -365,6 +426,19 @@
pc2dex_offset = pc_info.native_pc;
pc2dex_dalvik_offset = pc_info.dex_pc;
}
+
+ for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+ HBasicBlock* block = graph_->GetBlocks().Get(i);
+ if (block->IsCatchBlock()) {
+ intptr_t native_pc = GetAddressOf(block);
+ write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
+ write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
+ dex2pc_offset = native_pc;
+ dex2pc_dalvik_offset = block->GetDexPc();
+ }
+ }
+
+
DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);
@@ -381,6 +455,14 @@
CHECK_EQ(pc_info.dex_pc, it.DexPc());
++it;
}
+ for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+ HBasicBlock* block = graph_->GetBlocks().Get(i);
+ if (block->IsCatchBlock()) {
+ CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
+ CHECK_EQ(block->GetDexPc(), it2.DexPc());
+ ++it2;
+ }
+ }
CHECK(it == table.PcToDexEnd());
CHECK(it2 == table.DexToPcEnd());
}
@@ -473,8 +555,7 @@
case Location::kRegister : {
int id = location.reg();
stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
- if (current->GetType() == Primitive::kPrimDouble
- || current->GetType() == Primitive::kPrimLong) {
+ if (current->GetType() == Primitive::kPrimLong) {
stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
++i;
DCHECK_LT(i, environment_size);
@@ -482,52 +563,59 @@
break;
}
+ case Location::kFpuRegister : {
+ int id = location.reg();
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
+ if (current->GetType() == Primitive::kPrimDouble) {
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
+ ++i;
+ DCHECK_LT(i, environment_size);
+ }
+ break;
+ }
+
default:
LOG(FATAL) << "Unexpected kind " << location.GetKind();
}
}
}
-size_t CodeGenerator::GetStackOffsetOfSavedRegister(size_t index) {
- return first_register_slot_in_slow_path_ + index * GetWordSize();
-}
-
void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
RegisterSet* register_set = locations->GetLiveRegisters();
- uint32_t count = 0;
+ size_t stack_offset = first_register_slot_in_slow_path_;
for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
if (register_set->ContainsCoreRegister(i)) {
- size_t stack_offset = GetStackOffsetOfSavedRegister(count);
- ++count;
- SaveCoreRegister(Location::StackSlot(stack_offset), i);
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
locations->SetStackBit(stack_offset / kVRegSize);
}
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
+ stack_offset += SaveCoreRegister(stack_offset, i);
}
}
for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
if (register_set->ContainsFloatingPointRegister(i)) {
- LOG(FATAL) << "Unimplemented";
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
+ stack_offset += SaveFloatingPointRegister(stack_offset, i);
}
}
}
void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
RegisterSet* register_set = locations->GetLiveRegisters();
- uint32_t count = 0;
+ size_t stack_offset = first_register_slot_in_slow_path_;
for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
if (register_set->ContainsCoreRegister(i)) {
- size_t stack_offset = GetStackOffsetOfSavedRegister(count);
- ++count;
- RestoreCoreRegister(Location::StackSlot(stack_offset), i);
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
+ stack_offset += RestoreCoreRegister(stack_offset, i);
}
}
for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
if (register_set->ContainsFloatingPointRegister(i)) {
- LOG(FATAL) << "Unimplemented";
+ DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
+ stack_offset += RestoreFloatingPointRegister(stack_offset, i);
}
}
}
@@ -551,4 +639,13 @@
}
}
+void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
+ MoveOperands move1(from1, to1, nullptr);
+ MoveOperands move2(from2, to2, nullptr);
+ HParallelMove parallel_move(GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ GetMoveResolver()->EmitNativeCode(&parallel_move);
+}
+
} // namespace art
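
The dex2pc entries added to the mapping table above use the same delta + LEB128 scheme as the existing pc2dex entries: each entry stores the unsigned difference to the previous native pc and the signed difference to the previous dex pc. The following is a standalone sketch of the unsigned half of that encoding under the usual LEB128 definition; it is an illustration, not the ART leb128.h implementation.

#include <cstdint>
#include <vector>

// Emit 7 bits per byte, least significant group first; the high bit of each
// byte signals that more bytes follow.
void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>(value & 0x7f) | 0x80);
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));
}

// Delta-encode a sorted list of native pcs: small gaps stay one byte each
// even when the absolute offsets grow large.
void EncodeNativePcDeltas(const std::vector<uint32_t>& native_pcs,
                          std::vector<uint8_t>* out) {
  uint32_t previous = 0;
  for (uint32_t pc : native_pcs) {
    EncodeUnsignedLeb128(out, pc - previous);
    previous = pc;
  }
}
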
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 7aaf991..f906eb8 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
+#include "arch/instruction_set.h"
#include "base/bit_field.h"
#include "globals.h"
-#include "instruction_set.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
@@ -33,6 +33,7 @@
class Assembler;
class CodeGenerator;
class DexCompilationUnit;
+class ParallelMoveResolver;
class SrcMap;
class CodeAllocator {
@@ -51,7 +52,7 @@
uintptr_t native_pc;
};
-class SlowPathCode : public ArenaObject {
+class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
SlowPathCode() {}
virtual ~SlowPathCode() {}
@@ -62,7 +63,7 @@
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
-class CodeGenerator : public ArenaObject {
+class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
@@ -92,6 +93,7 @@
virtual HGraphVisitor* GetInstructionVisitor() = 0;
virtual Assembler* GetAssembler() = 0;
virtual size_t GetWordSize() const = 0;
+ virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
void ComputeFrameSize(size_t number_of_spill_slots,
size_t maximum_number_of_live_registers,
size_t number_of_out_slots);
@@ -110,8 +112,20 @@
virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
virtual InstructionSet GetInstructionSet() const = 0;
- virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) = 0;
- virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) = 0;
+ // Saves the register in the stack. Returns the size taken on stack.
+ virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
+ // Restores the register from the stack. Returns the size taken on stack.
+ virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
+ virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
+ }
+ virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
+ }
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
@@ -145,6 +159,23 @@
void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;
bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
+ bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }
+
+ // Helper that returns the pointer offset of an index in an object array.
+ // Note: this method assumes we always have the same pointer size, regardless
+ // of the architecture.
+ static size_t GetCacheOffset(uint32_t index);
+
+ void EmitParallelMoves(Location from1, Location to1, Location from2, Location to2);
+
+ static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) {
+ if (kIsDebugBuild) {
+ if (type == Primitive::kPrimNot && value->IsIntConstant()) {
+ CHECK_EQ(value->AsIntConstant()->GetValue(), 0);
+ }
+ }
+ return type == Primitive::kPrimNot && !value->IsIntConstant();
+ }
protected:
CodeGenerator(HGraph* graph,
@@ -174,9 +205,12 @@
virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;
static size_t FindFreeEntry(bool* array, size_t length);
+ static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
+ virtual ParallelMoveResolver* GetMoveResolver() = 0;
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a2cf670..1701ef5 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -22,17 +22,18 @@
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "thread.h"
-#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
#include "utils/stack_checks.h"
namespace art {
namespace arm {
-static SRegister FromDToLowS(DRegister reg) {
- return static_cast<SRegister>(reg * 2);
+static DRegister FromLowSToD(SRegister reg) {
+ DCHECK_EQ(reg % 2, 0);
+ return static_cast<DRegister>(reg / 2);
}
static constexpr bool kExplicitStackOverflowCheck = false;
@@ -40,13 +41,13 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2 };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
-static constexpr DRegister kRuntimeParameterFpuRegisters[] = { };
+static constexpr SRegister kRuntimeParameterFpuRegisters[] = { };
static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;
-class InvokeRuntimeCallingConvention : public CallingConvention<Register, DRegister> {
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegister> {
public:
InvokeRuntimeCallingConvention()
: CallingConvention(kRuntimeParameterCoreRegisters,
@@ -59,6 +60,7 @@
};
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
class SlowPathCodeARM : public SlowPathCode {
public:
@@ -78,12 +80,11 @@
public:
explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowNullPointer).Int32Value();
- __ LoadFromOffset(kLoadWord, LR, TR, offset);
- __ blx(LR);
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
}
private:
@@ -91,11 +92,27 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
+class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
+};
+
class StackOverflowCheckSlowPathARM : public SlowPathCodeARM {
public:
StackOverflowCheckSlowPathARM() {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
__ LoadFromOffset(kLoadWord, PC, TR,
QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowStackOverflow).Int32Value());
@@ -107,17 +124,15 @@
class SuspendCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
+ SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
: instruction_(instruction), successor_(successor) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
- int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pTestSuspend).Int32Value();
- __ LoadFromOffset(kLoadWord, LR, TR, offset);
- __ blx(LR);
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
codegen->RestoreLiveRegisters(instruction_->GetLocations());
if (successor_ == nullptr) {
__ b(GetReturnLabel());
@@ -151,16 +166,19 @@
index_location_(index_location),
length_location_(length_location) {}
- virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
- int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowArrayBounds).Int32Value();
- __ LoadFromOffset(kLoadWord, LR, TR, offset);
- __ blx(LR);
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
}
private:
@@ -171,6 +189,139 @@
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
+class LoadClassSlowPathARM : public SlowPathCodeARM {
+ public:
+ LoadClassSlowPathARM(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
+ arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ int32_t entry_point_offset = do_clinit_
+ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ }
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
+};
+
+class LoadStringSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0));
+ __ LoadImmediate(calling_convention.GetRegisterAt(1), instruction_->GetStringIndex());
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
+};
+
+class TypeCheckSlowPathARM : public SlowPathCodeARM {
+ public:
+ TypeCheckSlowPathARM(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
+ : instruction_(instruction),
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ }
+
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ HInstruction* const instruction_;
+ const Location class_to_check_;
+ const Location object_class_;
+ uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
+};
+
+#undef __
+
#undef __
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
@@ -207,19 +358,21 @@
}
void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
- stream << ArmManagedRegister::FromDRegister(DRegister(reg));
+ stream << ArmManagedRegister::FromSRegister(SRegister(reg));
}
-void CodeGeneratorARM::SaveCoreRegister(Location stack_location, uint32_t reg_id) {
- __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_location.GetStackIndex());
+size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_index);
+ return kArmWordSize;
}
-void CodeGeneratorARM::RestoreCoreRegister(Location stack_location, uint32_t reg_id) {
- __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_location.GetStackIndex());
+size_t CodeGeneratorARM::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_index);
+ return kArmWordSize;
}
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
- : CodeGenerator(graph, kNumberOfCoreRegisters, kNumberOfDRegisters, kNumberOfRegisterPairs),
+ : CodeGenerator(graph, kNumberOfCoreRegisters, kNumberOfSRegisters, kNumberOfRegisterPairs),
block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
@@ -263,12 +416,17 @@
return Location::RegisterLocation(reg);
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfDRegisters);
+ case Primitive::kPrimFloat: {
+ int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
return Location::FpuRegisterLocation(reg);
}
+ case Primitive::kPrimDouble: {
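+ // A double takes an aligned pair of consecutive S registers, i.e. the two halves of a D register.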
+ int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
+ DCHECK_EQ(reg % 2, 0);
+ return Location::FpuRegisterPairLocation(reg, reg + 1);
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -285,9 +443,6 @@
blocked_core_registers_[LR] = true;
blocked_core_registers_[PC] = true;
- // Reserve R4 for suspend check.
- blocked_core_registers_[R4] = true;
-
// Reserve thread register.
blocked_core_registers_[TR] = true;
@@ -297,19 +452,28 @@
// TODO: We currently don't use Quick's callee saved registers.
// We always save and restore R6 and R7 to make sure we can use three
// register pairs for long operations.
+ blocked_core_registers_[R4] = true;
blocked_core_registers_[R5] = true;
blocked_core_registers_[R8] = true;
blocked_core_registers_[R10] = true;
blocked_core_registers_[R11] = true;
- blocked_fpu_registers_[D8] = true;
- blocked_fpu_registers_[D9] = true;
- blocked_fpu_registers_[D10] = true;
- blocked_fpu_registers_[D11] = true;
- blocked_fpu_registers_[D12] = true;
- blocked_fpu_registers_[D13] = true;
- blocked_fpu_registers_[D14] = true;
- blocked_fpu_registers_[D15] = true;
+ blocked_fpu_registers_[S16] = true;
+ blocked_fpu_registers_[S17] = true;
+ blocked_fpu_registers_[S18] = true;
+ blocked_fpu_registers_[S19] = true;
+ blocked_fpu_registers_[S20] = true;
+ blocked_fpu_registers_[S21] = true;
+ blocked_fpu_registers_[S22] = true;
+ blocked_fpu_registers_[S23] = true;
+ blocked_fpu_registers_[S24] = true;
+ blocked_fpu_registers_[S25] = true;
+ blocked_fpu_registers_[S26] = true;
+ blocked_fpu_registers_[S27] = true;
+ blocked_fpu_registers_[S28] = true;
+ blocked_fpu_registers_[S29] = true;
+ blocked_fpu_registers_[S30] = true;
+ blocked_fpu_registers_[S31] = true;
UpdateBlockedPairRegisters();
}
@@ -395,28 +559,56 @@
case Primitive::kPrimChar:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
- case Primitive::kPrimFloat:
case Primitive::kPrimNot: {
uint32_t index = gp_index_++;
+ uint32_t stack_index = stack_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
- return Location::StackSlot(calling_convention.GetStackOffsetOf(index));
+ return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
}
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble: {
+ case Primitive::kPrimLong: {
uint32_t index = gp_index_;
+ uint32_t stack_index = stack_index_;
gp_index_ += 2;
+ stack_index_ += 2;
if (index + 1 < calling_convention.GetNumberOfRegisters()) {
ArmManagedRegister pair = ArmManagedRegister::FromRegisterPair(
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(index);
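+ // The long straddles the calling convention: its low half sits in the last argument
+ // register and its high half goes on the stack.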
+ return Location::QuickParameter(index, stack_index);
} else {
- return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index));
+ return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
+ }
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t stack_index = stack_index_++;
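+ // Skip past S registers already claimed for double arguments so float and double
+ // arguments never overlap.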
+ if (float_index_ % 2 == 0) {
+ float_index_ = std::max(double_index_, float_index_);
+ }
+ if (float_index_ < calling_convention.GetNumberOfFpuRegisters()) {
+ return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(float_index_++));
+ } else {
+ return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index));
+ }
+ }
+
+ case Primitive::kPrimDouble: {
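+ // A double needs an even-aligned pair of S registers; round up past any odd S register
+ // left behind by a float argument.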
+ double_index_ = std::max(double_index_, RoundUp(float_index_, 2));
+ uint32_t stack_index = stack_index_;
+ stack_index_ += 2;
+ if (double_index_ + 1 < calling_convention.GetNumberOfFpuRegisters()) {
+ uint32_t index = double_index_;
+ double_index_ += 2;
+ return Location::FpuRegisterPairLocation(
+ calling_convention.GetFpuRegisterAt(index),
+ calling_convention.GetFpuRegisterAt(index + 1));
+ } else {
+ return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
}
@@ -427,6 +619,36 @@
return Location();
}
+Location InvokeDexCallingConventionVisitor::GetReturnLocation(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ return Location::RegisterLocation(R0);
+ }
+
+ case Primitive::kPrimFloat: {
+ return Location::FpuRegisterLocation(S0);
+ }
+
+ case Primitive::kPrimLong: {
+ return Location::RegisterPairLocation(R0, R1);
+ }
+
+ case Primitive::kPrimDouble: {
+ return Location::FpuRegisterPairLocation(S0, S1);
+ }
+
+ case Primitive::kPrimVoid:
+ return Location();
+ }
+ UNREACHABLE();
+ return Location();
+}
+
void CodeGeneratorARM::Move32(Location destination, Location source) {
if (source.Equals(destination)) {
return;
@@ -435,26 +657,26 @@
if (source.IsRegister()) {
__ Mov(destination.As<Register>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovrs(destination.As<Register>(), FromDToLowS(source.As<DRegister>()));
+ __ vmovrs(destination.As<Register>(), source.As<SRegister>());
} else {
__ LoadFromOffset(kLoadWord, destination.As<Register>(), SP, source.GetStackIndex());
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ vmovsr(FromDToLowS(destination.As<DRegister>()), source.As<Register>());
+ __ vmovsr(destination.As<SRegister>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovs(FromDToLowS(destination.As<DRegister>()), FromDToLowS(source.As<DRegister>()));
+ __ vmovs(destination.As<SRegister>(), source.As<SRegister>());
} else {
- __ vldrs(FromDToLowS(destination.As<DRegister>()), Address(SP, source.GetStackIndex()));
+ __ LoadSFromOffset(destination.As<SRegister>(), SP, source.GetStackIndex());
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ StoreToOffset(kStoreWord, source.As<Register>(), SP, destination.GetStackIndex());
} else if (source.IsFpuRegister()) {
- __ vstrs(FromDToLowS(source.As<DRegister>()), Address(SP, destination.GetStackIndex()));
+ __ StoreSToOffset(source.As<SRegister>(), SP, destination.GetStackIndex());
} else {
- DCHECK(source.IsStackSlot());
+ DCHECK(source.IsStackSlot()) << source;
__ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
__ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
@@ -470,14 +692,15 @@
__ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
__ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ Mov(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ LoadFromOffset(kLoadWord, destination.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
} else {
DCHECK(source.IsDoubleStackSlot());
if (destination.AsRegisterPairLow<Register>() == R1) {
@@ -489,27 +712,31 @@
SP, source.GetStackIndex());
}
}
- } else if (destination.IsFpuRegister()) {
+ } else if (destination.IsFpuRegisterPair()) {
if (source.IsDoubleStackSlot()) {
- __ vldrd(destination.As<DRegister>(), Address(SP, source.GetStackIndex()));
+ __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
+ SP,
+ source.GetStackIndex());
} else {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegisterPair()) {
- __ Mov(calling_convention.GetRegisterAt(argument_index),
+ __ Mov(calling_convention.GetRegisterAt(register_index),
source.AsRegisterPairLow<Register>());
__ StoreToOffset(kStoreWord, source.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1));
} else if (source.IsFpuRegister()) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
} else {
DCHECK(source.IsDoubleStackSlot());
- __ LoadFromOffset(kLoadWord, calling_convention.GetRegisterAt(argument_index), SP, source.GetStackIndex());
+ __ LoadFromOffset(
+ kLoadWord, calling_convention.GetRegisterAt(register_index), SP, source.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0, SP, source.GetHighStackIndex(kArmWordSize));
- __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(stack_index + 1));
}
} else {
DCHECK(destination.IsDoubleStackSlot());
@@ -524,14 +751,17 @@
}
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
- __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(argument_index),
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
+ __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(register_index),
SP, destination.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0,
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
__ StoreToOffset(kStoreWord, R0, SP, destination.GetHighStackIndex(kArmWordSize));
- } else if (source.IsFpuRegister()) {
- __ vstrd(source.As<DRegister>(), Address(SP, destination.GetStackIndex()));
+ } else if (source.IsFpuRegisterPair()) {
+ __ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
+ SP,
+ destination.GetStackIndex());
} else {
DCHECK(source.IsDoubleStackSlot());
__ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
@@ -548,26 +778,29 @@
return;
}
- if (instruction->IsIntConstant()) {
- int32_t value = instruction->AsIntConstant()->GetValue();
- if (location.IsRegister()) {
- __ LoadImmediate(location.As<Register>(), value);
- } else {
- DCHECK(location.IsStackSlot());
- __ LoadImmediate(IP, value);
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegisterPair()) {
- __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
- __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
- } else {
- DCHECK(location.IsDoubleStackSlot());
- __ LoadImmediate(IP, Low32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- __ LoadImmediate(IP, High32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ int32_t value = const_to_move->AsIntConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ LoadImmediate(location.As<Register>(), value);
+ } else {
+ DCHECK(location.IsStackSlot());
+ __ LoadImmediate(IP, value);
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
+ __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
+ } else {
+ DCHECK(location.IsDoubleStackSlot());
+ __ LoadImmediate(IP, Low32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ __ LoadImmediate(IP, High32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ }
}
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
@@ -590,6 +823,14 @@
default:
LOG(FATAL) << "Unexpected type " << instruction->GetType();
}
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -614,6 +855,19 @@
}
}
+void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
+ HInstruction* instruction,
+ uint32_t dex_pc) {
+ __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
+ __ blx(LR);
+ RecordPcInfo(instruction, dex_pc);
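+ // Only these implicit-check slow paths may call the runtime from a method still marked as a leaf.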
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
+
void LocationsBuilderARM::VisitGoto(HGoto* got) {
got->SetLocations(nullptr);
}
@@ -645,6 +899,7 @@
}
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ bkpt(0);
@@ -656,7 +911,7 @@
new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
HInstruction* cond = if_instr->InputAt(0);
if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::RequiresRegister());
}
}
@@ -715,10 +970,10 @@
void LocationsBuilderARM::VisitCondition(HCondition* comp) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(1, Location::RegisterOrConstant(comp->InputAt(1)), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(comp->InputAt(1)));
if (comp->NeedsMaterialization()) {
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
@@ -810,6 +1065,7 @@
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -837,6 +1093,7 @@
}
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -847,6 +1104,7 @@
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -857,6 +1115,29 @@
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
@@ -864,56 +1145,18 @@
}
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
- switch (ret->InputAt(0)->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimFloat:
- locations->SetInAt(0, Location::RegisterLocation(R0));
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- locations->SetInAt(0, Location::RegisterPairLocation(R0, R1));
- break;
-
- default:
- LOG(FATAL) << "Unimplemented return type " << ret->InputAt(0)->GetType();
- }
+ locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
- if (kIsDebugBuild) {
- switch (ret->InputAt(0)->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimFloat:
- DCHECK_EQ(ret->GetLocations()->InAt(0).As<Register>(), R0);
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), R0);
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), R1);
- break;
-
- default:
- LOG(FATAL) << "Unimplemented return type " << ret->InputAt(0)->GetType();
- }
- }
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -921,15 +1164,12 @@
HandleInvoke(invoke);
}
-void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
__ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kArmWordSize;
// TODO: Implement all kinds of calls:
// 1) boot -> boot
@@ -939,14 +1179,17 @@
// Currently we implement the app -> app logic, which looks up in the resolve cache.
// temp = method;
- LoadCurrentMethod(temp);
+ codegen_->LoadCurrentMethod(temp);
// temp = temp->dex_cache_resolved_methods_;
- __ LoadFromOffset(kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
// temp = temp[index_in_cache]
- __ LoadFromOffset(kLoadWord, temp, temp, index_in_cache);
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache()));
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
// LR()
__ blx(LR);
@@ -954,10 +1197,6 @@
DCHECK(!codegen_->IsLeafMethod());
}
-void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- HandleInvoke(invoke);
-}
-
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
@@ -969,27 +1208,12 @@
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
- switch (invoke->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimFloat:
- locations->SetOut(Location::RegisterLocation(R0));
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- locations->SetOut(Location::RegisterPairLocation(R0, R1));
- break;
-
- case Primitive::kPrimVoid:
- break;
- }
+ locations->SetOut(calling_convention_visitor.GetReturnLocation(invoke->GetType()));
}
+void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
@@ -1006,7 +1230,8 @@
__ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetMethodAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
@@ -1016,16 +1241,438 @@
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderARM::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+ // Add the hidden argument.
+ invoke->GetLocations()->AddTemp(Location::RegisterLocation(R12));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
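+ // The IMT has a fixed number of slots (kImtSize), so the method's IMT index is taken modulo that size.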
+ uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+ (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ // Set the hidden argument.
+ __ LoadImmediate(invoke->GetLocations()->GetTemp(1).As<Register>(), invoke->GetDexMethodIndex());
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ __ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
+ }
+ // temp = temp->GetImtEntryAt(method_offset);
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value();
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // LR = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
+ // LR();
+ __ blx(LR);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderARM::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ bool output_overlaps = (neg->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), output_overlaps);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitNeg(HNeg* neg) {
+ LocationSummary* locations = neg->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ DCHECK(in.IsRegister());
+ __ rsb(out.As<Register>(), in.As<Register>(), ShifterOperand(0));
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK(in.IsRegisterPair());
+ // out.lo = 0 - in.lo (and update the carry/borrow (C) flag)
+ __ rsbs(out.AsRegisterPairLow<Register>(),
+ in.AsRegisterPairLow<Register>(),
+ ShifterOperand(0));
+ // We cannot emit an RSC (Reverse Subtract with Carry)
+ // instruction here, as it does not exist in the Thumb-2
+ // instruction set. We use the following approach
+ // using SBC and SUB instead.
+ //
+ // out.hi = -C
+ __ sbc(out.AsRegisterPairHigh<Register>(),
+ out.AsRegisterPairHigh<Register>(),
+ ShifterOperand(out.AsRegisterPairHigh<Register>()));
+ // out.hi = out.hi - in.hi
+ __ sub(out.AsRegisterPairHigh<Register>(),
+ out.AsRegisterPairHigh<Register>(),
+ ShifterOperand(in.AsRegisterPairHigh<Register>()));
+ break;
+
+ case Primitive::kPrimFloat:
+ DCHECK(in.IsFpuRegister());
+ __ vnegs(out.As<SRegister>(), in.As<SRegister>());
+ break;
+
+ case Primitive::kPrimDouble:
+ DCHECK(in.IsFpuRegisterPair());
+ __ vnegd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(in.AsFpuRegisterPairLow<SRegister>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ __ sbfx(out.As<Register>(), in.As<Register>(), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ __ sbfx(out.As<Register>(), in.As<Register>(), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ DCHECK(out.IsRegister());
+ if (in.IsRegisterPair()) {
+ __ Mov(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ LoadFromOffset(kLoadWord, out.As<Register>(), SP, in.GetStackIndex());
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ LoadImmediate(out.As<Register>(), static_cast<int32_t>(value));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ DCHECK(out.IsRegisterPair());
+ DCHECK(in.IsRegister());
+ __ Mov(out.AsRegisterPairLow<Register>(), in.As<Register>());
+ // Sign extension.
+ __ Asr(out.AsRegisterPairHigh<Register>(),
+ out.AsRegisterPairLow<Register>(),
+ 31);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ __ ubfx(out.As<Register>(), in.As<Register>(), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar: {
+ // Processing a Dex `int-to-float' instruction.
+ __ vmovsr(out.As<SRegister>(), in.As<Register>());
+ __ vcvtsi(out.As<SRegister>(), out.As<SRegister>());
+ break;
+ }
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar: {
+ // Processing a Dex `int-to-double' instruction.
+ __ vmovsr(out.AsFpuRegisterPairLow<SRegister>(), in.As<Register>());
+ __ vcvtdi(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+ out.AsFpuRegisterPairLow<SRegister>());
+ break;
+ }
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderARM::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- bool dies_at_entry = add->GetResultType() != Primitive::kPrimLong;
- locations->SetInAt(0, Location::RequiresRegister(), dies_at_entry);
- locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)), dies_at_entry);
- locations->SetOut(Location::RequiresRegister());
+ bool output_overlaps = (add->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), output_overlaps);
break;
}
@@ -1033,7 +1680,7 @@
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
}
@@ -1068,13 +1715,13 @@
break;
case Primitive::kPrimFloat:
- __ vadds(FromDToLowS(out.As<DRegister>()),
- FromDToLowS(first.As<DRegister>()),
- FromDToLowS(second.As<DRegister>()));
+ __ vadds(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
break;
case Primitive::kPrimDouble:
- __ vaddd(out.As<DRegister>(), first.As<DRegister>(), second.As<DRegister>());
+ __ vaddd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
break;
default:
@@ -1088,59 +1735,66 @@
switch (sub->GetResultType()) {
case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- bool dies_at_entry = sub->GetResultType() != Primitive::kPrimLong;
- locations->SetInAt(0, Location::RequiresRegister(), dies_at_entry);
- locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)), dies_at_entry);
- locations->SetOut(Location::RequiresRegister());
+ bool output_overlaps = (sub->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), output_overlaps);
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
-
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- if (locations->InAt(1).IsRegister()) {
- __ sub(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(),
- ShifterOperand(locations->InAt(1).As<Register>()));
+ if (second.IsRegister()) {
+ __ sub(out.As<Register>(), first.As<Register>(), ShifterOperand(second.As<Register>()));
} else {
- __ AddConstant(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(),
- -locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
+ __ AddConstant(out.As<Register>(),
+ first.As<Register>(),
+ -second.GetConstant()->AsIntConstant()->GetValue());
}
break;
}
- case Primitive::kPrimLong:
- __ subs(locations->Out().AsRegisterPairLow<Register>(),
- locations->InAt(0).AsRegisterPairLow<Register>(),
- ShifterOperand(locations->InAt(1).AsRegisterPairLow<Register>()));
- __ sbc(locations->Out().AsRegisterPairHigh<Register>(),
- locations->InAt(0).AsRegisterPairHigh<Register>(),
- ShifterOperand(locations->InAt(1).AsRegisterPairHigh<Register>()));
+ case Primitive::kPrimLong: {
+ __ subs(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ sbc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
break;
+ }
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ vsubs(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ vsubd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
+ break;
+ }
+
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1150,21 +1804,22 @@
switch (mul->GetResultType()) {
case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(1, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
+ }
default:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
}
}
@@ -1208,15 +1863,227 @@
__ add(out_hi, out_hi, ShifterOperand(IP));
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+
+ case Primitive::kPrimFloat: {
+ __ vmuls(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ vmuld(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void LocationsBuilderARM::VisitDiv(HDiv* div) {
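+ // Long division is not generated inline; it goes through the pLdiv runtime entry point, hence kCall.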
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // The runtime helper puts the output in R0,R2.
+ locations->SetOut(Location::RegisterPairLocation(R0, R2));
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
+ LocationSummary* locations = div->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ __ sdiv(out.As<Register>(), first.As<Register>(), second.As<Register>());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(R2, out.AsRegisterPairHigh<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc());
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ vdivs(out.As<SRegister>(), first.As<SRegister>(), second.As<SRegister>());
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ vdivd(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<SRegister>()));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void LocationsBuilderARM::VisitRem(HRem* rem) {
+ LocationSummary::CallKind call_kind = rem->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+ switch (rem->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ locations->AddTemp(Location::RequiresRegister());
+ break;
+ }
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // The runtime helper puts the output in R2,R3.
+ locations->SetOut(Location::RegisterPairLocation(R2, R3));
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ LOG(FATAL) << "Unimplemented rem type " << rem->GetResultType();
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << rem->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
+ LocationSummary* locations = rem->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+
+ switch (rem->GetResultType()) {
+ case Primitive::kPrimInt: {
+ Register reg1 = first.As<Register>();
+ Register reg2 = second.As<Register>();
+ Register temp = locations->GetTemp(0).As<Register>();
+
+ // temp = reg1 / reg2 (integer division)
+ // temp = temp * reg2
+ // dest = reg1 - temp
+ __ sdiv(temp, reg1, reg2);
+ __ mul(temp, temp, reg2);
+ __ sub(out.As<Register>(), reg1, ShifterOperand(temp));
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(R2, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(R3, out.AsRegisterPairHigh<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc());
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ LOG(FATAL) << "Unimplemented rem type " << rem->GetResultType();
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << rem->GetResultType();
+ }
+}
+
+void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ cmp(value.As<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
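+ // ORing the two halves sets the EQ flag only when the full 64-bit value is zero.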
+ __ orrs(IP,
+ value.AsRegisterPairLow<Register>(),
+ ShifterOperand(value.AsRegisterPairHigh<Register>()));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
}
@@ -1231,15 +2098,28 @@
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
InvokeRuntimeCallingConvention calling_convention;
- LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
__ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc());
+}
- int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocObjectWithAccessCheck).Int32Value();
- __ LoadFromOffset(kLoadWord, LR, TR, offset);
- __ blx(LR);
+void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(R0));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- DCHECK(!codegen_->IsLeafMethod());
+void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc());
}
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
@@ -1256,31 +2136,50 @@
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
-void LocationsBuilderARM::VisitNot(HNot* instruction) {
+void LocationsBuilderARM::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
-void InstructionCodeGeneratorARM::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- __ eor(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(), ShifterOperand(1));
+void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
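+ // Booleans are stored as 0 or 1, so an XOR with 1 implements the logical not.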
+ __ eor(out.As<Register>(), in.As<Register>(), ShifterOperand(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ mvn(out.As<Register>(), ShifterOperand(in.As<Register>()));
+ break;
+
+ case Primitive::kPrimLong:
+ __ mvn(out.AsRegisterPairLow<Register>(),
+ ShifterOperand(in.AsRegisterPairLow<Register>()));
+ __ mvn(out.AsRegisterPairHigh<Register>(),
+ ShifterOperand(in.AsRegisterPairHigh<Register>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(1, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
- Label greater, done;
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
@@ -1325,18 +2224,19 @@
}
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
void LocationsBuilderARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- bool is_object_type = instruction->GetFieldType() == Primitive::kPrimNot;
- bool dies_at_entry = !is_object_type;
- locations->SetInAt(0, Location::RequiresRegister(), dies_at_entry);
- locations->SetInAt(1, Location::RequiresRegister(), dies_at_entry);
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(instruction->GetFieldType(), instruction->GetValue());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
// Temporary registers for the write barrier.
- if (is_object_type) {
+ if (needs_write_barrier) {
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
}
@@ -1367,7 +2267,7 @@
case Primitive::kPrimNot: {
Register value = locations->InAt(1).As<Register>();
__ StoreToOffset(kStoreWord, value, obj, offset);
- if (field_type == Primitive::kPrimNot) {
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
Register temp = locations->GetTemp(0).As<Register>();
Register card = locations->GetTemp(1).As<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
@@ -1381,10 +2281,18 @@
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister value = locations->InAt(1).As<SRegister>();
+ __ StoreSToOffset(value, obj, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
+ __ StoreDToOffset(value, obj, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -1394,8 +2302,8 @@
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -1442,10 +2350,18 @@
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister out = locations->Out().As<SRegister>();
+ __ LoadSFromOffset(out, obj, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
+ __ LoadDFromOffset(out, obj, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -1481,10 +2397,9 @@
void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
@@ -1585,19 +2500,28 @@
void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
Primitive::Type value_type = instruction->GetComponentType();
- bool is_object = value_type == Primitive::kPrimNot;
+
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+ bool needs_runtime_call = instruction->NeedsTypeCheck();
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
- if (is_object) {
+ instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (needs_runtime_call) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)), Location::kDiesAtEntry);
- locations->SetInAt(2, Location::RequiresRegister(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+
+ if (needs_write_barrier) {
+ // Temporary registers for the write barrier.
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
}
@@ -1606,6 +2530,9 @@
Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_runtime_call = locations->WillCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
switch (value_type) {
case Primitive::kPrimBoolean:
@@ -1636,25 +2563,29 @@
break;
}
- case Primitive::kPrimInt: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register value = locations->InAt(2).As<Register>();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreToOffset(kStoreWord, value, obj, offset);
- } else {
- __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
- __ StoreToOffset(kStoreWord, value, IP, data_offset);
- }
- break;
- }
-
+ case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAputObject).Int32Value();
- __ LoadFromOffset(kLoadWord, LR, TR, offset);
- __ blx(LR);
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- DCHECK(!codegen_->IsLeafMethod());
+ if (!needs_runtime_call) {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).As<Register>();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, obj, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
+ __ StoreToOffset(kStoreWord, value, IP, data_offset);
+ }
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
+ codegen_->MarkGCCard(temp, card, obj, value);
+ }
+ } else {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
+ }
break;
}
@@ -1684,8 +2615,8 @@
void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
@@ -1734,9 +2665,11 @@
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1768,12 +2701,15 @@
new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
codegen_->AddSlowPath(slow_path);
- __ subs(R4, R4, ShifterOperand(1));
+ __ LoadFromOffset(
+ kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
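+ // A non-zero flag value means a suspend or checkpoint request is pending.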
+ __ cmp(IP, ShifterOperand(0));
+ // TODO: Figure out the branch offsets and use cbz/cbnz.
if (successor == nullptr) {
- __ b(slow_path->GetEntryLabel(), EQ);
+ __ b(slow_path->GetEntryLabel(), NE);
__ Bind(slow_path->GetReturnLabel());
} else {
- __ b(codegen_->GetLabelOf(successor), NE);
+ __ b(codegen_->GetLabelOf(successor), EQ);
__ b(slow_path->GetEntryLabel());
}
}
@@ -1865,5 +2801,420 @@
__ Pop(static_cast<Register>(reg));
}
+void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
+ Register out = cls->GetLocations()->Out().As<Register>();
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(
+ kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
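+ // A null dex cache entry means the type is not resolved yet; the slow path resolves it.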
+ __ cmp(out, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
+
+void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
+
+void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
+ SlowPathCodeARM* slow_path, Register class_reg) {
+ __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
+ __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
+ __ b(slow_path->GetEntryLabel(), LT);
+ // Even if the initialized flag is set, we may be in a situation where caches are not synced
+ // properly. Therefore, we do a memory fence.
+ __ dmb(ISH);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register cls = locations->InAt(0).As<Register>();
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean: {
+ Register out = locations->Out().As<Register>();
+ __ LoadFromOffset(kLoadUnsignedByte, out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ Register out = locations->Out().As<Register>();
+ __ LoadFromOffset(kLoadSignedByte, out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ Register out = locations->Out().As<Register>();
+ __ LoadFromOffset(kLoadSignedHalfword, out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ Register out = locations->Out().As<Register>();
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ Register out = locations->Out().As<Register>();
+ __ LoadFromOffset(kLoadWord, out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: support volatile.
+ Location out = locations->Out();
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ SRegister out = locations->Out().As<SRegister>();
+ __ LoadSFromOffset(out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
+ __ LoadDFromOffset(out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(instruction->GetFieldType(), instruction->GetValue());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Temporary registers for the write barrier.
+ if (needs_write_barrier) {
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register cls = locations->InAt(0).As<Register>();
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+ Primitive::Type field_type = instruction->GetFieldType();
+
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ Register value = locations->InAt(1).As<Register>();
+ __ StoreToOffset(kStoreByte, value, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ Register value = locations->InAt(1).As<Register>();
+ __ StoreToOffset(kStoreHalfword, value, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ Register value = locations->InAt(1).As<Register>();
+ __ StoreToOffset(kStoreWord, value, cls, offset);
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
+ codegen_->MarkGCCard(temp, card, cls, value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Location value = locations->InAt(1);
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ SRegister value = locations->InAt(1).As<SRegister>();
+ __ StoreSToOffset(value, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
+ __ StoreDToOffset(value, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = load->GetLocations()->Out().As<Register>();
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(
+ kLoadWord, out, out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
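+ // A null dex cache entry means the String is not resolved yet; the slow path calls the runtime to resolve it.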
+ __ cmp(out, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
+ Register out = load->GetLocations()->Out().As<Register>();
+ int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+ __ LoadFromOffset(kLoadWord, out, TR, offset);
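+ // Clear the pending exception now that it has been read.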
+ __ LoadImmediate(IP, 0);
+ __ StoreToOffset(kStoreWord, IP, TR, offset);
+}
+
+void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeARM* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(&zero, EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ cmp(out, ShifterOperand(cls));
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ b(&zero, NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ }
+ __ Bind(&zero);
+ __ LoadImmediate(out, 0);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
+void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
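+ // A null reference trivially passes the check-cast, so jump straight to the exit.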
+ __ cmp(obj, ShifterOperand(0));
+ __ b(slow_path->GetExitLabel(), EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ cmp(temp, ShifterOperand(cls));
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ codegen_->InvokeRuntime(instruction->IsEnter()
+ ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc());
+}
+
+void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
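+ // Long results are written half by half, so the output pair must not alias the inputs.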
+ bool output_overlaps = (instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetOut(Location::RequiresRegister(), output_overlaps);
+}
+
+void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ Register first = locations->InAt(0).As<Register>();
+ Register second = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ if (instruction->IsAnd()) {
+ __ and_(out, first, ShifterOperand(second));
+ } else if (instruction->IsOr()) {
+ __ orr(out, first, ShifterOperand(second));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out, first, ShifterOperand(second));
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location out = locations->Out();
+ if (instruction->IsAnd()) {
+ __ and_(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ and_(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else if (instruction->IsOr()) {
+ __ orr(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ orr(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ eor(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
+ }
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 57b289c..c00fac1 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -26,16 +26,19 @@
namespace arm {
class CodeGeneratorARM;
+class SlowPathCodeARM;
-static constexpr size_t kArmWordSize = 4;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kArmWordSize = kArmPointerSize;
static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
static constexpr RegisterPair kParameterCorePairRegisters[] = { R1_R2, R2_R3 };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-static constexpr DRegister kParameterFpuRegisters[] = { };
-static constexpr size_t kParameterFpuRegistersLength = 0;
+static constexpr SRegister kParameterFpuRegisters[] =
+ { S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15 };
+static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
-class InvokeDexCallingConvention : public CallingConvention<Register, DRegister> {
+class InvokeDexCallingConvention : public CallingConvention<Register, SRegister> {
public:
InvokeDexCallingConvention()
: CallingConvention(kParameterCoreRegisters,
@@ -54,13 +57,18 @@
class InvokeDexCallingConventionVisitor {
public:
- InvokeDexCallingConventionVisitor() : gp_index_(0) {}
+ InvokeDexCallingConventionVisitor()
+ : gp_index_(0), float_index_(0), double_index_(0), stack_index_(0) {}
Location GetNextLocation(Primitive::Type type);
+ Location GetReturnLocation(Primitive::Type type);
private:
InvokeDexCallingConvention calling_convention;
uint32_t gp_index_;
+ uint32_t float_index_;
+ uint32_t double_index_;
+ uint32_t stack_index_;
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
@@ -70,10 +78,10 @@
ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
: ParallelMoveResolver(allocator), codegen_(codegen) {}
- virtual void EmitMove(size_t index) OVERRIDE;
- virtual void EmitSwap(size_t index) OVERRIDE;
- virtual void SpillScratch(int reg) OVERRIDE;
- virtual void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
ArmAssembler* GetAssembler() const;
@@ -92,15 +100,16 @@
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void HandleInvoke(HInvoke* invoke);
-
private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
+
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -112,20 +121,21 @@
InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
ArmAssembler* GetAssembler() const { return assembler_; }
- void LoadCurrentMethod(Register reg);
private:
// Generate code for the given suspend check. If not null, `successor`
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
ArmAssembler* const assembler_;
CodeGeneratorARM* const codegen_;
@@ -138,48 +148,52 @@
explicit CodeGeneratorARM(HGraph* graph);
virtual ~CodeGeneratorARM() {}
- virtual void GenerateFrameEntry() OVERRIDE;
- virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(HBasicBlock* block) OVERRIDE;
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
- virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
- virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const OVERRIDE {
return kArmWordSize;
}
- virtual size_t FrameEntrySpillSize() const OVERRIDE;
+ size_t FrameEntrySpillSize() const OVERRIDE;
- virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
- virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE {
return &instruction_visitor_;
}
- virtual ArmAssembler* GetAssembler() OVERRIDE {
+ ArmAssembler* GetAssembler() OVERRIDE {
return &assembler_;
}
- virtual void SetupBlockedRegisters() const OVERRIDE;
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return GetLabelOf(block)->Position();
+ }
- virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
- virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
- virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverARM* GetMoveResolver() {
+ ParallelMoveResolverARM* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const OVERRIDE {
return InstructionSet::kThumb2;
}
@@ -188,6 +202,12 @@
// Helper method to move a 64bits value between two locations.
void Move64(Location destination, Location source);
+ // Load current method into `reg`.
+ void LoadCurrentMethod(Register reg);
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(int32_t offset, HInstruction* instruction, uint32_t dex_pc);
+
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
@@ -195,7 +215,7 @@
return block_labels_.GetRawStorage() + block->GetBlockId();
}
- virtual void Initialize() OVERRIDE {
+ void Initialize() OVERRIDE {
block_labels_.SetSize(GetGraph()->GetBlocks().Size());
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
new file mode 100644
index 0000000..82dced5
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -0,0 +1,1980 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm64.h"
+
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/class.h"
+#include "thread.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+
+
+using namespace vixl; // NOLINT(build/namespaces)
+
+#ifdef __
+#error "ARM64 Codegen VIXL macro-assembler macro already defined."
+#endif
+
+
+namespace art {
+
+namespace arm64 {
+
+// TODO: clean up some of the constant definitions.
+static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
+static constexpr int kCurrentMethodStackOffset = 0;
+
+namespace {
+
+bool IsFPType(Primitive::Type type) {
+ return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
+}
+
+bool IsIntegralType(Primitive::Type type) {
+ switch (type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool Is64BitType(Primitive::Type type) {
+ return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
+}
+
+// Convenience helpers to ease conversion to and from VIXL operands.
+static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
+ "Unexpected values for register codes.");
+
+int VIXLRegCodeFromART(int code) {
+ if (code == SP) {
+ return vixl::kSPRegInternalCode;
+ }
+ if (code == XZR) {
+ return vixl::kZeroRegCode;
+ }
+ return code;
+}
+
+int ARTRegCodeFromVIXL(int code) {
+ if (code == vixl::kSPRegInternalCode) {
+ return SP;
+ }
+ if (code == vixl::kZeroRegCode) {
+ return XZR;
+ }
+ return code;
+}
+
+Register XRegisterFrom(Location location) {
+ return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register WRegisterFrom(Location location) {
+ return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register RegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
+ return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
+}
+
+Register OutputRegister(HInstruction* instr) {
+ return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+Register InputRegisterAt(HInstruction* instr, int input_index) {
+ return RegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+FPRegister DRegisterFrom(Location location) {
+ return FPRegister::DRegFromCode(location.reg());
+}
+
+FPRegister SRegisterFrom(Location location) {
+ return FPRegister::SRegFromCode(location.reg());
+}
+
+FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(IsFPType(type));
+ return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
+}
+
+FPRegister OutputFPRegister(HInstruction* instr) {
+ return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+ return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+CPURegister OutputCPURegister(HInstruction* instr) {
+ return IsFPType(instr->GetType()) ? static_cast<CPURegister>(OutputFPRegister(instr))
+ : static_cast<CPURegister>(OutputRegister(instr));
+}
+
+CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
+ return IsFPType(instr->InputAt(index)->GetType())
+ ? static_cast<CPURegister>(InputFPRegisterAt(instr, index))
+ : static_cast<CPURegister>(InputRegisterAt(instr, index));
+}
+
+int64_t Int64ConstantFrom(Location location) {
+ HConstant* instr = location.GetConstant();
+ return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
+ : instr->AsLongConstant()->GetValue();
+}
+
+Operand OperandFrom(Location location, Primitive::Type type) {
+ if (location.IsRegister()) {
+ return Operand(RegisterFrom(location, type));
+ } else {
+ return Operand(Int64ConstantFrom(location));
+ }
+}
+
+Operand InputOperandAt(HInstruction* instr, int input_index) {
+ return OperandFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+MemOperand StackOperandFrom(Location location) {
+ return MemOperand(sp, location.GetStackIndex());
+}
+
+MemOperand HeapOperand(const Register& base, size_t offset) {
+ // A heap reference must be 32 bits, so it fits in a W register.
+ DCHECK(base.IsW());
+ return MemOperand(base.X(), offset);
+}
+
+MemOperand HeapOperand(const Register& base, Offset offset) {
+ return HeapOperand(base, offset.SizeValue());
+}
+
+MemOperand HeapOperandFrom(Location location, Offset offset) {
+ return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
+}
+
+Location LocationFrom(const Register& reg) {
+ return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
+}
+
+Location LocationFrom(const FPRegister& fpreg) {
+ return Location::FpuRegisterLocation(fpreg.code());
+}
+
+} // namespace
+
+inline Condition ARM64Condition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ case kCondLT: return lt;
+ case kCondLE: return le;
+ case kCondGT: return gt;
+ case kCondGE: return ge;
+ default:
+ LOG(FATAL) << "Unknown if condition";
+ }
+ return nv; // Unreachable.
+}
+
+Location ARM64ReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
+ // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
+ // but we use the exact registers for clarity.
+ if (return_type == Primitive::kPrimFloat) {
+ return LocationFrom(s0);
+ } else if (return_type == Primitive::kPrimDouble) {
+ return LocationFrom(d0);
+ } else if (return_type == Primitive::kPrimLong) {
+ return LocationFrom(x0);
+ } else {
+ return LocationFrom(w0);
+ }
+}
+
+static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static const FPRegister kRuntimeParameterFpuRegisters[] = { };
+static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
+ public:
+ static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
+ return ARM64ReturnLocation(return_type);
+}
+
+#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
+
+class SlowPathCodeARM64 : public SlowPathCode {
+ public:
+ SlowPathCodeARM64() : entry_label_(), exit_label_() {}
+
+ vixl::Label* GetEntryLabel() { return &entry_label_; }
+ vixl::Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ vixl::Label entry_label_;
+ vixl::Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
+};
+
+class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ BoundsCheckSlowPathARM64() {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ Brk(__LINE__); // TODO: Unimplemented BoundsCheckSlowPathARM64.
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
+};
+
+class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
+};
+
+class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ LoadClassSlowPathARM64(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
+ arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
+ int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ Primitive::Type type = at_->GetType();
+ arm64_codegen->MoveHelper(out, calling_convention.GetReturnLocation(type), type);
+ }
+
+ codegen->RestoreLiveRegisters(locations);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
+};
+
+class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0).W());
+ __ Mov(calling_convention.GetRegisterAt(1).W(), instruction_->GetStringIndex());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
+ Primitive::Type type = instruction_->GetType();
+ arm64_codegen->MoveHelper(locations->Out(), calling_convention.GetReturnLocation(type), type);
+
+ codegen->RestoreLiveRegisters(locations);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
+};
+
+class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
+};
+
+class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
+ HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(instruction_->GetLocations());
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc());
+ codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ if (successor_ == nullptr) {
+ __ B(GetReturnLabel());
+ } else {
+ __ B(arm64_codegen->GetLabelOf(successor_));
+ }
+ }
+
+ vixl::Label* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ vixl::Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
+};
+
+class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ TypeCheckSlowPathARM64() {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ Brk(__LINE__); // TODO: Unimplemented TypeCheckSlowPathARM64.
+ __ b(GetExitLabel());
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
+};
+
+#undef __
+
+Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
+ next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
+ } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
+ next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
+ }
+
+ // Space on the stack is reserved for all arguments.
+ stack_index_ += Is64BitType(type) ? 2 : 1;
+ return next_location;
+}
+
+CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
+ : CodeGenerator(graph,
+ kNumberOfAllocatableRegisters,
+ kNumberOfAllocatableFPRegisters,
+ kNumberOfAllocatableRegisterPairs),
+ block_labels_(nullptr),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this) {}
+
+#undef __
+#define __ GetVIXLAssembler()->
+
+void CodeGeneratorARM64::GenerateFrameEntry() {
+ // TODO: Add proper support for the stack overflow check.
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireX();
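+ // Probe the page at [sp - reserved bytes]; the load faults if the stack would overflow and the fault is reported as a stack overflow.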
+ __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Ldr(temp, MemOperand(temp, 0));
+ RecordPcInfo(nullptr, 0);
+
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ int frame_size = GetFrameSize();
+ core_spill_mask_ |= preserved_regs.list();
+
+ __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
+ __ PokeCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+
+ // Stack layout:
+ // sp[frame_size - 8] : lr.
+ // ... : other preserved registers.
+ // sp[frame_size - regs_size]: first preserved register.
+ // ... : reserved frame space.
+ // sp[0] : current method.
+}
+
+void CodeGeneratorARM64::GenerateFrameExit() {
+ int frame_size = GetFrameSize();
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ __ PeekCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+ __ Drop(frame_size);
+}
+
+void CodeGeneratorARM64::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorARM64::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ Primitive::Type type = instruction->GetType();
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
+ int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
+ : instruction->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ Register dst = RegisterFrom(location, type);
+ DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
+ (instruction->IsLongConstant() && dst.Is64Bits()));
+ __ Mov(dst, value);
+ } else {
+ DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
+ __ Mov(temp, value);
+ __ Str(temp, StackOperandFrom(location));
+ }
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ MoveHelper(location, temp_location, type);
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ if (Is64BitType(type)) {
+ MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
+ } else {
+ MoveHelper(location, Location::StackSlot(stack_slot), type);
+ }
+
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ MoveHelper(location, locations->Out(), type);
+ }
+}
+
+size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
+ return GetFramePreservedRegistersSize();
+}
+
+Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register card = temps.AcquireX();
+ Register temp = temps.AcquireX();
+ vixl::Label done;
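+ // Skip marking when the stored value is null. Otherwise mark the card for
+ // `object` by storing the low byte of the card-table base at
+ // base + (object >> kCardShift).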
+ __ Cbz(value, &done);
+ __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
+ __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Strb(card, MemOperand(card, temp));
+ __ Bind(&done);
+}
+
+void CodeGeneratorARM64::SetupBlockedRegisters() const {
+ // Block reserved registers:
+ // ip0 (VIXL temporary)
+ // ip1 (VIXL temporary)
+ // xSuspend (Suspend counter)
+ // lr
+ // sp is not part of the allocatable registers, so we don't need to block it.
+ // TODO: Avoid blocking callee-saved registers, and instead preserve them
+ // where necessary.
+ CPURegList reserved_core_registers = vixl_reserved_core_registers;
+ reserved_core_registers.Combine(runtime_reserved_core_registers);
+ reserved_core_registers.Combine(quick_callee_saved_registers);
+ while (!reserved_core_registers.IsEmpty()) {
+ blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
+ }
+ CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
+ reserved_fp_registers.Combine(CPURegList::GetCalleeSavedFP());
+ while (!reserved_fp_registers.IsEmpty()) {
+ blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
+ }
+}
+
+Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (IsFPType(type)) {
+ ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
+ DCHECK_NE(reg, -1);
+ return Location::FpuRegisterLocation(reg);
+ } else {
+ ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
+ DCHECK_NE(reg, -1);
+ return Location::RegisterLocation(reg);
+ }
+}
+
+void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
+}
+
+void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
+}
+
+void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
+ if (constant->IsIntConstant() || constant->IsLongConstant()) {
+ __ Mov(Register(destination),
+ constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
+ : constant->AsLongConstant()->GetValue());
+ } else if (constant->IsFloatConstant()) {
+ __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
+ } else {
+ DCHECK(constant->IsDoubleConstant());
+ __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
+ }
+}
+
+void CodeGeneratorARM64::MoveHelper(Location destination,
+ Location source,
+ Primitive::Type type) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ Register dst = RegisterFrom(destination, type);
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ } else {
+ __ Mov(dst, OperandFrom(source, type));
+ }
+ } else if (destination.IsFpuRegister()) {
+ FPRegister dst = FPRegisterFrom(destination, type);
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ } else if (source.IsFpuRegister()) {
+ __ Fmov(dst, FPRegisterFrom(source, type));
+ } else {
+ MoveConstant(dst, source.GetConstant());
+ }
+ } else {
+ DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
+ if (source.IsRegister()) {
+ __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
+ } else if (source.IsFpuRegister()) {
+ __ Str(FPRegisterFrom(source, type), StackOperandFrom(destination));
+ } else if (source.IsConstant()) {
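+ // Materialize the constant into a scratch register of the matching kind and
+ // size, then store it to the stack slot.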
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ HConstant* cst = source.GetConstant();
+ CPURegister temp;
+ if (cst->IsIntConstant() || cst->IsLongConstant()) {
+ temp = cst->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
+ } else {
+ DCHECK(cst->IsFloatConstant() || cst->IsDoubleConstant());
+ temp = cst->IsFloatConstant() ? temps.AcquireS() : temps.AcquireD();
+ }
+ MoveConstant(temp, cst);
+ __ Str(temp, StackOperandFrom(destination));
+ } else {
+ DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
+ __ Ldr(temp, StackOperandFrom(source));
+ __ Str(temp, StackOperandFrom(destination));
+ }
+ }
+}
+
+void CodeGeneratorARM64::Load(Primitive::Type type,
+ vixl::CPURegister dst,
+ const vixl::MemOperand& src) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ __ Ldrb(Register(dst), src);
+ break;
+ case Primitive::kPrimByte:
+ __ Ldrsb(Register(dst), src);
+ break;
+ case Primitive::kPrimShort:
+ __ Ldrsh(Register(dst), src);
+ break;
+ case Primitive::kPrimChar:
+ __ Ldrh(Register(dst), src);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK(dst.Is64Bits() == Is64BitType(type));
+ __ Ldr(dst, src);
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+}
+
+void CodeGeneratorARM64::Store(Primitive::Type type,
+ vixl::CPURegister rt,
+ const vixl::MemOperand& dst) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ __ Strb(Register(rt), dst);
+ break;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ __ Strh(Register(rt), dst);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK(rt.Is64Bits() == Is64BitType(type));
+ __ Str(rt, dst);
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+}
+
+void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
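+ // The current method was spilled to sp[0] by GenerateFrameEntry; reload it
+ // as a 32-bit heap reference, hence the W register.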
+ DCHECK(current_method.IsW());
+ __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+}
+
+void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
+ HInstruction* instruction,
+ uint32_t dex_pc) {
+ __ Ldr(lr, MemOperand(tr, entry_point_offset));
+ __ Blr(lr);
+ RecordPcInfo(instruction, dex_pc);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
+
+void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
+ vixl::Register class_reg) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireW();
+ __ Ldr(temp, HeapOperand(class_reg, mirror::Class::StatusOffset()));
+ __ Cmp(temp, mirror::Class::kStatusInitialized);
+ __ B(lt, slow_path->GetEntryLabel());
+ // Even if the initialized flag is set, we may be in a situation where caches are not synced
+ // properly. Therefore, we do a memory fence.
+ __ Dmb(InnerShareable, BarrierAll);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
+ CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(ParallelMove) \
+ M(Rem)
+
+#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
+
+enum UnimplementedInstructionBreakCode {
+ // Using a base helps identify when we hit such breakpoints.
+ UnimplementedInstructionBreakCodeBaseCode = 0x900,
+#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
+#undef ENUM_UNIMPLEMENTED_INSTRUCTION
+};
+
+#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ UNUSED(instr); \
+ __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
+ } \
+ void LocationsBuilderARM64::Visit##name(H##name* instr) { \
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
+ locations->SetOut(Location::Any()); \
+ }
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
+#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
+
+#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
+#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
+
+void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
+ DCHECK_EQ(instr->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
+ Primitive::Type type = instr->GetType();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ Register dst = OutputRegister(instr);
+ Register lhs = InputRegisterAt(instr, 0);
+ Operand rhs = InputOperandAt(instr, 1);
+ if (instr->IsAdd()) {
+ __ Add(dst, lhs, rhs);
+ } else if (instr->IsAnd()) {
+ __ And(dst, lhs, rhs);
+ } else if (instr->IsOr()) {
+ __ Orr(dst, lhs, rhs);
+ } else if (instr->IsSub()) {
+ __ Sub(dst, lhs, rhs);
+ } else {
+ DCHECK(instr->IsXor());
+ __ Eor(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FPRegister dst = OutputFPRegister(instr);
+ FPRegister lhs = InputFPRegisterAt(instr, 0);
+ FPRegister rhs = InputFPRegisterAt(instr, 1);
+ if (instr->IsAdd()) {
+ __ Fadd(dst, lhs, rhs);
+ } else if (instr->IsSub()) {
+ __ Fsub(dst, lhs, rhs);
+ } else {
+ LOG(FATAL) << "Unexpected floating-point binary operation";
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected binary operation type " << type;
+ }
+}
+
+void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type type = instruction->GetType();
+ Register obj = InputRegisterAt(instruction, 0);
+ Location index = locations->InAt(1);
+ size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
+ MemOperand source(obj);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+
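+ // A constant index is folded into the data offset; otherwise the element
+ // base is computed into a temporary using a scaled register offset.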
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
+ source = MemOperand(obj, offset);
+ } else {
+ Register temp = temps.AcquireSameSizeAs(obj);
+ Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
+ __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
+ source = MemOperand(temp, offset);
+ }
+
+ codegen_->Load(type, OutputCPURegister(instruction), source);
+}
+
+void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
+ __ Ldr(OutputRegister(instruction),
+ HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
+}
+
+void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool is_object = value_type == Primitive::kPrimNot;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (is_object) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ if (value_type == Primitive::kPrimNot) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc());
+
+ } else {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = InputRegisterAt(instruction, 0);
+ CPURegister value = InputCPURegisterAt(instruction, 2);
+ Location index = locations->InAt(1);
+ size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
+ MemOperand destination(obj);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+
+ if (index.IsConstant()) {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
+ destination = MemOperand(obj, offset);
+ } else {
+ Register temp = temps.AcquireSameSizeAs(obj);
+ Register index_reg = InputRegisterAt(instruction, 1);
+ __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
+ destination = MemOperand(temp, offset);
+ }
+
+ codegen_->Store(value_type, value, destination);
+ }
+}
+
+void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64();
+ codegen_->AddSlowPath(slow_path);
+
+ __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
+ __ B(slow_path->GetEntryLabel(), hs);
+}
+
+void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register obj = InputRegisterAt(instruction, 0);
+ Register cls = InputRegisterAt(instruction, 1);
+ Register temp = temps.AcquireW();
+
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64();
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ Cbz(obj, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ Ldr(temp, HeapOperand(obj, mirror::Object::ClassOffset()));
+ __ Cmp(temp, cls);
+ __ B(ne, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
+}
+
+void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitCompare(HCompare* instruction) {
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ DCHECK_EQ(in_type, Primitive::kPrimLong);
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ vixl::Label done;
+ Register result = OutputRegister(instruction);
+ Register left = InputRegisterAt(instruction, 0);
+ Operand right = InputOperandAt(instruction, 1);
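+ // Compute the -1/0/1 result: Subs sets the flags, and the equal case
+ // branches to `done` with a zero result. Otherwise start from 1 and negate
+ // to -1 when left < right (le implies lt here, since equality was handled).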
+ __ Subs(result.X(), left, right);
+ __ B(eq, &done);
+ __ Mov(result, 1);
+ __ Cneg(result, result, le);
+ __ Bind(&done);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register lhs = InputRegisterAt(instruction, 0);
+ Operand rhs = InputOperandAt(instruction, 1);
+ Register res = RegisterFrom(locations->Out(), instruction->GetType());
+ Condition cond = ARM64Condition(instruction->GetCondition());
+
+ __ Cmp(lhs, rhs);
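+ // Materialize the condition: select 0 when the inverted condition holds,
+ // otherwise 1, i.e. res = cond ? 1 : 0.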
+ __ Csel(res, vixl::Assembler::AppropriateZeroRegFor(res), Operand(1), InvertCondition(cond));
+}
+
+#define FOR_EACH_CONDITION_INSTRUCTION(M) \
+ M(Equal) \
+ M(NotEqual) \
+ M(LessThan) \
+ M(LessThanOrEqual) \
+ M(GreaterThan) \
+ M(GreaterThanOrEqual)
+#define DEFINE_CONDITION_VISITORS(Name) \
+void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
+void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
+FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
+#undef DEFINE_CONDITION_VISITORS
+#undef FOR_EACH_CONDITION_INSTRUCTION
+
+void LocationsBuilderARM64::VisitDiv(HDiv* div) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
+ Primitive::Type type = div->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Location value = instruction->GetLocations()->InAt(0);
+
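+ // A constant zero divisor always takes the slow path; non-zero constant
+ // divisors should have been removed earlier, and a register divisor is
+ // tested with Cbz.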
+ if (value.IsConstant()) {
+ int64_t divisor = Int64ConstantFrom(value);
+ if (divisor == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ LOG(FATAL) << "Divisions by non-null constants should have been optimized away.";
+ }
+ } else {
+ __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
+ }
+}
+
+void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
+ UNUSED(constant);
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ UNUSED(exit);
+ if (kIsDebugBuild) {
+ down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
+ __ Brk(__LINE__); // TODO: Introduce special markers for such code locations.
+ }
+}
+
+void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
+ UNUSED(constant);
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
+ HBasicBlock* successor = got->GetSuccessor();
+ // TODO: Add support for emitting suspend checks.
+ if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ if (cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ HCondition* condition = cond->AsCondition();
+ vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+
+ // TODO: Support constant condition input in VisitIf.
+
+ if (condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = if_instr->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
+
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ Register lhs = InputRegisterAt(condition, 0);
+ Operand rhs = InputOperandAt(condition, 1);
+ Condition arm64_cond = ARM64Condition(condition->GetCondition());
+ if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ if (arm64_cond == eq) {
+ __ Cbz(lhs, true_target);
+ } else {
+ __ Cbnz(lhs, true_target);
+ }
+ } else {
+ __ Cmp(lhs, rhs);
+ __ B(arm64_cond, true_target);
+ }
+ }
+
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ MemOperand field = MemOperand(InputRegisterAt(instruction, 0),
+ instruction->GetFieldOffset().Uint32Value());
+ codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), field);
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ Primitive::Type field_type = instruction->GetFieldType();
+ CPURegister value = InputCPURegisterAt(instruction, 1);
+ Register obj = InputRegisterAt(instruction, 0);
+ codegen_->Store(field_type, value, MemOperand(obj, instruction->GetFieldOffset().Uint32Value()));
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(obj, Register(value));
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind =
+ instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), true); // The output does overlap inputs.
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = InputRegisterAt(instruction, 0);
+ Register cls = InputRegisterAt(instruction, 1);
+ Register out = OutputRegister(instruction);
+
+ vixl::Label done;
+
+ // Return 0 if `obj` is null.
+ // TODO: Avoid this check if we know `obj` is not null.
+ __ Mov(out, 0);
+ __ Cbz(obj, &done);
+
+ // Compare the class of `obj` with `cls`.
+ __ Ldr(out, MemOperand(obj, mirror::Object::ClassOffset().Int32Value()));
+ __ Cmp(out, cls);
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ Cset(out, eq);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathARM64();
+ codegen_->AddSlowPath(slow_path);
+ __ B(ne, slow_path->GetEntryLabel());
+ __ Mov(out, 1);
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ __ Bind(&done);
+}
+
+void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
+ locations->AddTemp(LocationFrom(x0));
+
+ InvokeDexCallingConventionVisitor calling_convention_visitor;
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
+ }
+
+ Primitive::Type return_type = invoke->GetType();
+ if (return_type != Primitive::kPrimVoid) {
+ locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
+ }
+}
+
+void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+ (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ Location receiver = invoke->GetLocations()->InAt(0);
+ Offset class_offset = mirror::Object::ClassOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+
+ // The register ip1 is required to be used for the hidden argument in
+ // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
+ UseScratchRegisterScope scratch_scope(GetVIXLAssembler());
+ scratch_scope.Exclude(ip1);
+ __ Mov(ip1, invoke->GetDexMethodIndex());
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ Ldr(temp, StackOperandFrom(receiver));
+ __ Ldr(temp, HeapOperand(temp, class_offset));
+ } else {
+ __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+ }
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ Ldr(temp, HeapOperand(temp, method_offset));
+ // lr = temp->GetEntryPoint();
+ __ Ldr(lr, HeapOperand(temp, entry_point));
+ // lr();
+ __ Blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ // Make sure that ArtMethod* is passed in W0 as per the calling convention.
+ DCHECK(temp.Is(w0));
+ size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
+ invoke->GetIndexInDexCache() * kHeapRefSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ codegen_->LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp, MemOperand(temp.X(),
+ mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp, MemOperand(temp.X(), index_in_cache));
+ // lr = temp->entry_point_from_quick_compiled_code_;
+ __ Ldr(lr, MemOperand(temp.X(),
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize).SizeValue()));
+ // lr();
+ __ Blr(lr);
+
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ Offset class_offset = mirror::Object::ClassOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp.W(), MemOperand(temp, class_offset.SizeValue()));
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ Ldr(temp.W(), MemOperand(temp, method_offset));
+ // lr = temp->GetEntryPoint();
+ __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
+ // lr();
+ __ Blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
+ Register out = OutputRegister(cls);
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ codegen_->LoadCurrentMethod(out);
+ __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ codegen_->LoadCurrentMethod(out);
+ __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
+ __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ Cbz(out, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
+
+void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
+ MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
+ __ Ldr(OutputRegister(instruction), exception);
+ __ Str(wzr, exception);
+}
+
+void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+ // Nothing to do, this is driven by the code generator.
+ UNUSED(load);
+}
+
+void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = OutputRegister(load);
+ codegen_->LoadCurrentMethod(out);
+ __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheStringsOffset()));
+ __ Ldr(out, MemOperand(out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ Cbz(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM64::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ codegen_->InvokeRuntime(instruction->IsEnter()
+ ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RegisterOrConstant(neg->InputAt(0)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(LocationFrom(x0));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(2)));
+}
+
+void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ InvokeRuntimeCallingConvention calling_convention;
+ Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
+ DCHECK(type_index.Is(w0));
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
+ DCHECK(current_method.Is(w1));
+ codegen_->LoadCurrentMethod(current_method);
+ __ Mov(type_index, instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pAllocArrayWithAccessCheck), instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
+ DCHECK(type_index.Is(w0));
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
+ DCHECK(current_method.Is(w1));
+ codegen_->LoadCurrentMethod(current_method);
+ __ Mov(type_index, instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pAllocObjectWithAccessCheck), instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
+ switch (instruction->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), Operand(1));
+ break;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location obj = locations->InAt(0);
+ if (obj.IsRegister()) {
+ __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
+ } else {
+ DCHECK(obj.IsConstant()) << obj;
+ DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ __ B(slow_path->GetEntryLabel());
+ }
+}
+
+void LocationsBuilderARM64::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
+ // Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
+}
+
+void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type return_type = instruction->InputAt(0)->GetType();
+ locations->SetInAt(0, ARM64ReturnLocation(return_type));
+}
+
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
+ UNUSED(instruction);
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ instruction->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ UNUSED(instruction);
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
+}
+
+void LocationsBuilderARM64::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ Register cls = InputRegisterAt(instruction, 0);
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+ codegen_->Load(instruction->GetType(), OutputCPURegister(instruction), MemOperand(cls, offset));
+}
+
+void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ CPURegister value = InputCPURegisterAt(instruction, 1);
+ Register cls = InputRegisterAt(instruction, 0);
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+ Primitive::Type field_type = instruction->GetFieldType();
+
+ codegen_->Store(field_type, value, MemOperand(cls, offset));
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(cls, Register(value));
+ }
+}
+
+void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ // TODO: Improve support for suspend checks.
+ SuspendCheckSlowPathARM64* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, nullptr);
+ codegen_->AddSlowPath(slow_path);
+
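+ // Decrement the suspend counter held in wSuspend; when it reaches zero or
+ // below, take the slow path, which returns here afterwards.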
+ __ Subs(wSuspend, wSuspend, 1);
+ __ B(slow_path->GetEntryLabel(), le);
+ __ Bind(slow_path->GetReturnLabel());
+}
+
+void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+ // Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
+}
+
+void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(
+ QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type input_type = conversion->GetInputType();
+ Primitive::Type result_type = conversion->GetResultType();
+ if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
+ (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
+ LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
+ }
+
+ if (IsFPType(input_type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+
+ if (IsFPType(result_type)) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+
+ DCHECK_NE(input_type, result_type);
+
+ if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
+ int result_size = Primitive::ComponentSize(result_type);
+ int input_size = Primitive::ComponentSize(input_type);
+ int min_size = kBitsPerByte * std::min(result_size, input_size);
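+ // Conversions to char, and widenings from char, zero-extend (Ubfx); all
+ // other integral conversions sign-extend (Sbfx), keeping the min_size low bits.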
+ if ((result_type == Primitive::kPrimChar) ||
+ ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
+ __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, min_size);
+ } else {
+ __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, min_size);
+ }
+ return;
+ }
+
+ LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+ << " to " << result_type;
+}
+
+void LocationsBuilderARM64::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+#undef __
+#undef QUICK_ENTRY_POINT
+
+} // namespace arm64
+} // namespace art
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
new file mode 100644
index 0000000..a40f27f
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+
+#include "code_generator.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "arch/arm64/quick_method_frame_info_arm64.h"
+
+namespace art {
+namespace arm64 {
+
+class CodeGeneratorARM64;
+class SlowPathCodeARM64;
+
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kArm64WordSize = kArm64PointerSize;
+
+static const vixl::Register kParameterCoreRegisters[] = {
+ vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
+};
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+static const vixl::FPRegister kParameterFPRegisters[] = {
+ vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7
+};
+static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
+
+const vixl::Register tr = vixl::x18; // Thread Register
+const vixl::Register wSuspend = vixl::w19; // Suspend Register
+const vixl::Register xSuspend = vixl::x19;
+
+const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
+const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
+const vixl::CPURegList runtime_reserved_core_registers(tr, xSuspend, vixl::lr);
+const vixl::CPURegList quick_callee_saved_registers(vixl::CPURegister::kRegister,
+ vixl::kXRegSize,
+ kArm64CalleeSaveRefSpills);
+
+Location ARM64ReturnLocation(Primitive::Type return_type);
+
+class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFPRegisters,
+ kParameterFPRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type) {
+ return ARM64ReturnLocation(return_type);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitor() : gp_index_(0), fp_index_(0), stack_index_(0) {}
+
+ Location GetNextLocation(Primitive::Type type);
+ Location GetReturnLocation(Primitive::Type return_type) {
+ return calling_convention.GetReturnLocation(return_type);
+ }
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+ // The current index for core registers.
+ uint32_t gp_index_;
+ // The current index for floating-point registers.
+ uint32_t fp_index_;
+ // The current stack index.
+ uint32_t stack_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
+class InstructionCodeGeneratorARM64 : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void LoadCurrentMethod(XRegister reg);
+
+ Arm64Assembler* GetAssembler() const { return assembler_; }
+ vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+
+ private:
+ void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, vixl::Register class_reg);
+ void HandleBinaryOp(HBinaryOperation* instr);
+
+ Arm64Assembler* const assembler_;
+ CodeGeneratorARM64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64);
+};
+
+class LocationsBuilderARM64 : public HGraphVisitor {
+ public:
+ explicit LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ void HandleBinaryOp(HBinaryOperation* instr);
+ void HandleInvoke(HInvoke* instr);
+
+ CodeGeneratorARM64* const codegen_;
+ InvokeDexCallingConventionVisitor parameter_visitor_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
+};
+
+class CodeGeneratorARM64 : public CodeGenerator {
+ public:
+ explicit CodeGeneratorARM64(HGraph* graph);
+ virtual ~CodeGeneratorARM64() {}
+
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+
+ static const vixl::CPURegList& GetFramePreservedRegisters() {
+ static const vixl::CPURegList frame_preserved_regs =
+ vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize, vixl::lr.Bit());
+ return frame_preserved_regs;
+ }
+ static int GetFramePreservedRegistersSize() {
+ return GetFramePreservedRegisters().TotalSizeInBytes();
+ }
+
+ void Bind(HBasicBlock* block) OVERRIDE;
+
+ vixl::Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_ + block->GetBlockId();
+ }
+
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+
+ size_t GetWordSize() const OVERRIDE {
+ return kArm64WordSize;
+ }
+
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ vixl::Label* block_entry_label = GetLabelOf(block);
+ DCHECK(block_entry_label->IsBound());
+ return block_entry_label->location();
+ }
+
+ size_t FrameEntrySpillSize() const OVERRIDE;
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+ vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+
+ // Emit a write barrier.
+ void MarkGCCard(vixl::Register object, vixl::Register value);
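+ // (The barrier dirties the card-table entry covering `object` so the GC will
+ // re-scan it for the reference stored in `value`.)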
+
+ // Register allocation.
+
+ void SetupBlockedRegisters() const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
+ LOG(INFO) << "CodeGeneratorARM64::SaveCoreRegister()";
+ return kArm64WordSize;
+ }
+
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
+ LOG(INFO) << "CodeGeneratorARM64::RestoreCoreRegister()";
+ return kArm64WordSize;
+ }
+
+ // The number of registers that can be allocated. The register allocator may
+ // decide to reserve and not use a few of them.
+ // We do not consider registers sp, xzr, wzr. They are either not allocatable
+ // (xzr, wzr), or make for poor allocatable registers (sp alignment
+ // requirements, etc.). This also facilitates our task as all other registers
+ // can easily be mapped to or from their type and index or code.
+ static const int kNumberOfAllocatableRegisters = vixl::kNumberOfRegisters - 1;
+ static const int kNumberOfAllocatableFPRegisters = vixl::kNumberOfFPRegisters;
+ static constexpr int kNumberOfAllocatableRegisterPairs = 0;
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return InstructionSet::kArm64;
+ }
+
+ void Initialize() OVERRIDE {
+ HGraph* graph = GetGraph();
+ int length = graph->GetBlocks().Size();
+ block_labels_ = graph->GetArena()->AllocArray<vixl::Label>(length);
+ for (int i = 0; i < length; ++i) {
+ new(block_labels_ + i) vixl::Label();
+ }
+ }
+
+ // Code generation helpers.
+ void MoveConstant(vixl::CPURegister destination, HConstant* constant);
+ void MoveHelper(Location destination, Location source, Primitive::Type type);
+ void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
+ void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
+ void LoadCurrentMethod(vixl::Register current_method);
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(int32_t offset, HInstruction* instruction, uint32_t dex_pc);
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: MoveResolver";
+ return nullptr;
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ vixl::Label* block_labels_;
+
+ LocationsBuilderARM64 location_builder_;
+ InstructionCodeGeneratorARM64 instruction_visitor_;
+ Arm64Assembler assembler_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 041acdf..3c53cea 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -36,12 +36,15 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { };
static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;
+// Marker for places that can be updated once we don't follow the quick ABI.
+static constexpr bool kFollowsQuickABI = true;
+
class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
public:
InvokeRuntimeCallingConvention()
@@ -85,6 +88,41 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
};
+class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero)));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
+};
+
+class DivRemMinusOneSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ if (is_div_) {
+ __ negl(reg_);
+ } else {
+ __ movl(reg_, Immediate(0));
+ }
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ Register reg_;
+ bool is_div_;
+ DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86);
+};
+
class StackOverflowCheckSlowPathX86 : public SlowPathCodeX86 {
public:
StackOverflowCheckSlowPathX86() {}
@@ -110,9 +148,14 @@
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x86_codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -157,6 +200,141 @@
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
};
+class LoadStringSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0));
+ __ movl(calling_convention.GetRegisterAt(1), Immediate(instruction_->GetStringIndex()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ codegen->RestoreLiveRegisters(locations);
+
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
+};
+
+class LoadClassSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ LoadClassSlowPathX86(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
+ x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ __ fs()->call(Address::Absolute(do_clinit_
+ ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
+ : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
+ codegen->RecordPcInfo(at_, dex_pc_);
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ x86_codegen->Move32(out, Location::RegisterLocation(EAX));
+ }
+
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
+};
+
+class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ TypeCheckSlowPathX86(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
+ : instruction_(instruction),
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ x86_codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+ }
+
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+ if (instruction_->IsInstanceOf()) {
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
+ codegen->RestoreLiveRegisters(locations);
+
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HInstruction* const instruction_;
+ const Location class_to_check_;
+ const Location object_class_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
+};
+
#undef __
#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
@@ -182,12 +360,14 @@
stream << X86ManagedRegister::FromXmmRegister(XmmRegister(reg));
}
-void CodeGeneratorX86::SaveCoreRegister(Location stack_location, uint32_t reg_id) {
- __ movl(Address(ESP, stack_location.GetStackIndex()), static_cast<Register>(reg_id));
+size_t CodeGeneratorX86::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ movl(Address(ESP, stack_index), static_cast<Register>(reg_id));
+ return kX86WordSize;
}
-void CodeGeneratorX86::RestoreCoreRegister(Location stack_location, uint32_t reg_id) {
- __ movl(static_cast<Register>(reg_id), Address(ESP, stack_location.GetStackIndex()));
+size_t CodeGeneratorX86::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ movl(static_cast<Register>(reg_id), Address(ESP, stack_index));
+ return kX86WordSize;
}
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
@@ -255,6 +435,7 @@
blocked_core_registers_[ESP] = true;
// TODO: We currently don't use Quick's callee saved registers.
+ DCHECK(kFollowsQuickABI);
blocked_core_registers_[EBP] = true;
blocked_core_registers_[ESI] = true;
blocked_core_registers_[EDI] = true;
@@ -311,7 +492,7 @@
__ Bind(GetLabelOf(block));
}
-void InstructionCodeGeneratorX86::LoadCurrentMethod(Register reg) {
+void CodeGeneratorX86::LoadCurrentMethod(Register reg) {
__ movl(reg, Address(ESP, kCurrentMethodStackOffset));
}
@@ -365,7 +546,9 @@
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(index);
+ // On X86, the register index and stack index of a quick parameter are the same, since
+ // we are passing floating point values in core registers.
+ return Location::QuickParameter(index, index);
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -401,7 +584,7 @@
__ movss(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
} else if (source.IsFpuRegister()) {
@@ -425,12 +608,13 @@
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ movl(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ movl(destination.AsRegisterPairHigh<Register>(), Address(ESP,
- calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
+ calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
__ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex()));
@@ -439,19 +623,20 @@
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegister()) {
- __ movl(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
- __ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)),
+ __ movl(calling_convention.GetRegisterAt(register_index), source.AsRegisterPairLow<Register>());
+ __ movl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)),
source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movl(calling_convention.GetRegisterAt(argument_index),
+ __ movl(calling_convention.GetRegisterAt(register_index),
Address(ESP, source.GetStackIndex()));
__ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
- __ popl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)));
+ __ popl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)));
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
@@ -460,17 +645,18 @@
LOG(FATAL) << "Unimplemented";
}
} else {
- DCHECK(destination.IsDoubleStackSlot());
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
if (source.IsRegisterPair()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
source.AsRegisterPairHigh<Register>());
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
__ movl(Address(ESP, destination.GetStackIndex()),
- calling_convention.GetRegisterAt(argument_index));
- DCHECK_EQ(calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize(),
+ calling_convention.GetRegisterAt(register_index));
+ DCHECK_EQ(calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize(),
static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize)));
} else if (source.IsFpuRegister()) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
@@ -485,21 +671,43 @@
}
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<Register>(), imm);
- } else {
- __ movl(Address(ESP, location.GetStackIndex()), imm);
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<Register>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
+ __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), instruction);
+ }
}
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
- __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
} else {
- __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
}
} else if (instruction->IsLoadLocal()) {
int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
@@ -532,12 +740,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- Move32(location, instruction->GetLocations()->Out());
+ Move32(location, locations->Out());
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- Move64(location, instruction->GetLocations()->Out());
+ Move64(location, locations->Out());
break;
default:
@@ -577,6 +785,7 @@
}
void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -588,7 +797,7 @@
new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
HInstruction* cond = if_instr->InputAt(0);
if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
- locations->SetInAt(0, Location::Any(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::Any());
}
}
@@ -666,6 +875,7 @@
void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -694,13 +904,14 @@
}
void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86::VisitCondition(HCondition* comp) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(1, Location::Any(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
if (comp->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister());
}
@@ -783,6 +994,7 @@
void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -793,6 +1005,29 @@
void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -800,6 +1035,7 @@
}
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -869,9 +1105,6 @@
void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kX86WordSize;
// TODO: Implement all kinds of calls:
// 1) boot -> boot
@@ -881,13 +1114,14 @@
// Currently we implement the app -> app logic, which looks up in the resolve cache.
// temp = method;
- LoadCurrentMethod(temp);
+ codegen_->LoadCurrentMethod(temp);
// temp = temp->dex_cache_resolved_methods_;
__ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
// temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(
+ temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -951,12 +1185,447 @@
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ call(Address(
+ temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+ // Add the hidden argument.
+ invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM0));
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+ (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ // Set the hidden argument.
+ __ movl(temp, Immediate(invoke->GetDexMethodIndex()));
+ __ movd(invoke->GetLocations()->GetTemp(1).As<XmmRegister>(), temp);
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.As<Register>(), class_offset));
+ }
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86WordSize).Int32Value()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ // Output overlaps as we need a fresh (zero-initialized)
+ // register to perform subtraction from zero.
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
+ LocationSummary* locations = neg->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ DCHECK(in.IsRegister());
+ DCHECK(in.Equals(out));
+ __ negl(out.As<Register>());
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK(in.IsRegisterPair());
+ DCHECK(in.Equals(out));
+ __ negl(out.AsRegisterPairLow<Register>());
+ // Negation is similar to subtraction from zero. The least
+ // significant half triggers a borrow when it is different from
+ // zero; to take it into account, add 1 to the most significant
+ // half if the carry flag (CF) is set to 1 after the first NEGL
+ // operation.
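+ // Worked example (sketch): negating 0x00000001_00000000, the low half is 0,
+ // so NEGL leaves it at 0 with CF clear; ADCL then adds 0 and the final NEGL
+ // turns the high half 1 into 0xFFFFFFFF, giving 0xFFFFFFFF_00000000 = -2^32.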
+ __ adcl(out.AsRegisterPairHigh<Register>(), Immediate(0));
+ __ negl(out.AsRegisterPairHigh<Register>());
+ break;
+
+ case Primitive::kPrimFloat:
+ DCHECK(!in.Equals(out));
+ // out = 0
+ __ xorps(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subss(out.As<XmmRegister>(), in.As<XmmRegister>());
+ break;
+
+ case Primitive::kPrimDouble:
+ DCHECK(!in.Equals(out));
+ // out = 0
+ __ xorpd(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subsd(out.As<XmmRegister>(), in.As<XmmRegister>());
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<Register>(), in.As<ByteRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int8_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ if (in.IsRegister()) {
+ __ movsxw(out.As<Register>(), in.As<Register>());
+ } else if (in.IsStackSlot()) {
+ __ movsxw(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int16_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ if (in.IsRegisterPair()) {
+ __ movl(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
+ DCHECK_EQ(in.As<Register>(), EAX);
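+ // cdq sign-extends EAX into EDX, so the 64-bit result ends up in EDX:EAX,
+ // matching the fixed (EAX, EDX) output pair checked above.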
+ __ cdq();
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ if (in.IsRegister()) {
+ __ movzxw(out.As<Register>(), in.As<Register>());
+ } else if (in.IsStackSlot()) {
+ __ movzxw(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<uint16_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ // Processing a Dex `int-to-float' instruction.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ __ cvtsi2ss(out.As<XmmRegister>(), in.As<Register>());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ // Processing a Dex `int-to-double' instruction.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ __ cvtsi2sd(out.As<XmmRegister>(), in.As<Register>());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ };
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -987,16 +1656,13 @@
LocationSummary* locations = add->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
-
+ DCHECK(first.Equals(locations->Out()));
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.As<Register>(), locations->Out().As<Register>());
if (second.IsRegister()) {
__ addl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ addl(first.As<Register>(), imm);
+ __ addl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
__ addl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
@@ -1004,11 +1670,7 @@
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsRegisterPairLow<Register>(),
- locations->Out().AsRegisterPairLow<Register>());
- DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
- locations->Out().AsRegisterPairHigh<Register>());
- if (second.IsRegister()) {
+ if (second.IsRegisterPair()) {
__ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
@@ -1053,16 +1715,16 @@
locations->SetOut(Location::SameAsFirstInput());
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1070,52 +1732,43 @@
LocationSummary* locations = sub->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.As<Register>(),
- locations->Out().As<Register>());
if (second.IsRegister()) {
- __ subl(first.As<Register>(),
- second.As<Register>());
+ __ subl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(first.As<Register>(), imm);
+ __ subl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ subl(first.As<Register>(),
- Address(ESP, second.GetStackIndex()));
+ __ subl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsRegisterPairLow<Register>(),
- locations->Out().AsRegisterPairLow<Register>());
- DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
- locations->Out().AsRegisterPairHigh<Register>());
- if (second.IsRegister()) {
- __ subl(first.AsRegisterPairLow<Register>(),
- second.AsRegisterPairLow<Register>());
- __ sbbl(first.AsRegisterPairHigh<Register>(),
- second.AsRegisterPairHigh<Register>());
+ if (second.IsRegisterPair()) {
+ __ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
- __ subl(first.AsRegisterPairLow<Register>(),
- Address(ESP, second.GetStackIndex()));
+ __ subl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
__ sbbl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1143,16 +1796,16 @@
locations->AddTemp(Location::RegisterLocation(EDX));
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
+ }
default:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
}
}
@@ -1214,15 +1867,265 @@
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ mulss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ mulsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ bool is_div = instruction->IsDiv();
+
+ switch (instruction->GetResultType()) {
+ case Primitive::kPrimInt: {
+ Register second_reg = second.As<Register>();
+ DCHECK_EQ(EAX, first.As<Register>());
+ DCHECK_EQ(is_div ? EAX : EDX, out.As<Register>());
+
+ SlowPathCodeX86* slow_path =
+ new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(out.As<Register>(), is_div);
+ codegen_->AddSlowPath(slow_path);
+
+ // 0x80000000/-1 triggers an arithmetic exception!
+ // Dividing by -1 is actually negation, and -0x80000000 = 0x80000000 in two's
+ // complement, so it's safe to just use negl instead of more complex comparisons.
+
+ __ cmpl(second_reg, Immediate(-1));
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ // edx:eax <- sign extension of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
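+ // (idivl truncates toward zero; e.g. dividing 7 by -3 leaves EAX = -2 and
+ // EDX = 1, the remainder taking the sign of the dividend.)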
+ __ idivl(second_reg);
+
+ __ Bind(slow_path->GetExitLabel());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(EAX, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());
+
+ if (is_div) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv)));
+ } else {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLmod)));
+ }
+ uint32_t dex_pc = is_div
+ ? instruction->AsDiv()->GetDexPc()
+ : instruction->AsRem()->GetDexPc();
+ codegen_->RecordPcInfo(instruction, dex_pc);
+
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for GenerateDivRemIntegral " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderX86::VisitDiv(HDiv* div) {
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ // Intel uses edx:eax as the dividend.
+ locations->AddTemp(Location::RegisterLocation(EDX));
+ break;
+ }
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // Runtime helper puts the result in EAX, EDX.
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
+ LocationSummary* locations = div->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GenerateDivRemIntegral(div);
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ DCHECK(first.Equals(out));
+ __ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DCHECK(first.Equals(out));
+ __ divsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void LocationsBuilderX86::VisitRem(HRem* rem) {
+ LocationSummary::CallKind call_kind = rem->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+ switch (rem->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RegisterLocation(EDX));
+ break;
+ }
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // Runtime helper puts the result in EAX, EDX.
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ LOG(FATAL) << "Unimplemented rem type " << rem->GetResultType();
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << rem->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitRem(HRem* rem) {
+ Primitive::Type type = rem->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GenerateDivRemIntegral(rem);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ LOG(FATAL) << "Unimplemented rem type " << type;
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::Any());
+ break;
+ }
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (!instruction->IsConstant()) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<Register>(), value.As<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
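+ // A 64-bit value is zero iff or-ing its two 32-bit halves yields zero, so a
+ // single orl into the temp sets ZF for the j(kEqual) below.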
+ Register temp = locations->GetTemp(0).As<Register>();
+ __ movl(temp, value.AsRegisterPairLow<Register>());
+ __ orl(temp, value.AsRegisterPairHigh<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
}
@@ -1237,7 +2140,7 @@
void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
InvokeRuntimeCallingConvention calling_convention;
- LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
__ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
__ fs()->call(
@@ -1247,6 +2150,28 @@
DCHECK(!codegen_->IsLeafMethod());
}
+void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ locations->SetOut(Location::RegisterLocation(EAX));
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
+
+ __ fs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocArrayWithAccessCheck)));
+
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -1260,32 +2185,49 @@
}
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
+ UNUSED(instruction);
}
-void LocationsBuilderX86::VisitNot(HNot* instruction) {
+void LocationsBuilderX86::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-void InstructionCodeGeneratorX86::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
+void InstructionCodeGeneratorX86::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
+ Location in = locations->InAt(0);
Location out = locations->Out();
- DCHECK_EQ(locations->InAt(0).As<Register>(), out.As<Register>());
- __ xorl(out.As<Register>(), Immediate(1));
+ DCHECK(in.Equals(out));
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ xorl(out.As<Register>(), Immediate(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ notl(out.As<Register>());
+ break;
+
+ case Primitive::kPrimLong:
+ __ notl(out.AsRegisterPairLow<Register>());
+ __ notl(out.AsRegisterPairHigh<Register>());
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(1, Location::Any(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
- Label greater, done;
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
@@ -1337,6 +2279,7 @@
}
void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1345,20 +2288,21 @@
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
Primitive::Type field_type = instruction->GetFieldType();
- bool is_object_type = field_type == Primitive::kPrimNot;
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
+
bool is_byte_type = (field_type == Primitive::kPrimBoolean)
|| (field_type == Primitive::kPrimByte);
// The register allocator does not support multiple
// inputs that die at entry with one in a specific register.
- bool dies_at_entry = !is_object_type && !is_byte_type;
if (is_byte_type) {
// Ensure the value is in a byte register.
- locations->SetInAt(1, Location::RegisterLocation(EAX), dies_at_entry);
+ locations->SetInAt(1, Location::RegisterLocation(EAX));
} else {
- locations->SetInAt(1, Location::RequiresRegister(), dies_at_entry);
+ locations->SetInAt(1, Location::RequiresRegister());
}
// Temporary registers for the write barrier.
- if (is_object_type) {
+ if (needs_write_barrier) {
locations->AddTemp(Location::RequiresRegister());
// Ensure the card is in a byte register.
locations->AddTemp(Location::RegisterLocation(ECX));
@@ -1391,7 +2335,7 @@
Register value = locations->InAt(1).As<Register>();
__ movl(Address(obj, offset), value);
- if (field_type == Primitive::kPrimNot) {
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
Register temp = locations->GetTemp(0).As<Register>();
Register card = locations->GetTemp(1).As<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
@@ -1406,10 +2350,18 @@
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(obj, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(obj, offset), value);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -1431,8 +2383,8 @@
void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -1479,10 +2431,18 @@
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(obj, offset));
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -1521,10 +2481,9 @@
void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
@@ -1622,11 +2581,20 @@
void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+
+ DCHECK(kFollowsQuickABI);
+ bool not_enough_registers = needs_write_barrier
+ && !instruction->GetValue()->IsConstant()
+ && !instruction->GetIndex()->IsConstant();
+ bool needs_runtime_call = instruction->NeedsTypeCheck() || not_enough_registers;
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- value_type == Primitive::kPrimNot ? LocationSummary::kCall : LocationSummary::kNoCall);
+ needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
- if (value_type == Primitive::kPrimNot) {
+ if (needs_runtime_call) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1637,16 +2605,19 @@
  // We need the inputs to be different from the output in the case of a long operation.
// In case of a byte operation, the register allocator does not support multiple
// inputs that die at entry with one in a specific register.
- bool dies_at_entry = value_type != Primitive::kPrimLong && !is_byte_type;
- locations->SetInAt(0, Location::RequiresRegister(), dies_at_entry);
- locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)), dies_at_entry);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (is_byte_type) {
// Ensure the value is in a byte register.
- locations->SetInAt(2, Location::ByteRegisterOrConstant(
- EAX, instruction->InputAt(2)), dies_at_entry);
+ locations->SetInAt(2, Location::ByteRegisterOrConstant(EAX, instruction->InputAt(2)));
} else {
- locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)), dies_at_entry);
+ locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
+ }
+ // Temporary registers for the write barrier.
+ if (needs_write_barrier) {
+ locations->AddTemp(Location::RequiresRegister());
+ // Ensure the card is in a byte register.
+ locations->AddTemp(Location::RegisterLocation(ECX));
}
}
}
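The runtime call taken when NeedsTypeCheck() holds exists because Java arrays are covariant: the static type of the array never guarantees that a reference store is legal, so an unproven store must be checked dynamically (and may throw ArrayStoreException). A minimal, self-contained sketch of that check, using illustrative names rather than ART's API:

// Illustrative only: the dynamic check behind a reference array store.
struct Klass {
  const Klass* component_type;  // non-null for array classes
  const Klass* super_class;
};

// Simplified: a real runtime also walks implemented interfaces.
bool ReferenceStoreIsLegal(const Klass* array_klass, const Klass* value_klass) {
  if (value_klass == nullptr) return true;  // storing null is always legal
  for (const Klass* k = value_klass; k != nullptr; k = k->super_class) {
    if (k == array_klass->component_type) return true;
  }
  return false;  // would raise ArrayStoreException
}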
@@ -1657,6 +2628,9 @@
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_runtime_call = locations->WillCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
switch (value_type) {
case Primitive::kPrimBoolean:
@@ -1705,34 +2679,45 @@
break;
}
- case Primitive::kPrimInt: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.As<Register>());
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ if (!needs_runtime_call) {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ if (value.IsRegister()) {
+ __ movl(Address(obj, offset), value.As<Register>());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ __ movl(Address(obj, offset),
+ Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ }
} else {
- __ movl(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ DCHECK(index.IsRegister()) << index;
+ if (value.IsRegister()) {
+ __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
+ value.As<Register>());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
+ Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ }
+ }
+
+ if (needs_write_barrier) {
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
+ codegen_->MarkGCCard(temp, card, obj, value.As<Register>());
}
} else {
- if (value.IsRegister()) {
- __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
- value.As<Register>());
- } else {
- __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ DCHECK(!codegen_->IsLeafMethod());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject)));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
break;
}
- case Primitive::kPrimNot: {
- DCHECK(!codegen_->IsLeafMethod());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject)));
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- break;
- }
-
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
@@ -1776,8 +2761,8 @@
void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
instruction->SetLocations(locations);
}
@@ -1818,9 +2803,11 @@
void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1960,5 +2947,460 @@
__ popl(static_cast<Register>(reg));
}
+void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
+ Register out = cls->GetLocations()->Out().As<Register>();
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ codegen_->LoadCurrentMethod(out);
+ __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ codegen_->LoadCurrentMethod(out);
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
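The non-referrer path above reads the resolved type straight out of the current method's dex cache and only falls back to the slow path when the entry is still null (or when the class additionally needs its static initializer run). A minimal sketch of that fast/slow split, with placeholder types and helpers that are not ART's API:

#include <cstddef>

// Illustrative sketch of the control flow the emitted code follows.
struct DexCacheSketch {
  void* resolved_types[16];  // indexed by type_index; nullptr == unresolved
};

void* LoadClassSketch(DexCacheSketch* cache,
                      size_t type_index,
                      bool needs_clinit,
                      void* (*resolve_type_slow_path)(size_t),
                      void (*ensure_initialized)(void*)) {
  void* cls = cache->resolved_types[type_index];
  if (cls == nullptr) {
    cls = resolve_type_slow_path(type_index);  // runtime call; may also run <clinit>
  } else if (needs_clinit) {
    ensure_initialized(cls);  // shared with the ClinitCheck slow path below
  }
  return cls;
}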
+
+void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
+  // We assume the class is not null.
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
+
+void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
+ SlowPathCodeX86* slow_path, Register class_reg) {
+ __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
+ Immediate(mirror::Class::kStatusInitialized));
+ __ j(kLess, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ // No need for memory fence, thanks to the X86 memory model.
+}
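The "no memory fence" remark relies on x86's strong load ordering: once this thread observes the initialized status, the stores made by the initializing thread are already visible, so a plain load is enough; weakly ordered CPUs would need a load-acquire or an explicit barrier at this point. A portable sketch of the same check (status values are illustrative, not mirror::Class's real constants):

#include <atomic>

enum ClassStatusSketch { kNotReadySketch = 0, kInitializedSketch = 10 };

bool IsVisiblyInitialized(const std::atomic<int>* status) {
  // On x86 an ordinary load already has acquire semantics; on weaker
  // architectures this acquire load turns into an explicit barrier.
  return status->load(std::memory_order_acquire) >= kInitializedSketch;
}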
+
+void LocationsBuilderX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register cls = locations->InAt(0).As<Register>();
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean: {
+ Register out = locations->Out().As<Register>();
+ __ movzxb(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ Register out = locations->Out().As<Register>();
+ __ movsxb(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ Register out = locations->Out().As<Register>();
+ __ movsxw(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ Register out = locations->Out().As<Register>();
+ __ movzxw(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ Register out = locations->Out().As<Register>();
+ __ movl(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: support volatile.
+ __ movl(locations->Out().AsRegisterPairLow<Register>(), Address(cls, offset));
+ __ movl(locations->Out().AsRegisterPairHigh<Register>(), Address(cls, kX86WordSize + offset));
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ Primitive::Type field_type = instruction->GetFieldType();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1));
+ bool is_byte_type = (field_type == Primitive::kPrimBoolean)
+ || (field_type == Primitive::kPrimByte);
+ // The register allocator does not support multiple
+ // inputs that die at entry with one in a specific register.
+ if (is_byte_type) {
+ // Ensure the value is in a byte register.
+ locations->SetInAt(1, Location::RegisterLocation(EAX));
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ // Temporary registers for the write barrier.
+ if (needs_write_barrier) {
+ locations->AddTemp(Location::RequiresRegister());
+ // Ensure the card is in a byte register.
+ locations->AddTemp(Location::RegisterLocation(ECX));
+ }
+}
+
+void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register cls = locations->InAt(0).As<Register>();
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+ Primitive::Type field_type = instruction->GetFieldType();
+
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ ByteRegister value = locations->InAt(1).As<ByteRegister>();
+ __ movb(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ Register value = locations->InAt(1).As<Register>();
+ __ movw(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ Register value = locations->InAt(1).As<Register>();
+ __ movl(Address(cls, offset), value);
+
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
+ codegen_->MarkGCCard(temp, card, cls, value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Location value = locations->InAt(1);
+ __ movl(Address(cls, offset), value.AsRegisterPairLow<Register>());
+ __ movl(Address(cls, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
+ }
+}
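The two temporaries requested in the locations builder feed MarkGCCard: after a reference store, the card covering the written object is dirtied so the garbage collector can find cross-references without scanning the whole heap, and the card register must be byte-addressable because only a single byte is written. Conceptually the barrier amounts to the following sketch (both constants are placeholders, not ART's actual card-table parameters):

#include <cstddef>
#include <cstdint>

constexpr size_t kCardShiftSketch = 7;      // assumed card size of 128 bytes
constexpr uint8_t kCardDirtySketch = 0x70;  // assumed "dirty" marker value

inline void MarkCardSketch(uint8_t* card_table_base, const void* written_object) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(written_object);
  card_table_base[addr >> kCardShiftSketch] = kCardDirtySketch;
}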
+
+void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = load->GetLocations()->Out().As<Register>();
+ codegen_->LoadCurrentMethod(out);
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) {
+ Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+ __ fs()->movl(load->GetLocations()->Out().As<Register>(), address);
+ __ fs()->movl(address, Immediate(0));
+}
+
+void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeliverException)));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register out = locations->Out().As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeX86* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ __ movl(out, Address(obj, class_offset));
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(ESP, cls.GetStackIndex()));
+ }
+
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ }
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
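For a final class a single pointer comparison decides the result; otherwise equality is only the fast path and a mismatch defers to the runtime, which walks the supertype hierarchy. The overall sequence reduces to the following sketch, with function pointers standing in for the class load and the slow path (illustrative, not ART's API):

bool InstanceOfSketch(const void* obj,
                      const void* cls,
                      const void* (*load_class_of)(const void*),
                      bool is_class_final,
                      bool (*runtime_slow_path)(const void*, const void*)) {
  if (obj == nullptr) return false;           // null is never an instance
  const void* obj_class = load_class_of(obj);
  if (obj_class == cls) return true;          // fast path: exact class match
  if (is_class_final) return false;           // a final class has no subclasses
  return runtime_slow_path(obj_class, cls);   // walks supertypes and interfaces
}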
+
+void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ __ movl(temp, Address(obj, class_offset));
+
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ fs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject)));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), second.As<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), second.As<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), second.As<Register>());
+ }
+ } else if (second.IsConstant()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (second.IsRegisterPair()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ andl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ orl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ xorl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ andl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ orl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ xorl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ }
+ }
+ }
+}
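The long case above can process the low and high halves of the register pair independently because and/or/xor never carry information across bit positions, unlike add or sub. A small sanity-check of that identity:

#include <cstdint>

// (a & b) computed the way the 32-bit code generator does it: one op per half.
uint64_t AndViaHalves(uint64_t a, uint64_t b) {
  uint32_t lo = static_cast<uint32_t>(a) & static_cast<uint32_t>(b);
  uint32_t hi = static_cast<uint32_t>(a >> 32) & static_cast<uint32_t>(b >> 32);
  return (static_cast<uint64_t>(hi) << 32) | lo;
}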
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index db8b9ab..0aff6cc 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -25,9 +25,11 @@
namespace art {
namespace x86 {
-static constexpr size_t kX86WordSize = 4;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kX86WordSize = kX86PointerSize;
class CodeGeneratorX86;
+class SlowPathCodeX86;
static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
static constexpr RegisterPair kParameterCorePairRegisters[] = { ECX_EDX, EDX_EBX };
@@ -70,10 +72,10 @@
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
: ParallelMoveResolver(allocator), codegen_(codegen) {}
- virtual void EmitMove(size_t index) OVERRIDE;
- virtual void EmitSwap(size_t index) OVERRIDE;
- virtual void SpillScratch(int reg) OVERRIDE;
- virtual void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
X86Assembler* GetAssembler() const;
@@ -93,15 +95,16 @@
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
+ private:
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
void HandleInvoke(HInvoke* invoke);
- private:
CodeGeneratorX86* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -113,14 +116,12 @@
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void LoadCurrentMethod(Register reg);
-
X86Assembler* GetAssembler() const { return assembler_; }
private:
@@ -128,6 +129,9 @@
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeX86* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
+ void GenerateDivRemIntegral(HBinaryOperation* instruction);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
@@ -140,48 +144,52 @@
explicit CodeGeneratorX86(HGraph* graph);
virtual ~CodeGeneratorX86() {}
- virtual void GenerateFrameEntry() OVERRIDE;
- virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(HBasicBlock* block) OVERRIDE;
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
- virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
- virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const OVERRIDE {
return kX86WordSize;
}
- virtual size_t FrameEntrySpillSize() const OVERRIDE;
+ size_t FrameEntrySpillSize() const OVERRIDE;
- virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
- virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE {
return &instruction_visitor_;
}
- virtual X86Assembler* GetAssembler() OVERRIDE {
+ X86Assembler* GetAssembler() OVERRIDE {
return &assembler_;
}
- virtual void SetupBlockedRegisters() const OVERRIDE;
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return GetLabelOf(block)->Position();
+ }
- virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
- virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
- virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverX86* GetMoveResolver() {
+ ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ InstructionSet GetInstructionSet() const OVERRIDE {
return InstructionSet::kX86;
}
@@ -193,11 +201,13 @@
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value);
+ void LoadCurrentMethod(Register reg);
+
Label* GetLabelOf(HBasicBlock* block) const {
return block_labels_.GetRawStorage() + block->GetBlockId();
}
- virtual void Initialize() OVERRIDE {
+ void Initialize() OVERRIDE {
block_labels_.SetSize(GetGraph()->GetBlocks().Size());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5fa9305..97f5e5c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -90,6 +90,54 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
};
+class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
+};
+
+class DivRemMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit DivRemMinusOneSlowPathX86_64(Register reg, Primitive::Type type, bool is_div)
+ : cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ if (type_ == Primitive::kPrimInt) {
+ if (is_div_) {
+ __ negl(cpu_reg_);
+ } else {
+ __ movl(cpu_reg_, Immediate(0));
+ }
+
+ } else {
+ DCHECK_EQ(Primitive::kPrimLong, type_);
+ if (is_div_) {
+ __ negq(cpu_reg_);
+ } else {
+ __ movq(cpu_reg_, Immediate(0));
+ }
+ }
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ const CpuRegister cpu_reg_;
+ const Primitive::Type type_;
+ const bool is_div_;
+ DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86_64);
+};
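This slow path is taken when the divisor is -1: instead of executing idiv, which raises #DE when the quotient overflows (exactly the MIN_VALUE / -1 case), the result is computed by negation for a division and pinned to zero for a remainder, which also matches the results the Java language specification requires. A sketch of the values the generated code must produce:

#include <cstdint>
#include <limits>

int32_t JavaDivSketch(int32_t dividend, int32_t divisor) {
  // divisor == 0 is rejected earlier by the DivZeroCheck slow path.
  if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
    return dividend;  // JLS-defined result; a plain idiv would trap here
  }
  return dividend / divisor;
}

int32_t JavaRemSketch(int32_t dividend, int32_t divisor) {
  if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
    return 0;
  }
  return dividend % divisor;
}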
+
class StackOverflowCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
StackOverflowCheckSlowPathX86_64() {}
@@ -148,11 +196,15 @@
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x64_codegen->Move(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -166,6 +218,145 @@
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
};
+class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ LoadClassSlowPathX86_64(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
+ x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
+ __ gs()->call(Address::Absolute((do_clinit_
+ ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true));
+ codegen->RecordPcInfo(at_, dex_pc_);
+
+ Location out = locations->Out();
+ // Move the class to the desired location.
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ x64_codegen->Move(out, Location::RegisterLocation(RAX));
+ }
+
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
+};
+
+class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit LoadStringSlowPathX86_64(HLoadString* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(0)));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(1)),
+ Immediate(instruction_->GetStringIndex()));
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
+};
+
+class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ TypeCheckSlowPathX86_64(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
+ : instruction_(instruction),
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+ }
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+
+ if (instruction_->IsInstanceOf()) {
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
+
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HInstruction* const instruction_;
+ const Location class_to_check_;
+ const Location object_class_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
+};
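The EmitParallelMoves calls above exist because two arbitrary locations are being shuffled into the first two argument registers: if the destinations already hold the sources in swapped order, emitting the moves one after the other clobbers a value that is still needed, so the move resolver orders the moves and breaks cycles with a scratch location. A minimal illustration:

#include <utility>

void NaiveArgumentSetup(int& arg0, int& arg1) {
  // Intent: arg0 = old value of arg1, arg1 = old value of arg0.
  arg0 = arg1;  // old arg0 is overwritten here...
  arg1 = arg0;  // ...so arg1 ends up with the wrong value.
}

void ResolvedArgumentSetup(int& arg0, int& arg1) {
  std::swap(arg0, arg1);  // effectively what the resolver emits for a 2-cycle
}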
+
#undef __
#define __ reinterpret_cast<X86_64Assembler*>(GetAssembler())->
@@ -191,12 +382,24 @@
stream << X86_64ManagedRegister::FromXmmRegister(FloatRegister(reg));
}
-void CodeGeneratorX86_64::SaveCoreRegister(Location stack_location, uint32_t reg_id) {
- __ movq(Address(CpuRegister(RSP), stack_location.GetStackIndex()), CpuRegister(reg_id));
+size_t CodeGeneratorX86_64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ movq(Address(CpuRegister(RSP), stack_index), CpuRegister(reg_id));
+ return kX86_64WordSize;
}
-void CodeGeneratorX86_64::RestoreCoreRegister(Location stack_location, uint32_t reg_id) {
- __ movq(CpuRegister(reg_id), Address(CpuRegister(RSP), stack_location.GetStackIndex()));
+size_t CodeGeneratorX86_64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ movq(CpuRegister(reg_id), Address(CpuRegister(RSP), stack_index));
+ return kX86_64WordSize;
+}
+
+size_t CodeGeneratorX86_64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ movsd(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
+ return kX86_64WordSize;
+}
+
+size_t CodeGeneratorX86_64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ movsd(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
+ return kX86_64WordSize;
}
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph)
@@ -302,7 +505,7 @@
__ Bind(GetLabelOf(block));
}
-void InstructionCodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
+void CodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
__ movl(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
}
@@ -391,20 +594,34 @@
void CodeGeneratorX86_64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<CpuRegister>(), imm);
- } else {
- __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movq(location.As<CpuRegister>(), Immediate(value));
- } else {
- __ movq(CpuRegister(TMP), Immediate(value));
- __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<CpuRegister>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ movq(location.As<CpuRegister>(), Immediate(value));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movq(CpuRegister(TMP), Immediate(value));
+ __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
}
} else if (instruction->IsLoadLocal()) {
switch (instruction->GetType()) {
@@ -426,6 +643,9 @@
default:
LOG(FATAL) << "Unexpected local type " << instruction->GetType();
}
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ Move(location, temp_location);
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -438,7 +658,7 @@
case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- Move(location, instruction->GetLocations()->Out());
+ Move(location, locations->Out());
break;
default:
@@ -478,6 +698,7 @@
}
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -489,7 +710,7 @@
new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall);
HInstruction* cond = if_instr->InputAt(0);
if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
- locations->SetInAt(0, Location::Any(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::Any());
}
}
@@ -566,6 +787,7 @@
void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -593,13 +815,14 @@
}
void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(1, Location::Any(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
if (comp->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister());
}
@@ -676,9 +899,9 @@
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetInAt(1, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) {
@@ -715,6 +938,7 @@
void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -725,6 +949,29 @@
void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
+ // Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -732,6 +979,7 @@
}
void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -850,10 +1098,6 @@
void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
- invoke->GetIndexInDexCache() * heap_reference_size;
-
// TODO: Implement all kinds of calls:
// 1) boot -> boot
// 2) app -> boot
@@ -862,22 +1106,19 @@
// Currently we implement the app -> app logic, which looks up in the resolve cache.
// temp = method;
- LoadCurrentMethod(temp);
+ codegen_->LoadCurrentMethod(temp);
// temp = temp->dex_cache_resolved_methods_;
__ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
// temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
-void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- HandleInvoke(invoke);
-}
-
void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
@@ -910,6 +1151,10 @@
}
}
+void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
@@ -927,12 +1172,464 @@
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+ // Add the hidden argument.
+ invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX));
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
+ (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+
+ // Set the hidden argument.
+ __ movq(invoke->GetLocations()->GetTemp(1).As<CpuRegister>(),
+ Immediate(invoke->GetDexMethodIndex()));
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.As<CpuRegister>(), class_offset));
+ }
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
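Interface calls cannot use a fixed vtable slot, so the receiver's class embeds a small interface-method table indexed by the IMT index modulo the table size; several interface methods can collide on one slot, in which case the slot holds a resolver stub that uses the hidden argument (the dex method index placed in RAX above) to locate the real target, which is also what the IncompatibleClassChangeError TODO refers to. A sketch of the lookup, with an assumed table size and illustrative names:

#include <cstddef>
#include <cstdint>

constexpr size_t kImtSizeSketch = 64;  // assumed size of the embedded IMT

using EntryPointSketch = void (*)();

struct KlassSketch {
  EntryPointSketch imt[kImtSizeSketch];  // fixed-size, embedded in the class
};

EntryPointSketch LookupInterfaceTarget(const KlassSketch* klass, uint32_t imt_index) {
  // A colliding slot holds a stub that disambiguates via the hidden argument.
  return klass->imt[imt_index % kImtSizeSketch];
}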
+
+void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ // Output overlaps as we need a fresh (zero-initialized)
+ // register to perform subtraction from zero.
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitNeg(HNeg* neg) {
+ LocationSummary* locations = neg->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ DCHECK(in.IsRegister());
+ DCHECK(in.Equals(out));
+ __ negl(out.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK(in.IsRegister());
+ DCHECK(in.Equals(out));
+ __ negq(out.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimFloat:
+ DCHECK(in.IsFpuRegister());
+ DCHECK(out.IsFpuRegister());
+ DCHECK(!in.Equals(out));
+ // TODO: Instead of computing negation as a subtraction from
+ // zero, implement it with an exclusive or with value 0x80000000
+ // (mask for bit 31, representing the sign of a single-precision
+ // floating-point number), fetched from a constant pool:
+ //
+ // xorps out, [RIP:...] // value at RIP is 0x80 00 00 00
+
+ // out = 0
+ __ xorps(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subss(out.As<XmmRegister>(), in.As<XmmRegister>());
+ break;
+
+ case Primitive::kPrimDouble:
+ DCHECK(in.IsFpuRegister());
+ DCHECK(out.IsFpuRegister());
+ DCHECK(!in.Equals(out));
+ // TODO: Instead of computing negation as a subtraction from
+ // zero, implement it with an exclusive or with value
+ // 0x8000000000000000 (mask for bit 63, representing the sign of
+ // a double-precision floating-point number), fetched from a
+ // constant pool:
+ //
+ // xorpd out, [RIP:...] // value at RIP is 0x80 00 00 00 00 00 00 00
+
+ // out = 0
+ __ xorpd(out.As<XmmRegister>(), out.As<XmmRegister>());
+ // out = out - in
+ __ subsd(out.As<XmmRegister>(), in.As<XmmRegister>());
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
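The TODOs above describe the cheaper encoding: IEEE-754 negation only flips the sign bit, so an exclusive-or with the sign mask from a constant pool replaces the two-instruction subtraction and, unlike 0 - x, also maps +0.0 to -0.0. Written out in C++ for the single-precision case:

#include <cstdint>
#include <cstring>

float NegateViaSignBit(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits ^= 0x80000000u;  // bit 31 is the sign bit of a single-precision float
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}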
+
+void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ // TODO: We would benefit from a (to-be-implemented)
+ // Location::RegisterOrStackSlot requirement for this input.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+    }
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<int8_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ if (in.IsRegister()) {
+ __ movsxw(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxw(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<int16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ if (in.IsRegister()) {
+ __ movl(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+    case Primitive::kPrimLong:
+      DCHECK(out.IsRegister());
+      switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ DCHECK(in.IsRegister());
+ __ movsxd(out.As<CpuRegister>(), in.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type << " to "
+ << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ if (in.IsRegister()) {
+ __ movzxw(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movzxw(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<uint16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ // Processing a Dex `int-to-float' instruction.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ __ cvtsi2ss(out.As<XmmRegister>(), in.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+      }
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ // Processing a Dex `int-to-double' instruction.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ __ cvtsi2sd(out.As<XmmRegister>(), in.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimFloat:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+      }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
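The narrowing conversions above pair sign extension with Java's signed types and zero extension with char: int-to-byte uses movsxb (or an int8_t cast for constants), while int-to-char uses movzxw (or a uint16_t cast). A tiny host-side sketch of the same semantics, with invented helper names, assuming the usual two's-complement conversion behavior (guaranteed since C++20):

#include <cstdint>
#include <cstdio>

// int-to-byte keeps the low 8 bits and sign-extends them (movsxb / int8_t cast).
int32_t IntToByte(int32_t v) { return static_cast<int8_t>(v); }

// int-to-char keeps the low 16 bits and zero-extends them (movzxw / uint16_t cast).
int32_t IntToChar(int32_t v) { return static_cast<uint16_t>(v); }

int main() {
  std::printf("%d\n", IntToByte(0x1FF));  // -1: bit 7 of 0xFF is propagated
  std::printf("%d\n", IntToChar(-1));     // 65535: high bits are cleared
  return 0;
}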
void LocationsBuilderX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
@@ -954,7 +1651,7 @@
case Primitive::kPrimDouble:
case Primitive::kPrimFloat: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::Any());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -968,19 +1665,17 @@
LocationSummary* locations = add->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
-
DCHECK(first.Equals(locations->Out()));
+
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
__ addl(first.As<CpuRegister>(), second.As<CpuRegister>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
__ addl(first.As<CpuRegister>(), imm);
} else {
- __ addl(first.As<CpuRegister>(),
- Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ addl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
@@ -991,21 +1686,12 @@
}
case Primitive::kPrimFloat: {
- if (second.IsFpuRegister()) {
- __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
- } else {
- __ addss(first.As<XmmRegister>(),
- Address(CpuRegister(RSP), second.GetStackIndex()));
- }
+ __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
- if (second.IsFpuRegister()) {
- __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
- } else {
- __ addsd(first.As<XmmRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
- }
+ __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
@@ -1030,53 +1716,52 @@
locations->SetOut(Location::SameAsFirstInput());
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
-
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
- DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
- locations->Out().As<CpuRegister>().AsRegister());
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
- } else if (locations->InAt(1).IsConstant()) {
- HConstant* instruction = locations->InAt(1).GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(locations->InAt(0).As<CpuRegister>(), imm);
+ if (second.IsRegister()) {
+ __ subl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ __ subl(first.As<CpuRegister>(), imm);
} else {
- __ subl(locations->InAt(0).As<CpuRegister>(),
- Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
+ __ subl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ subq(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
+ __ subq(first.As<CpuRegister>(), second.As<CpuRegister>());
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1096,16 +1781,16 @@
locations->SetOut(Location::SameAsFirstInput());
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
+ }
default:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
}
}
@@ -1132,15 +1817,212 @@
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ mulss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ mulsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ Primitive::Type type = instruction->GetResultType();
+  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ bool is_div = instruction->IsDiv();
+ LocationSummary* locations = instruction->GetLocations();
+
+ CpuRegister out_reg = locations->Out().As<CpuRegister>();
+ CpuRegister second_reg = locations->InAt(1).As<CpuRegister>();
+
+ DCHECK_EQ(RAX, locations->InAt(0).As<CpuRegister>().AsRegister());
+ DCHECK_EQ(is_div ? RAX : RDX, out_reg.AsRegister());
+
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86_64(
+ out_reg.AsRegister(), type, is_div);
+ codegen_->AddSlowPath(slow_path);
+
+ // 0x80000000(00000000)/-1 triggers an arithmetic exception!
+  // Dividing by -1 is actually negation, and -0x80000000(00000000) = 0x80000000(00000000),
+  // so it's safe to just negate in the slow path instead of more complex comparisons.
+
+ __ cmpl(second_reg, Immediate(-1));
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ if (type == Primitive::kPrimInt) {
+ // edx:eax <- sign-extended of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+ } else {
+ // rdx:rax <- sign-extended of rax
+ __ cqo();
+ // rax = quotient, rdx = remainder
+ __ idivq(second_reg);
+ }
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
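The -1 comparison above exists because idiv of the most negative value by -1 overflows: the true quotient 2^31 (or 2^63) is unrepresentable, the instruction faults, and Java instead defines the result to wrap back to the dividend. A rough host-side illustration with invented names; it assumes the divisor has already passed the separate HDivZeroCheck.

#include <cstdint>
#include <cstdio>
#include <limits>

// Divisor == -1 is diverted before the division, mirroring the slow path:
// in two's complement, negating INT32_MIN wraps back to INT32_MIN, which is
// the result Java requires, and no faulting idiv is executed.
int32_t JavaStyleDiv(int32_t dividend, int32_t divisor) {
  if (divisor == -1) {
    return static_cast<int32_t>(0u - static_cast<uint32_t>(dividend));  // wrap-around negate
  }
  return dividend / divisor;  // ordinary idiv path (divisor != 0 checked elsewhere)
}

int main() {
  int32_t min = std::numeric_limits<int32_t>::min();
  std::printf("%d\n", JavaStyleDiv(min, -1));  // prints -2147483648, no trap
  return 0;
}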
+void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterLocation(RAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+      // Intel uses edx:eax (rdx:rax for long) as the dividend.
+ locations->AddTemp(Location::RegisterLocation(RDX));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) {
+ LocationSummary* locations = div->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ Primitive::Type type = div->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GenerateDivRemIntegral(div);
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ divsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void LocationsBuilderX86_64::VisitRem(HRem* rem) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall);
+ switch (rem->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterLocation(RAX));
+ locations->SetInAt(1, Location::RequiresRegister());
+      // Intel uses edx:eax (rdx:rax for long) as the dividend and puts the remainder in edx (rdx).
+ locations->SetOut(Location::RegisterLocation(RDX));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ LOG(FATAL) << "Unimplemented rem type " << rem->GetResultType();
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << rem->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitRem(HRem* rem) {
+ Primitive::Type type = rem->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GenerateDivRemIntegral(rem);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ LOG(FATAL) << "Unimplemented rem type " << rem->GetResultType();
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << rem->GetResultType();
+ }
+}
+
+void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::Any());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86_64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegister()) {
+ __ testq(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsDoubleStackSlot()) {
+ __ cmpq(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
}
@@ -1155,7 +2037,7 @@
void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) {
InvokeRuntimeCallingConvention calling_convention;
- LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
+ codegen_->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
__ movq(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(instruction->GetTypeIndex()));
__ gs()->call(Address::Absolute(
@@ -1165,6 +2047,28 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
+void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen_->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
+ __ movq(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(instruction->GetTypeIndex()));
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocArrayWithAccessCheck), true));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -1179,20 +2083,37 @@
void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
-void LocationsBuilderX86_64::VisitNot(HNot* instruction) {
+void LocationsBuilderX86_64::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-void InstructionCodeGeneratorX86_64::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
+void InstructionCodeGeneratorX86_64::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
locations->Out().As<CpuRegister>().AsRegister());
- __ xorq(locations->Out().As<CpuRegister>(), Immediate(1));
+ Location out = locations->Out();
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ xorq(out.As<CpuRegister>(), Immediate(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ notl(out.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimLong:
+ __ notq(out.As<CpuRegister>());
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
@@ -1205,6 +2126,7 @@
}
void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -1212,11 +2134,11 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
Primitive::Type field_type = instruction->GetFieldType();
- bool is_object_type = field_type == Primitive::kPrimNot;
- bool dies_at_entry = !is_object_type;
- locations->SetInAt(0, Location::RequiresRegister(), dies_at_entry);
- locations->SetInAt(1, Location::RequiresRegister(), dies_at_entry);
- if (is_object_type) {
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (needs_write_barrier) {
// Temporary registers for the write barrier.
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -1226,27 +2148,29 @@
void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movb(Address(obj, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movw(Address(obj, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movl(Address(obj, offset), value);
- if (field_type == Primitive::kPrimNot) {
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
codegen_->MarkGCCard(temp, card, obj, value);
@@ -1255,14 +2179,23 @@
}
case Primitive::kPrimLong: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movq(Address(obj, offset), value);
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(obj, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(obj, offset), value);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -1272,52 +2205,65 @@
void LocationsBuilderX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
- CpuRegister out = locations->Out().As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movzxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimByte: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movsxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimShort: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movsxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimChar: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movzxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movl(out, Address(obj, offset));
break;
}
case Primitive::kPrimLong: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movq(out, Address(obj, offset));
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(obj, offset));
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -1356,10 +2302,10 @@
void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ 1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
@@ -1442,10 +2388,30 @@
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ if (index.IsConstant()) {
+ __ movss(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
+ } else {
+ __ movss(out, Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ if (index.IsConstant()) {
+ __ movsd(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
+ } else {
+ __ movsd(out, Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset));
+ }
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -1454,23 +2420,35 @@
void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
Primitive::Type value_type = instruction->GetComponentType();
- bool is_object = value_type == Primitive::kPrimNot;
+
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+ bool needs_runtime_call = instruction->NeedsTypeCheck();
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
- instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
- if (is_object) {
+ instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (needs_runtime_call) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)), Location::kDiesAtEntry);
- locations->SetInAt(2, Location::RequiresRegister(), Location::kDiesAtEntry);
+ 1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
if (value_type == Primitive::kPrimLong) {
- locations->SetInAt(2, Location::RequiresRegister(), Location::kDiesAtEntry);
+ locations->SetInAt(2, Location::RequiresRegister());
+ } else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
} else {
- locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)), Location::kDiesAtEntry);
+ locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
+ }
+
+ if (needs_write_barrier) {
+ // Temporary registers for the write barrier.
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
}
}
@@ -1481,6 +2459,9 @@
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_runtime_call = locations->WillCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
switch (value_type) {
case Primitive::kPrimBoolean:
@@ -1513,13 +2494,16 @@
if (value.IsRegister()) {
__ movw(Address(obj, offset), value.As<CpuRegister>());
} else {
+ DCHECK(value.IsConstant()) << value;
__ movw(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
+ DCHECK(index.IsRegister()) << index;
if (value.IsRegister()) {
__ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
value.As<CpuRegister>());
} else {
+ DCHECK(value.IsConstant()) << value;
__ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
@@ -1527,34 +2511,47 @@
break;
}
- case Primitive::kPrimInt: {
- uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.As<CpuRegister>());
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ if (!needs_runtime_call) {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ if (value.IsRegister()) {
+ __ movl(Address(obj, offset), value.As<CpuRegister>());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ __ movl(Address(obj, offset),
+ Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ }
} else {
- __ movl(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ DCHECK(index.IsRegister()) << index;
+ if (value.IsRegister()) {
+ __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
+ value.As<CpuRegister>());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
+ Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ }
+ }
+
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
+ codegen_->MarkGCCard(temp, card, obj, value.As<CpuRegister>());
}
} else {
- if (value.IsRegister()) {
- __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
- value.As<CpuRegister>());
- } else {
- __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
- }
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject), true));
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
break;
}
- case Primitive::kPrimNot: {
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject), true));
- DCHECK(!codegen_->IsLeafMethod());
- codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
- break;
- }
-
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
@@ -1569,10 +2566,34 @@
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ DCHECK(value.IsFpuRegister());
+ __ movss(Address(obj, offset), value.As<XmmRegister>());
+ } else {
+ DCHECK(value.IsFpuRegister());
+ __ movss(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
+ value.As<XmmRegister>());
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ DCHECK(value.IsFpuRegister());
+ __ movsd(Address(obj, offset), value.As<XmmRegister>());
+ } else {
+ DCHECK(value.IsFpuRegister());
+ __ movsd(Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset),
+ value.As<XmmRegister>());
+ }
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -1582,8 +2603,8 @@
void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) {
@@ -1638,9 +2659,11 @@
void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -1706,6 +2729,9 @@
if (destination.IsRegister()) {
__ movl(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
+ } else if (destination.IsFpuRegister()) {
+ __ movss(destination.As<XmmRegister>(),
+ Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(destination.IsStackSlot());
__ movl(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -1715,8 +2741,10 @@
if (destination.IsRegister()) {
__ movq(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
+ } else if (destination.IsFpuRegister()) {
+ __ movsd(destination.As<XmmRegister>(), Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
- DCHECK(destination.IsDoubleStackSlot());
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
@@ -1727,6 +2755,7 @@
if (destination.IsRegister()) {
__ movl(destination.As<CpuRegister>(), imm);
} else {
+ DCHECK(destination.IsStackSlot()) << destination;
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
}
} else if (constant->IsLongConstant()) {
@@ -1734,14 +2763,42 @@
if (destination.IsRegister()) {
__ movq(destination.As<CpuRegister>(), Immediate(value));
} else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
__ movq(CpuRegister(TMP), Immediate(value));
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
+ } else if (constant->IsFloatConstant()) {
+ Immediate imm(bit_cast<float, int32_t>(constant->AsFloatConstant()->GetValue()));
+ if (destination.IsFpuRegister()) {
+ __ movl(CpuRegister(TMP), imm);
+ __ movd(destination.As<XmmRegister>(), CpuRegister(TMP));
+ } else {
+ DCHECK(destination.IsStackSlot()) << destination;
+ __ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
+ }
} else {
- LOG(FATAL) << "Unimplemented constant type";
+ DCHECK(constant->IsDoubleConstant()) << constant->DebugName();
+ Immediate imm(bit_cast<double, int64_t>(constant->AsDoubleConstant()->GetValue()));
+ if (destination.IsFpuRegister()) {
+ __ movq(CpuRegister(TMP), imm);
+ __ movd(destination.As<XmmRegister>(), CpuRegister(TMP));
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ __ movq(CpuRegister(TMP), imm);
+ __ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
+ }
}
- } else {
- LOG(FATAL) << "Unimplemented";
+ } else if (source.IsFpuRegister()) {
+ if (destination.IsFpuRegister()) {
+ __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
+ } else if (destination.IsStackSlot()) {
+ __ movss(Address(CpuRegister(RSP), destination.GetStackIndex()),
+ source.As<XmmRegister>());
+ } else {
+ DCHECK(destination.IsDoubleStackSlot());
+ __ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()),
+ source.As<XmmRegister>());
+ }
}
}
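In the constant branches above, a float or double immediate cannot be loaded directly into an XMM register, so its raw IEEE-754 bits are moved as an ordinary integer (movl/movq into TMP, then movd). The bit_cast<float, int32_t> step amounts to the following, shown here as a stand-alone sketch using memcpy:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret a float's storage as a 32-bit integer immediate, as the
// bit_cast call above does for FloatConstant moves.
int32_t FloatBitsAsImmediate(float constant) {
  int32_t bits;
  std::memcpy(&bits, &constant, sizeof(bits));
  return bits;
}

int main() {
  // 1.5f encodes as 0x3FC00000 in IEEE-754 single precision.
  std::printf("0x%08X\n", static_cast<uint32_t>(FloatBitsAsImmediate(1.5f)));
  return 0;
}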
@@ -1783,6 +2840,18 @@
CpuRegister(ensure_scratch.GetRegister()));
}
+void ParallelMoveResolverX86_64::Exchange32(XmmRegister reg, int mem) {
+ __ movl(CpuRegister(TMP), Address(CpuRegister(RSP), mem));
+ __ movss(Address(CpuRegister(RSP), mem), reg);
+ __ movd(reg, CpuRegister(TMP));
+}
+
+void ParallelMoveResolverX86_64::Exchange64(XmmRegister reg, int mem) {
+ __ movq(CpuRegister(TMP), Address(CpuRegister(RSP), mem));
+ __ movsd(Address(CpuRegister(RSP), mem), reg);
+ __ movd(reg, CpuRegister(TMP));
+}
+
void ParallelMoveResolverX86_64::EmitSwap(size_t index) {
MoveOperands* move = moves_.Get(index);
Location source = move->GetSource();
@@ -1802,8 +2871,20 @@
Exchange64(destination.As<CpuRegister>(), source.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
Exchange64(destination.GetStackIndex(), source.GetStackIndex());
+ } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+ __ movd(CpuRegister(TMP), source.As<XmmRegister>());
+ __ movaps(source.As<XmmRegister>(), destination.As<XmmRegister>());
+ __ movd(destination.As<XmmRegister>(), CpuRegister(TMP));
+ } else if (source.IsFpuRegister() && destination.IsStackSlot()) {
+ Exchange32(source.As<XmmRegister>(), destination.GetStackIndex());
+ } else if (source.IsStackSlot() && destination.IsFpuRegister()) {
+ Exchange32(destination.As<XmmRegister>(), source.GetStackIndex());
+ } else if (source.IsFpuRegister() && destination.IsDoubleStackSlot()) {
+ Exchange64(source.As<XmmRegister>(), destination.GetStackIndex());
+ } else if (source.IsDoubleStackSlot() && destination.IsFpuRegister()) {
+ Exchange64(destination.As<XmmRegister>(), source.GetStackIndex());
} else {
- LOG(FATAL) << "Unimplemented";
+ LOG(FATAL) << "Unimplemented swap between " << source << " and " << destination;
}
}
@@ -1817,5 +2898,434 @@
__ popq(CpuRegister(reg));
}
+void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
+ SlowPathCodeX86_64* slow_path, CpuRegister class_reg) {
+ __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
+ Immediate(mirror::Class::kStatusInitialized));
+ __ j(kLess, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+  // No need for a memory fence, thanks to the X86_64 memory model.
+}
+
+void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
+ CpuRegister out = cls->GetLocations()->Out().As<CpuRegister>();
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ codegen_->LoadCurrentMethod(out);
+ __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ codegen_->LoadCurrentMethod(out);
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
+
+void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class to not be null.
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<CpuRegister>());
+}
+
+void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister cls = locations->InAt(0).As<CpuRegister>();
+ size_t offset = instruction->GetFieldOffset().SizeValue();
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ __ movzxb(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ __ movsxb(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ __ movsxw(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ __ movzxw(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ __ movl(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ __ movq(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ Primitive::Type field_type = instruction->GetFieldType();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue());
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (needs_write_barrier) {
+ // Temporary registers for the write barrier.
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister cls = locations->InAt(0).As<CpuRegister>();
+ size_t offset = instruction->GetFieldOffset().SizeValue();
+ Primitive::Type field_type = instruction->GetFieldType();
+
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ __ movb(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ __ movw(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ __ movl(Address(cls, offset), value);
+ if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->GetValue())) {
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
+ codegen_->MarkGCCard(temp, card, cls, value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
+ __ movq(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
+ }
+}
+
+void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ codegen_->AddSlowPath(slow_path);
+
+ CpuRegister out = load->GetLocations()->Out().As<CpuRegister>();
+ codegen_->LoadCurrentMethod(CpuRegister(out));
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) {
+ Address address = Address::Absolute(
+ Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
+ __ gs()->movl(load->GetLocations()->Out().As<CpuRegister>(), address);
+ __ gs()->movl(address, Immediate(0));
+}
+
+void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeliverException), true));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeX86_64* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ // Compare the class of `obj` with `cls`.
+ __ movl(out, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ }
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
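The instanceof fast path above is just a null check plus one class-pointer comparison when the target class is final; non-final targets branch to a slow path that checks the full hierarchy at runtime. A toy pseudo-C++ rendering with stand-in types (not the runtime's real Object/Class definitions):

#include <cstdio>

// Toy stand-ins for the runtime's object header; illustration only.
struct Class {};
struct Object { const Class* klass; };

// Equivalent of the emitted testl / j(kEqual) / cmpl sequence for a final
// class: null yields false, otherwise an exact class-pointer match decides.
bool InstanceOfFinalClass(const Object* obj, const Class* cls) {
  if (obj == nullptr) return false;
  return obj->klass == cls;
}

int main() {
  Class string_class;
  Object s{&string_class};
  std::printf("%d %d\n", InstanceOfFinalClass(&s, &string_class),
              InstanceOfFinalClass(nullptr, &string_class));  // 1 0
  return 0;
}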
+void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ movl(temp, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ // Classes must be equal for the checkcast to succeed.
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ gs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pUnlockObject),
+ true));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->GetType() == Primitive::kPrimInt) {
+ locations->SetInAt(1, Location::Any());
+ } else {
+    // Request a register to avoid loading a 64-bit constant.
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86_64::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), imm);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), imm);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), imm);
+ }
+ } else {
+ Address address(CpuRegister(RSP), second.GetStackIndex());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), address);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), address);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), address);
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (instruction->IsAnd()) {
+ __ andq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ }
+}
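A side note on the asymmetry in HandleBitwiseOperation's locations (my reading, not stated in the change): x86-64 ALU instructions only encode a sign-extended 32-bit immediate, so a long second operand has to sit in a register, while an int constant can be folded straight into the instruction. A hypothetical fragment in the style of the code above:

  // Illustrative only: an int immediate is accepted directly by andl/orl/xorl,
  // whereas a full 64-bit constant would first have to be materialized in a register.
  __ andl(first.As<CpuRegister>(), Immediate(0xFF));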
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 5ac0189..29c679d 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -25,7 +25,8 @@
namespace art {
namespace x86_64 {
-static constexpr size_t kX86_64WordSize = 8;
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
@@ -65,23 +66,26 @@
};
class CodeGeneratorX86_64;
+class SlowPathCodeX86_64;
class ParallelMoveResolverX86_64 : public ParallelMoveResolver {
public:
ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
: ParallelMoveResolver(allocator), codegen_(codegen) {}
- virtual void EmitMove(size_t index) OVERRIDE;
- virtual void EmitSwap(size_t index) OVERRIDE;
- virtual void SpillScratch(int reg) OVERRIDE;
- virtual void RestoreScratch(int reg) OVERRIDE;
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
X86_64Assembler* GetAssembler() const;
private:
void Exchange32(CpuRegister reg, int mem);
+ void Exchange32(XmmRegister reg, int mem);
void Exchange32(int mem1, int mem2);
void Exchange64(CpuRegister reg, int mem);
+ void Exchange64(XmmRegister reg, int mem);
void Exchange64(int mem1, int mem2);
CodeGeneratorX86_64* const codegen_;
@@ -95,15 +99,16 @@
: HGraphVisitor(graph), codegen_(codegen) {}
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void HandleInvoke(HInvoke* invoke);
-
private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
+
CodeGeneratorX86_64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -115,14 +120,12 @@
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
#define DECLARE_VISIT_INSTRUCTION(name, super) \
- virtual void Visit##name(H##name* instr);
+ void Visit##name(H##name* instr) OVERRIDE;
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
- void LoadCurrentMethod(CpuRegister reg);
-
X86_64Assembler* GetAssembler() const { return assembler_; }
private:
@@ -130,6 +133,9 @@
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
+ void GenerateDivRemIntegral(HBinaryOperation* instruction);
X86_64Assembler* const assembler_;
CodeGeneratorX86_64* const codegen_;
@@ -142,43 +148,49 @@
explicit CodeGeneratorX86_64(HGraph* graph);
virtual ~CodeGeneratorX86_64() {}
- virtual void GenerateFrameEntry() OVERRIDE;
- virtual void GenerateFrameExit() OVERRIDE;
- virtual void Bind(HBasicBlock* block) OVERRIDE;
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
- virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
- virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
- virtual size_t GetWordSize() const OVERRIDE {
+ size_t GetWordSize() const OVERRIDE {
return kX86_64WordSize;
}
- virtual size_t FrameEntrySpillSize() const OVERRIDE;
+ size_t FrameEntrySpillSize() const OVERRIDE;
- virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
- virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE {
return &instruction_visitor_;
}
- virtual X86_64Assembler* GetAssembler() OVERRIDE {
+ X86_64Assembler* GetAssembler() OVERRIDE {
return &assembler_;
}
- ParallelMoveResolverX86_64* GetMoveResolver() {
+ ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
- virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return GetLabelOf(block)->Position();
+ }
- virtual void SetupBlockedRegisters() const OVERRIDE;
- virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
- virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
- virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ void SetupBlockedRegisters() const OVERRIDE;
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
return InstructionSet::kX86_64;
}
@@ -188,11 +200,13 @@
// Helper method to move a value between two locations.
void Move(Location destination, Location source);
+ void LoadCurrentMethod(CpuRegister reg);
+
Label* GetLabelOf(HBasicBlock* block) const {
return block_labels_.GetRawStorage() + block->GetBlockId();
}
- virtual void Initialize() OVERRIDE {
+ void Initialize() OVERRIDE {
block_labels_.SetSize(GetGraph()->GetBlocks().Size());
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 8bb12de..fee3ea6 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -16,19 +16,22 @@
#include <functional>
+#include "arch/instruction_set.h"
+#include "base/macros.h"
#include "builder.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "common_compiler_test.h"
#include "dex_file.h"
#include "dex_instruction.h"
-#include "instruction_set.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "prepare_for_register_allocation.h"
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
+#include "utils.h"
#include "gtest/gtest.h"
@@ -36,7 +39,7 @@
class InternalCodeAllocator : public CodeAllocator {
public:
- InternalCodeAllocator() { }
+ InternalCodeAllocator() : size_(0) { }
virtual uint8_t* Allocate(size_t size) {
size_ = size;
@@ -54,24 +57,26 @@
DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
};
+template <typename Expected>
static void Run(const InternalCodeAllocator& allocator,
const CodeGenerator& codegen,
bool has_result,
- int32_t expected) {
- typedef int32_t (*fptr)();
+ Expected expected) {
+ typedef Expected (*fptr)();
CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
if (codegen.GetInstructionSet() == kThumb2) {
// For thumb we need the bottom bit set.
f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
}
- int32_t result = f();
+ Expected result = f();
if (has_result) {
ASSERT_EQ(result, expected);
}
}
-static void RunCodeBaseline(HGraph* graph, bool has_result, int32_t expected) {
+template <typename Expected>
+static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
InternalCodeAllocator allocator;
x86::CodeGeneratorX86 codegenX86(graph);
@@ -93,13 +98,20 @@
if (kRuntimeISA == kX86_64) {
Run(allocator, codegenX86_64, has_result, expected);
}
+
+ arm64::CodeGeneratorARM64 codegenARM64(graph);
+ codegenARM64.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kArm64) {
+ Run(allocator, codegenARM64, has_result, expected);
+ }
}
+template <typename Expected>
static void RunCodeOptimized(CodeGenerator* codegen,
HGraph* graph,
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
- int32_t expected) {
+ Expected expected) {
SsaLivenessAnalysis liveness(*graph, codegen);
liveness.Analyze();
@@ -112,10 +124,11 @@
Run(allocator, *codegen, has_result, expected);
}
+template <typename Expected>
static void RunCodeOptimized(HGraph* graph,
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
- int32_t expected) {
+ Expected expected) {
if (kRuntimeISA == kX86) {
x86::CodeGeneratorX86 codegenX86(graph);
RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
@@ -134,9 +147,21 @@
HGraphBuilder builder(&arena);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
+ ASSERT_NE(graph, nullptr);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
+ RunCodeBaseline(graph, has_result, expected);
+}
+
+static void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraphBuilder builder(&arena, Primitive::kPrimLong);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = builder.BuildGraph(*item);
ASSERT_NE(graph, nullptr);
+ // Remove suspend checks, they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
RunCodeBaseline(graph, has_result, expected);
}
@@ -260,6 +285,104 @@
TestCode(data, true, 0);
}
+// Exercise bit-wise (one's complement) not-int instruction.
+#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
+TEST(CodegenTest, TEST_NAME) { \
+ const int32_t input = INPUT; \
+ const uint16_t input_lo = Low16Bits(input); \
+ const uint16_t input_hi = High16Bits(input); \
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
+ Instruction::CONST | 0 << 8, input_lo, input_hi, \
+ Instruction::NOT_INT | 1 << 8 | 0 << 12, \
+ Instruction::RETURN | 1 << 8); \
+ \
+ TestCode(data, true, EXPECTED_OUTPUT); \
+}
+
+NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
+NOT_INT_TEST(ReturnNotIntMinus1, -1, 0)
+NOT_INT_TEST(ReturnNotInt0, 0, -1)
+NOT_INT_TEST(ReturnNotInt1, 1, -2)
+NOT_INT_TEST(ReturnNotIntINT32_MIN, -2147483648, 2147483647) // (2^31) - 1
+NOT_INT_TEST(ReturnNotIntINT32_MINPlus1, -2147483647, 2147483646) // (2^31) - 2
+NOT_INT_TEST(ReturnNotIntINT32_MAXMinus1, 2147483646, -2147483647) // -(2^31) + 1
+NOT_INT_TEST(ReturnNotIntINT32_MAX, 2147483647, -2147483648) // -(2^31)
+
+#undef NOT_INT_TEST
+
+// Exercise bit-wise (one's complement) not-long instruction.
+#define NOT_LONG_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
+TEST(CodegenTest, TEST_NAME) { \
+ const int64_t input = INPUT; \
+ const uint16_t word0 = Low16Bits(Low32Bits(input)); /* LSW. */ \
+ const uint16_t word1 = High16Bits(Low32Bits(input)); \
+ const uint16_t word2 = Low16Bits(High32Bits(input)); \
+ const uint16_t word3 = High16Bits(High32Bits(input)); /* MSW. */ \
+ const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM( \
+ Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3, \
+ Instruction::NOT_LONG | 2 << 8 | 0 << 12, \
+ Instruction::RETURN_WIDE | 2 << 8); \
+ \
+ TestCodeLong(data, true, EXPECTED_OUTPUT); \
+}
+
+NOT_LONG_TEST(ReturnNotLongMinus2, INT64_C(-2), INT64_C(1))
+NOT_LONG_TEST(ReturnNotLongMinus1, INT64_C(-1), INT64_C(0))
+NOT_LONG_TEST(ReturnNotLong0, INT64_C(0), INT64_C(-1))
+NOT_LONG_TEST(ReturnNotLong1, INT64_C(1), INT64_C(-2))
+
+NOT_LONG_TEST(ReturnNotLongINT32_MIN,
+ INT64_C(-2147483648),
+ INT64_C(2147483647)) // (2^31) - 1
+NOT_LONG_TEST(ReturnNotLongINT32_MINPlus1,
+ INT64_C(-2147483647),
+ INT64_C(2147483646)) // (2^31) - 2
+NOT_LONG_TEST(ReturnNotLongINT32_MAXMinus1,
+ INT64_C(2147483646),
+ INT64_C(-2147483647)) // -(2^31) + 1
+NOT_LONG_TEST(ReturnNotLongINT32_MAX,
+ INT64_C(2147483647),
+ INT64_C(-2147483648)) // -(2^31)
+
+// Note that the C++ compiler won't accept
+// INT64_C(-9223372036854775808) (that is, INT64_MIN) as a valid
+// int64_t literal, so we use INT64_C(-9223372036854775807)-1 instead.
+NOT_LONG_TEST(ReturnNotINT64_MIN,
+ INT64_C(-9223372036854775807)-1,
+ INT64_C(9223372036854775807)); // (2^63) - 1
+NOT_LONG_TEST(ReturnNotINT64_MINPlus1,
+ INT64_C(-9223372036854775807),
+ INT64_C(9223372036854775806)); // (2^63) - 2
+NOT_LONG_TEST(ReturnNotLongINT64_MAXMinus1,
+ INT64_C(9223372036854775806),
+ INT64_C(-9223372036854775807)); // -(2^63) + 1
+NOT_LONG_TEST(ReturnNotLongINT64_MAX,
+ INT64_C(9223372036854775807),
+ INT64_C(-9223372036854775807)-1); // -(2^63)
+
+#undef NOT_LONG_TEST
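As a minimal aside (not part of the change), the INT64_MIN workaround used in the two tests above can be checked with a standalone assertion; only <cstdint> is assumed:

  #include <cstdint>
  // A negative decimal literal is unary minus applied to a positive literal,
  // and 9223372036854775808 does not fit in int64_t, hence the "-max - 1" form.
  static_assert(INT64_MIN == -INT64_C(9223372036854775807) - 1,
                "INT64_MIN spelled as -(2^63 - 1) - 1");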
+
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_IntToLongOfLongToInt) {
+#else
+TEST(CodegenTest, IntToLongOfLongToInt) {
+#endif
+ const int64_t input = INT64_C(4294967296); // 2^32
+ const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
+ const uint16_t word1 = High16Bits(Low32Bits(input));
+ const uint16_t word2 = Low16Bits(High32Bits(input));
+ const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
+ const uint16_t data[] = FIVE_REGISTERS_CODE_ITEM(
+ Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
+ Instruction::CONST_WIDE | 2 << 8, 1, 0, 0, 0,
+ Instruction::ADD_LONG | 0, 0 << 8 | 2, // v0 <- 2^32 + 1
+ Instruction::LONG_TO_INT | 4 << 8 | 0 << 12,
+ Instruction::INT_TO_LONG | 2 << 8 | 4 << 12,
+ Instruction::RETURN_WIDE | 2 << 8);
+
+ TestCodeLong(data, true, 1);
+}
+
TEST(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
@@ -340,9 +463,9 @@
PrepareForRegisterAllocation(graph).Run();
ASSERT_FALSE(equal->NeedsMaterialization());
- auto hook_before_codegen = [](HGraph* graph) {
- HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -370,10 +493,10 @@
TestCode(data, true, 12); \
}
+#if !defined(__aarch64__)
MUL_TEST(INT, MulInt);
MUL_TEST(LONG, MulLong);
-// MUL_TEST(FLOAT, Float);
-// MUL_TEST(DOUBLE, Double);
+#endif
TEST(CodegenTest, ReturnMulIntLit8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
@@ -393,5 +516,148 @@
TestCode(data, true, 12);
}
+TEST(CodegenTest, MaterializedCondition1) {
+ // Check that conditions are materialized correctly. A materialized condition
+ // should yield `1` if it evaluates to true, and `0` otherwise.
+ // We force the materialization of comparisons for different combinations of
+ // inputs and check the results.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = new (&allocator) HGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+ HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(code_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(code_block);
+ code_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant cst_lhs(lhs[i]);
+ code_block->AddInstruction(&cst_lhs);
+ HIntConstant cst_rhs(rhs[i]);
+ code_block->AddInstruction(&cst_rhs);
+ HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ code_block->AddInstruction(&cmp_lt);
+ HReturn ret(&cmp_lt);
+ code_block->AddInstruction(&ret);
+
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+
+ RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+}
+
+TEST(CodegenTest, MaterializedCondition2) {
+ // Check that HIf correctly interprets a materialized condition.
+ // We force the materialization of comparisons for different combinations of
+ // inputs. An HIf takes the materialized condition as input and returns a
+ // value that we verify.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = new (&allocator) HGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_block);
+ HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_true_block);
+ HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_false_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddSuccessor(if_block);
+ if_block->AddSuccessor(if_true_block);
+ if_block->AddSuccessor(if_false_block);
+ if_true_block->AddSuccessor(exit_block);
+ if_false_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant cst_lhs(lhs[i]);
+ if_block->AddInstruction(&cst_lhs);
+ HIntConstant cst_rhs(rhs[i]);
+ if_block->AddInstruction(&cst_rhs);
+ HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ if_block->AddInstruction(&cmp_lt);
+ // We insert a temporary to separate the HIf from the HLessThan and force
+ // the materialization of the condition.
+ HTemporary force_materialization(0);
+ if_block->AddInstruction(&force_materialization);
+ HIf if_lt(&cmp_lt);
+ if_block->AddInstruction(&if_lt);
+
+ HIntConstant cst_lt(1);
+ if_true_block->AddInstruction(&cst_lt);
+ HReturn ret_lt(&cst_lt);
+ if_true_block->AddInstruction(&ret_lt);
+ HIntConstant cst_ge(0);
+ if_false_block->AddInstruction(&cst_ge);
+ HReturn ret_ge(&cst_ge);
+ if_false_block->AddInstruction(&ret_ge);
+
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+
+ RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+}
+
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnDivIntLit8) {
+#else
+TEST(CodegenTest, ReturnDivIntLit8) {
+#endif
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 4 << 12 | 0 << 8,
+ Instruction::DIV_INT_LIT8, 3 << 8 | 0,
+ Instruction::RETURN);
+
+ TestCode(data, true, 1);
+}
+
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnDivInt2Addr) {
+#else
+TEST(CodegenTest, ReturnDivInt2Addr) {
+#endif
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::CONST_4 | 2 << 12 | 1 << 8,
+ Instruction::DIV_INT_2ADDR | 1 << 12,
+ Instruction::RETURN);
+
+ TestCode(data, true, 2);
+}
} // namespace art
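The division tests above suggest an obvious follow-up for the new bitwise code paths. A hypothetical sketch in the same style (not part of this change; the opcode choice and expected value are assumptions, and it presumes the graph builder already understands and-int/2addr):

  TEST(CodegenTest, ReturnAndInt2Addr) {
    const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
        Instruction::CONST_4 | 6 << 12 | 0,       // v0 <- 6
        Instruction::CONST_4 | 3 << 12 | 1 << 8,  // v1 <- 3
        Instruction::AND_INT_2ADDR | 1 << 12,     // v0 <- v0 & v1
        Instruction::RETURN);                     // return v0

    TestCode(data, true, 2);  // 6 & 3 == 2
  }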
diff --git a/compiler/optimizing/constant_propagation.cc b/compiler/optimizing/constant_folding.cc
similarity index 64%
rename from compiler/optimizing/constant_propagation.cc
rename to compiler/optimizing/constant_folding.cc
index d675164..fca9933 100644
--- a/compiler/optimizing/constant_propagation.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -14,11 +14,11 @@
* limitations under the License.
*/
-#include "constant_propagation.h"
+#include "constant_folding.h"
namespace art {
-void ConstantPropagation::Run() {
+void HConstantFolding::Run() {
// Process basic blocks in reverse post-order in the dominator tree,
// so that an instruction turned into a constant, used as input of
// another instruction, may possibly be used to turn that second
@@ -28,14 +28,22 @@
// Traverse this block's instructions in (forward) order and
// replace the ones that can be statically evaluated by a
// compile-time counterpart.
- for (HInstructionIterator it(block->GetInstructions());
- !it.Done(); it.Advance()) {
- HInstruction* inst = it.Current();
- // Constant folding: replace `c <- a op b' with a compile-time
- // evaluation of `a op b' if `a' and `b' are constant.
+ for (HInstructionIterator inst_it(block->GetInstructions());
+ !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* inst = inst_it.Current();
if (inst->IsBinaryOperation()) {
+ // Constant folding: replace `op(a, b)' with a constant at
+ // compile time if `a' and `b' are both constants.
HConstant* constant =
- inst->AsBinaryOperation()->TryStaticEvaluation(graph_->GetArena());
+ inst->AsBinaryOperation()->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->GetBlock()->ReplaceAndRemoveInstructionWith(inst, constant);
+ }
+ } else if (inst->IsUnaryOperation()) {
+ // Constant folding: replace `op(a)' with a constant at compile
+ // time if `a' is a constant.
+ HConstant* constant =
+ inst->AsUnaryOperation()->TryStaticEvaluation();
if (constant != nullptr) {
inst->GetBlock()->ReplaceAndRemoveInstructionWith(inst, constant);
}
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
new file mode 100644
index 0000000..ac00824
--- /dev/null
+++ b/compiler/optimizing/constant_folding.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CONSTANT_FOLDING_H_
+#define ART_COMPILER_OPTIMIZING_CONSTANT_FOLDING_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+/**
+ * Optimization pass performing a simple constant-expression
+ * evaluation on the SSA form.
+ *
+ * This class is named art::HConstantFolding to avoid name
+ * clashes with the art::ConstantPropagation class defined in
+ * compiler/dex/post_opt_passes.h.
+ */
+class HConstantFolding : public HOptimization {
+ public:
+ explicit HConstantFolding(HGraph* graph)
+ : HOptimization(graph, true, kConstantFoldingPassName) {}
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kConstantFoldingPassName = "constant_folding";
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HConstantFolding);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CONSTANT_FOLDING_H_
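Both renamed passes now share the HOptimization interface, so driving them looks like the test code further down; a minimal usage sketch, assuming `graph` is an already-built HGraph*:

  HConstantFolding(graph).Run();      // fold compile-time constant expressions
  HDeadCodeElimination(graph).Run();  // then remove the now-unused instructions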
diff --git a/compiler/optimizing/constant_propagation_test.cc b/compiler/optimizing/constant_folding_test.cc
similarity index 78%
rename from compiler/optimizing/constant_propagation_test.cc
rename to compiler/optimizing/constant_folding_test.cc
index ff44805..a56b9d9 100644
--- a/compiler/optimizing/constant_propagation_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -16,11 +16,12 @@
#include <functional>
-#include "constant_propagation.h"
+#include "code_generator_x86.h"
+#include "constant_folding.h"
#include "dead_code_elimination.h"
-#include "pretty_printer.h"
#include "graph_checker.h"
#include "optimizing_unit_test.h"
+#include "pretty_printer.h"
#include "gtest/gtest.h"
@@ -28,9 +29,9 @@
static void TestCode(const uint16_t* data,
const std::string& expected_before,
- const std::string& expected_after_cp,
+ const std::string& expected_after_cf,
const std::string& expected_after_dce,
- std::function<void(HGraph*)> check_after_cp,
+ std::function<void(HGraph*)> check_after_cf,
Primitive::Type return_type = Primitive::kPrimInt) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
@@ -45,29 +46,86 @@
std::string actual_before = printer_before.str();
ASSERT_EQ(expected_before, actual_before);
- ConstantPropagation(graph).Run();
+ x86::CodeGeneratorX86 codegen(graph);
+ HConstantFolding(graph).Run();
+ SSAChecker ssa_checker(&allocator, graph);
+ ssa_checker.Run();
+ ASSERT_TRUE(ssa_checker.IsValid());
- StringPrettyPrinter printer_after_cp(graph);
- printer_after_cp.VisitInsertionOrder();
- std::string actual_after_cp = printer_after_cp.str();
- ASSERT_EQ(expected_after_cp, actual_after_cp);
+ StringPrettyPrinter printer_after_cf(graph);
+ printer_after_cf.VisitInsertionOrder();
+ std::string actual_after_cf = printer_after_cf.str();
+ ASSERT_EQ(expected_after_cf, actual_after_cf);
- check_after_cp(graph);
+ check_after_cf(graph);
- DeadCodeElimination(graph).Run();
+ HDeadCodeElimination(graph).Run();
+ ssa_checker.Run();
+ ASSERT_TRUE(ssa_checker.IsValid());
StringPrettyPrinter printer_after_dce(graph);
printer_after_dce.VisitInsertionOrder();
std::string actual_after_dce = printer_after_dce.str();
ASSERT_EQ(expected_after_dce, actual_after_dce);
-
- SSAChecker ssa_checker(&allocator, graph);
- ssa_checker.Run();
- ASSERT_TRUE(ssa_checker.IsValid());
}
/**
+ * Tiny two-register program exercising int constant folding on negation.
+ *
+ * 16-bit
+ * offset
+ * ------
+ * v0 <- 1 0. const/4 v0, #+1
+ * v1 <- -v0 1. neg-int v1, v0
+ * return v1 2. return v1
+ */
+TEST(ConstantFolding, IntConstantFoldingNegation) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 << 8 | 1 << 12,
+ Instruction::NEG_INT | 1 << 8 | 0 << 12,
+ Instruction::RETURN | 1 << 8);
+
+ std::string expected_before =
+ "BasicBlock 0, succ: 1\n"
+ " 2: IntConstant [5]\n"
+ " 10: SuspendCheck\n"
+ " 11: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 5: Neg(2) [8]\n"
+ " 8: Return(5)\n"
+ "BasicBlock 2, pred: 1\n"
+ " 9: Exit\n";
+
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
+ { " 2: IntConstant [5]\n", " 2: IntConstant\n" },
+ { " 5: Neg(2) [8]\n", " 12: IntConstant [8]\n" },
+ { " 8: Return(5)\n", " 8: Return(12)\n" }
+ };
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
+
+ // Check the value of the computed constant.
+ auto check_after_cf = [](HGraph* graph) {
+ HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
+ ASSERT_TRUE(inst->IsIntConstant());
+ ASSERT_EQ(inst->AsIntConstant()->GetValue(), -1);
+ };
+
+ // Expected difference after dead code elimination.
+ diff_t expected_dce_diff = {
+ { " 2: IntConstant\n", removed },
+ };
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
+
+ TestCode(data,
+ expected_before,
+ expected_after_cf,
+ expected_after_dce,
+ check_after_cf);
+}
+
+/**
* Tiny three-register program exercising int constant folding on addition.
*
* 16-bit
@@ -78,7 +136,7 @@
* v2 <- v0 + v1 2. add-int v2, v0, v1
* return v2 4. return v2
*/
-TEST(ConstantPropagation, IntConstantFoldingOnAddition1) {
+TEST(ConstantFolding, IntConstantFoldingOnAddition1) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -97,17 +155,17 @@
"BasicBlock 2, pred: 1\n"
" 13: Exit\n";
- // Expected difference after constant propagation.
- diff_t expected_cp_diff = {
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
{ " 3: IntConstant [9]\n", " 3: IntConstant\n" },
{ " 5: IntConstant [9]\n", " 5: IntConstant\n" },
{ " 9: Add(3, 5) [12]\n", " 16: IntConstant [12]\n" },
{ " 12: Return(9)\n", " 12: Return(16)\n" }
};
- std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
- auto check_after_cp = [](HGraph* graph) {
+ auto check_after_cf = [](HGraph* graph) {
HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 3);
@@ -118,13 +176,13 @@
{ " 3: IntConstant\n", removed },
{ " 5: IntConstant\n", removed }
};
- std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
TestCode(data,
expected_before,
- expected_after_cp,
+ expected_after_cf,
expected_after_dce,
- check_after_cp);
+ check_after_cf);
}
/**
@@ -142,7 +200,7 @@
* v2 <- v0 + v1 6. add-int v2, v0, v1
* return v2 8. return v2
*/
-TEST(ConstantPropagation, IntConstantFoldingOnAddition2) {
+TEST(ConstantFolding, IntConstantFoldingOnAddition2) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -169,8 +227,8 @@
"BasicBlock 2, pred: 1\n"
" 25: Exit\n";
- // Expected difference after constant propagation.
- diff_t expected_cp_diff = {
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
{ " 3: IntConstant [9]\n", " 3: IntConstant\n" },
{ " 5: IntConstant [9]\n", " 5: IntConstant\n" },
{ " 11: IntConstant [17]\n", " 11: IntConstant\n" },
@@ -180,10 +238,10 @@
{ " 21: Add(9, 17) [24]\n", " 30: IntConstant [24]\n" },
{ " 24: Return(21)\n", " 24: Return(30)\n" }
};
- std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the values of the computed constants.
- auto check_after_cp = [](HGraph* graph) {
+ auto check_after_cf = [](HGraph* graph) {
HInstruction* inst1 = graph->GetBlock(1)->GetFirstInstruction();
ASSERT_TRUE(inst1->IsIntConstant());
ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 3);
@@ -204,13 +262,13 @@
{ " 28: IntConstant\n", removed },
{ " 29: IntConstant\n", removed }
};
- std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
TestCode(data,
expected_before,
- expected_after_cp,
+ expected_after_cf,
expected_after_dce,
- check_after_cp);
+ check_after_cf);
}
/**
@@ -224,7 +282,7 @@
* v2 <- v0 - v1 2. sub-int v2, v0, v1
* return v2 4. return v2
*/
-TEST(ConstantPropagation, IntConstantFoldingOnSubtraction) {
+TEST(ConstantFolding, IntConstantFoldingOnSubtraction) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 3 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
@@ -243,17 +301,17 @@
"BasicBlock 2, pred: 1\n"
" 13: Exit\n";
- // Expected difference after constant propagation.
- diff_t expected_cp_diff = {
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
{ " 3: IntConstant [9]\n", " 3: IntConstant\n" },
{ " 5: IntConstant [9]\n", " 5: IntConstant\n" },
{ " 9: Sub(3, 5) [12]\n", " 16: IntConstant [12]\n" },
{ " 12: Return(9)\n", " 12: Return(16)\n" }
};
- std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
- auto check_after_cp = [](HGraph* graph) {
+ auto check_after_cf = [](HGraph* graph) {
HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 1);
@@ -264,18 +322,15 @@
{ " 3: IntConstant\n", removed },
{ " 5: IntConstant\n", removed }
};
- std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
TestCode(data,
expected_before,
- expected_after_cp,
+ expected_after_cf,
expected_after_dce,
- check_after_cp);
+ check_after_cf);
}
-#define SIX_REGISTERS_CODE_ITEM(...) \
- { 6, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
-
/**
* Tiny three-register-pair program exercising long constant folding
* on addition.
@@ -289,7 +344,7 @@
* (v0, v1) + (v1, v2) 4. add-long v4, v0, v2
* return (v4, v5) 6. return-wide v4
*/
-TEST(ConstantPropagation, LongConstantFoldingOnAddition) {
+TEST(ConstantFolding, LongConstantFoldingOnAddition) {
const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 1,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -308,17 +363,17 @@
"BasicBlock 2, pred: 1\n"
" 16: Exit\n";
- // Expected difference after constant propagation.
- diff_t expected_cp_diff = {
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
{ " 6: LongConstant [12]\n", " 6: LongConstant\n" },
{ " 8: LongConstant [12]\n", " 8: LongConstant\n" },
{ " 12: Add(6, 8) [15]\n", " 19: LongConstant [15]\n" },
{ " 15: Return(12)\n", " 15: Return(19)\n" }
};
- std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
- auto check_after_cp = [](HGraph* graph) {
+ auto check_after_cf = [](HGraph* graph) {
HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
ASSERT_TRUE(inst->IsLongConstant());
ASSERT_EQ(inst->AsLongConstant()->GetValue(), 3);
@@ -329,13 +384,13 @@
{ " 6: LongConstant\n", removed },
{ " 8: LongConstant\n", removed }
};
- std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
TestCode(data,
expected_before,
- expected_after_cp,
+ expected_after_cf,
expected_after_dce,
- check_after_cp,
+ check_after_cf,
Primitive::kPrimLong);
}
@@ -352,7 +407,7 @@
* (v0, v1) - (v1, v2) 4. sub-long v4, v0, v2
* return (v4, v5) 6. return-wide v4
*/
-TEST(ConstantPropagation, LongConstantFoldingOnSubtraction) {
+TEST(ConstantFolding, LongConstantFoldingOnSubtraction) {
const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 3,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
@@ -371,17 +426,17 @@
"BasicBlock 2, pred: 1\n"
" 16: Exit\n";
- // Expected difference after constant propagation.
- diff_t expected_cp_diff = {
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
{ " 6: LongConstant [12]\n", " 6: LongConstant\n" },
{ " 8: LongConstant [12]\n", " 8: LongConstant\n" },
{ " 12: Sub(6, 8) [15]\n", " 19: LongConstant [15]\n" },
{ " 15: Return(12)\n", " 15: Return(19)\n" }
};
- std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the value of the computed constant.
- auto check_after_cp = [](HGraph* graph) {
+ auto check_after_cf = [](HGraph* graph) {
HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
ASSERT_TRUE(inst->IsLongConstant());
ASSERT_EQ(inst->AsLongConstant()->GetValue(), 1);
@@ -392,13 +447,13 @@
{ " 6: LongConstant\n", removed },
{ " 8: LongConstant\n", removed }
};
- std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
TestCode(data,
expected_before,
- expected_after_cp,
+ expected_after_cf,
expected_after_dce,
- check_after_cp,
+ check_after_cf,
Primitive::kPrimLong);
}
@@ -424,7 +479,7 @@
* L3: v2 <- v1 + 4 11. add-int/lit16 v2, v1, #+4
* return v2 13. return v2
*/
-TEST(ConstantPropagation, IntConstantFoldingAndJumps) {
+TEST(ConstantFolding, IntConstantFoldingAndJumps) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
@@ -462,8 +517,8 @@
"BasicBlock 5, pred: 4\n"
" 29: Exit\n";
- // Expected difference after constant propagation.
- diff_t expected_cp_diff = {
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
{ " 3: IntConstant [9]\n", " 3: IntConstant\n" },
{ " 5: IntConstant [9]\n", " 5: IntConstant []\n" },
{ " 13: IntConstant [14]\n", " 13: IntConstant\n" },
@@ -475,10 +530,10 @@
{ " 25: Add(14, 24) [28]\n", " 35: IntConstant [28]\n" },
{ " 28: Return(25)\n", " 28: Return(35)\n"}
};
- std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the values of the computed constants.
- auto check_after_cp = [](HGraph* graph) {
+ auto check_after_cf = [](HGraph* graph) {
HInstruction* inst1 = graph->GetBlock(1)->GetFirstInstruction();
ASSERT_TRUE(inst1->IsIntConstant());
ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 1);
@@ -501,13 +556,13 @@
{ " 24: IntConstant\n", removed },
{ " 34: IntConstant\n", removed },
};
- std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
TestCode(data,
expected_before,
- expected_after_cp,
+ expected_after_cf,
expected_after_dce,
- check_after_cp);
+ check_after_cf);
}
@@ -524,7 +579,7 @@
* L1: v2 <- v0 + v1 5. add-int v2, v0, v1
* return-void 7. return
*/
-TEST(ConstantPropagation, ConstantCondition) {
+TEST(ConstantFolding, ConstantCondition) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::CONST_4 | 0 << 8 | 0 << 12,
@@ -553,17 +608,17 @@
"BasicBlock 5, pred: 1, succ: 3\n"
" 21: Goto 3\n";
- // Expected difference after constant propagation.
- diff_t expected_cp_diff = {
+ // Expected difference after constant folding.
+ diff_t expected_cf_diff = {
{ " 3: IntConstant [15, 22, 8]\n", " 3: IntConstant [15, 22]\n" },
{ " 5: IntConstant [22, 8]\n", " 5: IntConstant [22]\n" },
{ " 8: GreaterThanOrEqual(3, 5) [9]\n", " 23: IntConstant [9]\n" },
{ " 9: If(8)\n", " 9: If(23)\n" }
};
- std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+ std::string expected_after_cf = Patch(expected_before, expected_cf_diff);
// Check the values of the computed constants.
- auto check_after_cp = [](HGraph* graph) {
+ auto check_after_cf = [](HGraph* graph) {
HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction();
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 1);
@@ -575,13 +630,13 @@
{ " 22: Phi(3, 5) [15]\n", " 22: Phi(3, 5)\n" },
{ " 15: Add(22, 3)\n", removed }
};
- std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+ std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
TestCode(data,
expected_before,
- expected_after_cp,
+ expected_after_cf,
expected_after_dce,
- check_after_cp);
+ check_after_cf);
}
} // namespace art
diff --git a/compiler/optimizing/constant_propagation.h b/compiler/optimizing/constant_propagation.h
deleted file mode 100644
index 0729881..0000000
--- a/compiler/optimizing/constant_propagation.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_CONSTANT_PROPAGATION_H_
-#define ART_COMPILER_OPTIMIZING_CONSTANT_PROPAGATION_H_
-
-#include "nodes.h"
-
-namespace art {
-
-/**
- * Optimization pass performing a simple constant propagation on the
- * SSA form.
- */
-class ConstantPropagation : public ValueObject {
- public:
- explicit ConstantPropagation(HGraph* graph)
- : graph_(graph) {}
-
- void Run();
-
- private:
- HGraph* const graph_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstantPropagation);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_OPTIMIZING_CONSTANT_PROPAGATION_H_
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 5655544..fc3dd01 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -20,7 +20,7 @@
namespace art {
-void DeadCodeElimination::Run() {
+void HDeadCodeElimination::Run() {
// Process basic blocks in post-order in the dominator tree, so that
// a dead instruction depending on another dead instruction is
// removed.
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 48739be..3db2c3f 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_DEAD_CODE_ELIMINATION_H_
#include "nodes.h"
+#include "optimization.h"
namespace art {
@@ -25,17 +26,18 @@
* Optimization pass performing dead code elimination (removal of
* unused variables/instructions) on the SSA form.
*/
-class DeadCodeElimination : public ValueObject {
+class HDeadCodeElimination : public HOptimization {
public:
- explicit DeadCodeElimination(HGraph* graph)
- : graph_(graph) {}
+ explicit HDeadCodeElimination(HGraph* graph)
+ : HOptimization(graph, true, kDeadCodeEliminationPassName) {}
- void Run();
+ void Run() OVERRIDE;
+
+ static constexpr const char* kDeadCodeEliminationPassName =
+ "dead_code_elimination";
private:
- HGraph* const graph_;
-
- DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
+ DISALLOW_COPY_AND_ASSIGN(HDeadCodeElimination);
};
} // namespace art
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 3e0ba3a..5d4b9cb 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -14,10 +14,11 @@
* limitations under the License.
*/
+#include "code_generator_x86.h"
#include "dead_code_elimination.h"
-#include "pretty_printer.h"
#include "graph_checker.h"
#include "optimizing_unit_test.h"
+#include "pretty_printer.h"
#include "gtest/gtest.h"
@@ -39,16 +40,16 @@
std::string actual_before = printer_before.str();
ASSERT_EQ(actual_before, expected_before);
- DeadCodeElimination(graph).Run();
+ x86::CodeGeneratorX86 codegen(graph);
+ HDeadCodeElimination(graph).Run();
+ SSAChecker ssa_checker(&allocator, graph);
+ ssa_checker.Run();
+ ASSERT_TRUE(ssa_checker.IsValid());
StringPrettyPrinter printer_after(graph);
printer_after.VisitInsertionOrder();
std::string actual_after = printer_after.str();
ASSERT_EQ(actual_after, expected_after);
-
- SSAChecker ssa_checker(&allocator, graph);
- ssa_checker.Run();
- ASSERT_TRUE(ssa_checker.IsValid());
}
@@ -94,6 +95,7 @@
"BasicBlock 5, pred: 1, succ: 3\n"
" 21: Goto 3\n";
+ // Expected difference after dead code elimination.
diff_t expected_diff = {
{ " 3: IntConstant [15, 22, 8]\n", " 3: IntConstant [22, 8]\n" },
{ " 22: Phi(3, 5) [15]\n", " 22: Phi(3, 5)\n" },
@@ -164,7 +166,7 @@
"BasicBlock 5, pred: 4\n"
" 28: Exit\n";
- // Expected difference after constant propagation.
+ // Expected difference after dead code elimination.
diff_t expected_diff = {
{ " 13: IntConstant [14]\n", removed },
{ " 24: IntConstant [25]\n", removed },
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 9f40297..1953241 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -53,7 +53,7 @@
<< " lists " << block_count_in_p_successors
<< " occurrences of block " << block->GetBlockId()
<< " in its successors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -83,7 +83,7 @@
<< " lists " << block_count_in_s_predecessors
<< " occurrences of block " << block->GetBlockId()
<< " in its predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -93,7 +93,7 @@
std::stringstream error;
error << "Block " << block->GetBlockId()
<< " does not end with a branch instruction.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Visit this block's list of phis.
@@ -103,7 +103,7 @@
std::stringstream error;
error << "Block " << current_block_->GetBlockId()
<< " has a non-phi in its phi list.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
it.Current()->Accept(this);
}
@@ -116,7 +116,7 @@
std::stringstream error;
error << "Block " << current_block_->GetBlockId()
<< " has a phi in its non-phi list.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
it.Current()->Accept(this);
}
@@ -139,7 +139,7 @@
} else {
error << " not associated with any block.";
}
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure the inputs of `instruction` are defined in a block of the graph.
@@ -154,7 +154,7 @@
error << "Input " << input->GetId()
<< " of instruction " << instruction->GetId()
<< " is not defined in a basic block of the control-flow graph.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -170,7 +170,7 @@
error << "User " << use->GetId()
<< " of instruction " << instruction->GetId()
<< " is not defined in a basic block of the control-flow graph.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -188,7 +188,7 @@
std::stringstream error;
error << "Critical edge between blocks " << block->GetBlockId()
<< " and " << successor->GetBlockId() << ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -207,7 +207,7 @@
std::stringstream error;
error << "Loop pre-header is not the first predecessor of the loop header "
<< id << ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure the loop header has only two predecessors and that only the
@@ -215,25 +215,25 @@
if (loop_header->GetPredecessors().Size() < 2) {
std::stringstream error;
error << "Loop header " << id << " has less than two predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
} else if (loop_header->GetPredecessors().Size() > 2) {
std::stringstream error;
error << "Loop header " << id << " has more than two predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
} else {
HLoopInformation* loop_information = loop_header->GetLoopInformation();
HBasicBlock* first_predecessor = loop_header->GetPredecessors().Get(0);
if (loop_information->IsBackEdge(first_predecessor)) {
std::stringstream error;
error << "First predecessor of loop header " << id << " is a back edge.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
HBasicBlock* second_predecessor = loop_header->GetPredecessors().Get(1);
if (!loop_information->IsBackEdge(second_predecessor)) {
std::stringstream error;
error << "Second predecessor of loop header " << id
<< " is not a back edge.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -244,7 +244,7 @@
std::stringstream error;
error << "Loop defined by header " << id << " has "
<< num_back_edges << " back edge(s).";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure all blocks in the loop are dominated by the loop header.
@@ -256,7 +256,7 @@
std::stringstream error;
error << "Loop block " << loop_block->GetBlockId()
<< " not dominated by loop header " << id;
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -268,13 +268,13 @@
for (HUseIterator<HInstruction> use_it(instruction->GetUses());
!use_it.Done(); use_it.Advance()) {
HInstruction* use = use_it.Current()->GetUser();
- if (!use->IsPhi() && !instruction->Dominates(use)) {
+ if (!use->IsPhi() && !instruction->StrictlyDominates(use)) {
std::stringstream error;
error << "Instruction " << instruction->GetId()
<< " in block " << current_block_->GetBlockId()
<< " does not dominate use " << use->GetId()
<< " in block " << use->GetBlock()->GetBlockId() << ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -285,14 +285,14 @@
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
HInstruction* env_instruction = environment->GetInstructionAt(i);
if (env_instruction != nullptr
- && !env_instruction->Dominates(instruction)) {
+ && !env_instruction->StrictlyDominates(instruction)) {
std::stringstream error;
error << "Instruction " << env_instruction->GetId()
<< " in environment of instruction " << instruction->GetId()
<< " from block " << current_block_->GetBlockId()
<< " does not dominate instruction " << instruction->GetId()
<< ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -307,7 +307,7 @@
error << "Loop phi " << phi->GetId()
<< " in block " << phi->GetBlock()->GetBlockId()
<< " is its own first input.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure the number of phi inputs is the same as the number of
@@ -321,7 +321,7 @@
<< " has " << phi->InputCount() << " inputs, but block "
<< phi->GetBlock()->GetBlockId() << " has "
<< predecessors.Size() << " predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
} else {
// Ensure phi input at index I either comes from the Ith
// predecessor or from a block that dominates this predecessor.
@@ -336,7 +336,7 @@
<< " from block " << phi->GetBlock()->GetBlockId()
<< " is not defined in predecessor number " << i
<< " nor in a block dominating it.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 862f1b6..8ba8cb1 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -19,15 +19,18 @@
#include "nodes.h"
+#include <ostream>
+
namespace art {
// A control-flow graph visitor performing various checks.
class GraphChecker : public HGraphVisitor {
public:
- GraphChecker(ArenaAllocator* allocator, HGraph* graph)
+ GraphChecker(ArenaAllocator* allocator, HGraph* graph,
+ const char* dump_prefix = "art::GraphChecker: ")
: HGraphVisitor(graph),
allocator_(allocator),
- errors_(allocator, 0) {}
+ dump_prefix_(dump_prefix) {}
// Check the whole graph (in insertion order).
virtual void Run() { VisitInsertionOrder(); }
@@ -40,22 +43,32 @@
// Was the last visit of the graph valid?
bool IsValid() const {
- return errors_.IsEmpty();
+ return errors_.empty();
}
// Get the list of detected errors.
- const GrowableArray<std::string>& GetErrors() const {
+ const std::vector<std::string>& GetErrors() const {
return errors_;
}
+ // Print detected errors on output stream `os`.
+ void Dump(std::ostream& os) const {
+ for (size_t i = 0, e = errors_.size(); i < e; ++i) {
+ os << dump_prefix_ << errors_[i] << std::endl;
+ }
+ }
+
protected:
ArenaAllocator* const allocator_;
// The block currently visited.
HBasicBlock* current_block_ = nullptr;
// Errors encountered while checking the graph.
- GrowableArray<std::string> errors_;
+ std::vector<std::string> errors_;
private:
+ // String displayed before dumped errors.
+ const char* const dump_prefix_;
+
DISALLOW_COPY_AND_ASSIGN(GraphChecker);
};
@@ -66,7 +79,7 @@
typedef GraphChecker super_type;
SSAChecker(ArenaAllocator* allocator, HGraph* graph)
- : GraphChecker(allocator, graph) {}
+ : GraphChecker(allocator, graph, "art::SSAChecker: ") {}
// Check the whole graph (in reverse post-order).
virtual void Run() {
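The new dump_prefix_/Dump() pair makes checker failures easy to report; a hedged usage sketch (mirroring how the tests above drive the checker, with `allocator` and `graph` assumed to be in scope as there, and <iostream> included for std::cerr):

  SSAChecker ssa_checker(&allocator, graph);
  ssa_checker.Run();
  if (!ssa_checker.IsValid()) {
    ssa_checker.Dump(std::cerr);  // each line is prefixed with "art::SSAChecker: "
  }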
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index b4eb89d..4ed2156 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -120,13 +120,11 @@
output_<< std::endl;
}
- void DumpLocation(Location location, Primitive::Type type) {
+ void DumpLocation(Location location) {
if (location.IsRegister()) {
- if (type == Primitive::kPrimDouble || type == Primitive::kPrimFloat) {
- codegen_.DumpFloatingPointRegister(output_, location.reg());
- } else {
- codegen_.DumpCoreRegister(output_, location.reg());
- }
+ codegen_.DumpCoreRegister(output_, location.reg());
+ } else if (location.IsFpuRegister()) {
+ codegen_.DumpFloatingPointRegister(output_, location.reg());
} else if (location.IsConstant()) {
output_ << "constant";
HConstant* constant = location.GetConstant();
@@ -150,9 +148,9 @@
output_ << " (";
for (size_t i = 0, e = instruction->NumMoves(); i < e; ++i) {
MoveOperands* move = instruction->MoveOperandsAt(i);
- DumpLocation(move->GetSource(), Primitive::kPrimInt);
+ DumpLocation(move->GetSource());
output_ << " -> ";
- DumpLocation(move->GetDestination(), Primitive::kPrimInt);
+ DumpLocation(move->GetDestination());
if (i + 1 != e) {
output_ << ", ";
}
@@ -183,13 +181,13 @@
if (locations != nullptr) {
output_ << " ( ";
for (size_t i = 0; i < instruction->InputCount(); ++i) {
- DumpLocation(locations->InAt(i), instruction->InputAt(i)->GetType());
+ DumpLocation(locations->InAt(i));
output_ << " ";
}
output_ << ")";
if (locations->Out().IsValid()) {
output_ << " -> ";
- DumpLocation(locations->Out(), instruction->GetType());
+ DumpLocation(locations->Out());
}
}
output_ << " (liveness: " << instruction->GetLifetimePosition() << ")";
@@ -309,7 +307,7 @@
printer.EndTag("compilation");
}
-void HGraphVisualizer::DumpGraph(const char* pass_name) {
+void HGraphVisualizer::DumpGraph(const char* pass_name) const {
if (!is_enabled_) {
return;
}
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index f17ba3b..4d8bec2 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
#define ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
+#include <ostream>
+
#include "base/value_object.h"
namespace art {
@@ -61,7 +63,7 @@
* If this visualizer is enabled, emit the compilation information
* in `output_`.
*/
- void DumpGraph(const char* pass_name);
+ void DumpGraph(const char* pass_name) const;
private:
std::ostream* const output_;
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 027b3d4..25168b5 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -54,8 +54,9 @@
SideEffects effects = SideEffects::None();
// Update `effects` with the side effects of all instructions in this block.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ HInstruction* instruction = inst_it.Current();
effects = effects.Union(instruction->GetSideEffects());
if (effects.HasAllSideEffects()) {
break;
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index a98d714..a841d5f 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_GVN_H_
#include "nodes.h"
+#include "optimization.h"
namespace art {
@@ -25,7 +26,7 @@
* A node in the collision list of a ValueSet. Encodes the instruction,
* the hash code, and the next node in the collision list.
*/
-class ValueSetNode : public ArenaObject {
+class ValueSetNode : public ArenaObject<kArenaAllocMisc> {
public:
ValueSetNode(HInstruction* instruction, size_t hash_code, ValueSetNode* next)
: instruction_(instruction), hash_code_(hash_code), next_(next) {}
@@ -52,7 +53,7 @@
* if there is one in the set. In GVN, we would say those instructions have the
* same "number".
*/
-class ValueSet : public ArenaObject {
+class ValueSet : public ArenaObject<kArenaAllocMisc> {
public:
explicit ValueSet(ArenaAllocator* allocator)
: allocator_(allocator), number_of_entries_(0), collisions_(nullptr) {
@@ -165,11 +166,11 @@
/**
* Optimization phase that removes redundant instruction.
*/
-class GlobalValueNumberer : public ValueObject {
+class GlobalValueNumberer : public HOptimization {
public:
GlobalValueNumberer(ArenaAllocator* allocator, HGraph* graph)
- : allocator_(allocator),
- graph_(graph),
+ : HOptimization(graph, true, "GVN"),
+ allocator_(allocator),
block_effects_(allocator, graph->GetBlocks().Size()),
loop_effects_(allocator, graph->GetBlocks().Size()),
sets_(allocator, graph->GetBlocks().Size()),
@@ -186,7 +187,7 @@
}
}
- void Run();
+ void Run() OVERRIDE;
private:
// Per-block GVN. Will also update the ValueSet of the dominated and
@@ -202,7 +203,6 @@
SideEffects GetBlockEffects(HBasicBlock* block) const;
ArenaAllocator* const allocator_;
- HGraph* const graph_;
// Side effects of individual blocks, that is the union of the side effects
// of the instructions in the block.
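The ValueSet described above keys instructions by hash code so that two side-effect-free instructions computing the same value receive the same "number". A toy illustration of that idea, using a std::map keyed by opcode and operand numbers instead of ART's arena-allocated collision lists:

    #include <cstddef>
    #include <map>
    #include <tuple>

    namespace sketch {

    enum class Op { kAdd, kMul };

    // (opcode, value number of left operand, value number of right operand).
    using Key = std::tuple<Op, size_t, size_t>;

    class ValueNumberer {
     public:
      // Returns an existing number for an equivalent expression, or assigns a new one.
      size_t Lookup(Op op, size_t lhs, size_t rhs) {
        Key key(op, lhs, rhs);
        auto it = table_.find(key);
        if (it != table_.end()) {
          return it->second;  // Redundant: same "number" as a previous instruction.
        }
        size_t number = next_number_++;
        table_[key] = number;
        return number;
      }

     private:
      std::map<Key, size_t> table_;
      size_t next_number_ = 1;
    };

    }  // namespace sketch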
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2d9e35c..3d65e9a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -18,11 +18,22 @@
namespace art {
+class InstructionSimplifierVisitor : public HGraphVisitor {
+ public:
+ explicit InstructionSimplifierVisitor(HGraph* graph) : HGraphVisitor(graph) {}
+
+ private:
+ void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
+ void VisitEqual(HEqual* equal) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+};
+
void InstructionSimplifier::Run() {
- VisitInsertionOrder();
+ InstructionSimplifierVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
}
-void InstructionSimplifier::VisitSuspendCheck(HSuspendCheck* check) {
+void InstructionSimplifierVisitor::VisitSuspendCheck(HSuspendCheck* check) {
HBasicBlock* block = check->GetBlock();
// Currently always keep the suspend check at entry.
if (block->IsEntryBlock()) return;
@@ -38,7 +49,7 @@
block->RemoveInstruction(check);
}
-void InstructionSimplifier::VisitEqual(HEqual* equal) {
+void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) {
HInstruction* input1 = equal->InputAt(0);
HInstruction* input2 = equal->InputAt(1);
if (input1->GetType() == Primitive::kPrimBoolean && input2->IsIntConstant()) {
@@ -50,7 +61,19 @@
// Replace (bool_value == 0) with !bool_value
DCHECK_EQ(input2->AsIntConstant()->GetValue(), 0);
equal->GetBlock()->ReplaceAndRemoveInstructionWith(
- equal, new (GetGraph()->GetArena()) HNot(input1));
+ equal, new (GetGraph()->GetArena()) HNot(Primitive::kPrimBoolean, input1));
+ }
+ }
+}
+
+void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) {
+ HInstruction* value = instruction->GetValue();
+ if (value->GetType() != Primitive::kPrimNot) return;
+
+ if (value->IsArrayGet()) {
+ if (value->AsArrayGet()->GetArray() == instruction->GetArray()) {
+ // If the code is just swapping elements in the array, no need for a type check.
+ instruction->ClearNeedsTypeCheck();
}
}
}
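Two of the rewrites above are worth spelling out: (bool_value == 0) becomes !bool_value, and an array store whose value was just loaded from the same array (the Java pattern a[i] = a[j]) needs no type check. A rough sketch of the second rule over assumed stand-in types, not ART's HArraySet/HArrayGet:

    namespace sketch {

    struct ArrayGet { const void* array; };

    struct ArraySet {
      const void* array;
      const ArrayGet* value_as_array_get;  // Non-null when the stored value is an ArrayGet.
      bool needs_type_check = true;
    };

    void SimplifyArraySet(ArraySet* set) {
      if (set->value_as_array_get != nullptr &&
          set->value_as_array_get->array == set->array) {
        // Swapping elements within one array cannot violate the array's component type,
        // so the ArrayStoreException path can be dropped.
        set->needs_type_check = false;
      }
    }

    }  // namespace sketch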
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index d74b624..7068c7f 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -18,21 +18,19 @@
#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_H_
#include "nodes.h"
+#include "optimization.h"
namespace art {
/**
* Implements optimizations specific to each instruction.
*/
-class InstructionSimplifier : public HGraphVisitor {
+class InstructionSimplifier : public HOptimization {
public:
- explicit InstructionSimplifier(HGraph* graph) : HGraphVisitor(graph) {}
+ explicit InstructionSimplifier(HGraph* graph)
+ : HOptimization(graph, true, "instruction_simplifier") {}
- void Run();
-
- private:
- virtual void VisitSuspendCheck(HSuspendCheck* check) OVERRIDE;
- virtual void VisitEqual(HEqual* equal) OVERRIDE;
+ void Run() OVERRIDE;
};
} // namespace art
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index d5f4f90..89c9495 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -73,7 +73,7 @@
LiveRange* range = interval->GetFirstRange();
ASSERT_EQ(2u, range->GetStart());
// Last use is the return instruction.
- ASSERT_EQ(9u, range->GetEnd());
+ ASSERT_EQ(8u, range->GetEnd());
HBasicBlock* block = graph->GetBlocks().Get(1);
ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(8u, block->GetLastInstruction()->GetLifetimePosition());
@@ -119,7 +119,7 @@
LiveRange* range = interval->GetFirstRange();
ASSERT_EQ(2u, range->GetStart());
// Last use is the return instruction.
- ASSERT_EQ(23u, range->GetEnd());
+ ASSERT_EQ(22u, range->GetEnd());
HBasicBlock* block = graph->GetBlocks().Get(3);
ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(22u, block->GetLastInstruction()->GetLifetimePosition());
@@ -193,7 +193,7 @@
range = interval->GetFirstRange();
ASSERT_EQ(22u, liveness.GetInstructionFromSsaIndex(2)->GetLifetimePosition());
ASSERT_EQ(22u, range->GetStart());
- ASSERT_EQ(25u, range->GetEnd());
+ ASSERT_EQ(24u, range->GetEnd());
ASSERT_TRUE(range->GetNext() == nullptr);
}
@@ -263,7 +263,7 @@
range = interval->GetFirstRange();
// The instruction is live until the return instruction after the loop.
ASSERT_EQ(6u, range->GetStart());
- ASSERT_EQ(27u, range->GetEnd());
+ ASSERT_EQ(26u, range->GetEnd());
ASSERT_TRUE(range->GetNext() == nullptr);
// Test for the phi.
@@ -271,7 +271,7 @@
range = interval->GetFirstRange();
// Instruction is consumed by the if.
ASSERT_EQ(14u, range->GetStart());
- ASSERT_EQ(16u, range->GetEnd());
+ ASSERT_EQ(17u, range->GetEnd());
ASSERT_TRUE(range->GetNext() == nullptr);
}
@@ -338,7 +338,7 @@
range = range->GetNext();
ASSERT_TRUE(range != nullptr);
ASSERT_EQ(24u, range->GetStart());
- ASSERT_EQ(27u, range->GetEnd());
+ ASSERT_EQ(26u, range->GetEnd());
// Test for the add instruction.
HAdd* add = liveness.GetInstructionFromSsaIndex(2)->AsAdd();
@@ -410,7 +410,7 @@
interval = liveness.GetInstructionFromSsaIndex(1)->GetLiveInterval();
range = interval->GetFirstRange();
ASSERT_EQ(4u, range->GetStart());
- ASSERT_EQ(29u, range->GetEnd());
+ ASSERT_EQ(28u, range->GetEnd());
ASSERT_TRUE(range->GetNext() == nullptr);
// Test for the first add.
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 1637484..ed5e260 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -25,16 +25,14 @@
temps_(instruction->GetBlock()->GetGraph()->GetArena(), 0),
environment_(instruction->GetBlock()->GetGraph()->GetArena(),
instruction->EnvironmentSize()),
- dies_at_entry_(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
+ output_overlaps_(true),
call_kind_(call_kind),
stack_mask_(nullptr),
register_mask_(0),
live_registers_() {
inputs_.SetSize(instruction->InputCount());
- dies_at_entry_.SetSize(instruction->InputCount());
for (size_t i = 0; i < instruction->InputCount(); ++i) {
inputs_.Put(i, Location());
- dies_at_entry_.Put(i, false);
}
environment_.SetSize(instruction->EnvironmentSize());
for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
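LocationSummary now carries a single output_overlaps_ flag (conservatively true) instead of the per-input dies_at_entry_ array. Roughly, an allocator can use such a flag to let the output share a register with an input; a hedged sketch with assumed types, not ART's register allocator:

    namespace sketch {

    struct Summary {
      int first_input_register;
      bool output_overlaps;  // Conservative default is true, as in the change above.
    };

    int ChooseOutputRegister(const Summary& summary, int next_free_register) {
      if (!summary.output_overlaps) {
        // Safe to reuse: the input is no longer live when the output is written.
        return summary.first_input_register;
      }
      return next_free_register;
    }

    }  // namespace sketch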
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index dcf70f2..e1c8e8e 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -27,6 +27,9 @@
class HConstant;
class HInstruction;
+class Location;
+
+std::ostream& operator<<(std::ostream& os, const Location& location);
/**
* A Location is an abstraction over the potential location
@@ -34,7 +37,7 @@
*/
class Location : public ValueObject {
public:
- static constexpr bool kDiesAtEntry = true;
+ static constexpr bool kNoOutputOverlap = false;
enum Kind {
kInvalid = 0,
@@ -47,37 +50,40 @@
// We do not use the value 5 because it conflicts with kLocationConstantMask.
kDoNotUse5 = 5,
- kFpuRegister = 6, // Floating point processor.
+ kFpuRegister = 6, // Float register.
- kRegisterPair = 7,
+ kRegisterPair = 7, // Long register.
+
+ kFpuRegisterPair = 8, // Double register.
+
+ // We do not use the value 9 because it conflicts with kLocationConstantMask.
+ kDoNotUse9 = 9,
// On 32bits architectures, quick can pass a long where the
// low bits are in the last parameter register, and the high
// bits are in a stack slot. The kQuickParameter kind is for
// handling this special case.
- kQuickParameter = 8,
-
- // We do not use the value 9 because it conflicts with kLocationConstantMask.
- kDoNotUse9 = 9,
+ kQuickParameter = 10,
// Unallocated location represents a location that is not fixed and can be
// allocated by a register allocator. Each unallocated location has
// a policy that specifies what kind of location is suitable. Payload
// contains register allocation policy.
- kUnallocated = 10,
+ kUnallocated = 11,
};
Location() : value_(kInvalid) {
// Verify that non-constant location kinds do not interfere with kConstant.
- COMPILE_ASSERT((kInvalid & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kUnallocated & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kDoubleStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kQuickParameter & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kFpuRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegisterPair & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kConstant & kLocationConstantMask) == kConstant, TagError);
+ static_assert((kInvalid & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kQuickParameter & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kConstant & kLocationConstantMask) == kConstant, "TagError");
DCHECK(!IsValid());
}
@@ -129,6 +135,10 @@
return Location(kRegisterPair, low << 16 | high);
}
+ static Location FpuRegisterPairLocation(int low, int high) {
+ return Location(kFpuRegisterPair, low << 16 | high);
+ }
+
bool IsRegister() const {
return GetKind() == kRegister;
}
@@ -141,6 +151,10 @@
return GetKind() == kRegisterPair;
}
+ bool IsFpuRegisterPair() const {
+ return GetKind() == kFpuRegisterPair;
+ }
+
int reg() const {
DCHECK(IsRegister() || IsFpuRegister());
return GetPayload();
@@ -163,6 +177,18 @@
return static_cast<T>(GetPayload() & 0xFFFF);
}
+ template <typename T>
+ T AsFpuRegisterPairLow() const {
+ DCHECK(IsFpuRegisterPair());
+ return static_cast<T>(GetPayload() >> 16);
+ }
+
+ template <typename T>
+ T AsFpuRegisterPairHigh() const {
+ DCHECK(IsFpuRegisterPair());
+ return static_cast<T>(GetPayload() & 0xFFFF);
+ }
+
static uintptr_t EncodeStackIndex(intptr_t stack_index) {
DCHECK(-kStackIndexBias <= stack_index);
DCHECK(stack_index < kStackIndexBias);
@@ -205,13 +231,18 @@
return GetPayload() - kStackIndexBias + word_size;
}
- static Location QuickParameter(uint32_t parameter_index) {
- return Location(kQuickParameter, parameter_index);
+ static Location QuickParameter(uint16_t register_index, uint16_t stack_index) {
+ return Location(kQuickParameter, register_index << 16 | stack_index);
}
- uint32_t GetQuickParameterIndex() const {
+ uint32_t GetQuickParameterRegisterIndex() const {
DCHECK(IsQuickParameter());
- return GetPayload();
+ return GetPayload() >> 16;
+ }
+
+ uint32_t GetQuickParameterStackIndex() const {
+ DCHECK(IsQuickParameter());
+ return GetPayload() & 0xFFFF;
}
bool IsQuickParameter() const {
@@ -237,6 +268,7 @@
case kConstant: return "C";
case kFpuRegister: return "F";
case kRegisterPair: return "RP";
+ case kFpuRegisterPair: return "FP";
case kDoNotUse5: // fall-through
case kDoNotUse9:
LOG(FATAL) << "Should not use this location kind";
@@ -322,6 +354,8 @@
// way that none of them can be interpreted as a kConstant tag.
uintptr_t value_;
};
+std::ostream& operator<<(std::ostream& os, const Location::Kind& rhs);
+std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs);
class RegisterSet : public ValueObject {
public:
@@ -336,6 +370,15 @@
}
}
+ void Remove(Location loc) {
+ if (loc.IsRegister()) {
+ core_registers_ &= ~(1 << loc.reg());
+ } else {
+ DCHECK(loc.IsFpuRegister()) << loc;
+ floating_point_registers_ &= ~(1 << loc.reg());
+ }
+ }
+
bool ContainsCoreRegister(uint32_t id) {
return Contains(core_registers_, id);
}
@@ -348,6 +391,10 @@
return (register_set & (1 << reg)) != 0;
}
+ size_t GetNumberOfRegisters() const {
+ return __builtin_popcount(core_registers_) + __builtin_popcount(floating_point_registers_);
+ }
+
private:
uint32_t core_registers_;
uint32_t floating_point_registers_;
@@ -363,7 +410,7 @@
* The intent is to have the code for generating the instruction independent of
* register allocation. A register allocator just has to provide a LocationSummary.
*/
-class LocationSummary : public ArenaObject {
+class LocationSummary : public ArenaObject<kArenaAllocMisc> {
public:
enum CallKind {
kNoCall,
@@ -373,8 +420,8 @@
LocationSummary(HInstruction* instruction, CallKind call_kind = kNoCall);
- void SetInAt(uint32_t at, Location location, bool dies_at_entry = false) {
- dies_at_entry_.Put(at, dies_at_entry);
+ void SetInAt(uint32_t at, Location location) {
+ DCHECK(inputs_.Get(at).IsUnallocated() || inputs_.Get(at).IsInvalid());
inputs_.Put(at, location);
}
@@ -386,8 +433,18 @@
return inputs_.Size();
}
- void SetOut(Location location) {
- output_ = Location(location);
+ void SetOut(Location location, bool overlaps = true) {
+ DCHECK(output_.IsUnallocated() || output_.IsInvalid());
+ output_overlaps_ = overlaps;
+ output_ = location;
+ }
+
+ void UpdateOut(Location location) {
+ // The only reason for updating an output is for parameters where
+ // we only know the exact stack slot after doing full register
+ // allocation.
+ DCHECK(output_.IsStackSlot() || output_.IsDoubleStackSlot());
+ output_ = location;
}
void AddTemp(Location location) {
@@ -399,6 +456,7 @@
}
void SetTempAt(uint32_t at, Location location) {
+ DCHECK(temps_.Get(at).IsUnallocated() || temps_.Get(at).IsInvalid());
temps_.Put(at, location);
}
@@ -449,23 +507,34 @@
return &live_registers_;
}
- bool InputOverlapsWithOutputOrTemp(uint32_t input, bool is_environment) const {
+ size_t GetNumberOfLiveRegisters() const {
+ return live_registers_.GetNumberOfRegisters();
+ }
+
+ bool InputOverlapsWithOutputOrTemp(uint32_t input_index, bool is_environment) const {
if (is_environment) return true;
- Location location = Out();
- if (input == 0 && location.IsUnallocated() && location.GetPolicy() == Location::kSameAsFirstInput) {
+ if ((input_index == 0)
+ && output_.IsUnallocated()
+ && (output_.GetPolicy() == Location::kSameAsFirstInput)) {
return false;
}
- if (dies_at_entry_.Get(input)) {
+ if (inputs_.Get(input_index).IsRegister() || inputs_.Get(input_index).IsFpuRegister()) {
return false;
}
return true;
}
+ bool OutputOverlapsWithInputs() const {
+ return output_overlaps_;
+ }
+
private:
GrowableArray<Location> inputs_;
GrowableArray<Location> temps_;
GrowableArray<Location> environment_;
- GrowableArray<bool> dies_at_entry_;
+ // Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
+ // share the same register as the inputs.
+ bool output_overlaps_;
Location output_;
const CallKind call_kind_;
@@ -478,11 +547,11 @@
// Registers that are in use at this position.
RegisterSet live_registers_;
+ ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
+ ART_FRIEND_TEST(RegisterAllocatorTest, SameAsFirstInputHint);
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
-std::ostream& operator<<(std::ostream& os, const Location& location);
-
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_LOCATIONS_H_
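The pair kinds above pack two 16-bit indices into one 32-bit payload: RegisterPairLocation(low, high) stores low << 16 | high, AsRegisterPairLow() reads the upper half, and the reworked kQuickParameter packs a register index and a stack index the same way. A self-contained sketch of that packing:

    #include <cassert>
    #include <cstdint>

    namespace sketch {

    inline uint32_t PackPair(uint16_t first, uint16_t second) {
      return (static_cast<uint32_t>(first) << 16) | second;
    }

    inline uint16_t UpperHalf(uint32_t payload) { return static_cast<uint16_t>(payload >> 16); }
    inline uint16_t LowerHalf(uint32_t payload) { return static_cast<uint16_t>(payload & 0xFFFF); }

    }  // namespace sketch

    int main() {
      // Mirrors Location::RegisterPairLocation(low, high): the low register of the
      // pair lives in the upper half of the payload, the high register in the lower half.
      uint32_t payload = sketch::PackPair(/* low register */ 3, /* high register */ 7);
      assert(sketch::UpperHalf(payload) == 3);  // AsRegisterPairLow()
      assert(sketch::LowerHalf(payload) == 7);  // AsRegisterPairHigh()
      return 0;
    }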
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index aee2177..8cb2ef6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -363,6 +363,25 @@
Add(&phis_, this, phi);
}
+void HBasicBlock::InsertPhiAfter(HPhi* phi, HPhi* cursor) {
+ DCHECK_EQ(phi->GetId(), -1);
+ DCHECK_NE(cursor->GetId(), -1);
+ DCHECK_EQ(cursor->GetBlock(), this);
+ if (cursor->next_ == nullptr) {
+ cursor->next_ = phi;
+ phi->previous_ = cursor;
+ DCHECK(phi->next_ == nullptr);
+ } else {
+ phi->next_ = cursor->next_;
+ phi->previous_ = cursor;
+ cursor->next_ = phi;
+ phi->next_->previous_ = phi;
+ }
+ phi->SetBlock(this);
+ phi->SetId(GetGraph()->GetNextInstructionId());
+ UpdateInputsUsers(phi);
+}
+
static void Remove(HInstructionList* instruction_list,
HBasicBlock* block,
HInstruction* instruction) {
@@ -472,7 +491,11 @@
return true;
}
-bool HInstruction::Dominates(HInstruction* other_instruction) const {
+bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
+ if (other_instruction == this) {
+ // An instruction does not strictly dominate itself.
+ return false;
+ }
HBasicBlock* block = GetBlock();
HBasicBlock* other_block = other_instruction->GetBlock();
if (block != other_block) {
@@ -527,6 +550,12 @@
env_uses_ = nullptr;
}
+void HInstruction::ReplaceInput(HInstruction* replacement, size_t index) {
+ InputAt(index)->RemoveUser(this, index);
+ SetRawInputAt(index, replacement);
+ replacement->AddUseAt(this, index);
+}
+
size_t HInstruction::EnvironmentSize() const {
return HasEnvironment() ? environment_->Size() : 0;
}
@@ -568,15 +597,30 @@
}
}
-HConstant* HBinaryOperation::TryStaticEvaluation(ArenaAllocator* allocator) const {
+HConstant* HUnaryOperation::TryStaticEvaluation() const {
+ if (GetInput()->IsIntConstant()) {
+ int32_t value = Evaluate(GetInput()->AsIntConstant()->GetValue());
+ return new(GetBlock()->GetGraph()->GetArena()) HIntConstant(value);
+ } else if (GetInput()->IsLongConstant()) {
+ // TODO: Implement static evaluation of long unary operations.
+ //
+ // Do not exit with a fatal condition here. Instead, simply
+ // return `nullptr' to notify the caller that this instruction
+ // cannot (yet) be statically evaluated.
+ return nullptr;
+ }
+ return nullptr;
+}
+
+HConstant* HBinaryOperation::TryStaticEvaluation() const {
if (GetLeft()->IsIntConstant() && GetRight()->IsIntConstant()) {
int32_t value = Evaluate(GetLeft()->AsIntConstant()->GetValue(),
GetRight()->AsIntConstant()->GetValue());
- return new(allocator) HIntConstant(value);
+ return new(GetBlock()->GetGraph()->GetArena()) HIntConstant(value);
} else if (GetLeft()->IsLongConstant() && GetRight()->IsLongConstant()) {
int64_t value = Evaluate(GetLeft()->AsLongConstant()->GetValue(),
GetRight()->AsLongConstant()->GetValue());
- return new(allocator) HLongConstant(value);
+ return new(GetBlock()->GetGraph()->GetArena()) HLongConstant(value);
}
return nullptr;
}
@@ -603,4 +647,16 @@
return true;
}
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs) {
+#define DECLARE_CASE(type, super) case HInstruction::k##type: os << #type; break;
+ switch (rhs) {
+ FOR_EACH_INSTRUCTION(DECLARE_CASE)
+ default:
+ os << "Unknown instruction kind " << static_cast<int>(rhs);
+ break;
+ }
+#undef DECLARE_CASE
+ return os;
+}
+
} // namespace art
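TryStaticEvaluation above now allocates the folded constant from the graph's own arena and, for unary operations, folds only integer inputs, returning nullptr when evaluation is not (yet) possible. A simplified sketch of that fold-if-constant pattern, using assumed stand-in types rather than ART's HInstruction hierarchy:

    #include <cstdint>
    #include <memory>

    namespace sketch {

    struct Constant { int32_t value; };

    class UnaryOperation {
     public:
      virtual ~UnaryOperation() {}
      virtual int32_t Evaluate(int32_t x) const = 0;

      // Returns a folded constant when the input is known, nullptr otherwise,
      // mirroring HUnaryOperation::TryStaticEvaluation.
      std::unique_ptr<Constant> TryStaticEvaluation(const Constant* input) const {
        if (input == nullptr) {
          return nullptr;
        }
        return std::unique_ptr<Constant>(new Constant{Evaluate(input->value)});
      }
    };

    class Neg : public UnaryOperation {
     public:
      int32_t Evaluate(int32_t x) const override { return -x; }
    };

    }  // namespace sketch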
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index ec26c4a..7d52d7d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -79,16 +79,18 @@
};
// Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject {
+class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
explicit HGraph(ArenaAllocator* arena)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
- number_of_temporaries_(0),
+ temporaries_vreg_slots_(0),
current_instruction_id_(0) {}
ArenaAllocator* GetArena() const { return arena_; }
@@ -127,12 +129,12 @@
maximum_number_of_out_vregs_ = std::max(new_value, maximum_number_of_out_vregs_);
}
- void UpdateNumberOfTemporaries(size_t count) {
- number_of_temporaries_ = std::max(count, number_of_temporaries_);
+ void UpdateTemporariesVRegSlots(size_t slots) {
+ temporaries_vreg_slots_ = std::max(slots, temporaries_vreg_slots_);
}
- size_t GetNumberOfTemporaries() const {
- return number_of_temporaries_;
+ size_t GetTemporariesVRegSlots() const {
+ return temporaries_vreg_slots_;
}
void SetNumberOfVRegs(uint16_t number_of_vregs) {
@@ -190,8 +192,8 @@
// The number of virtual registers used by parameters of this method.
uint16_t number_of_in_vregs_;
- // The number of temporaries that will be needed for the baseline compiler.
- size_t number_of_temporaries_;
+ // Number of vreg size slots that the temporaries use (used in baseline compiler).
+ size_t temporaries_vreg_slots_;
// The current id to assign to a newly added instruction. See HInstruction.id_.
int current_instruction_id_;
@@ -199,7 +201,7 @@
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
-class HLoopInformation : public ArenaObject {
+class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
public:
HLoopInformation(HBasicBlock* header, HGraph* graph)
: header_(header),
@@ -278,7 +280,7 @@
// as a double linked list. Each block knows its predecessors and
// successors.
-class HBasicBlock : public ArenaObject {
+class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
@@ -290,7 +292,8 @@
block_id_(-1),
dex_pc_(dex_pc),
lifetime_start_(kNoLifetime),
- lifetime_end_(kNoLifetime) {}
+ lifetime_end_(kNoLifetime),
+ is_catch_block_(false) {}
const GrowableArray<HBasicBlock*>& GetPredecessors() const {
return predecessors_;
@@ -399,6 +402,7 @@
void ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement);
void AddPhi(HPhi* phi);
+ void InsertPhiAfter(HPhi* instruction, HPhi* cursor);
void RemovePhi(HPhi* phi);
bool IsLoopHeader() const {
@@ -447,6 +451,9 @@
uint32_t GetDexPc() const { return dex_pc_; }
+ bool IsCatchBlock() const { return is_catch_block_; }
+ void SetIsCatchBlock() { is_catch_block_ = true; }
+
private:
HGraph* const graph_;
GrowableArray<HBasicBlock*> predecessors_;
@@ -461,52 +468,76 @@
const uint32_t dex_pc_;
size_t lifetime_start_;
size_t lifetime_end_;
+ bool is_catch_block_;
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Add, BinaryOperation) \
+ M(And, BinaryOperation) \
+ M(ArrayGet, Instruction) \
+ M(ArrayLength, Instruction) \
+ M(ArraySet, Instruction) \
+ M(BoundsCheck, Instruction) \
+ M(CheckCast, Instruction) \
+ M(ClinitCheck, Instruction) \
+ M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
+ M(Div, BinaryOperation) \
+ M(DivZeroCheck, Instruction) \
+ M(DoubleConstant, Constant) \
M(Equal, Condition) \
- M(NotEqual, Condition) \
- M(LessThan, Condition) \
- M(LessThanOrEqual, Condition) \
+ M(Exit, Instruction) \
+ M(FloatConstant, Constant) \
+ M(Goto, Instruction) \
M(GreaterThan, Condition) \
M(GreaterThanOrEqual, Condition) \
- M(Exit, Instruction) \
- M(Goto, Instruction) \
M(If, Instruction) \
- M(IntConstant, Constant) \
- M(InvokeStatic, Invoke) \
- M(InvokeVirtual, Invoke) \
- M(LoadLocal, Instruction) \
- M(Local, Instruction) \
- M(LongConstant, Constant) \
- M(NewInstance, Instruction) \
- M(Not, Instruction) \
- M(ParameterValue, Instruction) \
- M(ParallelMove, Instruction) \
- M(Phi, Instruction) \
- M(Return, Instruction) \
- M(ReturnVoid, Instruction) \
- M(StoreLocal, Instruction) \
- M(Sub, BinaryOperation) \
- M(Compare, BinaryOperation) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
- M(ArrayGet, Instruction) \
- M(ArraySet, Instruction) \
- M(ArrayLength, Instruction) \
- M(BoundsCheck, Instruction) \
- M(NullCheck, Instruction) \
- M(Temporary, Instruction) \
- M(SuspendCheck, Instruction) \
+ M(InstanceOf, Instruction) \
+ M(IntConstant, Constant) \
+ M(InvokeInterface, Invoke) \
+ M(InvokeStatic, Invoke) \
+ M(InvokeVirtual, Invoke) \
+ M(LessThan, Condition) \
+ M(LessThanOrEqual, Condition) \
+ M(LoadClass, Instruction) \
+ M(LoadException, Instruction) \
+ M(LoadLocal, Instruction) \
+ M(LoadString, Instruction) \
+ M(Local, Instruction) \
+ M(LongConstant, Constant) \
+ M(MonitorOperation, Instruction) \
M(Mul, BinaryOperation) \
+ M(Neg, UnaryOperation) \
+ M(NewArray, Instruction) \
+ M(NewInstance, Instruction) \
+ M(Not, UnaryOperation) \
+ M(NotEqual, Condition) \
+ M(NullCheck, Instruction) \
+ M(Or, BinaryOperation) \
+ M(ParallelMove, Instruction) \
+ M(ParameterValue, Instruction) \
+ M(Phi, Instruction) \
+ M(Rem, BinaryOperation) \
+ M(Return, Instruction) \
+ M(ReturnVoid, Instruction) \
+ M(StaticFieldGet, Instruction) \
+ M(StaticFieldSet, Instruction) \
+ M(StoreLocal, Instruction) \
+ M(Sub, BinaryOperation) \
+ M(SuspendCheck, Instruction) \
+ M(Temporary, Instruction) \
+ M(Throw, Instruction) \
+ M(TypeConversion, Instruction) \
+ M(Xor, BinaryOperation) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Constant, Instruction) \
+ M(UnaryOperation, Instruction) \
M(BinaryOperation, Instruction) \
M(Invoke, Instruction)
@@ -525,7 +556,7 @@
virtual void Accept(HGraphVisitor* visitor)
template <typename T>
-class HUseListNode : public ArenaObject {
+class HUseListNode : public ArenaObject<kArenaAllocMisc> {
public:
HUseListNode(T* user, size_t index, HUseListNode* tail)
: user_(user), index_(index), tail_(tail) {}
@@ -607,7 +638,7 @@
size_t flags_;
};
-class HInstruction : public ArenaObject {
+class HInstruction : public ArenaObject<kArenaAllocMisc> {
public:
explicit HInstruction(SideEffects side_effects)
: previous_(nullptr),
@@ -684,9 +715,10 @@
return result;
}
- // Does this instruction dominate `other_instruction`? Aborts if
- // this instruction and `other_instruction` are both phis.
- bool Dominates(HInstruction* other_instruction) const;
+ // Does this instruction strictly dominate `other_instruction`?
+ // Returns false if this instruction and `other_instruction` are the same.
+ // Aborts if this instruction and `other_instruction` are both phis.
+ bool StrictlyDominates(HInstruction* other_instruction) const;
int GetId() const { return id_; }
void SetId(int id) { id_ = id; }
@@ -707,6 +739,7 @@
void SetLocations(LocationSummary* locations) { locations_ = locations; }
void ReplaceWith(HInstruction* instruction);
+ void ReplaceInput(HInstruction* replacement, size_t index);
bool HasOnlyOneUse() const {
return uses_ != nullptr && uses_->GetTail() == nullptr;
@@ -724,12 +757,18 @@
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionTypeEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether two instructions are equal, that is:
// 1) They have the same type and contain the same data,
@@ -794,6 +833,7 @@
DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
template<typename T>
class HUseIterator : public ValueObject {
@@ -819,7 +859,7 @@
};
// A HEnvironment object contains the values of virtual registers at a given location.
-class HEnvironment : public ArenaObject {
+class HEnvironment : public ArenaObject<kArenaAllocMisc> {
public:
HEnvironment(ArenaAllocator* arena, size_t number_of_vregs) : vregs_(arena, number_of_vregs) {
vregs_.SetSize(number_of_vregs);
@@ -951,14 +991,14 @@
public:
intptr_t length() const { return 0; }
const T& operator[](intptr_t i) const {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
T& operator[](intptr_t i) {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
};
@@ -992,8 +1032,8 @@
virtual Primitive::Type GetType() const { return type_; }
- private:
- const Primitive::Type type_;
+ protected:
+ Primitive::Type type_;
};
// Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
@@ -1027,7 +1067,7 @@
};
// The exit instruction is the only instruction of the exit block.
-// Instructions aborting the method (HTrow and HReturn) must branch to the
+// Instructions aborting the method (HThrow and HReturn) must branch to the
// exit block.
class HExit : public HTemplateInstruction<0> {
public:
@@ -1046,7 +1086,7 @@
public:
HGoto() : HTemplateInstruction(SideEffects::None()) {}
- virtual bool IsControlFlow() const { return true; }
+ bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* GetSuccessor() const {
return GetBlock()->GetSuccessors().Get(0);
@@ -1067,7 +1107,7 @@
SetRawInputAt(0, input);
}
- virtual bool IsControlFlow() const { return true; }
+ bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* IfTrueSuccessor() const {
return GetBlock()->GetSuccessors().Get(0);
@@ -1085,6 +1125,37 @@
DISALLOW_COPY_AND_ASSIGN(HIf);
};
+class HUnaryOperation : public HExpression<1> {
+ public:
+ HUnaryOperation(Primitive::Type result_type, HInstruction* input)
+ : HExpression(result_type, SideEffects::None()) {
+ SetRawInputAt(0, input);
+ }
+
+ HInstruction* GetInput() const { return InputAt(0); }
+ Primitive::Type GetResultType() const { return GetType(); }
+
+ virtual bool CanBeMoved() const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
+
+ // Try to statically evaluate `operation` and return a HConstant
+ // containing the result of this evaluation. If `operation` cannot
+ // be evaluated as a constant, return nullptr.
+ HConstant* TryStaticEvaluation() const;
+
+ // Apply this operation to `x`.
+ virtual int32_t Evaluate(int32_t x) const = 0;
+ virtual int64_t Evaluate(int64_t x) const = 0;
+
+ DECLARE_INSTRUCTION(UnaryOperation);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HUnaryOperation);
+};
+
class HBinaryOperation : public HExpression<2> {
public:
HBinaryOperation(Primitive::Type result_type,
@@ -1101,12 +1172,15 @@
virtual bool IsCommutative() { return false; }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
- // Try to statically evaluate `operation` and return an HConstant
+ // Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
// be evaluated as a constant, return nullptr.
- HConstant* TryStaticEvaluation(ArenaAllocator* allocator) const;
+ HConstant* TryStaticEvaluation() const;
// Apply this operation to `x` and `y`.
virtual int32_t Evaluate(int32_t x, int32_t y) const = 0;
@@ -1370,6 +1444,48 @@
DISALLOW_COPY_AND_ASSIGN(HConstant);
};
+class HFloatConstant : public HConstant {
+ public:
+ explicit HFloatConstant(float value) : HConstant(Primitive::kPrimFloat), value_(value) {}
+
+ float GetValue() const { return value_; }
+
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ return bit_cast<float, int32_t>(other->AsFloatConstant()->value_) ==
+ bit_cast<float, int32_t>(value_);
+ }
+
+ virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+
+ DECLARE_INSTRUCTION(FloatConstant);
+
+ private:
+ const float value_;
+
+ DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
+};
+
+class HDoubleConstant : public HConstant {
+ public:
+ explicit HDoubleConstant(double value) : HConstant(Primitive::kPrimDouble), value_(value) {}
+
+ double GetValue() const { return value_; }
+
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ return bit_cast<double, int64_t>(other->AsDoubleConstant()->value_) ==
+ bit_cast<double, int64_t>(value_);
+ }
+
+ virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+
+ DECLARE_INSTRUCTION(DoubleConstant);
+
+ private:
+ const double value_;
+
+ DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
+};
+
// Constants of the type int. Those can be from Dex instructions, or
// synthesized (for example with the if-eqz instruction).
class HIntConstant : public HConstant {
@@ -1495,6 +1611,30 @@
DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
};
+class HInvokeInterface : public HInvoke {
+ public:
+ HInvokeInterface(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t dex_method_index,
+ uint32_t imt_index)
+ : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+ dex_method_index_(dex_method_index),
+ imt_index_(imt_index) {}
+
+ uint32_t GetImtIndex() const { return imt_index_; }
+ uint32_t GetDexMethodIndex() const { return dex_method_index_; }
+
+ DECLARE_INSTRUCTION(InvokeInterface);
+
+ private:
+ const uint32_t dex_method_index_;
+ const uint32_t imt_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
+};
+
class HNewInstance : public HExpression<0> {
public:
HNewInstance(uint32_t dex_pc, uint16_t type_index)
@@ -1517,6 +1657,44 @@
DISALLOW_COPY_AND_ASSIGN(HNewInstance);
};
+class HNeg : public HUnaryOperation {
+ public:
+ explicit HNeg(Primitive::Type result_type, HInstruction* input)
+ : HUnaryOperation(result_type, input) {}
+
+ virtual int32_t Evaluate(int32_t x) const OVERRIDE { return -x; }
+ virtual int64_t Evaluate(int64_t x) const OVERRIDE { return -x; }
+
+ DECLARE_INSTRUCTION(Neg);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HNeg);
+};
+
+class HNewArray : public HExpression<1> {
+ public:
+ HNewArray(HInstruction* length, uint32_t dex_pc, uint16_t type_index)
+ : HExpression(Primitive::kPrimNot, SideEffects::None()),
+ dex_pc_(dex_pc),
+ type_index_(type_index) {
+ SetRawInputAt(0, length);
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+ uint16_t GetTypeIndex() const { return type_index_; }
+
+ // Calls runtime so needs an environment.
+ virtual bool NeedsEnvironment() const { return true; }
+
+ DECLARE_INSTRUCTION(NewArray);
+
+ private:
+ const uint32_t dex_pc_;
+ const uint16_t type_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HNewArray);
+};
+
class HAdd : public HBinaryOperation {
public:
HAdd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
@@ -1542,8 +1720,6 @@
HSub(Primitive::Type result_type, HInstruction* left, HInstruction* right)
: HBinaryOperation(result_type, left, right) {}
- virtual bool IsCommutative() { return false; }
-
virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
return x - y;
}
@@ -1573,6 +1749,136 @@
DISALLOW_COPY_AND_ASSIGN(HMul);
};
+class HDiv : public HBinaryOperation {
+ public:
+ HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
+ : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
+
+ virtual int32_t Evaluate(int32_t x, int32_t y) const {
+ // Our graph structure ensures we never have 0 for `y` during constant folding.
+ DCHECK_NE(y, 0);
+ // Special case -1 to avoid getting a SIGFPE on x86(_64).
+ return (y == -1) ? -x : x / y;
+ }
+
+ virtual int64_t Evaluate(int64_t x, int64_t y) const {
+ DCHECK_NE(y, 0);
+ // Special case -1 to avoid getting a SIGFPE on x86(_64).
+ return (y == -1) ? -x : x / y;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(Div);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HDiv);
+};
+
+class HRem : public HBinaryOperation {
+ public:
+ HRem(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
+ : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
+
+ virtual int32_t Evaluate(int32_t x, int32_t y) const {
+ DCHECK_NE(y, 0);
+ // Special case -1 to avoid getting a SIGFPE on x86(_64).
+ return (y == -1) ? 0 : x % y;
+ }
+
+ virtual int64_t Evaluate(int64_t x, int64_t y) const {
+ DCHECK_NE(y, 0);
+ // Special case -1 to avoid getting a SIGFPE on x86(_64).
+ return (y == -1) ? 0 : x % y;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(Rem);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HRem);
+};
+
+class HDivZeroCheck : public HExpression<1> {
+ public:
+ HDivZeroCheck(HInstruction* value, uint32_t dex_pc)
+ : HExpression(value->GetType(), SideEffects::None()), dex_pc_(dex_pc) {
+ SetRawInputAt(0, value);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(DivZeroCheck);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
+};
+
+class HAnd : public HBinaryOperation {
+ public:
+ HAnd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x & y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x & y; }
+
+ DECLARE_INSTRUCTION(And);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HAnd);
+};
+
+class HOr : public HBinaryOperation {
+ public:
+ HOr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x | y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x | y; }
+
+ DECLARE_INSTRUCTION(Or);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HOr);
+};
+
+class HXor : public HBinaryOperation {
+ public:
+ HXor(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x ^ y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x ^ y; }
+
+ DECLARE_INSTRUCTION(Xor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HXor);
+};
+
// The value of a parameter in this method. Its location depends on
// the calling convention.
class HParameterValue : public HExpression<0> {
@@ -1592,14 +1898,19 @@
DISALLOW_COPY_AND_ASSIGN(HParameterValue);
};
-class HNot : public HExpression<1> {
+class HNot : public HUnaryOperation {
public:
- explicit HNot(HInstruction* input) : HExpression(Primitive::kPrimBoolean, SideEffects::None()) {
- SetRawInputAt(0, input);
- }
+ explicit HNot(Primitive::Type result_type, HInstruction* input)
+ : HUnaryOperation(result_type, input) {}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
+
+ virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
+ virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
DECLARE_INSTRUCTION(Not);
@@ -1607,6 +1918,28 @@
DISALLOW_COPY_AND_ASSIGN(HNot);
};
+class HTypeConversion : public HExpression<1> {
+ public:
+ // Instantiate a type conversion of `input` to `result_type`.
+ HTypeConversion(Primitive::Type result_type, HInstruction* input)
+ : HExpression(result_type, SideEffects::None()) {
+ SetRawInputAt(0, input);
+ DCHECK_NE(input->GetType(), result_type);
+ }
+
+ HInstruction* GetInput() const { return InputAt(0); }
+ Primitive::Type GetInputType() const { return GetInput()->GetType(); }
+ Primitive::Type GetResultType() const { return GetType(); }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+ DECLARE_INSTRUCTION(TypeConversion);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HTypeConversion);
+};
+
class HPhi : public HInstruction {
public:
HPhi(ArenaAllocator* arena, uint32_t reg_number, size_t number_of_inputs, Primitive::Type type)
@@ -1656,7 +1989,10 @@
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -1731,6 +2067,8 @@
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
+ HInstruction* GetValue() const { return InputAt(1); }
+
DECLARE_INSTRUCTION(InstanceFieldSet);
private:
@@ -1747,8 +2085,15 @@
SetRawInputAt(1, index);
}
- virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+ void SetType(Primitive::Type type) { type_ = type; }
+
+ HInstruction* GetArray() const { return InputAt(0); }
+ HInstruction* GetIndex() const { return InputAt(1); }
DECLARE_INSTRUCTION(ArrayGet);
@@ -1761,31 +2106,52 @@
HArraySet(HInstruction* array,
HInstruction* index,
HInstruction* value,
- Primitive::Type component_type,
+ Primitive::Type expected_component_type,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::ChangesSomething()),
dex_pc_(dex_pc),
- component_type_(component_type) {
+ expected_component_type_(expected_component_type),
+ needs_type_check_(value->GetType() == Primitive::kPrimNot) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
SetRawInputAt(2, value);
}
- virtual bool NeedsEnvironment() const {
+ bool NeedsEnvironment() const {
// We currently always call a runtime method to catch array store
// exceptions.
- return InputAt(2)->GetType() == Primitive::kPrimNot;
+ return needs_type_check_;
}
+ void ClearNeedsTypeCheck() {
+ needs_type_check_ = false;
+ }
+
+ bool NeedsTypeCheck() const { return needs_type_check_; }
+
uint32_t GetDexPc() const { return dex_pc_; }
- Primitive::Type GetComponentType() const { return component_type_; }
+ HInstruction* GetArray() const { return InputAt(0); }
+ HInstruction* GetIndex() const { return InputAt(1); }
+ HInstruction* GetValue() const { return InputAt(2); }
+
+ Primitive::Type GetComponentType() const {
+ // The Dex format does not type floating point index operations. Since the
+ // `expected_component_type_` is set during building and may therefore not
+ // be correct, we also check the value type. If it is a floating
+ // point type, we must use that type.
+ Primitive::Type value_type = GetValue()->GetType();
+ return ((value_type == Primitive::kPrimFloat) || (value_type == Primitive::kPrimDouble))
+ ? value_type
+ : expected_component_type_;
+ }
DECLARE_INSTRUCTION(ArraySet);
private:
const uint32_t dex_pc_;
- const Primitive::Type component_type_;
+ const Primitive::Type expected_component_type_;
+ bool needs_type_check_;
DISALLOW_COPY_AND_ASSIGN(HArraySet);
};
@@ -1800,7 +2166,10 @@
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
DECLARE_INSTRUCTION(ArrayLength);
@@ -1818,7 +2187,10 @@
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -1838,8 +2210,8 @@
* Some DEX instructions are folded into multiple HInstructions that need
* to stay live until the last HInstruction. This class
* is used as a marker for the baseline compiler to ensure its preceding
- * HInstruction stays live. `index` is the temporary number that is used
- * for knowing the stack offset where to store the instruction.
+ * HInstruction stays live. `index` represents the stack location index of the
+ * instruction (the actual offset is computed as index * vreg_size).
*/
class HTemporary : public HTemplateInstruction<0> {
public:
@@ -1847,6 +2219,12 @@
size_t GetIndex() const { return index_; }
+ Primitive::Type GetType() const OVERRIDE {
+ // The previous instruction is the one that will be stored in the temporary location.
+ DCHECK(GetPrevious() != nullptr);
+ return GetPrevious()->GetType();
+ }
+
DECLARE_INSTRUCTION(Temporary);
private:
@@ -1874,7 +2252,323 @@
DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
-class MoveOperands : public ArenaObject {
+/**
+ * Instruction to load a Class object.
+ */
+class HLoadClass : public HExpression<0> {
+ public:
+ HLoadClass(uint16_t type_index,
+ bool is_referrers_class,
+ uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::None()),
+ type_index_(type_index),
+ is_referrers_class_(is_referrers_class),
+ dex_pc_(dex_pc),
+ generate_clinit_check_(false) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ return other->AsLoadClass()->type_index_ == type_index_;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return type_index_; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+ uint16_t GetTypeIndex() const { return type_index_; }
+ bool IsReferrersClass() const { return is_referrers_class_; }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ // Will call runtime and load the class if the class is not loaded yet.
+ // TODO: finer grain decision.
+ return !is_referrers_class_;
+ }
+
+ bool MustGenerateClinitCheck() const {
+ return generate_clinit_check_;
+ }
+
+ void SetMustGenerateClinitCheck() {
+ generate_clinit_check_ = true;
+ }
+
+ bool CanCallRuntime() const {
+ return MustGenerateClinitCheck() || !is_referrers_class_;
+ }
+
+ DECLARE_INSTRUCTION(LoadClass);
+
+ private:
+ const uint16_t type_index_;
+ const bool is_referrers_class_;
+ const uint32_t dex_pc_;
+ // Whether this instruction must generate the initialization check.
+ // Used for code generation.
+ bool generate_clinit_check_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLoadClass);
+};
+
+class HLoadString : public HExpression<0> {
+ public:
+ HLoadString(uint32_t string_index, uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::None()),
+ string_index_(string_index),
+ dex_pc_(dex_pc) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ return other->AsLoadString()->string_index_ == string_index_;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return string_index_; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetStringIndex() const { return string_index_; }
+
+ // TODO: Can we deopt or debug when we resolve a string?
+ bool NeedsEnvironment() const OVERRIDE { return false; }
+
+ DECLARE_INSTRUCTION(LoadString);
+
+ private:
+ const uint32_t string_index_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLoadString);
+};
+
+// TODO: Pass this check to HInvokeStatic nodes.
+/**
+ * Performs an initialization check on its Class object input.
+ */
+class HClinitCheck : public HExpression<1> {
+ public:
+ explicit HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::All()),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ // May call runtime to initialize the class.
+ return true;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ HLoadClass* GetLoadClass() const { return InputAt(0)->AsLoadClass(); }
+
+ DECLARE_INSTRUCTION(ClinitCheck);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HClinitCheck);
+};
+
+class HStaticFieldGet : public HExpression<1> {
+ public:
+ HStaticFieldGet(HInstruction* cls,
+ Primitive::Type field_type,
+ MemberOffset field_offset)
+ : HExpression(field_type, SideEffects::DependsOnSomething()),
+ field_info_(field_offset, field_type) {
+ SetRawInputAt(0, cls);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ size_t other_offset = other->AsStaticFieldGet()->GetFieldOffset().SizeValue();
+ return other_offset == GetFieldOffset().SizeValue();
+ }
+
+ size_t ComputeHashCode() const OVERRIDE {
+ return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
+ }
+
+ MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
+ Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
+
+ DECLARE_INSTRUCTION(StaticFieldGet);
+
+ private:
+ const FieldInfo field_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(HStaticFieldGet);
+};
+
+class HStaticFieldSet : public HTemplateInstruction<2> {
+ public:
+ HStaticFieldSet(HInstruction* cls,
+ HInstruction* value,
+ Primitive::Type field_type,
+ MemberOffset field_offset)
+ : HTemplateInstruction(SideEffects::ChangesSomething()),
+ field_info_(field_offset, field_type) {
+ SetRawInputAt(0, cls);
+ SetRawInputAt(1, value);
+ }
+
+ MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
+ Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
+
+ HInstruction* GetValue() const { return InputAt(1); }
+
+ DECLARE_INSTRUCTION(StaticFieldSet);
+
+ private:
+ const FieldInfo field_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
+};
+
+// Implement the move-exception DEX instruction.
+class HLoadException : public HExpression<0> {
+ public:
+ HLoadException() : HExpression(Primitive::kPrimNot, SideEffects::None()) {}
+
+ DECLARE_INSTRUCTION(LoadException);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HLoadException);
+};
+
+class HThrow : public HTemplateInstruction<1> {
+ public:
+ HThrow(HInstruction* exception, uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {
+ SetRawInputAt(0, exception);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(Throw);
+
+ private:
+ uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HThrow);
+};
+
+class HInstanceOf : public HExpression<2> {
+ public:
+ HInstanceOf(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HExpression(Primitive::kPrimBoolean, SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ return false;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(InstanceOf);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
+};
+
+class HCheckCast : public HTemplateInstruction<2> {
+ public:
+ HCheckCast(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ // Instruction may throw a CheckCastError.
+ return true;
+ }
+
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(CheckCast);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HCheckCast);
+};
+
+class HMonitorOperation : public HTemplateInstruction<1> {
+ public:
+ enum OperationKind {
+ kEnter,
+ kExit,
+ };
+
+ HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()), kind_(kind), dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ }
+
+ // Instruction may throw a Java exception, so we need an environment.
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsEnter() const { return kind_ == kEnter; }
+
+ DECLARE_INSTRUCTION(MonitorOperation);
+
+ protected:
+ const OperationKind kind_;
+ const uint32_t dex_pc_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
+};
+
+
+class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
: source_(source), destination_(destination), instruction_(instruction) {}
@@ -1975,7 +2669,7 @@
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) {}
+ virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
@@ -1995,7 +2689,7 @@
#undef DECLARE_VISIT_INSTRUCTION
private:
- HGraph* graph_;
+ HGraph* const graph_;
DISALLOW_COPY_AND_ASSIGN(HGraphVisitor);
};
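The -1 special case in HDiv::Evaluate and HRem::Evaluate above exists because INT32_MIN / -1 cannot be represented in 32 bits: on x86 the idiv instruction raises SIGFPE for it, while Java requires the quotient to wrap back to INT32_MIN and the remainder to be 0. A small, well-defined demonstration of the arithmetic behind that guard:

    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main() {
      const int64_t min32 = std::numeric_limits<int32_t>::min();  // -2147483648
      // The mathematically exact quotient of INT32_MIN / -1 is 2^31, one past INT32_MAX,
      // which is why performing the division in 32 bits traps, and why Evaluate takes
      // the `-x` branch (wrapping back to INT32_MIN) instead of dividing.
      const int64_t quotient = min32 / -1;
      std::cout << quotient << " > " << std::numeric_limits<int32_t>::max() << "\n";
      return 0;
    }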
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
new file mode 100644
index 0000000..d1178d5
--- /dev/null
+++ b/compiler/optimizing/optimization.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "optimization.h"
+
+#include "base/dumpable.h"
+#include "graph_checker.h"
+
+namespace art {
+
+void HOptimization::Check() {
+ if (kIsDebugBuild) {
+ if (is_in_ssa_form_) {
+ SSAChecker checker(graph_->GetArena(), graph_);
+ checker.Run();
+ if (!checker.IsValid()) {
+ LOG(FATAL) << Dumpable<SSAChecker>(checker);
+ }
+ } else {
+ GraphChecker checker(graph_->GetArena(), graph_);
+ checker.Run();
+ if (!checker.IsValid()) {
+ LOG(FATAL) << Dumpable<GraphChecker>(checker);
+ }
+ }
+ }
+}
+
+} // namespace art
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
new file mode 100644
index 0000000..d281248
--- /dev/null
+++ b/compiler/optimizing/optimization.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
+#define ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
+
+#include "graph_visualizer.h"
+#include "nodes.h"
+
+namespace art {
+
+/**
+ * Abstraction to implement an optimization pass.
+ */
+class HOptimization : public ValueObject {
+ public:
+ HOptimization(HGraph* graph,
+ bool is_in_ssa_form,
+ const char* pass_name)
+ : graph_(graph),
+ is_in_ssa_form_(is_in_ssa_form),
+ pass_name_(pass_name) {}
+
+ virtual ~HOptimization() {}
+
+ // Return the name of the pass.
+ const char* GetPassName() const { return pass_name_; }
+
+ // Perform the analysis itself.
+ virtual void Run() = 0;
+
+ // Verify the graph; abort if it is not valid.
+ void Check();
+
+ protected:
+ HGraph* const graph_;
+
+ private:
+ // Does the analyzed graph use the SSA form?
+ const bool is_in_ssa_form_;
+ // Optimization pass name.
+ const char* pass_name_;
+
+ DISALLOW_COPY_AND_ASSIGN(HOptimization);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
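The new HOptimization base class above gives every pass a name, a Run() entry point, and a debug-only Check() that validates the graph after the pass has run. Below is a minimal standalone sketch of that pattern; Pass, FoldConstants and RemoveDeadCode are illustrative names (not ART classes), and the loop mirrors the run-then-check sequence used by RunOptimizations further down in optimizing_compiler.cc.

#include <cstdio>

#ifndef NDEBUG
constexpr bool kIsDebugBuild = true;
#else
constexpr bool kIsDebugBuild = false;
#endif

class Pass {
 public:
  explicit Pass(const char* pass_name) : pass_name_(pass_name) {}
  virtual ~Pass() {}

  const char* GetPassName() const { return pass_name_; }

  // Perform the pass itself.
  virtual void Run() = 0;

  // Verify the intermediate representation; only does work in debug builds.
  void Check() const {
    if (kIsDebugBuild) {
      // A real checker would walk the graph here (cf. GraphChecker/SSAChecker).
      std::printf("checked graph after %s\n", pass_name_);
    }
  }

 private:
  const char* const pass_name_;
};

class FoldConstants : public Pass {
 public:
  FoldConstants() : Pass("constant_folding") {}
  void Run() override { std::printf("running %s\n", GetPassName()); }
};

class RemoveDeadCode : public Pass {
 public:
  RemoveDeadCode() : Pass("dead_code_elimination") {}
  void Run() override { std::printf("running %s\n", GetPassName()); }
};

int main() {
  FoldConstants fold;
  RemoveDeadCode dce;
  Pass* pipeline[] = {&fold, &dce};
  for (Pass* pass : pipeline) {  // Same run-then-check loop as RunOptimizations.
    pass->Run();
    pass->Check();
  }
  return 0;
}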
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3cf5a0b..42ac77d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -22,11 +22,16 @@
#include "builder.h"
#include "code_generator.h"
#include "compiler.h"
+#include "constant_folding.h"
+#include "dead_code_elimination.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
+#include "elf_writer_quick.h"
#include "graph_visualizer.h"
#include "gvn.h"
#include "instruction_simplifier.h"
+#include "jni/quick/jni_compiler.h"
+#include "mirror/art_method-inl.h"
#include "nodes.h"
#include "prepare_for_register_allocation.h"
#include "register_allocator.h"
@@ -86,15 +91,6 @@
jobject class_loader,
const DexFile& dex_file) const OVERRIDE;
- CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const;
-
- // For the following methods we will use the fallback. This is a delegation pattern.
CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file) const OVERRIDE;
@@ -108,13 +104,16 @@
const std::string& android_root,
bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;
+ Backend* GetCodeGenerator(CompilationUnit* cu ATTRIBUTE_UNUSED,
+ void* compilation_unit ATTRIBUTE_UNUSED) const OVERRIDE {
+ return nullptr;
+ }
- void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
+ void InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const OVERRIDE {}
- void Init() const OVERRIDE;
+ void Init() const OVERRIDE {}
- void UnInit() const OVERRIDE;
+ void UnInit() const OVERRIDE {}
private:
// Whether we should run any optimization or register allocation. If false, will
@@ -126,10 +125,6 @@
std::unique_ptr<std::ostream> visualizer_output_;
- // Delegate to another compiler in case the optimizing compiler cannot compile a method.
- // Currently the fallback is the quick compiler.
- std::unique_ptr<Compiler> delegate_;
-
DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};
@@ -141,21 +136,12 @@
driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
total_compiled_methods_(0),
unoptimized_compiled_methods_(0),
- optimized_compiled_methods_(0),
- delegate_(Create(driver, Compiler::Kind::kQuick)) {
+ optimized_compiled_methods_(0) {
if (kIsVisualizerEnabled) {
visualizer_output_.reset(new std::ofstream("art.cfg"));
}
}
-void OptimizingCompiler::Init() const {
- delegate_->Init();
-}
-
-void OptimizingCompiler::UnInit() const {
- delegate_->UnInit();
-}
-
OptimizingCompiler::~OptimizingCompiler() {
if (total_compiled_methods_ == 0) {
LOG(INFO) << "Did not compile any method.";
@@ -168,42 +154,69 @@
}
}
-bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
- CompilationUnit* cu) const {
- return delegate_->CanCompileMethod(method_idx, dex_file, cu);
+bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
+ const DexFile& dex_file ATTRIBUTE_UNUSED,
+ CompilationUnit* cu ATTRIBUTE_UNUSED) const {
+ return true;
}
CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file) const {
- return delegate_->JniCompile(access_flags, method_idx, dex_file);
+ return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
}
uintptr_t OptimizingCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- return delegate_->GetEntryPointOf(method);
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
bool OptimizingCompiler::WriteElf(art::File* file, OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root, bool is_host) const {
- return delegate_->WriteElf(file, oat_writer, dex_files, android_root, is_host);
+ return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
+ *GetCompilerDriver());
}
-Backend* OptimizingCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
- return delegate_->GetCodeGenerator(cu, compilation_unit);
+static bool IsInstructionSetSupported(InstructionSet instruction_set) {
+ return instruction_set == kArm64
+ || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
+ || instruction_set == kX86
+ || instruction_set == kX86_64;
}
-void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
- delegate_->InitCompilationUnit(cu);
+static bool CanOptimize(const DexFile::CodeItem& code_item) {
+ // TODO: We currently cannot optimize methods with try/catch.
+ return code_item.tries_size_ == 0;
}
-CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
+static void RunOptimizations(HGraph* graph, const HGraphVisualizer& visualizer) {
+ HDeadCodeElimination opt1(graph);
+ HConstantFolding opt2(graph);
+ SsaRedundantPhiElimination opt3(graph);
+ SsaDeadPhiElimination opt4(graph);
+ InstructionSimplifier opt5(graph);
+ GlobalValueNumberer opt6(graph->GetArena(), graph);
+ InstructionSimplifier opt7(graph);
+
+ HOptimization* optimizations[] = { &opt1, &opt2, &opt3, &opt4, &opt5, &opt6, &opt7 };
+
+ for (size_t i = 0; i < arraysize(optimizations); ++i) {
+ HOptimization* optimization = optimizations[i];
+ optimization->Run();
+ optimization->Check();
+ visualizer.DumpGraph(optimization->GetPassName());
+ }
+}
+
+CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ UNUSED(invoke_type);
total_compiled_methods_++;
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
@@ -213,7 +226,11 @@
}
// Do not attempt to compile on architectures we do not support.
- if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
+ if (!IsInstructionSetSupported(instruction_set)) {
+ return nullptr;
+ }
+
+ if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
return nullptr;
}
@@ -234,17 +251,13 @@
HGraph* graph = builder.BuildGraph(*code_item);
if (graph == nullptr) {
- if (shouldCompile) {
- LOG(FATAL) << "Could not build graph in optimizing compiler";
- }
+ CHECK(!shouldCompile) << "Could not build graph in optimizing compiler";
return nullptr;
}
CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, instruction_set);
if (codegen == nullptr) {
- if (shouldCompile) {
- LOG(FATAL) << "Could not find code generator for optimizing compiler";
- }
+ CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
return nullptr;
}
@@ -254,20 +267,18 @@
CodeVectorAllocator allocator;
- if (run_optimizations_ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ if (run_optimizations_
+ && CanOptimize(*code_item)
+ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
optimized_compiled_methods_++;
graph->BuildDominatorTree();
graph->TransformToSSA();
visualizer.DumpGraph("ssa");
graph->FindNaturalLoops();
- SsaRedundantPhiElimination(graph).Run();
- SsaDeadPhiElimination(graph).Run();
- InstructionSimplifier(graph).Run();
- GlobalValueNumberer(graph->GetArena(), graph).Run();
- visualizer.DumpGraph(kGVNPassName);
- PrepareForRegisterAllocation(graph).Run();
+ RunOptimizations(graph, visualizer);
+ PrepareForRegisterAllocation(graph).Run();
SsaLivenessAnalysis liveness(*graph, codegen);
liveness.Analyze();
visualizer.DumpGraph(kLivenessPassName);
@@ -297,22 +308,24 @@
stack_map);
} else if (shouldOptimize && RegisterAllocator::Supports(instruction_set)) {
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
- return nullptr;
+ UNREACHABLE();
} else {
unoptimized_compiled_methods_++;
codegen->CompileBaseline(&allocator);
- // Run these phases to get some test coverage.
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- visualizer.DumpGraph("ssa");
- graph->FindNaturalLoops();
- SsaRedundantPhiElimination(graph).Run();
- SsaDeadPhiElimination(graph).Run();
- GlobalValueNumberer(graph->GetArena(), graph).Run();
- SsaLivenessAnalysis liveness(*graph, codegen);
- liveness.Analyze();
- visualizer.DumpGraph(kLivenessPassName);
+ if (CanOptimize(*code_item)) {
+ // Run these phases to get some test coverage.
+ graph->BuildDominatorTree();
+ graph->TransformToSSA();
+ visualizer.DumpGraph("ssa");
+ graph->FindNaturalLoops();
+ SsaRedundantPhiElimination(graph).Run();
+ SsaDeadPhiElimination(graph).Run();
+ GlobalValueNumberer(graph->GetArena(), graph).Run();
+ SsaLivenessAnalysis liveness(*graph, codegen);
+ liveness.Analyze();
+ visualizer.DumpGraph(kLivenessPassName);
+ }
std::vector<uint8_t> mapping_table;
SrcMap src_mapping_table;
@@ -338,23 +351,6 @@
}
}
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
- CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
- method_idx, class_loader, dex_file);
- if (method != nullptr) {
- return method;
- }
-
- return delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
- class_loader, dex_file);
-}
-
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
return new OptimizingCompiler(driver);
}
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5b693dd..c4106b7 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -30,23 +30,23 @@
#define NUM_INSTRUCTIONS(...) \
(sizeof((uint16_t[]) {__VA_ARGS__}) /sizeof(uint16_t))
-#define ZERO_REGISTER_CODE_ITEM(...) \
- { 0, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+#define N_REGISTERS_CODE_ITEM(NUM_REGS, ...) \
+ { NUM_REGS, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
-#define ONE_REGISTER_CODE_ITEM(...) \
- { 1, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+#define ZERO_REGISTER_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(0, __VA_ARGS__)
+#define ONE_REGISTER_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(1, __VA_ARGS__)
+#define TWO_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(2, __VA_ARGS__)
+#define THREE_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(3, __VA_ARGS__)
+#define FOUR_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(4, __VA_ARGS__)
+#define FIVE_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(5, __VA_ARGS__)
+#define SIX_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(6, __VA_ARGS__)
-#define TWO_REGISTERS_CODE_ITEM(...) \
- { 2, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
-
-#define THREE_REGISTERS_CODE_ITEM(...) \
- { 3, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
LiveInterval* BuildInterval(const size_t ranges[][2],
size_t number_of_ranges,
ArenaAllocator* allocator,
int reg = -1) {
- LiveInterval* interval = new (allocator) LiveInterval(allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(allocator, Primitive::kPrimInt);
for (size_t i = number_of_ranges; i > 0; --i) {
interval->AddRange(ranges[i - 1][0], ranges[i - 1][1]);
}
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index c71d93e..1e93ece 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -130,13 +130,13 @@
// this move's source or destination needs to have their source
// changed to reflect the state of affairs after the swap.
Location source = move->GetSource();
- Location destination = move->GetDestination();
+ Location swap_destination = move->GetDestination();
move->Eliminate();
for (size_t i = 0; i < moves_.Size(); ++i) {
const MoveOperands& other_move = *moves_.Get(i);
if (other_move.Blocks(source)) {
- moves_.Get(i)->SetSource(destination);
- } else if (other_move.Blocks(destination)) {
+ moves_.Get(i)->SetSource(swap_destination);
+ } else if (other_move.Blocks(swap_destination)) {
moves_.Get(i)->SetSource(source);
}
}
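The rename above (destination becomes swap_destination) sits in the cycle-breaking path of the parallel move resolver: once a swap has been emitted, every pending move that was going to read from one of the swapped locations must be redirected to the other one. A standalone sketch of that fix-up step follows, with locations reduced to plain register numbers; the Move struct is illustrative, not ART's MoveOperands.

#include <cstdio>
#include <vector>

struct Move {
  int src;
  int dst;
  bool eliminated = false;
};

// Emit a swap for moves[index] and retarget every pending move that reads
// from one of the swapped locations, since their values have traded places.
void ResolveWithSwap(std::vector<Move>& moves, size_t index) {
  Move& move = moves[index];
  int source = move.src;
  int swap_destination = move.dst;
  std::printf("swap r%d <-> r%d\n", source, swap_destination);
  move.eliminated = true;
  for (Move& other : moves) {
    if (other.eliminated) continue;
    if (other.src == source) {
      other.src = swap_destination;
    } else if (other.src == swap_destination) {
      other.src = source;
    }
  }
}

int main() {
  // r1 := r0 and r0 := r1 form a cycle; r2 := r1 must be retargeted after the swap.
  std::vector<Move> moves = {{0, 1}, {1, 0}, {1, 2}};
  ResolveWithSwap(moves, 0);
  for (const Move& m : moves) {
    // The reverse move of the cycle becomes a self-move (r0 -> r0); a full
    // resolver would drop it as redundant.
    if (!m.eliminated) std::printf("move r%d -> r%d\n", m.src, m.dst);
  }
  return 0;
}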
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 2bdcc61..62629bc 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -50,8 +50,8 @@
<< ")";
}
- virtual void SpillScratch(int reg) {}
- virtual void RestoreScratch(int reg) {}
+ virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {}
+ virtual void RestoreScratch(int reg ATTRIBUTE_UNUSED) {}
std::string GetMessage() const {
return message_.str();
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index a81dc1b..7186dbe 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -23,8 +23,9 @@
for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
// No need to visit the phis.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- it.Current()->Accept(this);
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ inst_it.Current()->Accept(this);
}
}
}
@@ -33,10 +34,25 @@
check->ReplaceWith(check->InputAt(0));
}
+void PrepareForRegisterAllocation::VisitDivZeroCheck(HDivZeroCheck* check) {
+ check->ReplaceWith(check->InputAt(0));
+}
+
void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
check->ReplaceWith(check->InputAt(0));
}
+void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
+ HLoadClass* cls = check->GetLoadClass();
+ check->ReplaceWith(cls);
+ if (check->GetPrevious() == cls) {
+ // Pass the initialization duty to the `HLoadClass` instruction,
+ // and remove the instruction from the graph.
+ cls->SetMustGenerateClinitCheck();
+ check->GetBlock()->RemoveInstruction(check);
+ }
+}
+
void PrepareForRegisterAllocation::VisitCondition(HCondition* condition) {
bool needs_materialization = false;
if (!condition->HasOnlyOneUse()) {
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index e86a39b..0fdb65f 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -34,7 +34,9 @@
private:
virtual void VisitNullCheck(HNullCheck* check) OVERRIDE;
+ virtual void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
virtual void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
+ virtual void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
virtual void VisitCondition(HCondition* condition) OVERRIDE;
DISALLOW_COPY_AND_ASSIGN(PrepareForRegisterAllocation);
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 5055a76..2948496 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -16,6 +16,8 @@
#include "register_allocator.h"
+#include <sstream>
+
#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "ssa_liveness_analysis.h"
@@ -37,18 +39,21 @@
handled_(allocator, 0),
active_(allocator, 0),
inactive_(allocator, 0),
- physical_register_intervals_(allocator, codegen->GetNumberOfCoreRegisters()),
+ physical_core_register_intervals_(allocator, codegen->GetNumberOfCoreRegisters()),
+ physical_fp_register_intervals_(allocator, codegen->GetNumberOfFloatingPointRegisters()),
temp_intervals_(allocator, 4),
spill_slots_(allocator, kDefaultNumberOfSpillSlots),
safepoints_(allocator, 0),
processing_core_registers_(false),
number_of_registers_(-1),
registers_array_(nullptr),
- blocked_registers_(codegen->GetBlockedCoreRegisters()),
+ blocked_core_registers_(codegen->GetBlockedCoreRegisters()),
+ blocked_fp_registers_(codegen->GetBlockedFloatingPointRegisters()),
reserved_out_slots_(0),
maximum_number_of_live_registers_(0) {
codegen->SetupBlockedRegisters();
- physical_register_intervals_.SetSize(codegen->GetNumberOfCoreRegisters());
+ physical_core_register_intervals_.SetSize(codegen->GetNumberOfCoreRegisters());
+ physical_fp_register_intervals_.SetSize(codegen->GetNumberOfFloatingPointRegisters());
// Always reserve for the current method and the graph's max out registers.
// TODO: compute it instead.
reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
@@ -65,8 +70,10 @@
it.Advance()) {
HInstruction* current = it.Current();
if (current->GetType() == Primitive::kPrimLong && instruction_set != kX86_64) return false;
- if (current->GetType() == Primitive::kPrimFloat) return false;
- if (current->GetType() == Primitive::kPrimDouble) return false;
+ if ((current->GetType() == Primitive::kPrimFloat || current->GetType() == Primitive::kPrimDouble)
+ && instruction_set != kX86_64) {
+ return false;
+ }
}
}
return true;
@@ -93,14 +100,22 @@
void RegisterAllocator::BlockRegister(Location location,
size_t start,
- size_t end,
- Primitive::Type type) {
+ size_t end) {
int reg = location.reg();
- LiveInterval* interval = physical_register_intervals_.Get(reg);
+ DCHECK(location.IsRegister() || location.IsFpuRegister());
+ LiveInterval* interval = location.IsRegister()
+ ? physical_core_register_intervals_.Get(reg)
+ : physical_fp_register_intervals_.Get(reg);
+ Primitive::Type type = location.IsRegister()
+ ? Primitive::kPrimInt
+ : Primitive::kPrimDouble;
if (interval == nullptr) {
interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
- physical_register_intervals_.Put(reg, interval);
- inactive_.Add(interval);
+ if (location.IsRegister()) {
+ physical_core_register_intervals_.Put(reg, interval);
+ } else {
+ physical_fp_register_intervals_.Put(reg, interval);
+ }
}
DCHECK(interval->GetRegister() == reg);
interval->AddRange(start, end);
@@ -111,11 +126,12 @@
// is the one with the lowest start position.
for (HLinearPostOrderIterator it(liveness_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- ProcessInstruction(it.Current());
+ for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
+ back_it.Advance()) {
+ ProcessInstruction(back_it.Current());
}
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- ProcessInstruction(it.Current());
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ ProcessInstruction(inst_it.Current());
}
}
@@ -123,8 +139,21 @@
registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
processing_core_registers_ = true;
unhandled_ = &unhandled_core_intervals_;
+ for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
+ LiveInterval* fixed = physical_core_register_intervals_.Get(i);
+ if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
+ inactive_.Add(fixed);
+ }
+ }
LinearScan();
+ size_t saved_maximum_number_of_live_registers = maximum_number_of_live_registers_;
+ maximum_number_of_live_registers_ = 0;
+
inactive_.Reset();
active_.Reset();
handled_.Reset();
@@ -133,9 +162,18 @@
registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
processing_core_registers_ = false;
unhandled_ = &unhandled_fp_intervals_;
- // TODO: Enable FP register allocation.
- DCHECK(unhandled_->IsEmpty());
+ for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
+ LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
+ if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
+ inactive_.Add(fixed);
+ }
+ }
LinearScan();
+ maximum_number_of_live_registers_ += saved_maximum_number_of_live_registers;
}
void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
@@ -147,9 +185,11 @@
// Create synthesized intervals for temporaries.
for (size_t i = 0; i < locations->GetTempCount(); ++i) {
Location temp = locations->GetTemp(i);
- if (temp.IsRegister()) {
- BlockRegister(temp, position, position + 1, Primitive::kPrimInt);
+ if (temp.IsRegister() || temp.IsFpuRegister()) {
+ BlockRegister(temp, position, position + 1);
} else {
+ DCHECK(temp.IsUnallocated());
+ DCHECK(temp.GetPolicy() == Location::kRequiresRegister);
LiveInterval* interval = LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
temp_intervals_.Add(interval);
interval->AddRange(position, position + 1);
@@ -160,10 +200,6 @@
bool core_register = (instruction->GetType() != Primitive::kPrimDouble)
&& (instruction->GetType() != Primitive::kPrimFloat);
- GrowableArray<LiveInterval*>& unhandled = core_register
- ? unhandled_core_intervals_
- : unhandled_fp_intervals_;
-
if (locations->CanCall()) {
if (!instruction->IsSuspendCheck()) {
codegen_->MarkNotLeaf();
@@ -179,8 +215,16 @@
// By adding the following interval in the algorithm, we can compute this
// maximum before updating locations.
LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
- interval->AddRange(position, position + 1);
- unhandled.Add(interval);
+ // The start of the interval must be after the position of the safepoint, so that
+ // we can just check the number of active registers at that position. Note that this
+ // will include the current interval in the computation of
+ // `maximum_number_of_live_registers`, so we need a better strategy if this becomes
+ // a problem.
+ // TODO: We could put the logic in AddSorted, to ensure the safepoint range is
+ // after all other intervals starting at that same position.
+ interval->AddRange(position + 1, position + 2);
+ AddSorted(&unhandled_core_intervals_, interval);
+ AddSorted(&unhandled_fp_intervals_, interval);
}
}
@@ -189,22 +233,31 @@
for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
BlockRegister(Location::RegisterLocation(i),
position,
- position + 1,
- Primitive::kPrimInt);
+ position + 1);
+ }
+ for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
+ BlockRegister(Location::FpuRegisterLocation(i),
+ position,
+ position + 1);
}
}
for (size_t i = 0; i < instruction->InputCount(); ++i) {
Location input = locations->InAt(i);
- if (input.IsRegister()) {
- BlockRegister(input, position, position + 1, instruction->InputAt(i)->GetType());
+ if (input.IsRegister() || input.IsFpuRegister()) {
+ BlockRegister(input, position, position + 1);
}
}
LiveInterval* current = instruction->GetLiveInterval();
if (current == nullptr) return;
+ GrowableArray<LiveInterval*>& unhandled = core_register
+ ? unhandled_core_intervals_
+ : unhandled_fp_intervals_;
+
DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek()));
+
// Some instructions define their output in fixed register/stack slot. We need
// to ensure we know these locations before doing register allocation. For a
// given register, we create an interval that covers these locations. The register
@@ -213,30 +266,38 @@
//
// The backwards walking ensures the ranges are ordered on increasing start positions.
Location output = locations->Out();
- if (output.IsRegister()) {
+ if (output.IsUnallocated() && output.GetPolicy() == Location::kSameAsFirstInput) {
+ Location first = locations->InAt(0);
+ if (first.IsRegister() || first.IsFpuRegister()) {
+ current->SetFrom(position + 1);
+ current->SetRegister(first.reg());
+ }
+ } else if (output.IsRegister() || output.IsFpuRegister()) {
// Shift the interval's start by one to account for the blocked register.
current->SetFrom(position + 1);
current->SetRegister(output.reg());
- BlockRegister(output, position, position + 1, instruction->GetType());
+ BlockRegister(output, position, position + 1);
} else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
current->SetSpillSlot(output.GetStackIndex());
}
// If needed, add interval to the list of unhandled intervals.
if (current->HasSpillSlot() || instruction->IsConstant()) {
- // Split before first register use.
+ // Split just before first register use.
size_t first_register_use = current->FirstRegisterUse();
if (first_register_use != kNoLifetime) {
- LiveInterval* split = Split(current, first_register_use);
- // Don't add direclty to `unhandled`, it needs to be sorted and the start
+ LiveInterval* split = Split(current, first_register_use - 1);
+ // Don't add directly to `unhandled`, it needs to be sorted and the start
// of this new interval might be after intervals already in the list.
AddSorted(&unhandled, split);
} else {
// Nothing to do, we won't allocate a register for this value.
}
} else {
- DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek()));
- unhandled.Add(current);
+ // Don't add directly to `unhandled`; temp or safepoint intervals
+ // for this instruction may have been added, and those can be
+ // processed first.
+ AddSorted(&unhandled, current);
}
}
@@ -278,10 +339,19 @@
}
}
- for (size_t i = 0, e = physical_register_intervals_.Size(); i < e; ++i) {
- LiveInterval* fixed = physical_register_intervals_.Get(i);
- if (fixed != nullptr && ShouldProcess(processing_core_registers_, fixed)) {
- intervals.Add(fixed);
+ if (processing_core_registers_) {
+ for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
+ LiveInterval* fixed = physical_core_register_intervals_.Get(i);
+ if (fixed != nullptr) {
+ intervals.Add(fixed);
+ }
+ }
+ } else {
+ for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
+ LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
+ if (fixed != nullptr) {
+ intervals.Add(fixed);
+ }
}
}
@@ -374,10 +444,10 @@
interval->Dump(stream);
stream << ": ";
if (interval->HasRegister()) {
- if (processing_core_registers_) {
- codegen_->DumpCoreRegister(stream, interval->GetRegister());
- } else {
+ if (interval->IsFloatingPoint()) {
codegen_->DumpFloatingPointRegister(stream, interval->GetRegister());
+ } else {
+ codegen_->DumpCoreRegister(stream, interval->GetRegister());
}
} else {
stream << "spilled";
@@ -385,14 +455,51 @@
stream << std::endl;
}
+void RegisterAllocator::DumpAllIntervals(std::ostream& stream) const {
+ stream << "inactive: " << std::endl;
+ for (size_t i = 0; i < inactive_.Size(); i ++) {
+ DumpInterval(stream, inactive_.Get(i));
+ }
+ stream << "active: " << std::endl;
+ for (size_t i = 0; i < active_.Size(); i ++) {
+ DumpInterval(stream, active_.Get(i));
+ }
+ stream << "unhandled: " << std::endl;
+ auto unhandled = (unhandled_ != nullptr) ?
+ unhandled_ : &unhandled_core_intervals_;
+ for (size_t i = 0; i < unhandled->Size(); i ++) {
+ DumpInterval(stream, unhandled->Get(i));
+ }
+ stream << "handled: " << std::endl;
+ for (size_t i = 0; i < handled_.Size(); i ++) {
+ DumpInterval(stream, handled_.Get(i));
+ }
+}
+
// By the book implementation of a linear scan register allocator.
void RegisterAllocator::LinearScan() {
while (!unhandled_->IsEmpty()) {
// (1) Remove interval with the lowest start position from unhandled.
LiveInterval* current = unhandled_->Pop();
DCHECK(!current->IsFixed() && !current->HasSpillSlot());
+ DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
+
+ if (current->IsSlowPathSafepoint()) {
+ // Synthesized interval to record the maximum number of live registers
+ // at safepoints. No need to allocate a register for it.
+ // We know that current actives are all live at the safepoint (modulo
+ // the one created by the safepoint).
+ maximum_number_of_live_registers_ =
+ std::max(maximum_number_of_live_registers_, active_.Size());
+ continue;
+ }
+
size_t position = current->GetStart();
+ // Remember the inactive_ size here since the ones moved to inactive_ from
+ // active_ below shouldn't need to be re-checked.
+ size_t inactive_intervals_to_handle = inactive_.Size();
+
// (2) Remove currently active intervals that are dead at this position.
// Move active intervals that have a lifetime hole at this position
// to inactive.
@@ -411,27 +518,22 @@
// (3) Remove currently inactive intervals that are dead at this position.
// Move inactive intervals that cover this position to active.
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0; i < inactive_intervals_to_handle; ++i) {
LiveInterval* interval = inactive_.Get(i);
+ DCHECK(interval->GetStart() < position || interval->IsFixed());
if (interval->IsDeadAt(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
handled_.Add(interval);
} else if (interval->Covers(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
active_.Add(interval);
}
}
- if (current->IsSlowPathSafepoint()) {
- // Synthesized interval to record the maximum number of live registers
- // at safepoints. No need to allocate a register for it.
- maximum_number_of_live_registers_ =
- std::max(maximum_number_of_live_registers_, active_.Size());
- continue;
- }
-
// (4) Try to find an available register.
bool success = TryAllocateFreeReg(current);
@@ -458,20 +560,6 @@
free_until[i] = kMaxLifetimePosition;
}
- // For each inactive interval, set its register to be free until
- // the next intersection with `current`.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
- for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
- LiveInterval* inactive = inactive_.Get(i);
- DCHECK(inactive->HasRegister());
- size_t next_intersection = inactive->FirstIntersectionWith(current);
- if (next_intersection != kNoLifetime) {
- free_until[inactive->GetRegister()] =
- std::min(free_until[inactive->GetRegister()], next_intersection);
- }
- }
-
// For each active interval, set its register to not free.
for (size_t i = 0, e = active_.Size(); i < e; ++i) {
LiveInterval* interval = active_.Get(i);
@@ -479,6 +567,33 @@
free_until[interval->GetRegister()] = 0;
}
+ // For each inactive interval, set its register to be free until
+ // the next intersection with `current`.
+ for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+ LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
+
+ DCHECK(inactive->HasRegister());
+ if (free_until[inactive->GetRegister()] == 0) {
+ // Already used by some active interval. No need to intersect.
+ continue;
+ }
+ size_t next_intersection = inactive->FirstIntersectionWith(current);
+ if (next_intersection != kNoLifetime) {
+ free_until[inactive->GetRegister()] =
+ std::min(free_until[inactive->GetRegister()], next_intersection);
+ }
+ }
+
int reg = -1;
if (current->HasRegister()) {
// Some instructions have a fixed register output.
@@ -519,10 +634,9 @@
}
bool RegisterAllocator::IsBlocked(int reg) const {
- // TODO: This only works for core registers and needs to be adjusted for
- // floating point registers.
- DCHECK(processing_core_registers_);
- return blocked_registers_[reg];
+ return processing_core_registers_
+ ? blocked_core_registers_[reg]
+ : blocked_fp_registers_[reg];
}
// Find the register that is used the last, and spill the interval
@@ -558,10 +672,18 @@
// For each inactive interval, find the next use of its register after the
// start of current.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
DCHECK(inactive->HasRegister());
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
@@ -591,7 +713,9 @@
// If the first use of that instruction is after the last use of the found
// register, we split this interval just before its first register use.
AllocateSpillSlotFor(current);
- LiveInterval* split = Split(current, first_register_use);
+ LiveInterval* split = Split(current, first_register_use - 1);
+ DCHECK_NE(current, split) << "There are not enough registers available for "
+ << split->GetParent()->GetDefinedBy()->DebugName();
AddSorted(unhandled_, split);
return false;
} else {
@@ -611,20 +735,29 @@
}
}
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
if (inactive->GetRegister() == reg) {
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
if (inactive->IsFixed()) {
LiveInterval* split = Split(current, next_intersection);
AddSorted(unhandled_, split);
} else {
- LiveInterval* split = Split(inactive, current->GetStart());
+ LiveInterval* split = Split(inactive, next_intersection);
inactive_.DeleteAt(i);
+ --i;
+ --e;
handled_.Add(inactive);
AddSorted(unhandled_, split);
- --i;
}
}
}
@@ -635,6 +768,7 @@
}
void RegisterAllocator::AddSorted(GrowableArray<LiveInterval*>* array, LiveInterval* interval) {
+ DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
size_t insert_at = 0;
for (size_t i = array->Size(); i > 0; --i) {
LiveInterval* current = array->Get(i - 1);
@@ -723,17 +857,11 @@
parent->SetSpillSlot((slot + reserved_out_slots_) * kVRegSize);
}
-// We create a special marker for inputs moves to differentiate them from
-// moves created during resolution. They must be different instructions
-// because the input moves work on the assumption that the interval moves
-// have been executed.
-static constexpr size_t kInputMoveLifetimePosition = 0;
-static bool IsInputMove(HInstruction* instruction) {
- return instruction->GetLifetimePosition() == kInputMoveLifetimePosition;
-}
-
static bool IsValidDestination(Location destination) {
- return destination.IsRegister() || destination.IsStackSlot() || destination.IsDoubleStackSlot();
+ return destination.IsRegister()
+ || destination.IsFpuRegister()
+ || destination.IsStackSlot()
+ || destination.IsDoubleStackSlot();
}
void RegisterAllocator::AddInputMoveFor(HInstruction* user,
@@ -748,14 +876,14 @@
HParallelMove* move = nullptr;
if (previous == nullptr
|| !previous->IsParallelMove()
- || !IsInputMove(previous)) {
+ || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(kInputMoveLifetimePosition);
+ move->SetLifetimePosition(user->GetLifetimePosition());
user->GetBlock()->InsertInstructionBefore(move, user);
} else {
move = previous->AsParallelMove();
}
- DCHECK(IsInputMove(move));
+ DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
move->AddMove(new (allocator_) MoveOperands(source, destination, nullptr));
}
@@ -768,7 +896,7 @@
HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
if (at == nullptr) {
- // Block boundary, don't no anything the connection of split siblings will handle it.
+ // Block boundary, don't do anything; the connection of split siblings will handle it.
return;
}
HParallelMove* move;
@@ -778,7 +906,7 @@
move = at->GetNext()->AsParallelMove();
// This is a parallel move for connecting siblings in a same block. We need to
// differentiate it with moves for connecting blocks, and input moves.
- if (move == nullptr || IsInputMove(move) || move->GetLifetimePosition() > position) {
+ if (move == nullptr || move->GetLifetimePosition() > position) {
move = new (allocator_) HParallelMove(allocator_);
move->SetLifetimePosition(position);
at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
@@ -786,12 +914,6 @@
} else {
// Move must happen before the instruction.
HInstruction* previous = at->GetPrevious();
- if (previous != nullptr && previous->IsParallelMove() && IsInputMove(previous)) {
- // This is a parallel move for connecting siblings in a same block. We need to
- // differentiate it with input moves.
- at = previous;
- previous = previous->GetPrevious();
- }
if (previous == nullptr
|| !previous->IsParallelMove()
|| previous->GetLifetimePosition() != position) {
@@ -889,7 +1011,9 @@
if (current->HasSpillSlot() && current->HasRegister()) {
// We spill eagerly, so move must be at definition.
InsertMoveAfter(interval->GetDefinedBy(),
- Location::RegisterLocation(interval->GetRegister()),
+ interval->IsFloatingPoint()
+ ? Location::FpuRegisterLocation(interval->GetRegister())
+ : Location::RegisterLocation(interval->GetRegister()),
interval->NeedsTwoSpillSlots()
? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
: Location::StackSlot(interval->GetParent()->GetSpillSlot()));
@@ -933,7 +1057,14 @@
HInstruction* safepoint = safepoints_.Get(i);
size_t position = safepoint->GetLifetimePosition();
LocationSummary* locations = safepoint->GetLocations();
- if (!current->Covers(position)) continue;
+ if (!current->Covers(position)) {
+ continue;
+ }
+ if (interval->GetStart() == position) {
+ // The safepoint is for this instruction, so the location of the instruction
+ // does not need to be saved.
+ continue;
+ }
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
@@ -942,11 +1073,16 @@
switch (source.GetKind()) {
case Location::kRegister: {
locations->AddLiveRegister(source);
+ DCHECK_LE(locations->GetNumberOfLiveRegisters(), maximum_number_of_live_registers_);
if (current->GetType() == Primitive::kPrimNot) {
locations->SetRegisterBit(source.reg());
}
break;
}
+ case Location::kFpuRegister: {
+ locations->AddLiveRegister(source);
+ break;
+ }
case Location::kStackSlot: // Fall-through
case Location::kDoubleStackSlot: // Fall-through
case Location::kConstant: {
@@ -972,7 +1108,7 @@
}
size_t from_position = from->GetLifetimeEnd() - 1;
- // When an instructions dies at entry of another, and the latter is the beginning
+ // When an instruction dies at entry of another, and the latter is the beginning
// of a block, the register allocator ensures the former has a register
// at block->GetLifetimeStart() + 1. Since this is at a block boundary, it must
be handled in this method.
@@ -1041,11 +1177,11 @@
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
current->SetSpillSlot(location.GetStackIndex());
- locations->SetOut(location);
+ locations->UpdateOut(location);
} else if (location.IsDoubleStackSlot()) {
location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
current->SetSpillSlot(location.GetStackIndex());
- locations->SetOut(location);
+ locations->UpdateOut(location);
} else if (current->HasSpillSlot()) {
current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
}
@@ -1055,7 +1191,11 @@
if (location.IsUnallocated()) {
if (location.GetPolicy() == Location::kSameAsFirstInput) {
- locations->SetInAt(0, source);
+ if (locations->InAt(0).IsUnallocated()) {
+ locations->SetInAt(0, source);
+ } else {
+ DCHECK(locations->InAt(0).Equals(source));
+ }
}
locations->SetOut(source);
} else {
@@ -1085,8 +1225,8 @@
// Resolve phi inputs. Order does not matter.
for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
- for (HInstructionIterator it(current->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* phi = it.Current();
+ for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
for (size_t i = 0, e = current->GetPredecessors().Size(); i < e; ++i) {
HBasicBlock* predecessor = current->GetPredecessors().Get(i);
DCHECK_EQ(predecessor->GetSuccessors().Size(), 1u);
@@ -1110,6 +1250,7 @@
current = at;
}
LocationSummary* locations = at->GetLocations();
+ DCHECK(temp->GetType() == Primitive::kPrimInt);
locations->SetTempAt(
temp_index++, Location::RegisterLocation(temp->GetRegister()));
}
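Most of the register_allocator.cc changes above extend linear scan to floating-point registers and rework how TryAllocateFreeReg weighs inactive intervals: an active interval makes its register unavailable immediately, while an inactive interval only blocks its register from the point where it next intersects the interval being allocated. The standalone sketch below shows that free_until computation with intervals reduced to single [start, end) ranges; the names and the final pick policy are simplified relative to ART, which additionally splits the current interval when the chosen register is free for only part of it.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t kMaxLifetimePosition = static_cast<size_t>(-1);

struct Interval {
  size_t start;
  size_t end;  // Exclusive.
  int reg;
};

// First position >= both starts covered by `other`, or kMaxLifetimePosition if none.
size_t FirstIntersection(const Interval& current, const Interval& other) {
  size_t from = std::max(current.start, other.start);
  return (from < std::min(current.end, other.end)) ? from : kMaxLifetimePosition;
}

int PickFreeRegister(const Interval& current,
                     const std::vector<Interval>& active,
                     const std::vector<Interval>& inactive,
                     size_t number_of_registers) {
  std::vector<size_t> free_until(number_of_registers, kMaxLifetimePosition);
  for (const Interval& a : active) {
    free_until[a.reg] = 0;  // Register is in use right now.
  }
  for (const Interval& i : inactive) {
    if (free_until[i.reg] == 0) continue;  // Already taken by an active interval.
    free_until[i.reg] = std::min(free_until[i.reg], FirstIntersection(current, i));
  }
  // Pick the register that stays free the longest; it is only usable if it is
  // free past the start of `current` (ART splits `current` if the register does
  // not stay free for the whole interval).
  int best = 0;
  for (size_t r = 1; r < number_of_registers; ++r) {
    if (free_until[r] > free_until[best]) best = static_cast<int>(r);
  }
  return free_until[best] > current.start ? best : -1;
}

int main() {
  Interval current{10, 30, -1};
  std::vector<Interval> active = {{0, 40, 0}};     // r0 is busy for the whole range.
  std::vector<Interval> inactive = {{25, 50, 1}};  // r1 is free until position 25.
  std::printf("chosen register: %d\n",
              PickFreeRegister(current, active, inactive, 2));  // Prints 1.
  return 0;
}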
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 0c3a9b3..976ee39 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -94,7 +94,7 @@
bool IsBlocked(int reg) const;
// Update the interval for the register in `location` to cover [start, end).
- void BlockRegister(Location location, size_t start, size_t end, Primitive::Type type);
+ void BlockRegister(Location location, size_t start, size_t end);
// Allocate a spill slot for the given interval.
void AllocateSpillSlotFor(LiveInterval* interval);
@@ -126,6 +126,7 @@
void ProcessInstruction(HInstruction* instruction);
bool ValidateInternal(bool log_fatal_on_failure) const;
void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
+ void DumpAllIntervals(std::ostream& stream) const;
ArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
@@ -156,7 +157,8 @@
// Fixed intervals for physical registers. Such intervals cover the positions
// where an instruction requires a specific register.
- GrowableArray<LiveInterval*> physical_register_intervals_;
+ GrowableArray<LiveInterval*> physical_core_register_intervals_;
+ GrowableArray<LiveInterval*> physical_fp_register_intervals_;
// Intervals for temporaries. Such intervals cover the positions
// where an instruction requires a temporary.
@@ -179,7 +181,8 @@
size_t* registers_array_;
// Blocked registers, as decided by the code generator.
- bool* const blocked_registers_;
+ bool* const blocked_core_registers_;
+ bool* const blocked_fp_registers_;
// Slots reserved for out arguments.
size_t reserved_out_slots_;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 7517a6b..ba4be34 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -348,14 +348,14 @@
// Split at the next instruction.
interval = interval->SplitAt(first_add->GetLifetimePosition() + 2);
// The user of the split is the last add.
- ASSERT_EQ(interval->FirstRegisterUse(), last_add->GetLifetimePosition() - 1);
+ ASSERT_EQ(interval->FirstRegisterUse(), last_add->GetLifetimePosition());
// Split before the last add.
LiveInterval* new_interval = interval->SplitAt(last_add->GetLifetimePosition() - 1);
// Ensure the current interval has no register use...
ASSERT_EQ(interval->FirstRegisterUse(), kNoLifetime);
// And the new interval has it for the last add.
- ASSERT_EQ(new_interval->FirstRegisterUse(), last_add->GetLifetimePosition() - 1);
+ ASSERT_EQ(new_interval->FirstRegisterUse(), last_add->GetLifetimePosition());
}
TEST(RegisterAllocatorTest, DeadPhi) {
@@ -414,21 +414,24 @@
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
unhandled->AddLoopRange(0, 60);
+ // For SSA value intervals, only an interval resulting from a split may intersect
+ // with inactive intervals.
+ unhandled = register_allocator.Split(unhandled, 5);
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(40, 50);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(20, 30);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(60, 70);
register_allocator.inactive_.Add(interval);
@@ -438,7 +441,7 @@
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
- register_allocator.TryAllocateFreeReg(unhandled);
+ ASSERT_TRUE(register_allocator.TryAllocateFreeReg(unhandled));
// Check that we have split the interval.
ASSERT_EQ(1u, register_allocator.unhandled_->Size());
@@ -619,7 +622,8 @@
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
- ret->GetLocations()->SetInAt(0, Location::RegisterLocation(2));
+ // Don't use SetInAt because we are overriding an already allocated location.
+ ret->GetLocations()->inputs_.Put(0, Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -681,7 +685,8 @@
liveness.Analyze();
// check that both adds get the same register.
- first_add->InputAt(0)->GetLocations()->SetOut(Location::RegisterLocation(2));
+ // Don't use SetOutput because output is already allocated.
+ first_add->InputAt(0)->GetLocations()->output_ = Location::RegisterLocation(2);
ASSERT_EQ(first_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
ASSERT_EQ(second_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
@@ -693,4 +698,45 @@
}
}
+static HGraph* BuildDiv(ArenaAllocator* allocator,
+ HInstruction** div) {
+ HGraph* graph = new (allocator) HGraph(allocator);
+ HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->SetEntryBlock(entry);
+ HInstruction* first = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* second = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ entry->AddInstruction(first);
+ entry->AddInstruction(second);
+
+ HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ graph->AddBlock(block);
+ entry->AddSuccessor(block);
+
+ *div = new (allocator) HDiv(Primitive::kPrimInt, first, second, 0); // don't care about dex_pc.
+ block->AddInstruction(*div);
+
+ block->AddInstruction(new (allocator) HExit());
+ return graph;
+}
+
+TEST(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HInstruction *div;
+
+ {
+ HGraph* graph = BuildDiv(&allocator, &div);
+ x86::CodeGeneratorX86 codegen(graph);
+ SsaLivenessAnalysis liveness(*graph, &codegen);
+ liveness.Analyze();
+
+ RegisterAllocator register_allocator(&allocator, &codegen, liveness);
+ register_allocator.AllocateRegisters();
+
+ // div on x86 requires its first input in eax and its output to be the same as the first input.
+ ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
+ }
+}
+
} // namespace art
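The new ExpectedExactInRegisterAndSameOutputHint test above exercises the kSameAsFirstInput output policy handled earlier in ProcessInstruction: when an instruction such as x86 div must produce its result in the same register as its first input, the allocator pins the output interval to that register instead of blocking a fresh one. A minimal standalone sketch of that pinning step follows; the Interval and Policy types are illustrative, not ART's LocationSummary API.

#include <cstddef>
#include <cstdio>

struct Interval {
  int reg = -1;
  size_t from = 0;
  void SetRegister(int r) { reg = r; }
  void SetFrom(size_t position) { from = position; }
};

enum class Policy { kAny, kSameAsFirstInput };

void PinOutputToFirstInput(Interval* output, int first_input_reg,
                           Policy policy, size_t position) {
  if (policy == Policy::kSameAsFirstInput && first_input_reg != -1) {
    // Start the output interval just after the input is consumed and reuse its register.
    output->SetFrom(position + 1);
    output->SetRegister(first_input_reg);
  }
}

int main() {
  Interval div_out;
  constexpr int kEax = 0;  // x86 idiv reads its dividend from EAX and writes EAX.
  PinOutputToFirstInput(&div_out, kEax, Policy::kSameAsFirstInput, /*position=*/10);
  std::printf("div output register: %d (live from %zu)\n", div_out.reg, div_out.from);
  return 0;
}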
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index be2c039..b2cc119 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -109,8 +109,8 @@
HPhi* phi = new (GetGraph()->GetArena()) HPhi(
GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid);
for (size_t i = 0; i < block->GetPredecessors().Size(); i++) {
- HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(i), local);
- phi->SetRawInputAt(i, value);
+ HInstruction* pred_value = ValueOfLocal(block->GetPredecessors().Get(i), local);
+ phi->SetRawInputAt(i, pred_value);
}
block->AddPhi(phi);
value = phi;
@@ -129,8 +129,109 @@
}
}
+/**
+ * Constants in the Dex format are not typed. So the builder types them as
+ * integers, but when doing the SSA form, we might realize the constant
+ * is used for floating point operations. We create a floating-point equivalent
+ * constant to make the operations correctly typed.
+ */
+static HFloatConstant* GetFloatEquivalent(HIntConstant* constant) {
+ // We place the floating point constant next to this constant.
+ HFloatConstant* result = constant->GetNext()->AsFloatConstant();
+ if (result == nullptr) {
+ HGraph* graph = constant->GetBlock()->GetGraph();
+ ArenaAllocator* allocator = graph->GetArena();
+ result = new (allocator) HFloatConstant(bit_cast<int32_t, float>(constant->GetValue()));
+ constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
+ } else {
+ // If there is already a constant with the expected type, we know it is
+ // the floating point equivalent of this constant.
+ DCHECK_EQ((bit_cast<float, int32_t>(result->GetValue())), constant->GetValue());
+ }
+ return result;
+}
+
+/**
+ * Wide constants in the Dex format are not typed. So the builder types them as
+ * longs, but when doing the SSA form, we might realize the constant
+ * is used for floating point operations. We create a floating-point equivalent
+ * constant to make the operations correctly typed.
+ */
+static HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant) {
+ // We place the floating point constant next to this constant.
+ HDoubleConstant* result = constant->GetNext()->AsDoubleConstant();
+ if (result == nullptr) {
+ HGraph* graph = constant->GetBlock()->GetGraph();
+ ArenaAllocator* allocator = graph->GetArena();
+ result = new (allocator) HDoubleConstant(bit_cast<int64_t, double>(constant->GetValue()));
+ constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
+ } else {
+ // If there is already a constant with the expected type, we know it is
+ // the floating point equivalent of this constant.
+ DCHECK_EQ((bit_cast<double, int64_t>(result->GetValue())), constant->GetValue());
+ }
+ return result;
+}
+
+/**
+ * Because of the Dex format, we might end up having the same phi being
+ * used for non floating point operations and floating point operations. Because
+ * we want the graph to be correctly typed (and thereafter avoid moves between
+ * floating point registers and core registers), we need to create a copy of the
+ * phi with a floating point type.
+ */
+static HPhi* GetFloatOrDoubleEquivalentOfPhi(HPhi* phi, Primitive::Type type) {
+ // We place the floating point phi next to this phi.
+ HInstruction* next = phi->GetNext();
+ if (next == nullptr || (next->AsPhi()->GetRegNumber() != phi->GetRegNumber())) {
+ ArenaAllocator* allocator = phi->GetBlock()->GetGraph()->GetArena();
+ HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), phi->InputCount(), type);
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ // Copy the inputs. Note that the graph may not be correctly typed by doing this copy,
+ // but the type propagation phase will fix it.
+ new_phi->SetRawInputAt(i, phi->InputAt(i));
+ }
+ phi->GetBlock()->InsertPhiAfter(new_phi, phi);
+ return new_phi;
+ } else {
+ DCHECK_EQ(next->GetType(), type);
+ return next->AsPhi();
+ }
+}
+
+HInstruction* SsaBuilder::GetFloatOrDoubleEquivalent(HInstruction* user,
+ HInstruction* value,
+ Primitive::Type type) {
+ if (value->IsArrayGet()) {
+ // The verifier has checked that values in arrays cannot be used for both
+ // floating point and non-floating point operations. It is therefore safe to just
+ // change the type of the operation.
+ value->AsArrayGet()->SetType(type);
+ return value;
+ } else if (value->IsLongConstant()) {
+ return GetDoubleEquivalent(value->AsLongConstant());
+ } else if (value->IsIntConstant()) {
+ return GetFloatEquivalent(value->AsIntConstant());
+ } else if (value->IsPhi()) {
+ return GetFloatOrDoubleEquivalentOfPhi(value->AsPhi(), type);
+ } else {
+ // For other instructions, we assume the verifier has checked that the dex format is correctly
+ // typed and the value in a dex register will not be used for both floating point and
+ // non-floating point operations. So the only reason an instruction would want a floating
+ // point equivalent is for an unused phi that will be removed by the dead phi elimination phase.
+ DCHECK(user->IsPhi());
+ return value;
+ }
+}
+
void SsaBuilder::VisitLoadLocal(HLoadLocal* load) {
- load->ReplaceWith(current_locals_->Get(load->GetLocal()->GetRegNumber()));
+ HInstruction* value = current_locals_->Get(load->GetLocal()->GetRegNumber());
+ if (load->GetType() != value->GetType()
+ && (load->GetType() == Primitive::kPrimFloat || load->GetType() == Primitive::kPrimDouble)) {
+ // If the operation requests a specific type, we make sure its input is of that type.
+ value = GetFloatOrDoubleEquivalent(load, value, load->GetType());
+ }
+ load->ReplaceWith(value);
load->GetBlock()->RemoveInstruction(load);
}
@@ -149,4 +250,9 @@
instruction->SetEnvironment(environment);
}
+void SsaBuilder::VisitTemporary(HTemporary* temp) {
+ // Temporaries are only used by the baseline register allocator.
+ temp->GetBlock()->RemoveInstruction(temp);
+}
+
} // namespace art
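GetFloatEquivalent and GetDoubleEquivalent above reinterpret a constant's raw bits rather than converting its value: an untyped Dex constant that was provisionally typed as an int keeps its bit pattern and is simply re-read as a float. The self-contained illustration below uses a local memcpy-based bit_cast that mimics the <Source, Dest> usage seen in the hunk; ART ships its own helper, so this template is for demonstration only.

#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename Source, typename Dest>
Dest bit_cast(Source source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}

int main() {
  // A Dex "const" only records raw bits. Read as an int the value below is
  // 1065353216, but the same bit pattern read as a float means 1.0f.
  int32_t raw_bits = 1065353216;
  float as_float = bit_cast<int32_t, float>(raw_bits);
  std::printf("int bits %d -> float %f\n", raw_bits, as_float);

  // Round-tripping the bits is lossless, which is what the DCHECK in
  // GetFloatEquivalent relies on when it finds an existing equivalent constant.
  int32_t back = bit_cast<float, int32_t>(as_float);
  std::printf("round trip equal: %d\n", back == raw_bits);
  return 0;
}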
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 9d8c072..2207cd6 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -51,6 +51,11 @@
void VisitLoadLocal(HLoadLocal* load);
void VisitStoreLocal(HStoreLocal* store);
void VisitInstruction(HInstruction* instruction);
+ void VisitTemporary(HTemporary* instruction);
+
+ static HInstruction* GetFloatOrDoubleEquivalent(HInstruction* user,
+ HInstruction* instruction,
+ Primitive::Type type);
private:
// Locals for the current block being visited.
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index f0edc64..0085b27 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -107,15 +107,15 @@
HBasicBlock* block = it.Current();
block->SetLifetimeStart(lifetime_position);
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
current->Accept(location_builder);
LocationSummary* locations = current->GetLocations();
if (locations != nullptr && locations->Out().IsValid()) {
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -124,15 +124,16 @@
// Add a null marker to notify we are starting a block.
instructions_from_lifetime_position_.Add(nullptr);
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
current->Accept(codegen_->GetLocationBuilder());
LocationSummary* locations = current->GetLocations();
if (locations != nullptr && locations->Out().IsValid()) {
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
instructions_from_lifetime_position_.Add(current);
current->SetLifetimePosition(lifetime_position);
@@ -178,8 +179,8 @@
HBasicBlock* successor = block->GetSuccessors().Get(i);
live_in->Union(GetLiveInSet(*successor));
size_t phi_input_index = successor->GetPredecessorIndexOf(block);
- for (HInstructionIterator it(successor->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* phi = it.Current();
+ for (HInstructionIterator inst_it(successor->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
HInstruction* input = phi->InputAt(phi_input_index);
input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block);
// A phi input whose last user is the phi dies at the end of the predecessor block,
@@ -195,8 +196,9 @@
current->GetLiveInterval()->AddRange(block->GetLifetimeStart(), block->GetLifetimeEnd());
}
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
+ back_it.Advance()) {
+ HInstruction* current = back_it.Current();
if (current->HasSsaIndex()) {
// Kill the instruction and shorten its interval.
kill->SetBit(current->GetSsaIndex());
@@ -230,8 +232,8 @@
}
// Kill phis defined in this block.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
if (current->HasSsaIndex()) {
kill->SetBit(current->GetSsaIndex());
live_in->ClearBit(current->GetSsaIndex());
@@ -319,7 +321,7 @@
if (user->IsPhi()) {
// If the phi has a register, try to use the same.
Location phi_location = user->GetLiveInterval()->ToLocation();
- if (phi_location.IsRegister() && free_until[phi_location.reg()] >= use_position) {
+ if (SameRegisterKind(phi_location) && free_until[phi_location.reg()] >= use_position) {
return phi_location.reg();
}
const GrowableArray<HBasicBlock*>& predecessors = user->GetBlock()->GetPredecessors();
@@ -345,7 +347,7 @@
// We use the user's lifetime position - 1 (and not `use_position`) because the
// register is blocked at the beginning of the user.
size_t position = user->GetLifetimePosition() - 1;
- if (expected.IsRegister() && free_until[expected.reg()] >= position) {
+ if (SameRegisterKind(expected) && free_until[expected.reg()] >= position) {
return expected.reg();
}
}
@@ -368,7 +370,7 @@
// If the input dies at the end of the predecessor, we know its register can
// be reused.
Location input_location = input_interval.ToLocation();
- if (input_location.IsRegister()) {
+ if (SameRegisterKind(input_location)) {
return input_location.reg();
}
}
@@ -384,7 +386,7 @@
// If the input dies at the start of this instruction, we know its register can
// be reused.
Location location = input_interval.ToLocation();
- if (location.IsRegister()) {
+ if (SameRegisterKind(location)) {
return location.reg();
}
}
@@ -393,13 +395,21 @@
return kNoRegister;
}
+bool LiveInterval::SameRegisterKind(Location other) const {
+ return IsFloatingPoint()
+ ? other.IsFpuRegister()
+ : other.IsRegister();
+}
+
bool LiveInterval::NeedsTwoSpillSlots() const {
return type_ == Primitive::kPrimLong || type_ == Primitive::kPrimDouble;
}
Location LiveInterval::ToLocation() const {
if (HasRegister()) {
- return Location::RegisterLocation(GetRegister());
+ return IsFloatingPoint()
+ ? Location::FpuRegisterLocation(GetRegister())
+ : Location::RegisterLocation(GetRegister());
} else {
HInstruction* defined_by = GetParent()->GetDefinedBy();
if (defined_by->IsConstant()) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index e9bd303..ca08d5b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -25,13 +25,14 @@
static constexpr int kNoRegister = -1;
-class BlockInfo : public ArenaObject {
+class BlockInfo : public ArenaObject<kArenaAllocMisc> {
public:
BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
live_in_(allocator, number_of_ssa_values, false),
live_out_(allocator, number_of_ssa_values, false),
kill_(allocator, number_of_ssa_values, false) {
+ UNUSED(block_);
live_in_.ClearAllBits();
live_out_.ClearAllBits();
kill_.ClearAllBits();
@@ -52,7 +53,7 @@
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange : public ArenaObject {
+class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
@@ -63,16 +64,16 @@
size_t GetEnd() const { return end_; }
LiveRange* GetNext() const { return next_; }
- bool IntersectsWith(const LiveRange& other) {
+ bool IntersectsWith(const LiveRange& other) const {
return (start_ >= other.start_ && start_ < other.end_)
|| (other.start_ >= start_ && other.start_ < end_);
}
- bool IsBefore(const LiveRange& other) {
+ bool IsBefore(const LiveRange& other) const {
return end_ <= other.start_;
}
- void Dump(std::ostream& stream) {
+ void Dump(std::ostream& stream) const {
stream << "[" << start_ << ", " << end_ << ")";
}
@@ -89,7 +90,7 @@
/**
* A use position represents a live interval use at a given position.
*/
-class UsePosition : public ArenaObject {
+class UsePosition : public ArenaObject<kArenaAllocMisc> {
public:
UsePosition(HInstruction* user,
size_t input_index,
@@ -136,28 +137,13 @@
* An interval is a list of disjoint live ranges where an instruction is live.
* Each instruction that has uses gets an interval.
*/
-class LiveInterval : public ArenaObject {
+class LiveInterval : public ArenaObject<kArenaAllocMisc> {
public:
- LiveInterval(ArenaAllocator* allocator,
- Primitive::Type type,
- HInstruction* defined_by = nullptr,
- bool is_fixed = false,
- int reg = kNoRegister,
- bool is_temp = false,
- bool is_slow_path_safepoint = false)
- : allocator_(allocator),
- first_range_(nullptr),
- last_range_(nullptr),
- first_use_(nullptr),
- type_(type),
- next_sibling_(nullptr),
- parent_(this),
- register_(reg),
- spill_slot_(kNoSpillSlot),
- is_fixed_(is_fixed),
- is_temp_(is_temp),
- is_slow_path_safepoint_(is_slow_path_safepoint),
- defined_by_(defined_by) {}
+ static LiveInterval* MakeInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* instruction = nullptr) {
+ return new (allocator) LiveInterval(allocator, type, instruction);
+ }
static LiveInterval* MakeSlowPathInterval(ArenaAllocator* allocator, HInstruction* instruction) {
return new (allocator) LiveInterval(
@@ -173,7 +159,10 @@
}
bool IsFixed() const { return is_fixed_; }
+ bool IsTemp() const { return is_temp_; }
bool IsSlowPathSafepoint() const { return is_slow_path_safepoint_; }
+ // Returns whether this interval is the result of a split.
+ bool IsSplit() const { return parent_ != this; }
void AddUse(HInstruction* instruction, size_t input_index, bool is_environment) {
// Set the use within the instruction.
@@ -188,10 +177,14 @@
&& (first_use_->GetPosition() < position)) {
// The user uses the instruction multiple times, and one use dies before the other.
// We update the use list so that the latter is first.
+ UsePosition* cursor = first_use_;
+ while ((cursor->GetNext() != nullptr) && (cursor->GetNext()->GetPosition() < position)) {
+ cursor = cursor->GetNext();
+ }
DCHECK(first_use_->GetPosition() + 1 == position);
UsePosition* new_use = new (allocator_) UsePosition(
- instruction, input_index, is_environment, position, first_use_->GetNext());
- first_use_->SetNext(new_use);
+ instruction, input_index, is_environment, position, cursor->GetNext());
+ cursor->SetNext(new_use);
if (first_range_->GetEnd() == first_use_->GetPosition()) {
first_range_->end_ = position;
}
@@ -354,6 +347,10 @@
|| (location.GetPolicy() == Location::kSameAsFirstInput
&& locations->InAt(0).GetPolicy() == Location::kRequiresRegister)) {
return position;
+ } else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
+ || (location.GetPolicy() == Location::kSameAsFirstInput
+ && locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister)) {
+ return position;
}
}
}
@@ -362,12 +359,12 @@
size_t end = GetEnd();
while (use != nullptr && use->GetPosition() <= end) {
size_t use_position = use->GetPosition();
- if (use_position >= position && !use->GetIsEnvironment()) {
+ if (use_position > position && !use->GetIsEnvironment()) {
Location location = use->GetUser()->GetLocations()->InAt(use->GetInputIndex());
- if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
- // Return the lifetime just before the user, so that the interval has a register
- // when entering the user.
- return use->GetUser()->GetLifetimePosition() - 1;
+ if (location.IsUnallocated()
+ && (location.GetPolicy() == Location::kRequiresRegister
+ || location.GetPolicy() == Location::kRequiresFpuRegister)) {
+ return use_position;
}
}
use = use->GetNext();
@@ -480,6 +477,7 @@
} while ((use = use->GetNext()) != nullptr);
}
stream << "}";
+ stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit();
}
LiveInterval* GetNextSibling() const { return next_sibling_; }
@@ -498,6 +496,10 @@
// slots for spilling.
bool NeedsTwoSpillSlots() const;
+ bool IsFloatingPoint() const {
+ return type_ == Primitive::kPrimFloat || type_ == Primitive::kPrimDouble;
+ }
+
// Converts the location of the interval to a `Location` object.
Location ToLocation() const;
@@ -507,9 +509,31 @@
// Finds the interval that covers `position`.
const LiveInterval& GetIntervalAt(size_t position) const;
- bool IsTemp() const { return is_temp_; }
+ // Returns whether `other` and `this` share the same kind of register.
+ bool SameRegisterKind(Location other) const;
private:
+ LiveInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* defined_by = nullptr,
+ bool is_fixed = false,
+ int reg = kNoRegister,
+ bool is_temp = false,
+ bool is_slow_path_safepoint = false)
+ : allocator_(allocator),
+ first_range_(nullptr),
+ last_range_(nullptr),
+ first_use_(nullptr),
+ type_(type),
+ next_sibling_(nullptr),
+ parent_(this),
+ register_(reg),
+ spill_slot_(kNoSpillSlot),
+ is_fixed_(is_fixed),
+ is_temp_(is_temp),
+ is_slow_path_safepoint_(is_slow_path_safepoint),
+ defined_by_(defined_by) {}
+
ArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index e02a182..56979e1 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -22,20 +22,15 @@
// Add to the worklist phis referenced by non-phi instructions.
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- if (phi->HasEnvironmentUses()) {
- // TODO: Do we want to keep that phi alive?
- worklist_.Add(phi);
- phi->SetLive();
- continue;
- }
- for (HUseIterator<HInstruction> it(phi->GetUses()); !it.Done(); it.Advance()) {
- HUseListNode<HInstruction>* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HPhi* phi = inst_it.Current()->AsPhi();
+ for (HUseIterator<HInstruction> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) {
+ HUseListNode<HInstruction>* current = use_it.Current();
HInstruction* user = current->GetUser();
if (!user->IsPhi()) {
worklist_.Add(phi);
phi->SetLive();
+ break;
} else {
phi->SetDead();
}
@@ -66,8 +61,9 @@
next = current->GetNext();
if (current->AsPhi()->IsDead()) {
if (current->HasUses()) {
- for (HUseIterator<HInstruction> it(current->GetUses()); !it.Done(); it.Advance()) {
- HUseListNode<HInstruction>* user_node = it.Current();
+ for (HUseIterator<HInstruction> use_it(current->GetUses()); !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HInstruction>* user_node = use_it.Current();
HInstruction* user = user_node->GetUser();
DCHECK(user->IsLoopHeaderPhi());
DCHECK(user->AsPhi()->IsDead());
@@ -76,6 +72,15 @@
current->RemoveUser(user, user_node->GetIndex());
}
}
+ if (current->HasEnvironmentUses()) {
+ for (HUseIterator<HEnvironment> use_it(current->GetEnvUses()); !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HEnvironment>* user_node = use_it.Current();
+ HEnvironment* user = user_node->GetUser();
+ user->SetRawEnvAt(user_node->GetIndex(), nullptr);
+ current->RemoveEnvironmentUser(user, user_node->GetIndex());
+ }
+ }
block->RemovePhi(current->AsPhi());
}
current = next;
@@ -87,8 +92,8 @@
// Add all phis in the worklist.
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- worklist_.Add(it.Current()->AsPhi());
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ worklist_.Add(inst_it.Current()->AsPhi());
}
}
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index 5274f09..b789971 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
#include "nodes.h"
+#include "optimization.h"
namespace art {
@@ -25,15 +26,15 @@
* Optimization phase that removes dead phis from the graph. Dead phis are unused
* phis, or phis only used by other phis.
*/
-class SsaDeadPhiElimination : public ValueObject {
+class SsaDeadPhiElimination : public HOptimization {
public:
explicit SsaDeadPhiElimination(HGraph* graph)
- : graph_(graph), worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+ : HOptimization(graph, true, "dead_phi_elimination"),
+ worklist_(graph->GetArena(), kDefaultWorklistSize) {}
- void Run();
+ void Run() OVERRIDE;
private:
- HGraph* const graph_;
GrowableArray<HPhi*> worklist_;
static constexpr size_t kDefaultWorklistSize = 8;
@@ -47,15 +48,15 @@
* registers might be updated with the same value, or not updated at all. We can just
* replace the phi with the value when entering the loop.
*/
-class SsaRedundantPhiElimination : public ValueObject {
+class SsaRedundantPhiElimination : public HOptimization {
public:
explicit SsaRedundantPhiElimination(HGraph* graph)
- : graph_(graph), worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+ : HOptimization(graph, true, "redundant_phi_elimination"),
+ worklist_(graph->GetArena(), kDefaultWorklistSize) {}
- void Run();
+ void Run() OVERRIDE;
private:
- HGraph* const graph_;
GrowableArray<HPhi*> worklist_;
static constexpr size_t kDefaultWorklistSize = 8;
diff --git a/compiler/optimizing/ssa_type_propagation.cc b/compiler/optimizing/ssa_type_propagation.cc
index a860cb7..cb5ce20 100644
--- a/compiler/optimizing/ssa_type_propagation.cc
+++ b/compiler/optimizing/ssa_type_propagation.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "ssa_builder.h"
#include "ssa_type_propagation.h"
#include "nodes.h"
@@ -38,15 +39,31 @@
// Re-compute and update the type of the instruction. Returns
// whether or not the type was changed.
-static bool UpdateType(HPhi* phi) {
+bool SsaTypePropagation::UpdateType(HPhi* phi) {
Primitive::Type existing = phi->GetType();
- Primitive::Type new_type = Primitive::kPrimVoid;
+ Primitive::Type new_type = existing;
for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
Primitive::Type input_type = phi->InputAt(i)->GetType();
new_type = MergeTypes(new_type, input_type);
}
phi->SetType(new_type);
+
+ if (new_type == Primitive::kPrimDouble || new_type == Primitive::kPrimFloat) {
+ // If the phi is of floating point type, we need to update its inputs to that
+ // type. For inputs that are phis, we need to recompute their types.
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ HInstruction* input = phi->InputAt(i);
+ if (input->GetType() != new_type) {
+ HInstruction* equivalent = SsaBuilder::GetFloatOrDoubleEquivalent(phi, input, new_type);
+ phi->ReplaceInput(equivalent, i);
+ if (equivalent->IsPhi()) {
+ AddToWorklist(equivalent->AsPhi());
+ }
+ }
+ }
+ }
+
return existing != new_type;
}
@@ -63,15 +80,22 @@
HPhi* phi = it.Current()->AsPhi();
// Set the initial type for the phi. Use the non back edge input for reaching
// a fixed point faster.
- phi->SetType(phi->InputAt(0)->GetType());
+ Primitive::Type phi_type = phi->GetType();
+ // We merge with the existing type, that has been set by the SSA builder.
+ DCHECK(phi_type == Primitive::kPrimVoid
+ || phi_type == Primitive::kPrimFloat
+ || phi_type == Primitive::kPrimDouble);
+ phi->SetType(MergeTypes(phi->InputAt(0)->GetType(), phi->GetType()));
AddToWorklist(phi);
}
} else {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- if (UpdateType(phi)) {
- AddDependentInstructionsToWorklist(phi);
- }
+ // Eagerly compute the type of the phi, for quicker convergence. Note
+ // that we don't need to add users to the worklist because we are
+ // doing a reverse post-order visit, so the phi users are either
+ // non-loop phis that will be visited later in this visit, or loop
+ // phis that are already in the worklist.
+ UpdateType(it.Current()->AsPhi());
}
}
}
diff --git a/compiler/optimizing/ssa_type_propagation.h b/compiler/optimizing/ssa_type_propagation.h
index 5f471a9..f4d3d63 100644
--- a/compiler/optimizing/ssa_type_propagation.h
+++ b/compiler/optimizing/ssa_type_propagation.h
@@ -34,6 +34,7 @@
void ProcessWorklist();
void AddToWorklist(HPhi* phi);
void AddDependentInstructionsToWorklist(HPhi* phi);
+ bool UpdateType(HPhi* phi);
HGraph* const graph_;
GrowableArray<HPhi*> worklist_;
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 5f74c33..9cfa71c 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -167,33 +167,33 @@
}
// Set the register map.
- MemoryRegion region = dex_register_maps_region.Subregion(
+ MemoryRegion register_region = dex_register_maps_region.Subregion(
next_dex_register_map_offset,
DexRegisterMap::kFixedSize + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
- next_dex_register_map_offset += region.size();
- DexRegisterMap dex_register_map(region);
- stack_map.SetDexRegisterMapOffset(region.start() - memory_start);
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
- for (size_t i = 0; i < entry.num_dex_registers; ++i) {
+ for (size_t j = 0; j < entry.num_dex_registers; ++j) {
DexRegisterEntry register_entry =
- dex_register_maps_.Get(i + entry.dex_register_maps_start_index);
- dex_register_map.SetRegisterInfo(i, register_entry.kind, register_entry.value);
+ dex_register_maps_.Get(j + entry.dex_register_maps_start_index);
+ dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value);
}
// Set the inlining info.
if (entry.inlining_depth != 0) {
- MemoryRegion region = inline_infos_region.Subregion(
+ MemoryRegion inline_region = inline_infos_region.Subregion(
next_inline_info_offset,
InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
- next_inline_info_offset += region.size();
- InlineInfo inline_info(region);
+ next_inline_info_offset += inline_region.size();
+ InlineInfo inline_info(inline_region);
- stack_map.SetInlineDescriptorOffset(region.start() - memory_start);
+ stack_map.SetInlineDescriptorOffset(inline_region.start() - memory_start);
inline_info.SetDepth(entry.inlining_depth);
- for (size_t i = 0; i < entry.inlining_depth; ++i) {
- InlineInfoEntry inline_entry = inline_infos_.Get(i + entry.inline_infos_start_index);
- inline_info.SetMethodReferenceIndexAtDepth(i, inline_entry.method_index);
+ for (size_t j = 0; j < entry.inlining_depth; ++j) {
+ InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index);
+ inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
}
} else {
stack_map.SetInlineDescriptorOffset(InlineInfo::kNoInlineInfo);
diff --git a/compiler/output_stream.cc b/compiler/output_stream.cc
new file mode 100644
index 0000000..a8b64ca
--- /dev/null
+++ b/compiler/output_stream.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "output_stream.h"
+
+namespace art {
+
+std::ostream& operator<<(std::ostream& os, const Whence& rhs) {
+ switch (rhs) {
+ case kSeekSet: os << "SEEK_SET"; break;
+ case kSeekCurrent: os << "SEEK_CUR"; break;
+ case kSeekEnd: os << "SEEK_END"; break;
+ default: UNREACHABLE();
+ }
+ return os;
+}
+
+} // namespace art
diff --git a/compiler/output_stream.h b/compiler/output_stream.h
index 97ccc2c..4d30b83 100644
--- a/compiler/output_stream.h
+++ b/compiler/output_stream.h
@@ -17,9 +17,7 @@
#ifndef ART_COMPILER_OUTPUT_STREAM_H_
#define ART_COMPILER_OUTPUT_STREAM_H_
-#include <stdint.h>
-#include <sys/types.h>
-
+#include <ostream>
#include <string>
#include "base/macros.h"
@@ -31,6 +29,7 @@
kSeekCurrent = SEEK_CUR,
kSeekEnd = SEEK_END,
};
+std::ostream& operator<<(std::ostream& os, const Whence& rhs);
class OutputStream {
public:
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 0c93f0a..004af98 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -114,7 +114,7 @@
<< num_allocations << ", avg size: " << bytes_allocated / num_allocations << "\n";
}
os << "===== Allocation by kind\n";
- COMPILE_ASSERT(arraysize(kAllocNames) == kNumArenaAllocKinds, check_arraysize_kAllocNames);
+ static_assert(arraysize(kAllocNames) == kNumArenaAllocKinds, "arraysize of kAllocNames");
for (int i = 0; i < kNumArenaAllocKinds; i++) {
os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n";
}
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index b2f5ca9..6d21399 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -82,7 +82,7 @@
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes); UNUSED(kind); }
+ void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index de35f3d..f17e5a9 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,12 +16,12 @@
#include "arena_allocator.h"
#include "arena_bit_vector.h"
-#include "base/allocator.h"
namespace art {
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator,
+ public ArenaObject<kArenaAllocGrowableBitMap> {
public:
explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
~ArenaBitVectorAllocator() {}
@@ -32,11 +32,6 @@
virtual void Free(void*) {} // Nop.
- static void* operator new(size_t size, ArenaAlloc* arena) {
- return arena->Alloc(sizeof(ArenaBitVectorAllocator), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAlloc* const arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index c92658f..34f1ca9 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -17,12 +17,14 @@
#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#include "arena_object.h"
#include "base/bit_vector.h"
-#include "utils/arena_allocator.h"
-#include "utils/scoped_arena_allocator.h"
namespace art {
+class ArenaAllocator;
+class ScopedArenaAllocator;
+
// Type of growable bitmap for memory tuning.
enum OatBitMapKind {
kBitMapMisc = 0,
@@ -50,7 +52,7 @@
/*
* A BitVector implementation that uses Arena allocation.
*/
-class ArenaBitVector : public BitVector {
+class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
public:
ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
OatBitMapKind kind = kBitMapMisc);
@@ -58,16 +60,10 @@
OatBitMapKind kind = kBitMapMisc);
~ArenaBitVector() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
+
+ DISALLOW_COPY_AND_ASSIGN(ArenaBitVector);
};
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
index c48b0c8..8252591 100644
--- a/compiler/utils/arena_containers.h
+++ b/compiler/utils/arena_containers.h
@@ -66,7 +66,7 @@
class ArenaAllocatorAdapterKindImpl<false> {
public:
// Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
- explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+ explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { UNUSED(kind); }
ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
ArenaAllocKind Kind() { return kArenaAllocSTL; }
};
@@ -159,11 +159,13 @@
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p, n);
}
void construct(pointer p, const_reference val) {
diff --git a/compiler/utils/arena_object.h b/compiler/utils/arena_object.h
index 50909f75..d64c419 100644
--- a/compiler/utils/arena_object.h
+++ b/compiler/utils/arena_object.h
@@ -19,18 +19,46 @@
#include "arena_allocator.h"
#include "base/logging.h"
+#include "scoped_arena_allocator.h"
namespace art {
+// Parent for arena allocated objects giving appropriate new and delete operators.
+template<enum ArenaAllocKind kAllocKind>
class ArenaObject {
public:
// Allocate a new ArenaObject of 'size' bytes in the Arena.
void* operator new(size_t size, ArenaAllocator* allocator) {
- return allocator->Alloc(size, kArenaAllocMisc);
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+};
+
+
+// Parent for arena allocated objects that get deleted; gives appropriate new and delete operators.
+// Currently this is used by the quick compiler for debug reference counting arena allocations.
+template<enum ArenaAllocKind kAllocKind>
+class DeletableArenaObject {
+ public:
+ // Allocate a new ArenaObject of 'size' bytes in the Arena.
+ void* operator new(size_t size, ArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
+ }
+
+ void operator delete(void*, size_t) {
+ // Nop.
}
};
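Editorial aside (not part of the patch): the templated ArenaObject above gives every subclass a placement operator new that allocates from an arena and tags the allocation with a kind, while operator delete is a no-op because arena memory is reclaimed in bulk. A self-contained sketch of the same pattern, using a toy arena in place of ART's ArenaAllocator (all names here are invented for illustration), is:

#include <cstddef>
#include <vector>

enum ArenaAllocKind { kArenaAllocMisc, kArenaAllocGrowableBitMap };

// Toy arena standing in for ART's ArenaAllocator.
class ToyArena {
 public:
  ~ToyArena() { for (void* p : blocks_) ::operator delete(p); }
  void* Alloc(size_t size, ArenaAllocKind kind) {
    (void)kind;  // A real arena would record per-kind allocation statistics here.
    void* p = ::operator new(size);
    blocks_.push_back(p);
    return p;
  }
 private:
  std::vector<void*> blocks_;
};

// Same idea as ArenaObject<kAllocKind>: subclasses are created with placement new into an arena.
template <ArenaAllocKind kKind>
class ArenaObjectSketch {
 public:
  void* operator new(size_t size, ToyArena* arena) { return arena->Alloc(size, kKind); }
  void operator delete(void*, size_t) {}  // No-op: the arena frees everything at once.
};

class LiveRangeSketch : public ArenaObjectSketch<kArenaAllocMisc> {
 public:
  LiveRangeSketch(size_t start, size_t end) : start_(start), end_(end) {}
  size_t start_;
  size_t end_;
};

int main() {
  ToyArena arena;
  LiveRangeSketch* range = new (&arena) LiveRangeSketch(4, 10);  // Allocated in the arena.
  return (range->end_ - range->start_ == 6) ? 0 : 1;
}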
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index b430c7e..9c84bc1 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -92,16 +92,29 @@
break;
case kRegister:
if (is_shift_) {
+ uint32_t shift_type;
+ switch (shift_) {
+ case arm::Shift::ROR:
+ shift_type = static_cast<uint32_t>(shift_);
+ CHECK_NE(immed_, 0U);
+ break;
+ case arm::Shift::RRX:
+ shift_type = static_cast<uint32_t>(arm::Shift::ROR); // Same encoding as ROR.
+ CHECK_EQ(immed_, 0U);
+ break;
+ default:
+ shift_type = static_cast<uint32_t>(shift_);
+ }
// Shifted immediate or register.
if (rs_ == kNoRegister) {
// Immediate shift.
return immed_ << kShiftImmShift |
- static_cast<uint32_t>(shift_) << kShiftShift |
+ shift_type << kShiftShift |
static_cast<uint32_t>(rm_);
} else {
// Register shift.
return static_cast<uint32_t>(rs_) << kShiftRegisterShift |
- static_cast<uint32_t>(shift_) << kShiftShift | (1 << 4) |
+ shift_type << kShiftShift | (1 << 4) |
static_cast<uint32_t>(rm_);
}
} else {
@@ -301,11 +314,11 @@
CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
CHECK_ALIGNED(offset, 2); // Multiple of 4.
CHECK((am_ == Offset) || (am_ == NegOffset));
- uint32_t vencoding = (encoding & (0xf << kRnShift)) | (offset >> 2);
+ uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
if (am_ == Offset) {
- vencoding |= 1 << 23;
+ vencoding_value |= 1 << 23;
}
- return vencoding;
+ return vencoding_value;
}
@@ -324,7 +337,7 @@
return IsAbsoluteUint(10, offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
- return false;
+ UNREACHABLE();
}
}
@@ -342,7 +355,7 @@
return IsAbsoluteUint(10, offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
- return false;
+ UNREACHABLE();
}
}
@@ -359,9 +372,9 @@
return IsAbsoluteUint(10, offset); // VFP addressing mode.
case kLoadWordPair:
return IsAbsoluteUint(10, offset);
- default:
+ default:
LOG(FATAL) << "UNREACHABLE";
- return false;
+ UNREACHABLE();
}
}
@@ -377,9 +390,9 @@
return IsAbsoluteUint(10, offset); // VFP addressing mode.
case kStoreWordPair:
return IsAbsoluteUint(10, offset);
- default:
+ default:
LOG(FATAL) << "UNREACHABLE";
- return false;
+ UNREACHABLE();
}
}
@@ -417,9 +430,23 @@
StoreToOffset(kStoreWord, R0, SP, 0);
// Write out entry spills.
+ int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
for (size_t i = 0; i < entry_spills.size(); ++i) {
- Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
- StoreToOffset(kStoreWord, reg, SP, frame_size + kFramePointerSize + (i * kFramePointerSize));
+ ArmManagedRegister reg = entry_spills.at(i).AsArm();
+ if (reg.IsNoRegister()) {
+ // Only increment the stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsCoreRegister()) {
+ StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsSRegister()) {
+ StoreSToOffset(reg.AsSRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ StoreDToOffset(reg.AsDRegister(), SP, offset);
+ offset += 8;
+ }
}
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 14d48b7..d288b70 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/logging.h"
+#include "base/value_object.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
@@ -179,8 +180,12 @@
DB_W = (8|0|1) << 21, // decrement before with writeback to base
IB_W = (8|4|1) << 21 // increment before with writeback to base
};
+inline std::ostream& operator<<(std::ostream& os, const BlockAddressMode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
-class Address {
+class Address : public ValueObject {
public:
// Memory operand addressing mode (in ARM encoding form. For others we need
// to adjust)
@@ -260,13 +265,17 @@
}
private:
- Register rn_;
- Register rm_;
- int32_t offset_; // Used as shift amount for register offset.
- Mode am_;
- bool is_immed_offset_;
- Shift shift_;
+ const Register rn_;
+ const Register rm_;
+ const int32_t offset_; // Used as shift amount for register offset.
+ const Mode am_;
+ const bool is_immed_offset_;
+ const Shift shift_;
};
+inline std::ostream& operator<<(std::ostream& os, const Address::Mode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
// Instruction encoding bits.
enum {
@@ -344,10 +353,6 @@
extern const char* kRegisterNames[];
extern const char* kConditionNames[];
-extern std::ostream& operator<<(std::ostream& os, const Register& rhs);
-extern std::ostream& operator<<(std::ostream& os, const SRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const Condition& rhs);
// This is an abstract ARM assembler. Subclasses provide assemblers for the individual
// instruction sets (ARM32, Thumb2, etc.)
@@ -416,6 +421,12 @@
virtual void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
virtual void udiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
+ // Bit field extract instructions.
+ virtual void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width,
+ Condition cond = AL) = 0;
+ virtual void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width,
+ Condition cond = AL) = 0;
+
// Load/store instructions.
virtual void ldr(Register rd, const Address& ad, Condition cond = AL) = 0;
virtual void str(Register rd, const Address& ad, Condition cond = AL) = 0;
@@ -448,8 +459,10 @@
virtual void bkpt(uint16_t imm16) = 0;
virtual void svc(uint32_t imm24) = 0;
- virtual void it(Condition firstcond, ItState i1 = kItOmitted,
- ItState i2 = kItOmitted, ItState i3 = kItOmitted) {
+ virtual void it(Condition firstcond ATTRIBUTE_UNUSED,
+ ItState i1 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i2 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i3 ATTRIBUTE_UNUSED = kItOmitted) {
// Ignored if not supported.
}
@@ -523,6 +536,9 @@
virtual void blx(Register rm, Condition cond = AL) = 0;
virtual void bx(Register rm, Condition cond = AL) = 0;
+ // Memory barriers.
+ virtual void dmb(DmbOptions flavor) = 0;
+
void Pad(uint32_t bytes);
// Macros.
@@ -534,14 +550,9 @@
Condition cond = AL) = 0;
virtual void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) = 0;
- virtual void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) = 0;
// Load and Store. May clobber IP.
virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0;
- virtual void LoadSImmediate(SRegister sd, float value, Condition cond = AL) = 0;
- virtual void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) = 0;
virtual void MarkExceptionHandler(Label* label) = 0;
virtual void LoadFromOffset(LoadOperandType type,
Register reg,
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 6af69c8..a1594b0 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -208,6 +208,44 @@
}
+void Arm32Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 | B25 | B24 | B23 | B21 |
+ (widthminus1 << 16) |
+ (static_cast<uint32_t>(rd) << 12) |
+ (lsb << 7) |
+ B6 | B4 |
+ static_cast<uint32_t>(rn);
+ Emit(encoding);
+}
+
+
+void Arm32Assembler::ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 | B25 | B24 | B23 | B22 | B21 |
+ (widthminus1 << 16) |
+ (static_cast<uint32_t>(rd) << 12) |
+ (lsb << 7) |
+ B6 | B4 |
+ static_cast<uint32_t>(rn);
+ Emit(encoding);
+}
+
+
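Editorial aside (not part of the patch): in the two encodings above, the B* constants set individual instruction bits, so the fields pack as cond(31:28) | opcode(27:21) | width-1(20:16) | Rd(15:12) | lsb(11:7) | 0b101(6:4) | Rn(3:0). A small standalone check of that packing for one concrete case (assuming r0/r1 encode as 0/1 and AL as 0xE):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Recompute the SBFX encoding for "sbfx r0, r1, #8, #16" from the field layout above
// (editorial illustration of the bit packing, not ART code).
int main() {
  const uint32_t cond_al = 0xEu;        // AL condition.
  const uint32_t rd = 0u, rn = 1u;      // r0, r1.
  const uint32_t lsb = 8u, width = 16u;
  const uint32_t encoding = (cond_al << 28) |
                            (0x3Du << 21) |         // 0b0111101, i.e. B26|B25|B24|B23|B21.
                            ((width - 1u) << 16) |
                            (rd << 12) |
                            (lsb << 7) |
                            (0x5u << 4) |           // 0b101, i.e. B6|B4.
                            rn;
  assert(encoding == 0xE7AF0451u);  // Expected machine word for "sbfx r0, r1, #8, #16".
  std::printf("0x%08X\n", static_cast<unsigned>(encoding));
  return 0;
}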
void Arm32Assembler::ldr(Register rd, const Address& ad, Condition cond) {
EmitMemOp(cond, true, false, rd, ad);
}
@@ -1303,7 +1341,6 @@
}
}
-
void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
@@ -1356,6 +1393,7 @@
break;
default:
LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
}
@@ -1427,6 +1465,7 @@
break;
default:
LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
}
@@ -1469,19 +1508,22 @@
void Arm32Assembler::MemoryBarrier(ManagedRegister mscratch) {
CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
-#if ANDROID_SMP != 0
- int32_t encoding = 0xf57ff05f; // dmb
- Emit(encoding);
-#endif
+ dmb(SY);
}
-void Arm32Assembler::cbz(Register rn, Label* target) {
+void Arm32Assembler::dmb(DmbOptions flavor) {
+ int32_t encoding = 0xf57ff05f; // dmb
+ Emit(encoding | flavor);
+}
+
+
+void Arm32Assembler::cbz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbz is not supported on ARM32";
}
-void Arm32Assembler::cbnz(Register rn, Label* target) {
+void Arm32Assembler::cbnz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbnz is not supported on ARM32";
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index c89fd04..0b009e1 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -96,6 +96,10 @@
void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
+ // Bit field extract instructions.
+ void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+ void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+
// Load/store instructions.
void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
@@ -228,6 +232,8 @@
void CompareAndBranchIfZero(Register r, Label* label) OVERRIDE;
void CompareAndBranchIfNonZero(Register r, Label* label) OVERRIDE;
+ // Memory barriers.
+ void dmb(DmbOptions flavor) OVERRIDE;
// Macros.
// Add signed constant value to rd. May clobber IP.
@@ -236,14 +242,9 @@
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
new file mode 100644
index 0000000..837fe1e
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_arm32.h"
+
+#include <functional>
+#include <type_traits>
+
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "utils/arm/assembler_arm_test.h"
+
+namespace art {
+
+using std::placeholders::_1;
+using std::placeholders::_2;
+using std::placeholders::_3;
+using std::placeholders::_4;
+using std::placeholders::_5;
+
+// To speed up tests, don't use all register combinations.
+static constexpr bool kUseSparseRegisterList = true;
+
+// To speed up tests, don't use all condition codes.
+static constexpr bool kUseSparseConditionList = true;
+
+// To speed up tests, don't use all shift immediates.
+static constexpr bool kUseSparseShiftImmediates = true;
+
+class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
+ arm::Register, arm::SRegister,
+ uint32_t, arm::ShifterOperand, arm::Condition> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "arm";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " -march=armv7-a -mcpu=cortex-a15"; // Arm-v7a, cortex-a15 (means we have sdiv).
+ }
+
+ const char* GetAssemblyHeader() OVERRIDE {
+ return kArm32AssemblyHeader;
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -marm --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ if (kUseSparseRegisterList) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new arm::Register(arm::R0),
+ new arm::Register(arm::R1),
+ new arm::Register(arm::R4),
+ new arm::Register(arm::R8),
+ new arm::Register(arm::R11),
+ new arm::Register(arm::R12),
+ new arm::Register(arm::R13),
+ new arm::Register(arm::R14),
+ new arm::Register(arm::R15)
+ });
+ } else {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new arm::Register(arm::R0),
+ new arm::Register(arm::R1),
+ new arm::Register(arm::R2),
+ new arm::Register(arm::R3),
+ new arm::Register(arm::R4),
+ new arm::Register(arm::R5),
+ new arm::Register(arm::R6),
+ new arm::Register(arm::R7),
+ new arm::Register(arm::R8),
+ new arm::Register(arm::R9),
+ new arm::Register(arm::R10),
+ new arm::Register(arm::R11),
+ new arm::Register(arm::R12),
+ new arm::Register(arm::R13),
+ new arm::Register(arm::R14),
+ new arm::Register(arm::R15)
+ });
+ }
+ }
+
+ if (!kUseSparseConditionList) {
+ conditions_.push_back(arm::Condition::EQ);
+ conditions_.push_back(arm::Condition::NE);
+ conditions_.push_back(arm::Condition::CS);
+ conditions_.push_back(arm::Condition::CC);
+ conditions_.push_back(arm::Condition::MI);
+ conditions_.push_back(arm::Condition::PL);
+ conditions_.push_back(arm::Condition::VS);
+ conditions_.push_back(arm::Condition::VC);
+ conditions_.push_back(arm::Condition::HI);
+ conditions_.push_back(arm::Condition::LS);
+ conditions_.push_back(arm::Condition::GE);
+ conditions_.push_back(arm::Condition::LT);
+ conditions_.push_back(arm::Condition::GT);
+ conditions_.push_back(arm::Condition::LE);
+ conditions_.push_back(arm::Condition::AL);
+ } else {
+ conditions_.push_back(arm::Condition::EQ);
+ conditions_.push_back(arm::Condition::NE);
+ conditions_.push_back(arm::Condition::CC);
+ conditions_.push_back(arm::Condition::VC);
+ conditions_.push_back(arm::Condition::HI);
+ conditions_.push_back(arm::Condition::LT);
+ conditions_.push_back(arm::Condition::AL);
+ }
+
+ shifter_operands_.push_back(arm::ShifterOperand(0));
+ shifter_operands_.push_back(arm::ShifterOperand(1));
+ shifter_operands_.push_back(arm::ShifterOperand(2));
+ shifter_operands_.push_back(arm::ShifterOperand(3));
+ shifter_operands_.push_back(arm::ShifterOperand(4));
+ shifter_operands_.push_back(arm::ShifterOperand(5));
+ shifter_operands_.push_back(arm::ShifterOperand(127));
+ shifter_operands_.push_back(arm::ShifterOperand(128));
+ shifter_operands_.push_back(arm::ShifterOperand(254));
+ shifter_operands_.push_back(arm::ShifterOperand(255));
+
+ if (!kUseSparseRegisterList) {
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R0));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R1));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R2));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R3));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R4));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R5));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R6));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R7));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R8));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R9));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R10));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R11));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R12));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R13));
+ } else {
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R0));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R1));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R4));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R8));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R11));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R12));
+ shifter_operands_.push_back(arm::ShifterOperand(arm::R13));
+ }
+
+ std::vector<arm::Shift> shifts {
+ arm::Shift::LSL, arm::Shift::LSR, arm::Shift::ASR, arm::Shift::ROR, arm::Shift::RRX
+ };
+
+ // ShifterOperands of form "reg shift-type imm."
+ for (arm::Shift shift : shifts) {
+ for (arm::Register* reg : registers_) { // Note: this will pick up the sparse set.
+ if (*reg == arm::R15) { // Skip PC.
+ continue;
+ }
+ if (shift != arm::Shift::RRX) {
+ if (!kUseSparseShiftImmediates) {
+ for (uint32_t imm = 1; imm < 32; ++imm) {
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, imm));
+ }
+ } else {
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 1));
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 2));
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 3));
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 7));
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 15));
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 16));
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 30));
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 31));
+ }
+ } else {
+ // RRX doesn't have an immediate.
+ shifter_operands_.push_back(arm::ShifterOperand(*reg, shift, 0));
+ }
+ }
+ }
+ }
+
+ std::vector<arm::ShifterOperand> CreateRegisterShifts(std::vector<arm::Register*>& base_regs,
+ int32_t shift_min, int32_t shift_max) {
+ std::vector<arm::ShifterOperand> res;
+ static constexpr arm::Shift kShifts[] = { arm::Shift::LSL, arm::Shift::LSR, arm::Shift::ASR,
+ arm::Shift::ROR };
+
+ for (arm::Shift shift : kShifts) {
+ for (arm::Register* reg : base_regs) {
+ // Take the min, the max, and three values in between.
+ res.push_back(arm::ShifterOperand(*reg, shift, shift_min));
+ if (shift_min != shift_max) {
+ res.push_back(arm::ShifterOperand(*reg, shift, shift_max));
+ int32_t middle = (shift_min + shift_max) / 2;
+ res.push_back(arm::ShifterOperand(*reg, shift, middle));
+ res.push_back(arm::ShifterOperand(*reg, shift, middle - 1));
+ res.push_back(arm::ShifterOperand(*reg, shift, middle + 1));
+ }
+ }
+ }
+
+ return res;
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerArmTest::TearDown();
+ STLDeleteElements(&registers_);
+ }
+
+ std::vector<arm::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ std::vector<arm::Condition>& GetConditions() OVERRIDE {
+ return conditions_;
+ }
+
+ std::string GetConditionString(arm::Condition c) OVERRIDE {
+ std::ostringstream oss;
+ oss << c;
+ return oss.str();
+ }
+
+ arm::Register GetPCRegister() OVERRIDE {
+ return arm::R15;
+ }
+
+ std::vector<arm::ShifterOperand>& GetShiftOperands() OVERRIDE {
+ return shifter_operands_;
+ }
+
+ std::string GetShiftString(arm::ShifterOperand sop) OVERRIDE {
+ std::ostringstream oss;
+ if (sop.IsShift()) {
+ // Not a rotate...
+ if (sop.GetShift() == arm::Shift::RRX) {
+ oss << sop.GetRegister() << ", " << sop.GetShift();
+ } else {
+ oss << sop.GetRegister() << ", " << sop.GetShift() << " #" << sop.GetImmediate();
+ }
+ } else if (sop.IsRegister()) {
+ oss << sop.GetRegister();
+ } else {
+ CHECK(sop.IsImmediate());
+ oss << "#" << sop.GetImmediate();
+ }
+ return oss.str();
+ }
+
+ static const char* GetRegTokenFromDepth(int depth) {
+ switch (depth) {
+ case 0:
+ return Base::REG1_TOKEN;
+ case 1:
+ return Base::REG2_TOKEN;
+ case 2:
+ return REG3_TOKEN;
+ case 3:
+ return REG4_TOKEN;
+ default:
+ LOG(FATAL) << "Depth problem.";
+ UNREACHABLE();
+ }
+ }
+
+ void ExecuteAndPrint(std::function<void()> f, std::string fmt, std::ostringstream& oss) {
+ if (first_) {
+ first_ = false;
+ } else {
+ oss << "\n";
+ }
+ oss << fmt;
+
+ f();
+ }
+
+ void TemplateHelper(std::function<void(arm::Register)> f, int depth ATTRIBUTE_UNUSED,
+ bool without_pc,
+ std::string fmt, std::ostringstream& oss) {
+ std::vector<arm::Register*> registers = without_pc ? GetRegistersWithoutPC() : GetRegisters();
+ for (auto reg : registers) {
+ std::string after_reg = fmt;
+
+ std::string reg_string = GetRegName<RegisterView::kUsePrimaryName>(*reg);
+ size_t reg_index;
+ const char* reg_token = GetRegTokenFromDepth(depth);
+
+ while ((reg_index = after_reg.find(reg_token)) != std::string::npos) {
+ after_reg.replace(reg_index, strlen(reg_token), reg_string);
+ }
+
+ ExecuteAndPrint([&] () { f(*reg); }, after_reg, oss);
+ }
+ }
+
+ void TemplateHelper(std::function<void(const arm::ShifterOperand&)> f, int depth ATTRIBUTE_UNUSED,
+ bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::ostringstream& oss) {
+ for (const arm::ShifterOperand& shift : GetShiftOperands()) {
+ std::string after_shift = fmt;
+
+ std::string shift_string = GetShiftString(shift);
+ size_t shift_index;
+ while ((shift_index = after_shift.find(SHIFT_TOKEN)) != std::string::npos) {
+ after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
+ }
+
+ ExecuteAndPrint([&] () { f(shift); }, after_shift, oss);
+ }
+ }
+
+ void TemplateHelper(std::function<void(arm::Condition)> f, int depth ATTRIBUTE_UNUSED,
+ bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::ostringstream& oss) {
+ for (arm::Condition c : GetConditions()) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ }
+
+ ExecuteAndPrint([&] () { f(c); }, after_cond, oss);
+ }
+ }
+
+ template <typename... Args>
+ void TemplateHelper(std::function<void(arm::Register, Args...)> f, int depth, bool without_pc,
+ std::string fmt, std::ostringstream& oss) {
+ std::vector<arm::Register*> registers = without_pc ? GetRegistersWithoutPC() : GetRegisters();
+ for (auto reg : registers) {
+ std::string after_reg = fmt;
+
+ std::string reg_string = GetRegName<RegisterView::kUsePrimaryName>(*reg);
+ size_t reg_index;
+ const char* reg_token = GetRegTokenFromDepth(depth);
+
+ while ((reg_index = after_reg.find(reg_token)) != std::string::npos) {
+ after_reg.replace(reg_index, strlen(reg_token), reg_string);
+ }
+
+ auto lambda = [&] (Args... args) { f(*reg, args...); }; // NOLINT [readability/braces] [4]
+ TemplateHelper(std::function<void(Args...)>(lambda), depth + 1, without_pc,
+ after_reg, oss);
+ }
+ }
+
+ template <typename... Args>
+ void TemplateHelper(std::function<void(const arm::ShifterOperand&, Args...)> f, int depth,
+ bool without_pc, std::string fmt, std::ostringstream& oss) {
+ for (const arm::ShifterOperand& shift : GetShiftOperands()) {
+ std::string after_shift = fmt;
+
+ std::string shift_string = GetShiftString(shift);
+ size_t shift_index;
+ while ((shift_index = after_shift.find(SHIFT_TOKEN)) != std::string::npos) {
+ after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
+ }
+
+ auto lambda = [&] (Args... args) { f(shift, args...); }; // NOLINT [readability/braces] [4]
+ TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
+ after_shift, oss);
+ }
+ }
+
+ template <typename... Args>
+ void TemplateHelper(std::function<void(arm::Condition, Args...)> f, int depth, bool without_pc,
+ std::string fmt, std::ostringstream& oss) {
+ for (arm::Condition c : GetConditions()) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ }
+
+ auto lambda = [&] (Args... args) { f(c, args...); }; // NOLINT [readability/braces] [4]
+ TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
+ after_cond, oss);
+ }
+ }
+
+ template <typename T1, typename T2>
+ std::function<void(T1, T2)> GetBoundFunction2(void (arm::Arm32Assembler::*f)(T1, T2)) {
+ return std::bind(f, GetAssembler(), _1, _2);
+ }
+
+ template <typename T1, typename T2, typename T3>
+ std::function<void(T1, T2, T3)> GetBoundFunction3(void (arm::Arm32Assembler::*f)(T1, T2, T3)) {
+ return std::bind(f, GetAssembler(), _1, _2, _3);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4>
+ std::function<void(T1, T2, T3, T4)> GetBoundFunction4(
+ void (arm::Arm32Assembler::*f)(T1, T2, T3, T4)) {
+ return std::bind(f, GetAssembler(), _1, _2, _3, _4);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ std::function<void(T1, T2, T3, T4, T5)> GetBoundFunction5(
+ void (arm::Arm32Assembler::*f)(T1, T2, T3, T4, T5)) {
+ return std::bind(f, GetAssembler(), _1, _2, _3, _4, _5);
+ }
+
+ template <typename... Args>
+ void GenericTemplateHelper(std::function<void(Args...)> f, bool without_pc,
+ std::string fmt, std::string test_name) {
+ first_ = false;
+ WarnOnCombinations(CountHelper<Args...>(without_pc));
+
+ std::ostringstream oss;
+
+ TemplateHelper(f, 0, without_pc, fmt, oss);
+
+ oss << "\n"; // Trailing newline.
+
+ DriverStr(oss.str(), test_name);
+ }
+
+ template <typename... Args>
+ void T2Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ std::string test_name) {
+ GenericTemplateHelper(GetBoundFunction2(f), without_pc, fmt, test_name);
+ }
+
+ template <typename... Args>
+ void T3Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ std::string test_name) {
+ GenericTemplateHelper(GetBoundFunction3(f), without_pc, fmt, test_name);
+ }
+
+ template <typename... Args>
+ void T4Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ std::string test_name) {
+ GenericTemplateHelper(GetBoundFunction4(f), without_pc, fmt, test_name);
+ }
+
+ template <typename... Args>
+ void T5Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
+ std::string test_name) {
+ GenericTemplateHelper(GetBoundFunction5(f), without_pc, fmt, test_name);
+ }
+
+ private:
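+ // Rough estimate of the number of test-case combinations generated for the given argument
+ // types; the result is passed to WarnOnCombinations() to flag very large tests.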
+ template <typename T>
+ size_t CountHelper(bool without_pc) {
+ size_t tmp;
+ if (std::is_same<T, arm::Register>::value) {
+ tmp = GetRegisters().size();
+ if (without_pc) {
+ tmp--; // Approximation...
+ }
+ return tmp;
+ } else if (std::is_same<T, const arm::ShifterOperand&>::value) {
+ return GetShiftOperands().size();
+ } else if (std::is_same<T, arm::Condition>::value) {
+ return GetConditions().size();
+ } else {
+ LOG(WARNING) << "Unknown type while counting.";
+ return 1;
+ }
+ }
+
+ template <typename T1, typename T2, typename... Args>
+ size_t CountHelper(bool without_pc) {
+ size_t tmp;
+ if (std::is_same<T1, arm::Register>::value) {
+ tmp = GetRegisters().size();
+ if (without_pc) {
+ tmp--; // Approximation...
+ }
+ } else if (std::is_same<T1, const arm::ShifterOperand&>::value) {
+ tmp = GetShiftOperands().size();
+ } else if (std::is_same<T1, arm::Condition>::value) {
+ tmp = GetConditions().size();
+ } else {
+ LOG(WARNING) << "Unknown type while counting.";
+ tmp = 1;
+ }
+ size_t rec = CountHelper<T2, Args...>(without_pc);
+ return rec * tmp;
+ }
+
+ bool first_;
+
+ static constexpr const char* kArm32AssemblyHeader = ".arm\n";
+
+ std::vector<arm::Register*> registers_;
+ std::vector<arm::Condition> conditions_;
+ std::vector<arm::ShifterOperand> shifter_operands_;
+};
+
+
+TEST_F(AssemblerArm32Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+TEST_F(AssemblerArm32Test, Sbfx) {
+ std::vector<std::pair<uint32_t, uint32_t>> immediates;
+ immediates.push_back({0, 1});
+ immediates.push_back({0, 8});
+ immediates.push_back({0, 15});
+ immediates.push_back({0, 16});
+ immediates.push_back({0, 31});
+ immediates.push_back({0, 32});
+
+ immediates.push_back({1, 1});
+ immediates.push_back({1, 15});
+ immediates.push_back({1, 31});
+
+ immediates.push_back({8, 1});
+ immediates.push_back({8, 15});
+ immediates.push_back({8, 16});
+ immediates.push_back({8, 24});
+
+ immediates.push_back({31, 1});
+
+ DriverStr(RepeatRRiiC(&arm::Arm32Assembler::sbfx, immediates,
+ "sbfx{cond} {reg1}, {reg2}, #{imm1}, #{imm2}"), "sbfx");
+}
+
+TEST_F(AssemblerArm32Test, Ubfx) {
+ std::vector<std::pair<uint32_t, uint32_t>> immediates;
+ immediates.push_back({0, 1});
+ immediates.push_back({0, 8});
+ immediates.push_back({0, 15});
+ immediates.push_back({0, 16});
+ immediates.push_back({0, 31});
+ immediates.push_back({0, 32});
+
+ immediates.push_back({1, 1});
+ immediates.push_back({1, 15});
+ immediates.push_back({1, 31});
+
+ immediates.push_back({8, 1});
+ immediates.push_back({8, 15});
+ immediates.push_back({8, 16});
+ immediates.push_back({8, 24});
+
+ immediates.push_back({31, 1});
+
+ DriverStr(RepeatRRiiC(&arm::Arm32Assembler::ubfx, immediates,
+ "ubfx{cond} {reg1}, {reg2}, #{imm1}, #{imm2}"), "ubfx");
+}
+
+TEST_F(AssemblerArm32Test, Mul) {
+ T4Helper(&arm::Arm32Assembler::mul, true, "mul{cond} {reg1}, {reg2}, {reg3}", "mul");
+}
+
+TEST_F(AssemblerArm32Test, Mla) {
+ T5Helper(&arm::Arm32Assembler::mla, true, "mla{cond} {reg1}, {reg2}, {reg3}, {reg4}", "mla");
+}
+
+/* TODO: Needs support to filter out register combinations, as rdhi must not be equal to rdlo.
+TEST_F(AssemblerArm32Test, Umull) {
+ T5Helper(&arm::Arm32Assembler::umull, true, "umull{cond} {reg1}, {reg2}, {reg3}, {reg4}",
+ "umull");
+}
+*/
+
+TEST_F(AssemblerArm32Test, Sdiv) {
+ T4Helper(&arm::Arm32Assembler::sdiv, true, "sdiv{cond} {reg1}, {reg2}, {reg3}", "sdiv");
+}
+
+TEST_F(AssemblerArm32Test, Udiv) {
+ T4Helper(&arm::Arm32Assembler::udiv, true, "udiv{cond} {reg1}, {reg2}, {reg3}", "udiv");
+}
+
+TEST_F(AssemblerArm32Test, And) {
+ T4Helper(&arm::Arm32Assembler::and_, true, "and{cond} {reg1}, {reg2}, {shift}", "and");
+}
+
+TEST_F(AssemblerArm32Test, Eor) {
+ T4Helper(&arm::Arm32Assembler::eor, true, "eor{cond} {reg1}, {reg2}, {shift}", "eor");
+}
+
+TEST_F(AssemblerArm32Test, Orr) {
+ T4Helper(&arm::Arm32Assembler::orr, true, "orr{cond} {reg1}, {reg2}, {shift}", "orr");
+}
+
+TEST_F(AssemblerArm32Test, Orrs) {
+ T4Helper(&arm::Arm32Assembler::orrs, true, "orr{cond}s {reg1}, {reg2}, {shift}", "orrs");
+}
+
+TEST_F(AssemblerArm32Test, Bic) {
+ T4Helper(&arm::Arm32Assembler::bic, true, "bic{cond} {reg1}, {reg2}, {shift}", "bic");
+}
+
+TEST_F(AssemblerArm32Test, Mov) {
+ T3Helper(&arm::Arm32Assembler::mov, true, "mov{cond} {reg1}, {shift}", "mov");
+}
+
+TEST_F(AssemblerArm32Test, Movs) {
+ T3Helper(&arm::Arm32Assembler::movs, true, "mov{cond}s {reg1}, {shift}", "movs");
+}
+
+TEST_F(AssemblerArm32Test, Mvn) {
+ T3Helper(&arm::Arm32Assembler::mvn, true, "mvn{cond} {reg1}, {shift}", "mvn");
+}
+
+TEST_F(AssemblerArm32Test, Mvns) {
+ T3Helper(&arm::Arm32Assembler::mvns, true, "mvn{cond}s {reg1}, {shift}", "mvns");
+}
+
+TEST_F(AssemblerArm32Test, Add) {
+ T4Helper(&arm::Arm32Assembler::add, false, "add{cond} {reg1}, {reg2}, {shift}", "add");
+}
+
+TEST_F(AssemblerArm32Test, Adds) {
+ T4Helper(&arm::Arm32Assembler::adds, false, "add{cond}s {reg1}, {reg2}, {shift}", "adds");
+}
+
+TEST_F(AssemblerArm32Test, Adc) {
+ T4Helper(&arm::Arm32Assembler::adc, false, "adc{cond} {reg1}, {reg2}, {shift}", "adc");
+}
+
+TEST_F(AssemblerArm32Test, Sub) {
+ T4Helper(&arm::Arm32Assembler::sub, false, "sub{cond} {reg1}, {reg2}, {shift}", "sub");
+}
+
+TEST_F(AssemblerArm32Test, Subs) {
+ T4Helper(&arm::Arm32Assembler::subs, false, "sub{cond}s {reg1}, {reg2}, {shift}", "subs");
+}
+
+TEST_F(AssemblerArm32Test, Sbc) {
+ T4Helper(&arm::Arm32Assembler::sbc, false, "sbc{cond} {reg1}, {reg2}, {shift}", "sbc");
+}
+
+TEST_F(AssemblerArm32Test, Rsb) {
+ T4Helper(&arm::Arm32Assembler::rsb, true, "rsb{cond} {reg1}, {reg2}, {shift}", "rsb");
+}
+
+TEST_F(AssemblerArm32Test, Rsbs) {
+ T4Helper(&arm::Arm32Assembler::rsbs, true, "rsb{cond}s {reg1}, {reg2}, {shift}", "rsbs");
+}
+
+TEST_F(AssemblerArm32Test, Rsc) {
+ T4Helper(&arm::Arm32Assembler::rsc, true, "rsc{cond} {reg1}, {reg2}, {shift}", "rsc");
+}
+
+/* TODO: Needs support to filter out register combinations, as reg1 must not be equal to reg3.
+TEST_F(AssemblerArm32Test, Strex) {
+ RRRCWithoutPCHelper(&arm::Arm32Assembler::strex, "strex{cond} {reg1}, {reg2}, [{reg3}]", "strex");
+}
+*/
+
+TEST_F(AssemblerArm32Test, Clz) {
+ T3Helper(&arm::Arm32Assembler::clz, true, "clz{cond} {reg1}, {reg2}", "clz");
+}
+
+TEST_F(AssemblerArm32Test, Tst) {
+ T3Helper(&arm::Arm32Assembler::tst, true, "tst{cond} {reg1}, {shift}", "tst");
+}
+
+TEST_F(AssemblerArm32Test, Teq) {
+ T3Helper(&arm::Arm32Assembler::teq, true, "teq{cond} {reg1}, {shift}", "teq");
+}
+
+TEST_F(AssemblerArm32Test, Cmp) {
+ T3Helper(&arm::Arm32Assembler::cmp, true, "cmp{cond} {reg1}, {shift}", "cmp");
+}
+
+TEST_F(AssemblerArm32Test, Cmn) {
+ T3Helper(&arm::Arm32Assembler::cmn, true, "cmn{cond} {reg1}, {shift}", "cmn");
+}
+
+TEST_F(AssemblerArm32Test, Blx) {
+ T2Helper(&arm::Arm32Assembler::blx, true, "blx{cond} {reg1}", "blx");
+}
+
+TEST_F(AssemblerArm32Test, Bx) {
+ T2Helper(&arm::Arm32Assembler::bx, true, "bx{cond} {reg1}", "bx");
+}
+
+} // namespace art
diff --git a/compiler/utils/arm/assembler_arm_test.h b/compiler/utils/arm/assembler_arm_test.h
new file mode 100644
index 0000000..838abb6
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm_test.h
@@ -0,0 +1,545 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_TEST_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_TEST_H_
+
+#include "utils/assembler_test.h"
+
+namespace art {
+
+template<typename Ass, typename Reg, typename FPReg, typename Imm, typename SOp, typename Cond>
+class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
+ public:
+ typedef AssemblerTest<Ass, Reg, FPReg, Imm> Base;
+
+ using Base::GetRegisters;
+ using Base::GetRegName;
+ using Base::CreateImmediate;
+ using Base::WarnOnCombinations;
+
+ static constexpr int64_t kFullImmRangeThreshold = 32;
+
+ virtual void FillImmediates(std::vector<Imm>& immediates, int64_t imm_min, int64_t imm_max) {
+ // Small range: do completely.
+ if (imm_max - imm_min <= kFullImmRangeThreshold) {
+ for (int64_t i = imm_min; i <= imm_max; ++i) {
+ immediates.push_back(CreateImmediate(i));
+ }
+ } else {
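+ // Large range: sample the boundaries (min, min + 1, min + 2, max - 1, max) and the
+ // midpoint instead of enumerating every value.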
+ immediates.push_back(CreateImmediate(imm_min));
+ immediates.push_back(CreateImmediate(imm_max));
+ if (imm_min < imm_max - 1) {
+ immediates.push_back(CreateImmediate(imm_min + 1));
+ }
+ if (imm_min < imm_max - 2) {
+ immediates.push_back(CreateImmediate(imm_min + 2));
+ }
+ if (imm_min < imm_max - 3) {
+ immediates.push_back(CreateImmediate(imm_max - 1));
+ }
+ if (imm_min < imm_max - 4) {
+ immediates.push_back(CreateImmediate((imm_min + imm_max) / 2));
+ }
+ }
+ }
+
+ std::string RepeatRRIIC(void (Ass::*f)(Reg, Reg, Imm, Imm, Cond),
+ int64_t imm1_min, int64_t imm1_max,
+ int64_t imm2_min, int64_t imm2_max,
+ std::string fmt) {
+ return RepeatTemplatedRRIIC(f, GetRegisters(), GetRegisters(),
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ imm1_min, imm1_max, imm2_min, imm2_max,
+ fmt);
+ }
+
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRRIIC(void (Ass::*f)(Reg1, Reg2, Imm, Imm, Cond),
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerArmTest::*GetName1)(const Reg1&),
+ std::string (AssemblerArmTest::*GetName2)(const Reg2&),
+ int64_t imm1_min, int64_t imm1_max,
+ int64_t imm2_min, int64_t imm2_max,
+ std::string fmt) {
+ std::vector<Imm> immediates1;
+ FillImmediates(immediates1, imm1_min, imm1_max);
+ std::vector<Imm> immediates2;
+ FillImmediates(immediates2, imm2_min, imm2_max);
+
+ std::vector<Cond>& cond = GetConditions();
+
+ WarnOnCombinations(cond.size() * immediates1.size() * immediates2.size() *
+ reg1_registers.size() * reg2_registers.size());
+
+ std::ostringstream oss;
+ bool first = true;
+ for (Cond& c : cond) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
+ }
+
+ for (Imm i : immediates1) {
+ std::string base = after_cond;
+
+ size_t imm1_index = base.find(IMM1_TOKEN);
+ if (imm1_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << i;
+ std::string imm_string = sreg.str();
+ base.replace(imm1_index, ConstexprStrLen(IMM1_TOKEN), imm_string);
+ }
+
+ for (Imm j : immediates2) {
+ std::string base2 = base;
+
+ size_t imm2_index = base2.find(IMM2_TOKEN);
+ if (imm2_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << j;
+ std::string imm_string = sreg.str();
+ base2.replace(imm2_index, ConstexprStrLen(IMM2_TOKEN), imm_string);
+ }
+
+ for (auto reg1 : reg1_registers) {
+ std::string base3 = base2;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base3.find(Base::REG1_TOKEN)) != std::string::npos) {
+ base3.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
+ }
+
+ for (auto reg2 : reg2_registers) {
+ std::string base4 = base3;
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base4.find(Base::REG2_TOKEN)) != std::string::npos) {
+ base4.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
+ }
+
+ if (first) {
+ first = false;
+ } else {
+ oss << "\n";
+ }
+ oss << base4;
+
+ (Base::GetAssembler()->*f)(*reg1, *reg2, i, j, c);
+ }
+ }
+ }
+ }
+ }
+ // Add a newline at the end.
+ oss << "\n";
+
+ return oss.str();
+ }
+
+ std::string RepeatRRiiC(void (Ass::*f)(Reg, Reg, Imm, Imm, Cond),
+ std::vector<std::pair<Imm, Imm>>& immediates,
+ std::string fmt) {
+ return RepeatTemplatedRRiiC<Reg, Reg>(f, GetRegisters(), GetRegisters(),
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ immediates, fmt);
+ }
+
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRRiiC(void (Ass::*f)(Reg1, Reg2, Imm, Imm, Cond),
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerArmTest::*GetName1)(const Reg1&),
+ std::string (AssemblerArmTest::*GetName2)(const Reg2&),
+ std::vector<std::pair<Imm, Imm>>& immediates,
+ std::string fmt) {
+ std::vector<Cond>& cond = GetConditions();
+
+ WarnOnCombinations(cond.size() * immediates.size() * reg1_registers.size() *
+ reg2_registers.size());
+
+ std::ostringstream oss;
+ bool first = true;
+ for (Cond& c : cond) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
+ }
+
+ for (std::pair<Imm, Imm>& pair : immediates) {
+ Imm i = pair.first;
+ Imm j = pair.second;
+ std::string after_imm1 = after_cond;
+
+ size_t imm1_index = after_imm1.find(IMM1_TOKEN);
+ if (imm1_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << i;
+ std::string imm_string = sreg.str();
+ after_imm1.replace(imm1_index, ConstexprStrLen(IMM1_TOKEN), imm_string);
+ }
+
+ std::string after_imm2 = after_imm1;
+
+ size_t imm2_index = after_imm2.find(IMM2_TOKEN);
+ if (imm2_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << j;
+ std::string imm_string = sreg.str();
+ after_imm2.replace(imm2_index, ConstexprStrLen(IMM2_TOKEN), imm_string);
+ }
+
+ for (auto reg1 : reg1_registers) {
+ std::string after_reg1 = after_imm2;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
+ after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
+ }
+
+ for (auto reg2 : reg2_registers) {
+ std::string after_reg2 = after_reg1;
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
+ after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
+ }
+
+ if (first) {
+ first = false;
+ } else {
+ oss << "\n";
+ }
+ oss << after_reg2;
+
+ (Base::GetAssembler()->*f)(*reg1, *reg2, i, j, c);
+ }
+ }
+ }
+ }
+ // Add a newline at the end.
+ oss << "\n";
+
+ return oss.str();
+ }
+
+ std::string RepeatRRC(void (Ass::*f)(Reg, Reg, Cond), std::string fmt) {
+ return RepeatTemplatedRRC(f, GetRegisters(), GetRegisters(), GetConditions(),
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRRC(void (Ass::*f)(Reg1, Reg2, Cond),
+ const std::vector<Reg1*>& reg1_registers,
+ const std::vector<Reg2*>& reg2_registers,
+ const std::vector<Cond>& cond,
+ std::string (AssemblerArmTest::*GetName1)(const Reg1&),
+ std::string (AssemblerArmTest::*GetName2)(const Reg2&),
+ std::string fmt) {
+ WarnOnCombinations(cond.size() * reg1_registers.size() * reg2_registers.size());
+
+ std::ostringstream oss;
+ bool first = true;
+ for (const Cond& c : cond) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
+ }
+
+ for (auto reg1 : reg1_registers) {
+ std::string after_reg1 = after_cond;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
+ after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
+ }
+
+ for (auto reg2 : reg2_registers) {
+ std::string after_reg2 = after_reg1;
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
+ after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
+ }
+
+ if (first) {
+ first = false;
+ } else {
+ oss << "\n";
+ }
+ oss << after_reg2;
+
+ (Base::GetAssembler()->*f)(*reg1, *reg2, c);
+ }
+ }
+ }
+ // Add a newline at the end.
+ oss << "\n";
+
+ return oss.str();
+ }
+
+ std::string RepeatRRRC(void (Ass::*f)(Reg, Reg, Reg, Cond), std::string fmt) {
+ return RepeatTemplatedRRRC(f, GetRegisters(), GetRegisters(), GetRegisters(), GetConditions(),
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ template <typename Reg1, typename Reg2, typename Reg3>
+ std::string RepeatTemplatedRRRC(void (Ass::*f)(Reg1, Reg2, Reg3, Cond),
+ const std::vector<Reg1*>& reg1_registers,
+ const std::vector<Reg2*>& reg2_registers,
+ const std::vector<Reg3*>& reg3_registers,
+ const std::vector<Cond>& cond,
+ std::string (AssemblerArmTest::*GetName1)(const Reg1&),
+ std::string (AssemblerArmTest::*GetName2)(const Reg2&),
+ std::string (AssemblerArmTest::*GetName3)(const Reg3&),
+ std::string fmt) {
+ WarnOnCombinations(cond.size() * reg1_registers.size() * reg2_registers.size() *
+ reg3_registers.size());
+
+ std::ostringstream oss;
+ bool first = true;
+ for (const Cond& c : cond) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
+ }
+
+ for (auto reg1 : reg1_registers) {
+ std::string after_reg1 = after_cond;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
+ after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
+ }
+
+ for (auto reg2 : reg2_registers) {
+ std::string after_reg2 = after_reg1;
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
+ after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
+ }
+
+ for (auto reg3 : reg3_registers) {
+ std::string after_reg3 = after_reg2;
+
+ std::string reg3_string = (this->*GetName3)(*reg3);
+ size_t reg3_index;
+ while ((reg3_index = after_reg3.find(REG3_TOKEN)) != std::string::npos) {
+ after_reg3.replace(reg3_index, ConstexprStrLen(REG3_TOKEN), reg3_string);
+ }
+
+ if (first) {
+ first = false;
+ } else {
+ oss << "\n";
+ }
+ oss << after_reg3;
+
+ (Base::GetAssembler()->*f)(*reg1, *reg2, *reg3, c);
+ }
+ }
+ }
+ }
+ // Add a newline at the end.
+ oss << "\n";
+
+ return oss.str();
+ }
+
+ template <typename RegT>
+ std::string RepeatTemplatedRSC(void (Ass::*f)(RegT, SOp, Cond),
+ const std::vector<RegT*>& registers,
+ const std::vector<SOp>& shifts,
+ const std::vector<Cond>& cond,
+ std::string (AssemblerArmTest::*GetName)(const RegT&),
+ std::string fmt) {
+ WarnOnCombinations(cond.size() * registers.size() * shifts.size());
+
+ std::ostringstream oss;
+ bool first = true;
+ for (const Cond& c : cond) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
+ }
+
+ for (const SOp& shift : shifts) {
+ std::string after_shift = after_cond;
+
+ std::string shift_string = GetShiftString(shift);
+ size_t shift_index;
+ while ((shift_index = after_shift.find(Base::SHIFT_TOKEN)) != std::string::npos) {
+ after_shift.replace(shift_index, ConstexprStrLen(Base::SHIFT_TOKEN), shift_string);
+ }
+
+ for (auto reg : registers) {
+ std::string after_reg = after_shift;
+
+ std::string reg_string = (this->*GetName)(*reg);
+ size_t reg_index;
+ while ((reg_index = after_reg.find(Base::REG_TOKEN)) != std::string::npos) {
+ after_reg.replace(reg_index, ConstexprStrLen(Base::REG_TOKEN), reg_string);
+ }
+
+ if (first) {
+ first = false;
+ } else {
+ oss << "\n";
+ }
+ oss << after_reg;
+
+ (Base::GetAssembler()->*f)(*reg, shift, c);
+ }
+ }
+ }
+ // Add a newline at the end.
+ oss << "\n";
+
+ return oss.str();
+ }
+
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRRSC(void (Ass::*f)(Reg1, Reg2, const SOp&, Cond),
+ const std::vector<Reg1*>& reg1_registers,
+ const std::vector<Reg2*>& reg2_registers,
+ const std::vector<SOp>& shifts,
+ const std::vector<Cond>& cond,
+ std::string (AssemblerArmTest::*GetName1)(const Reg1&),
+ std::string (AssemblerArmTest::*GetName2)(const Reg2&),
+ std::string fmt) {
+ WarnOnCombinations(cond.size() * reg1_registers.size() * reg2_registers.size() * shifts.size());
+
+ std::ostringstream oss;
+ bool first = true;
+ for (const Cond& c : cond) {
+ std::string after_cond = fmt;
+
+ size_t cond_index = after_cond.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
+ }
+
+ for (const SOp& shift : shifts) {
+ std::string after_shift = after_cond;
+
+ std::string shift_string = GetShiftString(shift);
+ size_t shift_index;
+ while ((shift_index = after_shift.find(SHIFT_TOKEN)) != std::string::npos) {
+ after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
+ }
+
+ for (auto reg1 : reg1_registers) {
+ std::string after_reg1 = after_shift;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
+ after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
+ }
+
+ for (auto reg2 : reg2_registers) {
+ std::string after_reg2 = after_reg1;
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
+ after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
+ }
+
+ if (first) {
+ first = false;
+ } else {
+ oss << "\n";
+ }
+ oss << after_reg2;
+
+ (Base::GetAssembler()->*f)(*reg1, *reg2, shift, c);
+ }
+ }
+ }
+ }
+ // Add a newline at the end.
+ oss << "\n";
+
+ return oss.str();
+ }
+
+ protected:
+ AssemblerArmTest() {}
+
+ virtual std::vector<Cond>& GetConditions() = 0;
+ virtual std::string GetConditionString(Cond c) = 0;
+
+ virtual std::vector<SOp>& GetShiftOperands() = 0;
+ virtual std::string GetShiftString(SOp sop) = 0;
+
+ virtual Reg GetPCRegister() = 0;
+ virtual std::vector<Reg*> GetRegistersWithoutPC() {
+ std::vector<Reg*> without_pc = GetRegisters();
+ Reg pc_reg = GetPCRegister();
+
+ for (auto it = without_pc.begin(); it != without_pc.end(); ++it) {
+ if (**it == pc_reg) {
+ without_pc.erase(it);
+ break;
+ }
+ }
+
+ return without_pc;
+ }
+
+ static constexpr const char* IMM1_TOKEN = "{imm1}";
+ static constexpr const char* IMM2_TOKEN = "{imm2}";
+ static constexpr const char* REG3_TOKEN = "{reg3}";
+ static constexpr const char* REG4_TOKEN = "{reg4}";
+ static constexpr const char* COND_TOKEN = "{cond}";
+ static constexpr const char* SHIFT_TOKEN = "{shift}";
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AssemblerArmTest);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_TEST_H_
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 7968a77..a349209 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -152,6 +152,8 @@
void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
if (rd == rm && !IsHighRegister(rd) && !IsHighRegister(rn) && !force_32bit_) {
// 16 bit.
int16_t encoding = B14 | B9 | B8 | B6 |
@@ -176,6 +178,8 @@
void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 0U /* 0b00 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -192,6 +196,8 @@
void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 01 /* 0b01 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -208,6 +214,8 @@
void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 2U /* 0b010; */;
uint32_t op2 = 0U /* 0b0000 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
@@ -223,6 +231,8 @@
void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B20 |
@@ -238,6 +248,8 @@
void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B21 | B20 |
@@ -252,6 +264,48 @@
}
+void Thumb2Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CheckCondition(cond);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+ uint32_t imm2 = lsb & (B1 | B0); // Bits 0-1 of `lsb`.
+ uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2; // Bits 2-4 of `lsb`.
+
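+ // 32-bit Thumb-2 encoding: `lsb` is split across imm3 (bits 14-12) and imm2 (bits 7-6),
+ // and the width is encoded as width - 1 in the low five bits.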
+ uint32_t op = 20U /* 0b10100 */;
+ int32_t encoding = B31 | B30 | B29 | B28 | B25 |
+ op << 20 |
+ static_cast<uint32_t>(rn) << 16 |
+ imm3 << 12 |
+ static_cast<uint32_t>(rd) << 8 |
+ imm2 << 6 |
+ widthminus1;
+
+ Emit32(encoding);
+}
+
+
+void Thumb2Assembler::ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CheckCondition(cond);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+ uint32_t imm2 = lsb & (B1 | B0); // Bits 0-1 of `lsb`.
+ uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2; // Bits 2-4 of `lsb`.
+
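+ // Same field layout as sbfx above; only the opcode bits differ.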
+ uint32_t op = 28U /* 0b11100 */;
+ int32_t encoding = B31 | B30 | B29 | B28 | B25 |
+ op << 20 |
+ static_cast<uint32_t>(rn) << 16 |
+ imm3 << 12 |
+ static_cast<uint32_t>(rd) << 8 |
+ imm2 << 6 |
+ widthminus1;
+
+ Emit32(encoding);
+}
+
+
void Thumb2Assembler::ldr(Register rd, const Address& ad, Condition cond) {
EmitLoadStore(cond, true, false, false, false, rd, ad);
}
@@ -293,6 +347,7 @@
void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
@@ -304,6 +359,7 @@
void Thumb2Assembler::strd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 |
@@ -609,9 +665,9 @@
}
-bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
+bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -727,9 +783,9 @@
}
-void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
+void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -789,7 +845,7 @@
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
imm;
@@ -798,7 +854,7 @@
// Register (possibly shifted)
encoding = B31 | B30 | B29 | B27 | B25 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
so.encodingThumb();
@@ -809,7 +865,7 @@
void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -936,9 +992,9 @@
// ADD and SUB are complex enough to warrant their own emitter.
-void Thumb2Assembler::Emit16BitAddSub(Condition cond,
+void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1075,7 +1131,7 @@
void Thumb2Assembler::EmitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1405,7 +1461,7 @@
void Thumb2Assembler::EmitMultiMemOp(Condition cond,
- BlockAddressMode am,
+ BlockAddressMode bam,
bool load,
Register base,
RegList regs) {
@@ -1417,7 +1473,7 @@
must_be_32bit = true;
}
- uint32_t w_bit = am == IA_W || am == DB_W || am == DA_W || am == IB_W;
+ bool w_bit = bam == IA_W || bam == DB_W || bam == DA_W || bam == IB_W;
// 16 bit always uses writeback.
if (!w_bit) {
must_be_32bit = true;
@@ -1425,7 +1481,7 @@
if (must_be_32bit) {
uint32_t op = 0;
- switch (am) {
+ switch (bam) {
case IA:
case IA_W:
op = 1U /* 0b01 */;
@@ -1438,7 +1494,7 @@
case IB:
case DA_W:
case IB_W:
- LOG(FATAL) << "LDM/STM mode not supported on thumb: " << am;
+ LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
}
if (load) {
// Cannot have SP in the list.
@@ -2107,8 +2163,8 @@
branch->ResetSize(Branch::k16Bit);
// Now add a compare instruction in the place the branch was.
- int16_t cmp = B13 | B11 | static_cast<int16_t>(branch->GetRegister()) << 8;
- buffer_.Store<int16_t>(branch_location, cmp);
+ buffer_.Store<int16_t>(branch_location,
+ B13 | B11 | static_cast<int16_t>(branch->GetRegister()) << 8);
// Since we have made a hole in the code we need to reload the
// current pc.
@@ -2354,7 +2410,6 @@
}
}
-
void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldThumb(rd, R0, MOV, value, &shifter_op)) {
@@ -2378,7 +2433,7 @@
int32_t offset,
Condition cond) {
if (!Address::CanHoldLoadOffsetThumb(type, offset)) {
- CHECK(base != IP);
+ CHECK_NE(base, IP);
LoadImmediate(IP, offset, cond);
add(IP, IP, ShifterOperand(base), cond);
base = IP;
@@ -2406,6 +2461,7 @@
break;
default:
LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
}
@@ -2453,12 +2509,26 @@
Register base,
int32_t offset,
Condition cond) {
+ Register tmp_reg = kNoRegister;
if (!Address::CanHoldStoreOffsetThumb(type, offset)) {
- CHECK(reg != IP);
- CHECK(base != IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
+ CHECK_NE(base, IP);
+ if (reg != IP) {
+ tmp_reg = IP;
+ } else {
+ // Be careful not to use IP twice (for `reg` and to build the
+ // Address object used by the store instruction(s) below).
+ // Instead, save R5 on the stack (or R6 if R5 is not available),
+ // use it as a secondary temporary register, and restore it after
+ // the store instruction has been emitted.
+ tmp_reg = base != R5 ? R5 : R6;
+ Push(tmp_reg);
+ if (base == SP) {
+ offset += kRegisterSize;
+ }
+ }
+ LoadImmediate(tmp_reg, offset, cond);
+ add(tmp_reg, tmp_reg, ShifterOperand(base), cond);
+ base = tmp_reg;
offset = 0;
}
CHECK(Address::CanHoldStoreOffsetThumb(type, offset));
@@ -2477,6 +2547,11 @@
break;
default:
LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+ if (tmp_reg != kNoRegister && tmp_reg != IP) {
+ DCHECK(tmp_reg == R5 || tmp_reg == R6);
+ Pop(tmp_reg);
}
}
@@ -2519,10 +2594,13 @@
void Thumb2Assembler::MemoryBarrier(ManagedRegister mscratch) {
CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
-#if ANDROID_SMP != 0
- int32_t encoding = 0xf3bf8f5f; // dmb in T1 encoding.
- Emit32(encoding);
-#endif
+ dmb(SY);
+}
+
+
+void Thumb2Assembler::dmb(DmbOptions flavor) {
+ int32_t encoding = 0xf3bf8f50; // dmb in T1 encoding.
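+ // The low four bits of the encoding select the barrier option (SY, ST, ISH, ...).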
+ Emit32(encoding | flavor);
}
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 9e7d394..cfa251a 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -118,6 +118,10 @@
void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
+ // Bit field extract instructions.
+ void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+ void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+
// Load/store instructions.
void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
@@ -259,6 +263,9 @@
void CompareAndBranchIfZero(Register r, Label* label) OVERRIDE;
void CompareAndBranchIfNonZero(Register r, Label* label) OVERRIDE;
+ // Memory barriers.
+ void dmb(DmbOptions flavor) OVERRIDE;
+
// Macros.
// Add signed constant value to rd. May clobber IP.
void AddConstant(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
@@ -266,14 +273,9 @@
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
@@ -321,40 +323,40 @@
private:
// Emit a single 32 or 16 bit data processing instruction.
void EmitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Must the instruction be 32 bits or can it possibly be encoded
// in 16 bits?
bool Is32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 32 bit data processing instruction.
void Emit32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 16 bit data processing instruction.
void Emit16BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
void Emit16BitAddSub(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -362,12 +364,12 @@
uint16_t EmitCompareAndBranch(Register rn, uint16_t prev, bool n);
void EmitLoadStore(Condition cond,
- bool load,
- bool byte,
- bool half,
- bool is_signed,
- Register rd,
- const Address& ad);
+ bool load,
+ bool byte,
+ bool half,
+ bool is_signed,
+ Register rd,
+ const Address& ad);
void EmitMemOpAddressMode3(Condition cond,
int32_t mode,
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
new file mode 100644
index 0000000..65d6d45
--- /dev/null
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_thumb2.h"
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+class AssemblerThumb2Test : public AssemblerTest<arm::Thumb2Assembler,
+ arm::Register, arm::SRegister,
+ uint32_t> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "arm";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " -mthumb";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -marm --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new arm::Register(arm::R0),
+ new arm::Register(arm::R1),
+ new arm::Register(arm::R2),
+ new arm::Register(arm::R3),
+ new arm::Register(arm::R4),
+ new arm::Register(arm::R5),
+ new arm::Register(arm::R6),
+ new arm::Register(arm::R7),
+ new arm::Register(arm::R8),
+ new arm::Register(arm::R9),
+ new arm::Register(arm::R10),
+ new arm::Register(arm::R11),
+ new arm::Register(arm::R12),
+ new arm::Register(arm::R13),
+ new arm::Register(arm::R14),
+ new arm::Register(arm::R15)
+ });
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ }
+
+ std::vector<arm::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ private:
+ std::vector<arm::Register*> registers_;
+};
+
+
+TEST_F(AssemblerThumb2Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+
+TEST_F(AssemblerThumb2Test, Sbfx) {
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "sbfx r0, r1, #0, #1\n"
+ "sbfx r0, r1, #0, #8\n"
+ "sbfx r0, r1, #0, #16\n"
+ "sbfx r0, r1, #0, #32\n"
+
+ "sbfx r0, r1, #8, #1\n"
+ "sbfx r0, r1, #8, #8\n"
+ "sbfx r0, r1, #8, #16\n"
+ "sbfx r0, r1, #8, #24\n"
+
+ "sbfx r0, r1, #16, #1\n"
+ "sbfx r0, r1, #16, #8\n"
+ "sbfx r0, r1, #16, #16\n"
+
+ "sbfx r0, r1, #31, #1\n";
+ DriverStr(expected, "sbfx");
+}
+
+TEST_F(AssemblerThumb2Test, Ubfx) {
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "ubfx r0, r1, #0, #1\n"
+ "ubfx r0, r1, #0, #8\n"
+ "ubfx r0, r1, #0, #16\n"
+ "ubfx r0, r1, #0, #32\n"
+
+ "ubfx r0, r1, #8, #1\n"
+ "ubfx r0, r1, #8, #8\n"
+ "ubfx r0, r1, #8, #16\n"
+ "ubfx r0, r1, #8, #24\n"
+
+ "ubfx r0, r1, #16, #1\n"
+ "ubfx r0, r1, #16, #8\n"
+ "ubfx r0, r1, #16, #16\n"
+
+ "ubfx r0, r1, #31, #1\n";
+ DriverStr(expected, "ubfx");
+}
+
+} // namespace art
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 092c891..1513296 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -38,15 +38,16 @@
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
-
-// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at
-// a time, so that compile time optimizations can be applied.
-// Warning: VFPv3-D32 is untested.
-#define VFPv3_D16
-#if defined(VFPv3_D16) == defined(VFPv3_D32)
-#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time."
-#endif
-
+// 4-bit option field for the dmb instruction.
+// Order and values follow those of the ARM Architecture Reference Manual.
+enum DmbOptions {
+ SY = 0xf,
+ ST = 0xe,
+ ISH = 0xb,
+ ISHST = 0xa,
+ NSH = 0x7,
+ NSHST = 0x6
+};
enum ScaleFactor {
TIMES_1 = 0,
@@ -56,26 +57,23 @@
};
// Values for double-precision floating point registers.
-enum DRegister {
- D0 = 0,
- D1 = 1,
- D2 = 2,
- D3 = 3,
- D4 = 4,
- D5 = 5,
- D6 = 6,
- D7 = 7,
- D8 = 8,
- D9 = 9,
+enum DRegister { // private marker to keep generate-operator-out.py from processing.
+ D0 = 0,
+ D1 = 1,
+ D2 = 2,
+ D3 = 3,
+ D4 = 4,
+ D5 = 5,
+ D6 = 6,
+ D7 = 7,
+ D8 = 8,
+ D9 = 9,
D10 = 10,
D11 = 11,
D12 = 12,
D13 = 13,
D14 = 14,
D15 = 15,
-#ifdef VFPv3_D16
- kNumberOfDRegisters = 16,
-#else
D16 = 16,
D17 = 17,
D18 = 18,
@@ -93,7 +91,6 @@
D30 = 30,
D31 = 31,
kNumberOfDRegisters = 32,
-#endif
kNumberOfOverlappingDRegisters = 16,
kNoDRegister = -1,
};
@@ -101,18 +98,18 @@
// Values for the condition field as defined in section A3.2.
-enum Condition {
+enum Condition { // private marker to keep generate-operator-out.py from processing.
kNoCondition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
+ EQ = 0, // equal
+ NE = 1, // not equal
+ CS = 2, // carry set/unsigned higher or same
+ CC = 3, // carry clear/unsigned lower
+ MI = 4, // minus/negative
+ PL = 5, // plus/positive or zero
+ VS = 6, // overflow
+ VC = 7, // no overflow
+ HI = 8, // unsigned higher
+ LS = 9, // unsigned lower or same
GE = 10, // signed greater than or equal
LT = 11, // signed less than
GT = 12, // signed greater than
@@ -128,16 +125,16 @@
// as defined in section A3.4
enum Opcode {
kNoOperand = -1,
- AND = 0, // Logical AND
- EOR = 1, // Logical Exclusive OR
- SUB = 2, // Subtract
- RSB = 3, // Reverse Subtract
- ADD = 4, // Add
- ADC = 5, // Add with Carry
- SBC = 6, // Subtract with Carry
- RSC = 7, // Reverse Subtract with Carry
- TST = 8, // Test
- TEQ = 9, // Test Equivalence
+ AND = 0, // Logical AND
+ EOR = 1, // Logical Exclusive OR
+ SUB = 2, // Subtract
+ RSB = 3, // Reverse Subtract
+ ADD = 4, // Add
+ ADC = 5, // Add with Carry
+ SBC = 6, // Subtract with Carry
+ RSC = 7, // Reverse Subtract with Carry
+ TST = 8, // Test
+ TEQ = 9, // Test Equivalence
CMP = 10, // Compare
CMN = 11, // Compare Negated
ORR = 12, // Logical (inclusive) OR
@@ -146,7 +143,7 @@
MVN = 15, // Move Not
kMaxOperand = 16
};
-
+std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
@@ -158,11 +155,11 @@
RRX = 4, // Rotate right with extend.
kMaxShift
};
-
+std::ostream& operator<<(std::ostream& os, const Shift& rhs);
// Constants used for the decoding or encoding of the individual fields of
// instructions. Based on the "Figure 3-1 ARM instruction set summary".
-enum InstructionFields {
+enum InstructionFields { // private marker to keep generate-operator-out.py from processing.
kConditionShift = 28,
kConditionBits = 4,
kTypeShift = 25,
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1af7374..390f2ea 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -474,11 +474,9 @@
UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
+void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
// TODO: Should we check that m_scratch is IP? - see arm.
-#if ANDROID_SMP != 0
___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
-#endif
}
void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index c144991..a69be25 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -27,8 +27,13 @@
#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"
+
+// TODO: make vixl clean wrt -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
#include "a64/macro-assembler-a64.h"
#include "a64/disasm-a64.h"
+#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
@@ -219,12 +224,13 @@
void AddConstant(XRegister rd, int32_t value, vixl::Condition cond = vixl::al);
void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);
- // Vixl assembler.
- vixl::MacroAssembler* const vixl_masm_;
-
// List of exception blocks to generate at the end of the code cache.
std::vector<Arm64Exception*> exception_blocks_;
+ public:
+ // Vixl assembler.
+ vixl::MacroAssembler* const vixl_masm_;
+
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
};
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
index e6b4a6a..1a7f2e8 100644
--- a/compiler/utils/array_ref.h
+++ b/compiler/utils/array_ref.h
@@ -68,18 +68,13 @@
template <typename U, size_t size>
constexpr ArrayRef(U (&array)[size],
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::type
+ t ATTRIBUTE_UNUSED = tag())
: array_(array), size_(size) {
}
- constexpr ArrayRef(T* array, size_t size)
- : array_(array), size_(size) {
- }
-
- template <typename U>
- constexpr ArrayRef(U* array, size_t size,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
- : array_(array), size_(size) {
+ constexpr ArrayRef(T* array_in, size_t size_in)
+ : array_(array_in), size_(size_in) {
}
template <typename Alloc>
@@ -89,7 +84,8 @@
template <typename U, typename Alloc>
ArrayRef(const std::vector<U, Alloc>& v,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::tag t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::tag
+ t ATTRIBUTE_UNUSED = tag())
: array_(v.data()), size_(v.size()) {
}
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 8a1289d..6834512 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -125,77 +125,91 @@
}
}
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) {
+void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) {
+void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) {
+void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) {
+void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 91b8d8ab..67711e3 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -19,16 +19,16 @@
#include <vector>
+#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/macros.h"
#include "arm/constants_arm.h"
-#include "mips/constants_mips.h"
-#include "x86/constants_x86.h"
-#include "x86_64/constants_x86_64.h"
-#include "instruction_set.h"
#include "managed_register.h"
#include "memory_region.h"
+#include "mips/constants_mips.h"
#include "offsets.h"
+#include "x86/constants_x86.h"
+#include "x86_64/constants_x86_64.h"
namespace art {
@@ -56,9 +56,9 @@
class ExternalLabel {
public:
- ExternalLabel(const char* name, uintptr_t address)
- : name_(name), address_(address) {
- DCHECK(name != nullptr);
+ ExternalLabel(const char* name_in, uintptr_t address_in)
+ : name_(name_in), address_(address_in) {
+ DCHECK(name_in != nullptr);
}
const char* name() const { return name_; }
@@ -118,6 +118,7 @@
friend class arm::ArmAssembler;
friend class arm::Arm32Assembler;
friend class arm::Thumb2Assembler;
+ friend class arm64::Arm64Assembler;
friend class mips::MipsAssembler;
friend class x86::X86Assembler;
friend class x86_64::X86_64Assembler;
@@ -139,10 +140,10 @@
int position_;
AssemblerFixup* previous() const { return previous_; }
- void set_previous(AssemblerFixup* previous) { previous_ = previous; }
+ void set_previous(AssemblerFixup* previous_in) { previous_ = previous_in; }
int position() const { return position_; }
- void set_position(int position) { position_ = position; }
+ void set_position(int position_in) { position_ = position_in; }
friend class AssemblerBuffer;
};
@@ -365,7 +366,7 @@
}
// TODO: Implement with disassembler.
- virtual void Comment(const char* format, ...) { }
+ virtual void Comment(const char* format, ...) { UNUSED(format); }
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 3742913..1fadb91 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -24,27 +24,36 @@
#include <cstdio>
#include <cstdlib>
#include <fstream>
-#include <iostream>
#include <iterator>
#include <sys/stat.h>
namespace art {
+// Helper for a constexpr string length.
+constexpr size_t ConstexprStrLen(char const* str, size_t count = 0) {
+ return ('\0' == str[0]) ? count : ConstexprStrLen(str+1, count+1);
+}
+
// Use a global static variable to keep the same name for all test data. Otherwise we'll just spam the
// temp directory.
static std::string tmpnam_;
-template<typename Ass, typename Reg, typename Imm>
+enum class RegisterView { // private
+ kUsePrimaryName,
+ kUseSecondaryName
+};
+
+template<typename Ass, typename Reg, typename FPReg, typename Imm>
class AssemblerTest : public testing::Test {
public:
Ass* GetAssembler() {
return assembler_.get();
}
- typedef std::string (*TestFn)(Ass* assembler);
+ typedef std::string (*TestFn)(AssemblerTest* assembler_test, Ass* assembler);
void DriverFn(TestFn f, std::string test_name) {
- Driver(f(assembler_.get()), test_name);
+ Driver(f(this, assembler_.get()), test_name);
}
// This driver assumes the assembler has already been called.
@@ -53,118 +62,117 @@
}
std::string RepeatR(void (Ass::*f)(Reg), std::string fmt) {
- const std::vector<Reg*> registers = GetRegisters();
- std::string str;
- for (auto reg : registers) {
- (assembler_.get()->*f)(*reg);
- std::string base = fmt;
+ return RepeatTemplatedRegister<Reg>(f,
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
- size_t reg_index = base.find("{reg}");
- if (reg_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg;
- std::string reg_string = sreg.str();
- base.replace(reg_index, 5, reg_string);
- }
-
- if (str.size() > 0) {
- str += "\n";
- }
- str += base;
- }
- // Add a newline at the end.
- str += "\n";
- return str;
+ std::string Repeatr(void (Ass::*f)(Reg), std::string fmt) {
+ return RepeatTemplatedRegister<Reg>(f,
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
}
std::string RepeatRR(void (Ass::*f)(Reg, Reg), std::string fmt) {
- const std::vector<Reg*> registers = GetRegisters();
- std::string str;
- for (auto reg1 : registers) {
- for (auto reg2 : registers) {
- (assembler_.get()->*f)(*reg1, *reg2);
- std::string base = fmt;
+ return RepeatTemplatedRegisters<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
- size_t reg1_index = base.find("{reg1}");
- if (reg1_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg1;
- std::string reg_string = sreg.str();
- base.replace(reg1_index, 6, reg_string);
- }
+ std::string Repeatrr(void (Ass::*f)(Reg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
+ }
- size_t reg2_index = base.find("{reg2}");
- if (reg2_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg2;
- std::string reg_string = sreg.str();
- base.replace(reg2_index, 6, reg_string);
- }
-
- if (str.size() > 0) {
- str += "\n";
- }
- str += base;
- }
- }
- // Add a newline at the end.
- str += "\n";
- return str;
+ std::string RepeatRr(void (Ass::*f)(Reg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
}
std::string RepeatRI(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, std::string fmt) {
- const std::vector<Reg*> registers = GetRegisters();
- std::string str;
- std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
- for (auto reg : registers) {
- for (int64_t imm : imms) {
- Imm* new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*reg, *new_imm);
- delete new_imm;
- std::string base = fmt;
-
- size_t reg_index = base.find("{reg}");
- if (reg_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg;
- std::string reg_string = sreg.str();
- base.replace(reg_index, 5, reg_string);
- }
-
- size_t imm_index = base.find("{imm}");
- if (imm_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << imm;
- std::string imm_string = sreg.str();
- base.replace(imm_index, 5, imm_string);
- }
-
- if (str.size() > 0) {
- str += "\n";
- }
- str += base;
- }
- }
- // Add a newline at the end.
- str += "\n";
- return str;
+ return RepeatRegisterImm<RegisterView::kUsePrimaryName>(f, imm_bytes, fmt);
}
- std::string RepeatI(void (Ass::*f)(const Imm&), size_t imm_bytes, std::string fmt) {
+ std::string Repeatri(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, std::string fmt) {
+ return RepeatRegisterImm<RegisterView::kUseSecondaryName>(f, imm_bytes, fmt);
+ }
+
+ std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, FPReg>(f,
+ GetFPRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ std::string RepeatFR(void (Ass::*f)(FPReg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, Reg>(f,
+ GetFPRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ std::string RepeatFr(void (Ass::*f)(FPReg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, Reg>(f,
+ GetFPRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
+ }
+
+ std::string RepeatRF(void (Ass::*f)(Reg, FPReg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, FPReg>(f,
+ GetRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ std::string RepeatrF(void (Ass::*f)(Reg, FPReg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, FPReg>(f,
+ GetRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ std::string RepeatI(void (Ass::*f)(const Imm&), size_t imm_bytes, std::string fmt,
+ bool as_uint = false) {
std::string str;
- std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
+ std::vector<int64_t> imms = CreateImmediateValues(imm_bytes, as_uint);
+
+ WarnOnCombinations(imms.size());
+
for (int64_t imm : imms) {
- Imm* new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*new_imm);
- delete new_imm;
+ Imm new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(new_imm);
std::string base = fmt;
- size_t imm_index = base.find("{imm}");
+ size_t imm_index = base.find(IMM_TOKEN);
if (imm_index != std::string::npos) {
std::ostringstream sreg;
sreg << imm;
std::string imm_string = sreg.str();
- base.replace(imm_index, 5, imm_string);
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
}
if (str.size() > 0) {
@@ -179,12 +187,12 @@
// This is intended to be run as a test.
bool CheckTools() {
- if (!FileExists(GetAssemblerCommand())) {
+ if (!FileExists(FindTool(GetAssemblerCmdName()))) {
return false;
}
LOG(INFO) << "Chosen assembler command: " << GetAssemblerCommand();
- if (!FileExists(GetObjdumpCommand())) {
+ if (!FileExists(FindTool(GetObjdumpCmdName()))) {
return false;
}
LOG(INFO) << "Chosen objdump command: " << GetObjdumpCommand();
@@ -192,7 +200,7 @@
// Disassembly is optional.
std::string disassembler = GetDisassembleCommand();
if (disassembler.length() != 0) {
- if (!FileExists(disassembler)) {
+ if (!FileExists(FindTool(GetDisassembleCmdName()))) {
return false;
}
LOG(INFO) << "Chosen disassemble command: " << GetDisassembleCommand();
@@ -203,7 +211,24 @@
return true;
}
+ // The following functions are public so that TestFn can use them...
+
+ virtual std::vector<Reg*> GetRegisters() = 0;
+
+ virtual std::vector<FPReg*> GetFPRegisters() {
+ UNIMPLEMENTED(FATAL) << "Architecture does not support floating-point registers";
+ UNREACHABLE();
+ }
+
+ // Secondary register names provide an alternative (secondary) view of the registers, e.g., the 32-bit names on 64-bit systems.
+ virtual std::string GetSecondaryRegisterName(const Reg& reg ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "Architecture does not support secondary registers";
+ UNREACHABLE();
+ }
+
protected:
+ explicit AssemblerTest() {}
+
void SetUp() OVERRIDE {
assembler_.reset(new Ass());
@@ -222,8 +247,6 @@
// Override this to set up any architecture-specific things, e.g., register vectors.
virtual void SetUpHelpers() {}
- virtual std::vector<Reg*> GetRegisters() = 0;
-
// Get the typically used name for this architecture, e.g., aarch64, x86_64, ...
virtual std::string GetArchitectureString() = 0;
@@ -251,7 +274,7 @@
resolved_assembler_cmd_ = line + GetAssemblerParameters();
- return line;
+ return resolved_assembler_cmd_;
}
// Get the name of the objdump, e.g., "objdump" by default.
@@ -278,7 +301,7 @@
resolved_objdump_cmd_ = line + GetObjdumpParameters();
- return line;
+ return resolved_objdump_cmd_;
}
// Get the name of the objdump, e.g., "objdump" by default.
@@ -304,27 +327,45 @@
resolved_disassemble_cmd_ = line + GetDisassembleParameters();
- return line;
+ return resolved_disassemble_cmd_;
}
// Create a couple of immediate values up to the number of bytes given.
- virtual std::vector<int64_t> CreateImmediateValues(size_t imm_bytes) {
+ virtual std::vector<int64_t> CreateImmediateValues(size_t imm_bytes, bool as_uint = false) {
std::vector<int64_t> res;
res.push_back(0);
- res.push_back(-1);
+ if (!as_uint) {
+ res.push_back(-1);
+ } else {
+ res.push_back(0xFF);
+ }
res.push_back(0x12);
if (imm_bytes >= 2) {
res.push_back(0x1234);
- res.push_back(-0x1234);
+ if (!as_uint) {
+ res.push_back(-0x1234);
+ } else {
+ res.push_back(0xFFFF);
+ }
if (imm_bytes >= 4) {
res.push_back(0x12345678);
- res.push_back(-0x12345678);
+ if (!as_uint) {
+ res.push_back(-0x12345678);
+ } else {
+ res.push_back(0xFFFFFFFF);
+ }
if (imm_bytes >= 6) {
res.push_back(0x123456789ABC);
- res.push_back(-0x123456789ABC);
+ if (!as_uint) {
+ res.push_back(-0x123456789ABC);
+ }
if (imm_bytes >= 8) {
res.push_back(0x123456789ABCDEF0);
- res.push_back(-0x123456789ABCDEF0);
+ if (!as_uint) {
+ res.push_back(-0x123456789ABCDEF0);
+ } else {
+ res.push_back(0xFFFFFFFFFFFFFFFF);
+ }
}
}
}
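To make the signed/unsigned split concrete, the sets produced for a 2-byte immediate follow directly from the branches above:

// imm_bytes == 2, as_uint == false: 0, -1, 0x12, 0x1234, -0x1234
// imm_bytes == 2, as_uint == true : 0, 0xFF, 0x12, 0x1234, 0xFFFF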
@@ -333,9 +374,150 @@
}
// Create an immediate from the specific value.
- virtual Imm* CreateImmediate(int64_t imm_value) = 0;
+ virtual Imm CreateImmediate(int64_t imm_value) = 0;
+
+ template <typename RegType>
+ std::string RepeatTemplatedRegister(void (Ass::*f)(RegType),
+ const std::vector<RegType*> registers,
+ std::string (AssemblerTest::*GetName)(const RegType&),
+ std::string fmt) {
+ std::string str;
+ for (auto reg : registers) {
+ (assembler_.get()->*f)(*reg);
+ std::string base = fmt;
+
+ std::string reg_string = (this->*GetName)(*reg);
+ size_t reg_index;
+ if ((reg_index = base.find(REG_TOKEN)) != std::string::npos) {
+ base.replace(reg_index, ConstexprStrLen(REG_TOKEN), reg_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRegisters(void (Ass::*f)(Reg1, Reg2),
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string fmt) {
+ WarnOnCombinations(reg1_registers.size() * reg2_registers.size());
+
+ std::string str;
+ for (auto reg1 : reg1_registers) {
+ for (auto reg2 : reg2_registers) {
+ (assembler_.get()->*f)(*reg1, *reg2);
+ std::string base = fmt;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+ base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+ }
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+ base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
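For orientation, one invocation with a two-register format string expands the tokens once per register pair (sketch, assuming the registers print with their primary 64-bit names on x86-64):

// fmt = "addq %{reg2}, %{reg1}" yields one line per (reg1, reg2) pair:
//   addq %rax, %rax
//   addq %rbx, %rax
//   ...              // reg2 varies fastest, then reg1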
+
+ template <RegisterView kRegView>
+ std::string GetRegName(const Reg& reg) {
+ std::ostringstream sreg;
+ switch (kRegView) {
+ case RegisterView::kUsePrimaryName:
+ sreg << reg;
+ break;
+
+ case RegisterView::kUseSecondaryName:
+ sreg << GetSecondaryRegisterName(reg);
+ break;
+ }
+ return sreg.str();
+ }
+
+ std::string GetFPRegName(const FPReg& reg) {
+ std::ostringstream sreg;
+ sreg << reg;
+ return sreg.str();
+ }
+
+ // If the assembly file needs a header, return it in a sub-class.
+ virtual const char* GetAssemblyHeader() {
+ return nullptr;
+ }
+
+ void WarnOnCombinations(size_t count) {
+ if (count > kWarnManyCombinationsThreshold) {
+ GTEST_LOG_(WARNING) << "Many combinations (" << count << "), test generation might be slow.";
+ }
+ }
+
+ static constexpr const char* REG_TOKEN = "{reg}";
+ static constexpr const char* REG1_TOKEN = "{reg1}";
+ static constexpr const char* REG2_TOKEN = "{reg2}";
+ static constexpr const char* IMM_TOKEN = "{imm}";
private:
+ template <RegisterView kRegView>
+ std::string RepeatRegisterImm(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes,
+ std::string fmt) {
+ const std::vector<Reg*> registers = GetRegisters();
+ std::string str;
+ std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
+
+ WarnOnCombinations(registers.size() * imms.size());
+
+ for (auto reg : registers) {
+ for (int64_t imm : imms) {
+ Imm new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(*reg, new_imm);
+ std::string base = fmt;
+
+ std::string reg_string = GetRegName<kRegView>(*reg);
+ size_t reg_index;
+ while ((reg_index = base.find(REG_TOKEN)) != std::string::npos) {
+ base.replace(reg_index, ConstexprStrLen(REG_TOKEN), reg_string);
+ }
+
+ size_t imm_index = base.find(IMM_TOKEN);
+ if (imm_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << imm;
+ std::string imm_string = sreg.str();
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
// Driver() assembles and compares the results. If the results are not equal and we have a
// disassembler, disassemble both and check whether they have the same mnemonics (in which case
// we just warn).
@@ -373,7 +555,7 @@
}
} else {
// This will output the assembly.
- EXPECT_EQ(*data, *res.code) << "Outputs (and disassembly) not identical.";
+ EXPECT_EQ(*res.code, *data) << "Outputs (and disassembly) not identical.";
}
}
}
@@ -389,7 +571,7 @@
// Compile the assembly file from_file to a binary file to_file. Returns true on success.
bool Assemble(const char* from_file, const char* to_file, std::string* error_msg) {
- bool have_assembler = FileExists(GetAssemblerCommand());
+ bool have_assembler = FileExists(FindTool(GetAssemblerCmdName()));
EXPECT_TRUE(have_assembler) << "Cannot find assembler:" << GetAssemblerCommand();
if (!have_assembler) {
return false;
@@ -397,18 +579,34 @@
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetAssemblerCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetAssemblerCommand());
args.push_back("-o");
args.push_back(to_file);
args.push_back(from_file);
+ std::string cmd = Join(args, ' ');
- return Exec(args, error_msg);
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(cmd);
+
+ bool success = Exec(args, error_msg);
+ if (!success) {
+ LOG(INFO) << "Assembler command line:";
+ for (std::string arg : args) {
+ LOG(INFO) << arg;
+ }
+ }
+ return success;
}
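To illustrate why the command is routed through a shell: if the resolved assembler command carries extra flags (the toolchain name below is purely hypothetical), the exec vector becomes:

// GetAssemblerCommand() == "x86_64-linux-android-as --64"            (hypothetical)
// args == { "/bin/sh", "-c",
//           "x86_64-linux-android-as --64 -o <to_file> <from_file>" }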
// Runs objdump -h on the binary file and extracts the first line with .text.
// Returns "" on failure.
std::string Objdump(std::string file) {
- bool have_objdump = FileExists(GetObjdumpCommand());
+ bool have_objdump = FileExists(FindTool(GetObjdumpCmdName()));
EXPECT_TRUE(have_objdump) << "Cannot find objdump: " << GetObjdumpCommand();
if (!have_objdump) {
return "";
@@ -417,6 +615,9 @@
std::string error_msg;
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetObjdumpCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetObjdumpCommand());
args.push_back(file);
args.push_back(">");
@@ -480,12 +681,12 @@
bool result = CompareFiles(data_name + ".dis", as_name + ".dis");
- if (result) {
- std::remove(data_name.c_str());
- std::remove(as_name.c_str());
- std::remove((data_name + ".dis").c_str());
- std::remove((as_name + ".dis").c_str());
- }
+ // If you want to take a look at the differences between the ART assembler and GCC, comment
+ // out the removal code.
+// std::remove(data_name.c_str());
+// std::remove(as_name.c_str());
+// std::remove((data_name + ".dis").c_str());
+// std::remove((as_name + ".dis").c_str());
return result;
}
@@ -493,6 +694,9 @@
bool DisassembleBinary(std::string file, std::string* error_msg) {
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetDisassembleCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetDisassembleCommand());
args.push_back(file);
args.push_back("| sed -n \'/<.data>/,$p\' | sed -e \'s/.*://\'");
@@ -541,6 +745,10 @@
// TODO: Lots of error checking.
std::ofstream s_out(res->base_name + ".S");
+ const char* header = GetAssemblyHeader();
+ if (header != nullptr) {
+ s_out << header;
+ }
s_out << assembly_code;
s_out.close();
@@ -689,6 +897,9 @@
return tmpnam_;
}
+ static constexpr size_t kWarnManyCombinationsThreshold = 500;
+ static constexpr size_t OBJDUMP_SECTION_LINE_MIN_TOKENS = 6;
+
std::unique_ptr<Ass> assembler_;
std::string resolved_assembler_cmd_;
@@ -697,7 +908,7 @@
std::string android_data_;
- static constexpr size_t OBJDUMP_SECTION_LINE_MIN_TOKENS = 6;
+ DISALLOW_COPY_AND_ASSIGN(AssemblerTest);
};
} // namespace art
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 61e420c..fde65e7 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -19,26 +19,20 @@
#include <stdint.h>
#include <stddef.h>
-#include "arena_allocator.h"
+
+#include "arena_object.h"
namespace art {
-// Type of growable list for memory tuning.
-enum OatListKind {
- kGrowableArrayMisc = 0,
- kGNumListKinds
-};
-
// Deprecated
// TODO: Replace all uses with ArenaVector<T>.
template<typename T>
-class GrowableArray {
+class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
public:
- GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
+ GrowableArray(ArenaAllocator* arena, size_t init_length)
: arena_(arena),
num_allocated_(init_length),
- num_used_(0),
- kind_(kind) {
+ num_used_(0) {
elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
kArenaAllocGrowableArray));
}
@@ -152,16 +146,10 @@
T* GetRawStorage() const { return elem_list_; }
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(GrowableArray<T>), kArenaAllocGrowableArray);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAllocator* const arena_;
size_t num_allocated_;
size_t num_used_;
- OatListKind kind_;
T* elem_list_;
};
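With GrowableArray now deriving from ArenaObject, callers keep using the arena placement form of operator new that ArenaObject provides; a minimal sketch (arena is assumed to be a live ArenaAllocator*):

GrowableArray<int>* list = new (arena) GrowableArray<int>(arena, 16);
list->Insert(42);  // storage comes from the arena; no explicit delete is needed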
diff --git a/compiler/utils/scoped_arena_allocator.cc b/compiler/utils/scoped_arena_allocator.cc
index 2616150..d9e0619 100644
--- a/compiler/utils/scoped_arena_allocator.cc
+++ b/compiler/utils/scoped_arena_allocator.cc
@@ -96,6 +96,7 @@
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
+ CHECK(ptr != nullptr) << "Failed to allocate memory";
}
CurrentStats()->RecordAlloc(bytes, kind);
top_ptr_ = ptr + rounded_bytes;
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 0de7403..df93b27 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -140,12 +140,15 @@
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p);
+ UNUSED(n);
DebugStackIndirectTopRef::CheckTop();
}
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index ce01077..c348f2c 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_UTILS_STACK_CHECKS_H_
#define ART_COMPILER_UTILS_STACK_CHECKS_H_
-#include "instruction_set.h"
+#include "arch/instruction_set.h"
namespace art {
@@ -34,7 +34,7 @@
// stack overflow check on method entry.
//
// A frame is considered large when it's above kLargeFrameSize.
-static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
+static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa ATTRIBUTE_UNUSED) {
return size >= kLargeFrameSize;
}
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 4ddf979..afa4a3b 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -873,6 +873,13 @@
}
+void X86Assembler::andl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x23);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::andl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(4, Operand(dst), imm);
@@ -886,6 +893,13 @@
}
+void X86Assembler::orl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0B);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::orl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(1, Operand(dst), imm);
@@ -898,11 +912,20 @@
EmitOperand(dst, Operand(src));
}
+
+void X86Assembler::xorl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x33);
+ EmitOperand(reg, address);
+}
+
+
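A usage sketch for the three new register-with-memory forms (assuming the usual `__` assembler shorthand; the addresses are hypothetical):

__ andl(EAX, Address(ESP, 4));    // EAX &= *(ESP + 4)
__ orl(ECX, Address(EBX, 8));     // ECX |= *(EBX + 8)
__ xorl(EDX, Address(EBP, 12));   // EDX ^= *(EBP + 12)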
void X86Assembler::xorl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(6, Operand(dst), imm);
}
+
void X86Assembler::addl(Register reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(0, Operand(reg), imm);
@@ -1807,9 +1830,7 @@
}
void X86Assembler::MemoryBarrier(ManagedRegister) {
-#if ANDROID_SMP != 0
mfence();
-#endif
}
void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index c7eada3..8aed934 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -29,9 +29,9 @@
namespace art {
namespace x86 {
-class Immediate {
+class Immediate : public ValueObject {
public:
- explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(int32_t value_in) : value_(value_in) {}
int32_t value() const { return value_; }
@@ -47,7 +47,7 @@
};
-class Operand {
+class Operand : public ValueObject {
public:
uint8_t mod() const {
return (encoding_at(0) >> 6) & 3;
@@ -90,16 +90,16 @@
// Operand can be sub classed (e.g: Address).
Operand() : length_(0) { }
- void SetModRM(int mod, Register rm) {
- CHECK_EQ(mod & ~3, 0);
- encoding_[0] = (mod << 6) | rm;
+ void SetModRM(int mod_in, Register rm_in) {
+ CHECK_EQ(mod_in & ~3, 0);
+ encoding_[0] = (mod_in << 6) | rm_in;
length_ = 1;
}
- void SetSIB(ScaleFactor scale, Register index, Register base) {
+ void SetSIB(ScaleFactor scale_in, Register index_in, Register base_in) {
CHECK_EQ(length_, 1);
- CHECK_EQ(scale & ~3, 0);
- encoding_[1] = (scale << 6) | (index << 3) | base;
+ CHECK_EQ(scale_in & ~3, 0);
+ encoding_[1] = (scale_in << 6) | (index_in << 3) | base_in;
length_ = 2;
}
@@ -122,72 +122,69 @@
explicit Operand(Register reg) { SetModRM(3, reg); }
// Get the operand encoding byte at the given index.
- uint8_t encoding_at(int index) const {
- CHECK_GE(index, 0);
- CHECK_LT(index, length_);
- return encoding_[index];
+ uint8_t encoding_at(int index_in) const {
+ CHECK_GE(index_in, 0);
+ CHECK_LT(index_in, length_);
+ return encoding_[index_in];
}
friend class X86Assembler;
-
- DISALLOW_COPY_AND_ASSIGN(Operand);
};
class Address : public Operand {
public:
- Address(Register base, int32_t disp) {
- Init(base, disp);
+ Address(Register base_in, int32_t disp) {
+ Init(base_in, disp);
}
- Address(Register base, Offset disp) {
- Init(base, disp.Int32Value());
+ Address(Register base_in, Offset disp) {
+ Init(base_in, disp.Int32Value());
}
- Address(Register base, FrameOffset disp) {
- CHECK_EQ(base, ESP);
+ Address(Register base_in, FrameOffset disp) {
+ CHECK_EQ(base_in, ESP);
Init(ESP, disp.Int32Value());
}
- Address(Register base, MemberOffset disp) {
- Init(base, disp.Int32Value());
+ Address(Register base_in, MemberOffset disp) {
+ Init(base_in, disp.Int32Value());
}
- void Init(Register base, int32_t disp) {
- if (disp == 0 && base != EBP) {
- SetModRM(0, base);
- if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ void Init(Register base_in, int32_t disp) {
+ if (disp == 0 && base_in != EBP) {
+ SetModRM(0, base_in);
+ if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in);
} else if (disp >= -128 && disp <= 127) {
- SetModRM(1, base);
- if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetModRM(1, base_in);
+ if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in);
SetDisp8(disp);
} else {
- SetModRM(2, base);
- if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetModRM(2, base_in);
+ if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in);
SetDisp32(disp);
}
}
-
- Address(Register index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index, ESP); // Illegal addressing mode.
+ Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
SetModRM(0, ESP);
- SetSIB(scale, index, EBP);
+ SetSIB(scale_in, index_in, EBP);
SetDisp32(disp);
}
- Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index, ESP); // Illegal addressing mode.
- if (disp == 0 && base != EBP) {
+ Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
+ if (disp == 0 && base_in != EBP) {
SetModRM(0, ESP);
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
} else if (disp >= -128 && disp <= 127) {
SetModRM(1, ESP);
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp8(disp);
} else {
SetModRM(2, ESP);
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp32(disp);
}
}
@@ -205,14 +202,12 @@
private:
Address() {}
-
- DISALLOW_COPY_AND_ASSIGN(Address);
};
class X86Assembler FINAL : public Assembler {
public:
- explicit X86Assembler() {}
+ explicit X86Assembler() : cfi_cfa_offset_(0), cfi_pc_(0) {}
virtual ~X86Assembler() {}
/*
@@ -355,12 +350,15 @@
void andl(Register dst, const Immediate& imm);
void andl(Register dst, Register src);
+ void andl(Register dst, const Address& address);
void orl(Register dst, const Immediate& imm);
void orl(Register dst, Register src);
+ void orl(Register dst, const Address& address);
void xorl(Register dst, Register src);
void xorl(Register dst, const Immediate& imm);
+ void xorl(Register dst, const Address& address);
void addl(Register dst, Register src);
void addl(Register reg, const Immediate& imm);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index db7151c..8c428f4 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -120,6 +120,7 @@
void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) {
+ CHECK(imm.is_int32());
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
EmitUint8(0xB8 + dst.LowBits());
@@ -317,7 +318,7 @@
EmitOptionalRex32(dst, src);
EmitUint8(0x0F);
EmitUint8(0x28);
- EmitXmmRegisterOperand(src.LowBits(), dst);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
}
@@ -344,17 +345,33 @@
void X86_64Assembler::movss(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
- EmitOptionalRex32(dst, src);
+ EmitOptionalRex32(src, dst); // Movss is MR encoding instead of the usual RM.
EmitUint8(0x0F);
EmitUint8(0x11);
EmitXmmRegisterOperand(src.LowBits(), dst);
}
+void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x63);
+ EmitRegisterOperand(dst.LowBits(), src.LowBits());
+}
+
+
+void X86_64Assembler::movsxd(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0x63);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::movd(XmmRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
- EmitOptionalRex32(dst, src);
+ EmitRex64(dst, src);
EmitUint8(0x0F);
EmitUint8(0x6E);
EmitOperand(dst.LowBits(), Operand(src));
@@ -364,7 +381,7 @@
void X86_64Assembler::movd(CpuRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
- EmitOptionalRex32(src, dst);
+ EmitRex64(src, dst);
EmitUint8(0x0F);
EmitUint8(0x7E);
EmitOperand(src.LowBits(), Operand(dst));
@@ -488,7 +505,7 @@
void X86_64Assembler::movsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
- EmitOptionalRex32(dst, src);
+ EmitOptionalRex32(src, dst); // Movsd is MR encoding instead of the usual RM.
EmitUint8(0x0F);
EmitUint8(0x11);
EmitXmmRegisterOperand(src.LowBits(), dst);
@@ -839,17 +856,46 @@
void X86_64Assembler::xchgl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(dst, src);
+ // There is a short version for rax.
+ // It's a bit awkward, as CpuRegister has a const field, so assignment and thus swapping doesn't
+ // work.
+ const bool src_rax = src.AsRegister() == RAX;
+ const bool dst_rax = dst.AsRegister() == RAX;
+ if (src_rax || dst_rax) {
+ EmitOptionalRex32(src_rax ? dst : src);
+ EmitUint8(0x90 + (src_rax ? dst.LowBits() : src.LowBits()));
+ return;
+ }
+
+ // General case.
+ EmitOptionalRex32(src, dst);
EmitUint8(0x87);
- EmitRegisterOperand(dst.LowBits(), src.LowBits());
+ EmitRegisterOperand(src.LowBits(), dst.LowBits());
}
void X86_64Assembler::xchgq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst, src);
+ // There is a short version for rax.
+ // It's a bit awkward, as CpuRegister has a const field, so assignment and thus swapping doesn't
+ // work.
+ const bool src_rax = src.AsRegister() == RAX;
+ const bool dst_rax = dst.AsRegister() == RAX;
+ if (src_rax || dst_rax) {
+ // If src == target, emit a nop instead.
+ if (src_rax && dst_rax) {
+ EmitUint8(0x90);
+ } else {
+ EmitRex64(src_rax ? dst : src);
+ EmitUint8(0x90 + (src_rax ? dst.LowBits() : src.LowBits()));
+ }
+ return;
+ }
+
+ // General case.
+ EmitRex64(src, dst);
EmitUint8(0x87);
- EmitOperand(dst.LowBits(), Operand(src));
+ EmitRegisterOperand(src.LowBits(), dst.LowBits());
}
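The RAX special-casing above implies the following encodings for a few operand choices (illustrative, not exhaustive):

// xchgq(RAX, RCX)  ->  48 91      short form: REX.W + (0x90 + reg)
// xchgq(RAX, RAX)  ->  90         degenerates to a nop, handled explicitly
// xchgq(RCX, RDX)  ->  48 87 d1   general form: REX.W + 0x87 + ModRM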
@@ -892,6 +938,21 @@
}
+void X86_64Assembler::cmpl(const Address& address, CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x39);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(address);
+ EmitComplex(7, address, imm);
+}
+
+
void X86_64Assembler::cmpq(CpuRegister reg0, CpuRegister reg1) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg0, reg1);
@@ -916,6 +977,14 @@
}
+void X86_64Assembler::cmpq(const Address& address, const Immediate& imm) {
+ CHECK(imm.is_int32()); // cmpq only supports 32b immediate.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(address);
+ EmitComplex(7, address, imm);
+}
+
+
void X86_64Assembler::addl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -932,21 +1001,6 @@
}
-void X86_64Assembler::cmpl(const Address& address, CpuRegister reg) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(reg, address);
- EmitUint8(0x39);
- EmitOperand(reg.LowBits(), address);
-}
-
-
-void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(address);
- EmitComplex(7, address, imm);
-}
-
-
void X86_64Assembler::testl(CpuRegister reg1, CpuRegister reg2) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg1, reg2);
@@ -981,6 +1035,14 @@
}
+void X86_64Assembler::testq(CpuRegister reg1, CpuRegister reg2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg1, reg2);
+ EmitUint8(0x85);
+ EmitRegisterOperand(reg1.LowBits(), reg2.LowBits());
+}
+
+
void X86_64Assembler::testq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg);
@@ -997,6 +1059,14 @@
}
+void X86_64Assembler::andl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x23);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::andl(CpuRegister dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
@@ -1012,6 +1082,14 @@
}
+void X86_64Assembler::andq(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x23);
+ EmitOperand(dst.LowBits(), Operand(src));
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1020,6 +1098,14 @@
}
+void X86_64Assembler::orl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x0B);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
@@ -1027,6 +1113,14 @@
}
+void X86_64Assembler::orq(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x0B);
+ EmitOperand(dst.LowBits(), Operand(src));
+}
+
+
void X86_64Assembler::xorl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1035,6 +1129,21 @@
}
+void X86_64Assembler::xorl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x33);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::xorl(CpuRegister dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst);
+ EmitComplex(6, Operand(dst), imm);
+}
+
+
void X86_64Assembler::xorq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(dst, src);
@@ -1203,6 +1312,13 @@
}
+void X86_64Assembler::cqo() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64();
+ EmitUint8(0x99);
+}
+
+
void X86_64Assembler::idivl(CpuRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg);
@@ -1211,6 +1327,14 @@
}
+void X86_64Assembler::idivq(CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0xF7);
+ EmitUint8(0xF8 | reg.LowBits());
+}
+
+
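Together, cqo and idivq enable the standard 64-bit signed division sequence; a sketch (register roles are fixed by the ISA, dividend/divisor are hypothetical CpuRegisters):

__ movq(CpuRegister(RAX), dividend);  // the dividend must live in RAX
__ cqo();                             // sign-extend RAX into RDX:RAX
__ idivq(divisor);                    // quotient -> RAX, remainder -> RDX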
void X86_64Assembler::imull(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1219,13 +1343,25 @@
EmitOperand(dst.LowBits(), Operand(src));
}
-
void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(reg);
- EmitUint8(0x69);
- EmitOperand(reg.LowBits(), Operand(reg));
- EmitImmediate(imm);
+ CHECK(imm.is_int32()); // imull only supports 32b immediate.
+
+ EmitOptionalRex32(reg, reg);
+
+ // See whether imm can be represented as a sign-extended 8bit value.
+ int32_t v32 = static_cast<int32_t>(imm.value());
+ if (IsInt32(8, v32)) {
+ // Sign-extension works.
+ EmitUint8(0x6B);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitUint8(static_cast<uint8_t>(v32 & 0xFF));
+ } else {
+ // Not representable, use full immediate.
+ EmitUint8(0x69);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitImmediate(imm);
+ }
}
@@ -1250,10 +1386,22 @@
void X86_64Assembler::imulq(CpuRegister reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int32()); // imulq only supports 32b immediate.
- EmitRex64(reg);
- EmitUint8(0x69);
- EmitOperand(reg.LowBits(), Operand(reg));
- EmitImmediate(imm);
+
+ EmitRex64(reg, reg);
+
+ // See whether imm can be represented as a sign-extended 8bit value.
+ int64_t v64 = imm.value();
+ if (IsInt64(8, v64)) {
+ // Sign-extension works.
+ EmitUint8(0x6B);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitUint8(static_cast<uint8_t>(v64 & 0xFF));
+ } else {
+ // Not representable, use full immediate.
+ EmitUint8(0x69);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitImmediate(imm);
+ }
}
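The width selection in imull/imulq above boils down to the opcode chosen (illustrative):

// imull(reg, Immediate(16))       -> 0x6B, 1-byte sign-extended immediate
// imull(reg, Immediate(0x12345))  -> 0x69, 4-byte immediate
// imulq makes the same choice and additionally carries a REX.W prefix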
@@ -1341,6 +1489,14 @@
}
+void X86_64Assembler::negq(CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0xF7);
+ EmitOperand(3, Operand(reg));
+}
+
+
void X86_64Assembler::notl(CpuRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg);
@@ -1349,6 +1505,14 @@
}
+void X86_64Assembler::notq(CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0xF7);
+ EmitOperand(2, Operand(reg));
+}
+
+
void X86_64Assembler::enter(const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xC8);
@@ -1648,6 +1812,8 @@
CHECK(imm.is_int8());
if (wide) {
EmitRex64(reg);
+ } else {
+ EmitOptionalRex32(reg);
}
if (imm.value() == 1) {
EmitUint8(0xD1);
@@ -1665,6 +1831,7 @@
CpuRegister shifter) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK_EQ(shifter.AsRegister(), RCX);
+ EmitOptionalRex32(operand);
EmitUint8(0xD3);
EmitOperand(reg_or_opcode, Operand(operand));
}
@@ -1740,14 +1907,28 @@
}
}
+void X86_64Assembler::EmitRex64() {
+ EmitOptionalRex(false, true, false, false, false);
+}
+
void X86_64Assembler::EmitRex64(CpuRegister reg) {
EmitOptionalRex(false, true, false, false, reg.NeedsRex());
}
+void X86_64Assembler::EmitRex64(const Operand& operand) {
+ uint8_t rex = operand.rex();
+ rex |= 0x48; // REX.W000
+ EmitUint8(rex);
+}
+
void X86_64Assembler::EmitRex64(CpuRegister dst, CpuRegister src) {
EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
}
+void X86_64Assembler::EmitRex64(XmmRegister dst, CpuRegister src) {
+ EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
+}
+
void X86_64Assembler::EmitRex64(CpuRegister dst, const Operand& operand) {
uint8_t rex = 0x48 | operand.rex(); // REX.W000
if (dst.NeedsRex()) {
@@ -2190,9 +2371,7 @@
}
void X86_64Assembler::MemoryBarrier(ManagedRegister) {
-#if ANDROID_SMP != 0
mfence();
-#endif
}
void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 4ffb6b5..4dd70e2 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -36,9 +36,9 @@
//
// Note: As we support cross-compilation, the value type must be int64_t. Please be aware of
// conversion rules in expressions regarding negation, especially size_t on 32b.
-class Immediate {
+class Immediate : public ValueObject {
public:
- explicit Immediate(int64_t value) : value_(value) {}
+ explicit Immediate(int64_t value_in) : value_(value_in) {}
int64_t value() const { return value_; }
@@ -54,12 +54,10 @@
private:
const int64_t value_;
-
- DISALLOW_COPY_AND_ASSIGN(Immediate);
};
-class Operand {
+class Operand : public ValueObject {
public:
uint8_t mod() const {
return (encoding_at(0) >> 6) & 3;
@@ -107,26 +105,26 @@
// Operand can be sub classed (e.g: Address).
Operand() : rex_(0), length_(0) { }
- void SetModRM(uint8_t mod, CpuRegister rm) {
- CHECK_EQ(mod & ~3, 0);
- if (rm.NeedsRex()) {
+ void SetModRM(uint8_t mod_in, CpuRegister rm_in) {
+ CHECK_EQ(mod_in & ~3, 0);
+ if (rm_in.NeedsRex()) {
rex_ |= 0x41; // REX.000B
}
- encoding_[0] = (mod << 6) | rm.LowBits();
+ encoding_[0] = (mod_in << 6) | rm_in.LowBits();
length_ = 1;
}
- void SetSIB(ScaleFactor scale, CpuRegister index, CpuRegister base) {
+ void SetSIB(ScaleFactor scale_in, CpuRegister index_in, CpuRegister base_in) {
CHECK_EQ(length_, 1);
- CHECK_EQ(scale & ~3, 0);
- if (base.NeedsRex()) {
+ CHECK_EQ(scale_in & ~3, 0);
+ if (base_in.NeedsRex()) {
rex_ |= 0x41; // REX.000B
}
- if (index.NeedsRex()) {
+ if (index_in.NeedsRex()) {
rex_ |= 0x42; // REX.00X0
}
- encoding_[1] = (scale << 6) | (static_cast<uint8_t>(index.LowBits()) << 3) |
- static_cast<uint8_t>(base.LowBits());
+ encoding_[1] = (scale_in << 6) | (static_cast<uint8_t>(index_in.LowBits()) << 3) |
+ static_cast<uint8_t>(base_in.LowBits());
length_ = 2;
}
@@ -150,78 +148,76 @@
explicit Operand(CpuRegister reg) : rex_(0), length_(0) { SetModRM(3, reg); }
// Get the operand encoding byte at the given index.
- uint8_t encoding_at(int index) const {
- CHECK_GE(index, 0);
- CHECK_LT(index, length_);
- return encoding_[index];
+ uint8_t encoding_at(int index_in) const {
+ CHECK_GE(index_in, 0);
+ CHECK_LT(index_in, length_);
+ return encoding_[index_in];
}
friend class X86_64Assembler;
-
- DISALLOW_COPY_AND_ASSIGN(Operand);
};
class Address : public Operand {
public:
- Address(CpuRegister base, int32_t disp) {
- Init(base, disp);
+ Address(CpuRegister base_in, int32_t disp) {
+ Init(base_in, disp);
}
- Address(CpuRegister base, Offset disp) {
- Init(base, disp.Int32Value());
+ Address(CpuRegister base_in, Offset disp) {
+ Init(base_in, disp.Int32Value());
}
- Address(CpuRegister base, FrameOffset disp) {
- CHECK_EQ(base.AsRegister(), RSP);
+ Address(CpuRegister base_in, FrameOffset disp) {
+ CHECK_EQ(base_in.AsRegister(), RSP);
Init(CpuRegister(RSP), disp.Int32Value());
}
- Address(CpuRegister base, MemberOffset disp) {
- Init(base, disp.Int32Value());
+ Address(CpuRegister base_in, MemberOffset disp) {
+ Init(base_in, disp.Int32Value());
}
- void Init(CpuRegister base, int32_t disp) {
- if (disp == 0 && base.AsRegister() != RBP) {
- SetModRM(0, base);
- if (base.AsRegister() == RSP) {
- SetSIB(TIMES_1, CpuRegister(RSP), base);
+ void Init(CpuRegister base_in, int32_t disp) {
+ if (disp == 0 && base_in.AsRegister() != RBP) {
+ SetModRM(0, base_in);
+ if (base_in.AsRegister() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
}
} else if (disp >= -128 && disp <= 127) {
- SetModRM(1, base);
- if (base.AsRegister() == RSP) {
- SetSIB(TIMES_1, CpuRegister(RSP), base);
+ SetModRM(1, base_in);
+ if (base_in.AsRegister() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
}
SetDisp8(disp);
} else {
- SetModRM(2, base);
- if (base.AsRegister() == RSP) {
- SetSIB(TIMES_1, CpuRegister(RSP), base);
+ SetModRM(2, base_in);
+ if (base_in.AsRegister() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
}
SetDisp32(disp);
}
}
- Address(CpuRegister index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index.AsRegister(), RSP); // Illegal addressing mode.
+ Address(CpuRegister index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode.
SetModRM(0, CpuRegister(RSP));
- SetSIB(scale, index, CpuRegister(RBP));
+ SetSIB(scale_in, index_in, CpuRegister(RBP));
SetDisp32(disp);
}
- Address(CpuRegister base, CpuRegister index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index.AsRegister(), RSP); // Illegal addressing mode.
- if (disp == 0 && base.AsRegister() != RBP) {
+ Address(CpuRegister base_in, CpuRegister index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode.
+ if (disp == 0 && base_in.AsRegister() != RBP) {
SetModRM(0, CpuRegister(RSP));
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
} else if (disp >= -128 && disp <= 127) {
SetModRM(1, CpuRegister(RSP));
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp8(disp);
} else {
SetModRM(2, CpuRegister(RSP));
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp32(disp);
}
}
@@ -247,8 +243,6 @@
private:
Address() {}
-
- DISALLOW_COPY_AND_ASSIGN(Address);
};
@@ -306,6 +300,9 @@
void movss(const Address& dst, XmmRegister src);
void movss(XmmRegister dst, XmmRegister src);
+ void movsxd(CpuRegister dst, CpuRegister src);
+ void movsxd(CpuRegister dst, const Address& src);
+
void movd(XmmRegister dst, CpuRegister src);
void movd(CpuRegister dst, XmmRegister src);
@@ -331,17 +328,17 @@
void divsd(XmmRegister dst, XmmRegister src);
void divsd(XmmRegister dst, const Address& src);
- void cvtsi2ss(XmmRegister dst, CpuRegister src);
- void cvtsi2sd(XmmRegister dst, CpuRegister src);
+ void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
+ void cvtsi2sd(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
- void cvtss2si(CpuRegister dst, XmmRegister src);
+ void cvtss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtss2sd(XmmRegister dst, XmmRegister src);
- void cvtsd2si(CpuRegister dst, XmmRegister src);
+ void cvtsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtsd2ss(XmmRegister dst, XmmRegister src);
- void cvttss2si(CpuRegister dst, XmmRegister src);
- void cvttsd2si(CpuRegister dst, XmmRegister src);
+ void cvttss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
+ void cvttsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtdq2pd(XmmRegister dst, XmmRegister src);
@@ -393,20 +390,28 @@
void cmpq(CpuRegister reg0, CpuRegister reg1);
void cmpq(CpuRegister reg0, const Immediate& imm);
void cmpq(CpuRegister reg0, const Address& address);
+ void cmpq(const Address& address, const Immediate& imm);
void testl(CpuRegister reg1, CpuRegister reg2);
void testl(CpuRegister reg, const Immediate& imm);
+ void testq(CpuRegister reg1, CpuRegister reg2);
void testq(CpuRegister reg, const Address& address);
void andl(CpuRegister dst, const Immediate& imm);
void andl(CpuRegister dst, CpuRegister src);
+ void andl(CpuRegister reg, const Address& address);
void andq(CpuRegister dst, const Immediate& imm);
+ void andq(CpuRegister dst, CpuRegister src);
void orl(CpuRegister dst, const Immediate& imm);
void orl(CpuRegister dst, CpuRegister src);
+ void orl(CpuRegister reg, const Address& address);
+ void orq(CpuRegister dst, CpuRegister src);
void xorl(CpuRegister dst, CpuRegister src);
+ void xorl(CpuRegister dst, const Immediate& imm);
+ void xorl(CpuRegister reg, const Address& address);
void xorq(CpuRegister dst, const Immediate& imm);
void xorq(CpuRegister dst, CpuRegister src);
@@ -429,8 +434,10 @@
void subq(CpuRegister dst, const Address& address);
void cdq();
+ void cqo();
void idivl(CpuRegister reg);
+ void idivq(CpuRegister reg);
void imull(CpuRegister dst, CpuRegister src);
void imull(CpuRegister reg, const Immediate& imm);
@@ -456,7 +463,10 @@
void shrq(CpuRegister reg, const Immediate& imm);
void negl(CpuRegister reg);
+ void negq(CpuRegister reg);
+
void notl(CpuRegister reg);
+ void notq(CpuRegister reg);
void enter(const Immediate& imm);
void leave();
@@ -663,9 +673,12 @@
void EmitOptionalRex32(XmmRegister dst, const Operand& operand);
// Emit a REX.W prefix plus necessary register bit encodings.
+ void EmitRex64();
void EmitRex64(CpuRegister reg);
+ void EmitRex64(const Operand& operand);
void EmitRex64(CpuRegister dst, CpuRegister src);
void EmitRex64(CpuRegister dst, const Operand& operand);
+ void EmitRex64(XmmRegister dst, CpuRegister src);
// Emit a REX prefix to normalize byte registers plus necessary register bit encodings.
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
@@ -686,13 +699,17 @@
}
inline void X86_64Assembler::EmitInt64(int64_t value) {
- buffer_.Emit<int64_t>(value);
+ // Write this 64-bit value as two 32-bit words for alignment reasons
+ // (this is essential when running on ARM, which does not allow
+ // 64-bit unaligned accesses). We assume little-endianness here.
+ EmitInt32(Low32Bits(value));
+ EmitInt32(High32Bits(value));
}
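Concretely, the split emits the low word first, matching the little-endian layout assumed above; e.g. for a hypothetical value:

// EmitInt64(0x1122334455667788)
//   -> EmitInt32(0x55667788)   // Low32Bits
//   -> EmitInt32(0x11223344)   // High32Bits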
inline void X86_64Assembler::EmitRegisterOperand(uint8_t rm, uint8_t reg) {
CHECK_GE(rm, 0);
CHECK_LT(rm, 8);
- buffer_.Emit<uint8_t>(0xC0 + (rm << 3) + reg);
+ buffer_.Emit<uint8_t>((0xC0 | (reg & 7)) + (rm << 3));
}
inline void X86_64Assembler::EmitXmmRegisterOperand(uint8_t rm, XmmRegister reg) {
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 69a5fa0..af389e6 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -16,8 +16,13 @@
#include "assembler_x86_64.h"
+#include <inttypes.h>
+#include <map>
+#include <random>
+
#include "base/stl_util.h"
#include "utils/assembler_test.h"
+#include "utils.h"
namespace art {
@@ -30,8 +35,88 @@
ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
}
+#ifdef HAVE_ANDROID_OS
+static constexpr size_t kRandomIterations = 1000; // Devices might be puny, don't stress them...
+#else
+static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerful.
+#endif
+
+TEST(AssemblerX86_64, SignExtension) {
+ // 32bit.
+ for (int32_t i = 0; i < 128; i++) {
+ EXPECT_TRUE(IsInt32(8, i)) << i;
+ }
+ for (int32_t i = 128; i < 255; i++) {
+ EXPECT_FALSE(IsInt32(8, i)) << i;
+ }
+ // Do some higher ones randomly.
+ std::random_device rd;
+ std::default_random_engine e1(rd());
+ std::uniform_int_distribution<int32_t> uniform_dist(256, INT32_MAX);
+ for (size_t i = 0; i < kRandomIterations; i++) {
+ int32_t value = uniform_dist(e1);
+ EXPECT_FALSE(IsInt32(8, value)) << value;
+ }
+
+ // Negative ones.
+ for (int32_t i = -1; i >= -128; i--) {
+ EXPECT_TRUE(IsInt32(8, i)) << i;
+ }
+
+ for (int32_t i = -129; i > -256; i--) {
+ EXPECT_FALSE(IsInt32(8, i)) << i;
+ }
+
+ // Do some lower ones randomly.
+ std::uniform_int_distribution<int32_t> uniform_dist2(INT32_MIN, -256);
+ for (size_t i = 0; i < 100; i++) {
+ int32_t value = uniform_dist2(e1);
+ EXPECT_FALSE(IsInt32(8, value)) << value;
+ }
+
+ // 64bit.
+ for (int64_t i = 0; i < 128; i++) {
+ EXPECT_TRUE(IsInt64(8, i)) << i;
+ }
+ for (int32_t i = 128; i < 255; i++) {
+ EXPECT_FALSE(IsInt64(8, i)) << i;
+ }
+ // Do some higher ones randomly.
+ std::uniform_int_distribution<int64_t> uniform_dist3(256, INT64_MAX);
+ for (size_t i = 0; i < 100; i++) {
+ int64_t value = uniform_dist3(e1);
+ EXPECT_FALSE(IsInt64(8, value)) << value;
+ }
+
+ // Negative ones.
+ for (int64_t i = -1; i >= -128; i--) {
+ EXPECT_TRUE(IsInt64(8, i)) << i;
+ }
+
+ for (int64_t i = -129; i > -256; i--) {
+ EXPECT_FALSE(IsInt64(8, i)) << i;
+ }
+
+ // Do some lower ones randomly.
+ std::uniform_int_distribution<int64_t> uniform_dist4(INT64_MIN, -256);
+ for (size_t i = 0; i < kRandomIterations; i++) {
+ int64_t value = uniform_dist4(e1);
+ EXPECT_FALSE(IsInt64(8, value)) << value;
+ }
+}
+
+struct X86_64CpuRegisterCompare {
+ bool operator()(const x86_64::CpuRegister& a, const x86_64::CpuRegister& b) const {
+ return a.AsRegister() < b.AsRegister();
+ }
+};
+
class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler, x86_64::CpuRegister,
- x86_64::Immediate> {
+ x86_64::XmmRegister, x86_64::Immediate> {
+ public:
+ typedef AssemblerTest<x86_64::X86_64Assembler, x86_64::CpuRegister,
+ x86_64::XmmRegister, x86_64::Immediate> Base;
+
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
std::string GetArchitectureString() OVERRIDE {
@@ -60,24 +145,71 @@
registers_.push_back(new x86_64::CpuRegister(x86_64::R13));
registers_.push_back(new x86_64::CpuRegister(x86_64::R14));
registers_.push_back(new x86_64::CpuRegister(x86_64::R15));
+
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RAX), "eax");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RBX), "ebx");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RCX), "ecx");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RDX), "edx");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RBP), "ebp");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RSP), "esp");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RSI), "esi");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RDI), "edi");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R8), "r8d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R9), "r9d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R10), "r10d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R11), "r11d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R12), "r12d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R13), "r13d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R14), "r14d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R15), "r15d");
+
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM0));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM1));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM2));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM3));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM4));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM5));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM6));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM7));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM8));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM9));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM10));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM11));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM12));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM13));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM14));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM15));
}
}
void TearDown() OVERRIDE {
AssemblerTest::TearDown();
STLDeleteElements(&registers_);
+ STLDeleteElements(&fp_registers_);
}
std::vector<x86_64::CpuRegister*> GetRegisters() OVERRIDE {
return registers_;
}
- x86_64::Immediate* CreateImmediate(int64_t imm_value) OVERRIDE {
- return new x86_64::Immediate(imm_value);
+ std::vector<x86_64::XmmRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
+ x86_64::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ return x86_64::Immediate(imm_value);
+ }
+
+ std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
+ return secondary_register_names_[reg];
}
private:
std::vector<x86_64::CpuRegister*> registers_;
+ std::map<x86_64::CpuRegister, std::string, X86_64CpuRegisterCompare> secondary_register_names_;
+
+ std::vector<x86_64::XmmRegister*> fp_registers_;
};
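With GetFPRegisters() and the FF/FR/RF repeaters wired up, an FP-register test could be written in the same style as the integer tests below; a hypothetical sketch (assuming XMM registers print as xmm0, xmm1, ...):

TEST_F(AssemblerX86_64Test, Movsd) {
  DriverStr(RepeatFF(&x86_64::X86_64Assembler::movsd, "movsd %{reg2}, %{reg1}"), "movsd");
}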
@@ -94,7 +226,6 @@
DriverStr(RepeatI(&x86_64::X86_64Assembler::pushq, 4U, "pushq ${imm}"), "pushqi");
}
-
TEST_F(AssemblerX86_64Test, MovqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::movq, "movq %{reg2}, %{reg1}"), "movq");
}
@@ -103,6 +234,13 @@
DriverStr(RepeatRI(&x86_64::X86_64Assembler::movq, 8U, "movq ${imm}, %{reg}"), "movqi");
}
+TEST_F(AssemblerX86_64Test, MovlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::movl, "mov %{reg2}, %{reg1}"), "movl");
+}
+
+TEST_F(AssemblerX86_64Test, MovlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::movl, 4U, "mov ${imm}, %{reg}"), "movli");
+}
TEST_F(AssemblerX86_64Test, AddqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::addq, "addq %{reg2}, %{reg1}"), "addq");
@@ -112,10 +250,36 @@
DriverStr(RepeatRI(&x86_64::X86_64Assembler::addq, 4U, "addq ${imm}, %{reg}"), "addqi");
}
+TEST_F(AssemblerX86_64Test, AddlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::addl, "add %{reg2}, %{reg1}"), "addl");
+}
+
+TEST_F(AssemblerX86_64Test, AddlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::addl, 4U, "add ${imm}, %{reg}"), "addli");
+}
+
TEST_F(AssemblerX86_64Test, ImulqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::imulq, "imulq %{reg2}, %{reg1}"), "imulq");
}
+TEST_F(AssemblerX86_64Test, ImulqImm) {
+ DriverStr(RepeatRI(&x86_64::X86_64Assembler::imulq, 4U, "imulq ${imm}, %{reg}, %{reg}"),
+ "imulqi");
+}
+
+TEST_F(AssemblerX86_64Test, ImullRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::imull, "imul %{reg2}, %{reg1}"), "imull");
+}
+
+TEST_F(AssemblerX86_64Test, ImullImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::imull, 4U, "imull ${imm}, %{reg}, %{reg}"),
+ "imulli");
+}
+
+TEST_F(AssemblerX86_64Test, Mull) {
+ DriverStr(Repeatr(&x86_64::X86_64Assembler::mull, "mull %{reg}"), "mull");
+}
+
TEST_F(AssemblerX86_64Test, SubqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::subq, "subq %{reg2}, %{reg1}"), "subq");
}
@@ -124,19 +288,178 @@
DriverStr(RepeatRI(&x86_64::X86_64Assembler::subq, 4U, "subq ${imm}, %{reg}"), "subqi");
}
+TEST_F(AssemblerX86_64Test, SublRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::subl, "sub %{reg2}, %{reg1}"), "subl");
+}
+
+TEST_F(AssemblerX86_64Test, SublImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::subl, 4U, "sub ${imm}, %{reg}"), "subli");
+}
+
+// Shll only allows CL as the shift register.
+std::string shll_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
+
+ x86_64::CpuRegister shifter(x86_64::RCX);
+ for (auto reg : registers) {
+ assembler->shll(*reg, shifter);
+ str << "shll %cl, %" << assembler_test->GetSecondaryRegisterName(*reg) << "\n";
+ }
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, ShllReg) {
+ DriverFn(&shll_fn, "shll");
+}
+
+TEST_F(AssemblerX86_64Test, ShllImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::shll, 1U, "shll ${imm}, %{reg}"), "shlli");
+}
+
+// Shrl only allows CL as the shift register.
+std::string shrl_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
+
+ x86_64::CpuRegister shifter(x86_64::RCX);
+ for (auto reg : registers) {
+ assembler->shrl(*reg, shifter);
+ str << "shrl %cl, %" << assembler_test->GetSecondaryRegisterName(*reg) << "\n";
+ }
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, ShrlReg) {
+ DriverFn(&shrl_fn, "shrl");
+}
+
+TEST_F(AssemblerX86_64Test, ShrlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::shrl, 1U, "shrl ${imm}, %{reg}"), "shrli");
+}
+
+// Sarl only allows CL as the shift register.
+std::string sarl_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
+
+ x86_64::CpuRegister shifter(x86_64::RCX);
+ for (auto reg : registers) {
+ assembler->sarl(*reg, shifter);
+ str << "sarl %cl, %" << assembler_test->GetSecondaryRegisterName(*reg) << "\n";
+ }
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, SarlReg) {
+ DriverFn(&sarl_fn, "sarl");
+}
+
+TEST_F(AssemblerX86_64Test, SarlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::sarl, 1U, "sarl ${imm}, %{reg}"), "sarli");
+}
TEST_F(AssemblerX86_64Test, CmpqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::cmpq, "cmpq %{reg2}, %{reg1}"), "cmpq");
}
+TEST_F(AssemblerX86_64Test, CmpqImm) {
+ DriverStr(RepeatRI(&x86_64::X86_64Assembler::cmpq, 4U /* cmpq only supports 32b imm */,
+ "cmpq ${imm}, %{reg}"), "cmpqi");
+}
+
+TEST_F(AssemblerX86_64Test, CmplRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::cmpl, "cmp %{reg2}, %{reg1}"), "cmpl");
+}
+
+TEST_F(AssemblerX86_64Test, CmplImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::cmpl, 4U, "cmpl ${imm}, %{reg}"), "cmpli");
+}
+
+TEST_F(AssemblerX86_64Test, Testl) {
+ // Note: uses different order for GCC than usual. This makes GCC happy, and doesn't have an
+ // impact on functional correctness.
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::testl, "testl %{reg1}, %{reg2}"), "testl");
+}
+
+TEST_F(AssemblerX86_64Test, Negq) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::negq, "negq %{reg}"), "negq");
+}
+
+TEST_F(AssemblerX86_64Test, Negl) {
+ DriverStr(Repeatr(&x86_64::X86_64Assembler::negl, "negl %{reg}"), "negl");
+}
+
+TEST_F(AssemblerX86_64Test, Notq) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::notq, "notq %{reg}"), "notq");
+}
+
+TEST_F(AssemblerX86_64Test, Notl) {
+ DriverStr(Repeatr(&x86_64::X86_64Assembler::notl, "notl %{reg}"), "notl");
+}
+
+TEST_F(AssemblerX86_64Test, AndqRegs) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::andq, "andq %{reg2}, %{reg1}"), "andq");
+}
+
+TEST_F(AssemblerX86_64Test, AndqImm) {
+ DriverStr(RepeatRI(&x86_64::X86_64Assembler::andq, 4U /* andq only supports 32b imm */,
+ "andq ${imm}, %{reg}"), "andqi");
+}
+
+TEST_F(AssemblerX86_64Test, AndlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::andl, "andl %{reg2}, %{reg1}"), "andl");
+}
+
+TEST_F(AssemblerX86_64Test, AndlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::andl, 4U, "andl ${imm}, %{reg}"), "andli");
+}
+
+TEST_F(AssemblerX86_64Test, OrqRegs) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::orq, "orq %{reg2}, %{reg1}"), "orq");
+}
+
+TEST_F(AssemblerX86_64Test, OrlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::orl, "orl %{reg2}, %{reg1}"), "orl");
+}
+
+TEST_F(AssemblerX86_64Test, OrlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::orl, 4U, "orl ${imm}, %{reg}"), "orli");
+}
+
+TEST_F(AssemblerX86_64Test, XorqRegs) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::xorq, "xorq %{reg2}, %{reg1}"), "xorq");
+}
TEST_F(AssemblerX86_64Test, XorqImm) {
DriverStr(RepeatRI(&x86_64::X86_64Assembler::xorq, 4U, "xorq ${imm}, %{reg}"), "xorqi");
}
+TEST_F(AssemblerX86_64Test, XorlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::xorl, "xor %{reg2}, %{reg1}"), "xorl");
+}
+
+TEST_F(AssemblerX86_64Test, XorlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::xorl, 4U, "xor ${imm}, %{reg}"), "xorli");
+}
+
+TEST_F(AssemblerX86_64Test, Xchgq) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::xchgq, "xchgq %{reg2}, %{reg1}"), "xchgq");
+}
+
+TEST_F(AssemblerX86_64Test, Xchgl) {
+ // Test is disabled because GCC generates 0x87 0xC0 for xchgl eax, eax. All other cases are the
+ // same. Anyone know why it doesn't emit a simple 0x90? It does so for xchgq rax, rax...
+ // DriverStr(Repeatrr(&x86_64::X86_64Assembler::xchgl, "xchgl %{reg2}, %{reg1}"), "xchgl");
+}
+
TEST_F(AssemblerX86_64Test, Movl) {
- GetAssembler()->movl(x86_64::CpuRegister(x86_64::R8), x86_64::CpuRegister(x86_64::R11));
- GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::CpuRegister(x86_64::R11));
GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::Address(
x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::Address(
@@ -144,8 +467,6 @@
GetAssembler()->movl(x86_64::CpuRegister(x86_64::R8), x86_64::Address(
x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12));
const char* expected =
- "movl %R11d, %R8d\n"
- "movl %R11d, %EAX\n"
"movl 0xc(%RDI,%RBX,4), %EAX\n"
"movl 0xc(%RDI,%R9,4), %EAX\n"
"movl 0xc(%RDI,%R9,4), %R8d\n";
@@ -160,8 +481,201 @@
DriverStr(expected, "movw");
}
+TEST_F(AssemblerX86_64Test, Movsxd) {
+ DriverStr(RepeatRr(&x86_64::X86_64Assembler::movsxd, "movsxd %{reg2}, %{reg1}"), "movsxd");
+}
-std::string setcc_test_fn(x86_64::X86_64Assembler* assembler) {
+///////////////////
+// FP Operations //
+///////////////////
+
+TEST_F(AssemblerX86_64Test, Movaps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::movaps, "movaps %{reg2}, %{reg1}"), "movaps");
+}
+
+TEST_F(AssemblerX86_64Test, Movss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::movss, "movss %{reg2}, %{reg1}"), "movss");
+}
+
+TEST_F(AssemblerX86_64Test, Movsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::movsd, "movsd %{reg2}, %{reg1}"), "movsd");
+}
+
+TEST_F(AssemblerX86_64Test, Movd1) {
+ DriverStr(RepeatFR(&x86_64::X86_64Assembler::movd, "movd %{reg2}, %{reg1}"), "movd.1");
+}
+
+TEST_F(AssemblerX86_64Test, Movd2) {
+ DriverStr(RepeatRF(&x86_64::X86_64Assembler::movd, "movd %{reg2}, %{reg1}"), "movd.2");
+}
+
+TEST_F(AssemblerX86_64Test, Addss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::addss, "addss %{reg2}, %{reg1}"), "addss");
+}
+
+TEST_F(AssemblerX86_64Test, Addsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::addsd, "addsd %{reg2}, %{reg1}"), "addsd");
+}
+
+TEST_F(AssemblerX86_64Test, Subss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::subss, "subss %{reg2}, %{reg1}"), "subss");
+}
+
+TEST_F(AssemblerX86_64Test, Subsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::subsd, "subsd %{reg2}, %{reg1}"), "subsd");
+}
+
+TEST_F(AssemblerX86_64Test, Mulss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulss, "mulss %{reg2}, %{reg1}"), "mulss");
+}
+
+TEST_F(AssemblerX86_64Test, Mulsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulsd, "mulsd %{reg2}, %{reg1}"), "mulsd");
+}
+
+TEST_F(AssemblerX86_64Test, Divss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::divss, "divss %{reg2}, %{reg1}"), "divss");
+}
+
+TEST_F(AssemblerX86_64Test, Divsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::divsd, "divsd %{reg2}, %{reg1}"), "divsd");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2ss) {
+ DriverStr(RepeatFr(&x86_64::X86_64Assembler::cvtsi2ss, "cvtsi2ss %{reg2}, %{reg1}"), "cvtsi2ss");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2sd) {
+ DriverStr(RepeatFr(&x86_64::X86_64Assembler::cvtsi2sd, "cvtsi2sd %{reg2}, %{reg1}"), "cvtsi2sd");
+}
+
+
+TEST_F(AssemblerX86_64Test, Cvtss2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvtss2si, "cvtss2si %{reg2}, %{reg1}"), "cvtss2si");
+}
+
+
+TEST_F(AssemblerX86_64Test, Cvtss2sd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtss2sd, "cvtss2sd %{reg2}, %{reg1}"), "cvtss2sd");
+}
+
+
+TEST_F(AssemblerX86_64Test, Cvtsd2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvtsd2si, "cvtsd2si %{reg2}, %{reg1}"), "cvtsd2si");
+}
+
+TEST_F(AssemblerX86_64Test, Cvttss2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvttss2si, "cvttss2si %{reg2}, %{reg1}"),
+ "cvttss2si");
+}
+
+TEST_F(AssemblerX86_64Test, Cvttsd2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvttsd2si, "cvttsd2si %{reg2}, %{reg1}"),
+ "cvttsd2si");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsd2ss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtsd2ss, "cvtsd2ss %{reg2}, %{reg1}"), "cvtsd2ss");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtdq2pd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtdq2pd, "cvtdq2pd %{reg2}, %{reg1}"), "cvtdq2pd");
+}
+
+TEST_F(AssemblerX86_64Test, Comiss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::comiss, "comiss %{reg2}, %{reg1}"), "comiss");
+}
+
+TEST_F(AssemblerX86_64Test, Comisd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::comisd, "comisd %{reg2}, %{reg1}"), "comisd");
+}
+
+TEST_F(AssemblerX86_64Test, Sqrtss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::sqrtss, "sqrtss %{reg2}, %{reg1}"), "sqrtss");
+}
+
+TEST_F(AssemblerX86_64Test, Sqrtsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::sqrtsd, "sqrtsd %{reg2}, %{reg1}"), "sqrtsd");
+}
+
+TEST_F(AssemblerX86_64Test, Xorps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::xorps, "xorps %{reg2}, %{reg1}"), "xorps");
+}
+
+TEST_F(AssemblerX86_64Test, Xorpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::xorpd, "xorpd %{reg2}, %{reg1}"), "xorpd");
+}
+
+// X87
+
+std::string x87_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ assembler->fincstp();
+ str << "fincstp\n";
+
+ assembler->fsin();
+ str << "fsin\n";
+
+ assembler->fcos();
+ str << "fcos\n";
+
+ assembler->fptan();
+ str << "fptan\n";
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, X87) {
+ DriverFn(&x87_fn, "x87");
+}
+
+////////////////
+// CALL / JMP //
+////////////////
+
+TEST_F(AssemblerX86_64Test, Call) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::call, "call *%{reg}"), "call");
+}
+
+TEST_F(AssemblerX86_64Test, Jmp) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::jmp, "jmp *%{reg}"), "jmp");
+}
+
+TEST_F(AssemblerX86_64Test, Enter) {
+ DriverStr(RepeatI(&x86_64::X86_64Assembler::enter, 2U /* 16b immediate */, "enter ${imm}, $0",
+ true /* Only non-negative number */), "enter");
+}
+
+TEST_F(AssemblerX86_64Test, RetImm) {
+ DriverStr(RepeatI(&x86_64::X86_64Assembler::ret, 2U /* 16b immediate */, "ret ${imm}",
+ true /* Only non-negative number */), "reti");
+}
+
+std::string ret_and_leave_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ assembler->ret();
+ str << "ret\n";
+
+ assembler->leave();
+ str << "leave\n";
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, RetAndLeave) {
+ DriverFn(&ret_and_leave_fn, "retleave");
+}
+
+//////////
+// MISC //
+//////////
+
+std::string setcc_test_fn(AssemblerX86_64Test::Base* assembler_test,
+ x86_64::X86_64Assembler* assembler) {
// From Condition
/*
kOverflow = 0,
@@ -183,23 +697,7 @@
std::string suffixes[15] = { "o", "no", "b", "ae", "e", "ne", "be", "a", "s", "ns", "pe", "po",
"l", "ge", "le" };
- std::vector<x86_64::CpuRegister*> registers;
- registers.push_back(new x86_64::CpuRegister(x86_64::RAX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RBX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RCX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RDX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RBP));
- registers.push_back(new x86_64::CpuRegister(x86_64::RSP));
- registers.push_back(new x86_64::CpuRegister(x86_64::RSI));
- registers.push_back(new x86_64::CpuRegister(x86_64::RDI));
- registers.push_back(new x86_64::CpuRegister(x86_64::R8));
- registers.push_back(new x86_64::CpuRegister(x86_64::R9));
- registers.push_back(new x86_64::CpuRegister(x86_64::R10));
- registers.push_back(new x86_64::CpuRegister(x86_64::R11));
- registers.push_back(new x86_64::CpuRegister(x86_64::R12));
- registers.push_back(new x86_64::CpuRegister(x86_64::R13));
- registers.push_back(new x86_64::CpuRegister(x86_64::R14));
- registers.push_back(new x86_64::CpuRegister(x86_64::R15));
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
std::string byte_regs[16];
byte_regs[x86_64::RAX] = "al";
@@ -228,7 +726,6 @@
}
}
- STLDeleteElements(&registers);
return str.str();
}
@@ -244,7 +741,8 @@
return x86_64::X86_64ManagedRegister::FromXmmRegister(r);
}
-std::string buildframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -288,7 +786,8 @@
DriverFn(&buildframe_test_fn, "BuildFrame");
}
-std::string removeframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -316,7 +815,8 @@
DriverFn(&removeframe_test_fn, "RemoveFrame");
}
-std::string increaseframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
assembler->IncreaseFrameSize(0U);
assembler->IncreaseFrameSize(kStackAlignment);
assembler->IncreaseFrameSize(10 * kStackAlignment);
@@ -334,7 +834,8 @@
DriverFn(&increaseframe_test_fn, "IncreaseFrame");
}
-std::string decreaseframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
assembler->DecreaseFrameSize(0U);
assembler->DecreaseFrameSize(kStackAlignment);
assembler->DecreaseFrameSize(10 * kStackAlignment);
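For readers unfamiliar with the test scaffolding used throughout the hunks above: the Repeat*/Driver* calls come from the shared AssemblerTest fixture, where (roughly) capital R selects the primary 64-bit register names, lower-case r the secondary 32-bit names kept in secondary_register_names_, F the XMM registers, and I an immediate. A minimal sketch of the RepeatRR idea, with hypothetical names and simplified types rather than the real fixture's API, assuming it enumerates every register pair, emits the instruction under test, and builds the matching expected-assembly text:

#include <cstddef>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical, simplified stand-in for the RepeatRR pattern (names invented for
// illustration). The real fixture substitutes into format strings such as
// "addq %{reg2}, %{reg1}" and DriverStr then assembles that text with a host
// assembler and compares the bytes against what the ART assembler emitted.
template <typename Assembler, typename Reg>
std::string RepeatRRSketch(Assembler* assembler,
                           void (Assembler::*emit)(Reg, Reg),
                           const std::vector<Reg>& regs,
                           const std::vector<std::string>& names,
                           const std::string& mnemonic) {
  std::ostringstream expected;
  for (size_t i = 0; i < regs.size(); ++i) {      // destination ({reg1})
    for (size_t j = 0; j < regs.size(); ++j) {    // source ({reg2})
      (assembler->*emit)(regs[i], regs[j]);       // emit the instruction under test
      expected << mnemonic << " %" << names[j] << ", %" << names[i] << "\n";
    }
  }
  return expected.str();
}

The hand-written generators above (shll_fn, x87_fn, ret_and_leave_fn, setcc_test_fn, ...) follow the same contract but fix or restrict the operands, which is why they return the expected string to DriverFn instead of going through a Repeat helper.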
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
index 0bab429..0ef20d6 100644
--- a/dalvikvm/Android.mk
+++ b/dalvikvm/Android.mk
@@ -24,10 +24,11 @@
LOCAL_MODULE := dalvikvm
LOCAL_MODULE_TAGS := optional
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc ../sigchainlib/sigchain.cc
+LOCAL_SRC_FILES := dalvikvm.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
LOCAL_C_INCLUDES := art/runtime
LOCAL_SHARED_LIBRARIES := libdl liblog libnativehelper
+LOCAL_WHOLE_STATIC_LIBRARIES := libsigchain
LOCAL_LDFLAGS := -Wl,--version-script,art/sigchainlib/version-script.txt -Wl,--export-dynamic
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk
@@ -51,10 +52,11 @@
LOCAL_MODULE_TAGS := optional
LOCAL_CLANG := true
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc ../sigchainlib/sigchain.cc
+LOCAL_SRC_FILES := dalvikvm.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
LOCAL_C_INCLUDES := art/runtime
LOCAL_SHARED_LIBRARIES := libnativehelper
+LOCAL_WHOLE_STATIC_LIBRARIES := libsigchain
LOCAL_LDFLAGS := -ldl -lpthread
# Mac OS linker doesn't understand --export-dynamic.
ifneq ($(HOST_OS),darwin)
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index 2ef826b..4f39c42 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -38,8 +38,8 @@
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler libziparchive-host,art/compiler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler libziparchive-host,art/compiler,host,debug))
endif
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 326fa72..7d4b726 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -30,6 +30,11 @@
#include <sys/utsname.h>
#endif
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include <cutils/trace.h>
+
+#include "arch/instruction_set_features.h"
+#include "base/dumpable.h"
#include "base/stl_util.h"
#include "base/stringpiece.h"
#include "base/timing_logger.h"
@@ -44,6 +49,7 @@
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "elf_file.h"
#include "elf_writer.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -153,6 +159,9 @@
UsageError(" Example: --instruction-set-features=div");
UsageError(" Default: default");
UsageError("");
+ UsageError(" --compile-pic: Force indirect use of code, methods, and classes");
+ UsageError(" Default: disabled");
+ UsageError("");
UsageError(" --compiler-backend=(Quick|Optimizing|Portable): select compiler backend");
UsageError(" set.");
UsageError(" Example: --compiler-backend=Portable");
@@ -244,363 +253,6 @@
exit(EXIT_FAILURE);
}
-class Dex2Oat {
- public:
- static bool Create(Dex2Oat** p_dex2oat,
- const RuntimeOptions& runtime_options,
- const CompilerOptions& compiler_options,
- Compiler::Kind compiler_kind,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
- VerificationResults* verification_results,
- DexFileToMethodInlinerMap* method_inliner_map,
- size_t thread_count)
- SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) {
- CHECK(verification_results != nullptr);
- CHECK(method_inliner_map != nullptr);
- if (instruction_set == kRuntimeISA) {
- std::unique_ptr<const InstructionSetFeatures> runtime_features(
- InstructionSetFeatures::FromCppDefines());
- if (!instruction_set_features->Equals(runtime_features.get())) {
- LOG(WARNING) << "Mismatch between dex2oat instruction set features ("
- << *instruction_set_features << ") and those of dex2oat executable ("
- << *runtime_features <<") for the command line:\n"
- << CommandLine();
- }
- }
- std::unique_ptr<Dex2Oat> dex2oat(new Dex2Oat(&compiler_options,
- compiler_kind,
- instruction_set,
- instruction_set_features,
- verification_results,
- method_inliner_map,
- thread_count));
- if (!dex2oat->CreateRuntime(runtime_options, instruction_set)) {
- *p_dex2oat = nullptr;
- return false;
- }
- *p_dex2oat = dex2oat.release();
- return true;
- }
-
- ~Dex2Oat() {
- delete runtime_;
- LogCompletionTime();
- }
-
- void LogCompletionTime() {
- LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
- << " (threads: " << thread_count_ << ")";
- }
-
-
- // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- std::set<std::string>* ReadImageClassesFromFile(const char* image_classes_filename) {
- std::unique_ptr<std::ifstream> image_classes_file(new std::ifstream(image_classes_filename,
- std::ifstream::in));
- if (image_classes_file.get() == nullptr) {
- LOG(ERROR) << "Failed to open image classes file " << image_classes_filename;
- return nullptr;
- }
- std::unique_ptr<std::set<std::string>> result(ReadImageClasses(*image_classes_file));
- image_classes_file->close();
- return result.release();
- }
-
- std::set<std::string>* ReadImageClasses(std::istream& image_classes_stream) {
- std::unique_ptr<std::set<std::string>> image_classes(new std::set<std::string>);
- while (image_classes_stream.good()) {
- std::string dot;
- std::getline(image_classes_stream, dot);
- if (StartsWith(dot, "#") || dot.empty()) {
- continue;
- }
- std::string descriptor(DotToDescriptor(dot.c_str()));
- image_classes->insert(descriptor);
- }
- return image_classes.release();
- }
-
- // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- std::set<std::string>* ReadImageClassesFromZip(const char* zip_filename,
- const char* image_classes_filename,
- std::string* error_msg) {
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename, error_msg));
- if (zip_archive.get() == nullptr) {
- return nullptr;
- }
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename, error_msg));
- if (zip_entry.get() == nullptr) {
- *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", image_classes_filename,
- zip_filename, error_msg->c_str());
- return nullptr;
- }
- std::unique_ptr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(zip_filename,
- image_classes_filename,
- error_msg));
- if (image_classes_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", image_classes_filename,
- zip_filename, error_msg->c_str());
- return nullptr;
- }
- const std::string image_classes_string(reinterpret_cast<char*>(image_classes_file->Begin()),
- image_classes_file->Size());
- std::istringstream image_classes_stream(image_classes_string);
- return ReadImageClasses(image_classes_stream);
- }
-
- void Compile(const std::string& boot_image_option,
- const std::vector<const DexFile*>& dex_files,
- const std::string& bitcode_filename,
- bool image,
- std::unique_ptr<std::set<std::string>>& image_classes,
- bool dump_stats,
- bool dump_passes,
- TimingLogger* timings,
- CumulativeLogger* compiler_phases_timings,
- const std::string& profile_file) {
- // Handle and ClassLoader creation needs to come after Runtime::Create
- jobject class_loader = nullptr;
- Thread* self = Thread::Current();
- if (!boot_image_option.empty()) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- std::vector<const DexFile*> class_path_files(dex_files);
- OpenClassPathFiles(runtime_->GetClassPathString(), class_path_files);
- ScopedObjectAccess soa(self);
- for (size_t i = 0; i < class_path_files.size(); i++) {
- class_linker->RegisterDexFile(*class_path_files[i]);
- }
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader);
- ScopedLocalRef<jobject> class_loader_local(soa.Env(),
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
- class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
- Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path_files);
- }
-
- driver_.reset(new CompilerDriver(compiler_options_,
- verification_results_,
- method_inliner_map_,
- compiler_kind_,
- instruction_set_,
- instruction_set_features_,
- image,
- image_classes.release(),
- thread_count_,
- dump_stats,
- dump_passes,
- compiler_phases_timings,
- profile_file));
-
- driver_->GetCompiler()->SetBitcodeFileName(*driver_, bitcode_filename);
-
- driver_->CompileAll(class_loader, dex_files, timings);
- }
-
- void PrepareImageWriter(uintptr_t image_base) {
- image_writer_.reset(new ImageWriter(*driver_, image_base));
- }
-
- bool CreateOatFile(const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host,
- File* oat_file,
- const std::string& oat_location,
- TimingLogger* timings,
- SafeMap<std::string, std::string>* key_value_store) {
- CHECK(key_value_store != nullptr);
-
- TimingLogger::ScopedTiming t2("dex2oat OatWriter", timings);
- std::string image_file_location;
- uint32_t image_file_location_oat_checksum = 0;
- uintptr_t image_file_location_oat_data_begin = 0;
- int32_t image_patch_delta = 0;
- if (!driver_->IsImage()) {
- TimingLogger::ScopedTiming t3("Loading image checksum", timings);
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
- image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
- image_file_location_oat_data_begin =
- reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatDataBegin());
- image_file_location = image_space->GetImageFilename();
- image_patch_delta = image_space->GetImageHeader().GetPatchDelta();
- }
-
- if (!image_file_location.empty()) {
- key_value_store->Put(OatHeader::kImageLocationKey, image_file_location);
- }
-
- OatWriter oat_writer(dex_files, image_file_location_oat_checksum,
- image_file_location_oat_data_begin,
- image_patch_delta,
- driver_.get(),
- image_writer_.get(),
- timings,
- key_value_store);
-
- if (driver_->IsImage()) {
- // The OatWriter constructor has already updated offsets in methods and we need to
- // prepare method offsets in the image address space for direct method patching.
- t2.NewTiming("Preparing image address space");
- if (!image_writer_->PrepareImageAddressSpace()) {
- LOG(ERROR) << "Failed to prepare image address space.";
- return false;
- }
- }
-
- t2.NewTiming("Writing ELF");
- if (!driver_->WriteElf(android_root, is_host, dex_files, &oat_writer, oat_file)) {
- LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
- return false;
- }
-
- // Flush result to disk.
- t2.NewTiming("Flushing ELF");
- if (oat_file->Flush() != 0) {
- LOG(ERROR) << "Failed to flush ELF file " << oat_file->GetPath();
- return false;
- }
-
- return true;
- }
-
- bool CreateImageFile(const std::string& image_filename,
- const std::string& oat_filename,
- const std::string& oat_location)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- CHECK(image_writer_ != nullptr);
- if (!image_writer_->Write(image_filename, oat_filename, oat_location)) {
- LOG(ERROR) << "Failed to create image file " << image_filename;
- return false;
- }
- uintptr_t oat_data_begin = image_writer_->GetOatDataBegin();
-
- // Destroy ImageWriter before doing FixupElf.
- image_writer_.reset();
-
- std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
- if (oat_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to open ELF file: " << oat_filename;
- return false;
- }
- if (!ElfWriter::Fixup(oat_file.get(), oat_data_begin)) {
- LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
- return false;
- }
- return true;
- }
-
- private:
- explicit Dex2Oat(const CompilerOptions* compiler_options,
- Compiler::Kind compiler_kind,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
- VerificationResults* verification_results,
- DexFileToMethodInlinerMap* method_inliner_map,
- size_t thread_count)
- : compiler_options_(compiler_options),
- compiler_kind_(compiler_kind),
- instruction_set_(instruction_set),
- instruction_set_features_(instruction_set_features),
- verification_results_(verification_results),
- method_inliner_map_(method_inliner_map),
- runtime_(nullptr),
- thread_count_(thread_count),
- start_ns_(NanoTime()),
- driver_(nullptr),
- image_writer_(nullptr) {
- CHECK(compiler_options != nullptr);
- CHECK(verification_results != nullptr);
- CHECK(method_inliner_map != nullptr);
- }
-
- bool CreateRuntime(const RuntimeOptions& runtime_options, InstructionSet instruction_set)
- SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) {
- if (!Runtime::Create(runtime_options, false)) {
- LOG(ERROR) << "Failed to create runtime";
- return false;
- }
- Runtime* runtime = Runtime::Current();
- runtime->SetInstructionSet(instruction_set);
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
- if (!runtime->HasCalleeSaveMethod(type)) {
- runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(type), type);
- }
- }
- runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
- runtime->GetClassLinker()->RunRootClinits();
- runtime_ = runtime;
- return true;
- }
-
- // Appends to dex_files any elements of class_path that it doesn't already
- // contain. This will open those dex files as necessary.
- static void OpenClassPathFiles(const std::string& class_path,
- std::vector<const DexFile*>& dex_files) {
- std::vector<std::string> parsed;
- Split(class_path, ':', &parsed);
- // Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained.
- ScopedObjectAccess soa(Thread::Current());
- for (size_t i = 0; i < parsed.size(); ++i) {
- if (DexFilesContains(dex_files, parsed[i])) {
- continue;
- }
- std::string error_msg;
- if (!DexFile::Open(parsed[i].c_str(), parsed[i].c_str(), &error_msg, &dex_files)) {
- LOG(WARNING) << "Failed to open dex file '" << parsed[i] << "': " << error_msg;
- }
- }
- }
-
- // Returns true if dex_files has a dex with the named location.
- static bool DexFilesContains(const std::vector<const DexFile*>& dex_files,
- const std::string& location) {
- for (size_t i = 0; i < dex_files.size(); ++i) {
- if (dex_files[i]->GetLocation() == location) {
- return true;
- }
- }
- return false;
- }
-
- const CompilerOptions* const compiler_options_;
- const Compiler::Kind compiler_kind_;
-
- const InstructionSet instruction_set_;
- const InstructionSetFeatures* const instruction_set_features_;
-
- VerificationResults* const verification_results_;
- DexFileToMethodInlinerMap* const method_inliner_map_;
- Runtime* runtime_;
- size_t thread_count_;
- uint64_t start_ns_;
- std::unique_ptr<CompilerDriver> driver_;
- std::unique_ptr<ImageWriter> image_writer_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
-};
-
-static size_t OpenDexFiles(const std::vector<const char*>& dex_filenames,
- const std::vector<const char*>& dex_locations,
- std::vector<const DexFile*>& dex_files) {
- size_t failure_count = 0;
- for (size_t i = 0; i < dex_filenames.size(); i++) {
- const char* dex_filename = dex_filenames[i];
- const char* dex_location = dex_locations[i];
- ATRACE_BEGIN(StringPrintf("Opening dex file '%s'", dex_filenames[i]).c_str());
- std::string error_msg;
- if (!OS::FileExists(dex_filename)) {
- LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
- continue;
- }
- if (!DexFile::Open(dex_filename, dex_location, &error_msg, &dex_files)) {
- LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
- ++failure_count;
- }
- ATRACE_END();
- }
- return failure_count;
-}
-
// The primary goal of the watchdog is to prevent stuck build servers
// during development when fatal aborts lead to a cascade of failures
// that result in a deadlock.
@@ -715,17 +367,13 @@
// When setting timeouts, keep in mind that the build server may not be as fast as your desktop.
// Debug builds are slower so they have larger timeouts.
static const unsigned int kSlowdownFactor = kIsDebugBuild ? 5U : 1U;
-#if ART_USE_PORTABLE_COMPILER
- // 2 minutes scaled by kSlowdownFactor.
- static const unsigned int kWatchDogWarningSeconds = kSlowdownFactor * 2 * 60;
- // 30 minutes scaled by kSlowdownFactor.
- static const unsigned int kWatchDogTimeoutSeconds = kSlowdownFactor * 30 * 60;
-#else
- // 1 minutes scaled by kSlowdownFactor.
- static const unsigned int kWatchDogWarningSeconds = kSlowdownFactor * 1 * 60;
- // 6 minutes scaled by kSlowdownFactor.
- static const unsigned int kWatchDogTimeoutSeconds = kSlowdownFactor * 6 * 60;
-#endif
+
+ static const unsigned int kWatchDogWarningSeconds = kUsePortableCompiler ?
+ kSlowdownFactor * 2 * 60 : // 2 minutes scaled by kSlowdownFactor (portable).
+ kSlowdownFactor * 1 * 60; // 1 minute scaled by kSlowdownFactor (not-portable).
+ static const unsigned int kWatchDogTimeoutSeconds = kUsePortableCompiler ?
+ kSlowdownFactor * 30 * 60 : // 30 minutes scaled by kSlowdownFactor (portable).
+ kSlowdownFactor * 6 * 60; // 6 minutes scaled by kSlowdownFactor (not-portable).
bool is_watch_dog_enabled_;
bool shutting_down_;
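For scale, worked out from the constants above: kSlowdownFactor is 5 on debug builds and 1 otherwise, so a debug, non-portable dex2oat now warns after 5 * 1 * 60 = 300 seconds and hits its timeout after 5 * 6 * 60 = 1800 seconds, while release builds keep the 60 / 360 second defaults; the portable-compiler case scales the 2-minute and 30-minute figures the same way.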
@@ -735,10 +383,8 @@
pthread_attr_t attr_;
pthread_t pthread_;
};
-const unsigned int WatchDog::kWatchDogWarningSeconds;
-const unsigned int WatchDog::kWatchDogTimeoutSeconds;
-void ParseStringAfterChar(const std::string& s, char c, std::string* parsed_value) {
+static void ParseStringAfterChar(const std::string& s, char c, std::string* parsed_value) {
std::string::size_type colon = s.find(c);
if (colon == std::string::npos) {
Usage("Missing char %c in option %s\n", c, s.c_str());
@@ -747,8 +393,8 @@
*parsed_value = s.substr(colon + 1);
}
-void ParseDouble(const std::string& option, char after_char,
- double min, double max, double* parsed_value) {
+static void ParseDouble(const std::string& option, char after_char, double min, double max,
+ double* parsed_value) {
std::string substring;
ParseStringAfterChar(option, after_char, &substring);
bool sane_val = true;
@@ -770,680 +416,746 @@
*parsed_value = value;
}
-static int dex2oat(int argc, char** argv) {
-#if defined(__linux__) && defined(__arm__)
- int major, minor;
- struct utsname uts;
- if (uname(&uts) != -1 &&
- sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
- ((major < 3) || ((major == 3) && (minor < 4)))) {
- // Kernels before 3.4 don't handle the ASLR well and we can run out of address
- // space (http://b/13564922). Work around the issue by inhibiting further mmap() randomization.
- int old_personality = personality(0xffffffff);
- if ((old_personality & ADDR_NO_RANDOMIZE) == 0) {
- int new_personality = personality(old_personality | ADDR_NO_RANDOMIZE);
- if (new_personality == -1) {
- LOG(WARNING) << "personality(. | ADDR_NO_RANDOMIZE) failed.";
- }
+class Dex2Oat FINAL {
+ public:
+ explicit Dex2Oat(TimingLogger* timings) :
+ compiler_kind_(kUsePortableCompiler ? Compiler::kPortable : Compiler::kQuick),
+ instruction_set_(kRuntimeISA),
+ // Take the default set of instruction features from the build.
+ method_inliner_map_(),
+ runtime_(nullptr),
+ thread_count_(sysconf(_SC_NPROCESSORS_CONF)),
+ start_ns_(NanoTime()),
+ oat_fd_(-1),
+ zip_fd_(-1),
+ image_base_(0U),
+ image_classes_zip_filename_(nullptr),
+ image_classes_filename_(nullptr),
+ compiled_classes_zip_filename_(nullptr),
+ compiled_classes_filename_(nullptr),
+ image_(false),
+ is_host_(false),
+ dump_stats_(false),
+ dump_passes_(false),
+ dump_timing_(false),
+ dump_slow_timing_(kIsDebugBuild),
+ timings_(timings) {}
+
+ ~Dex2Oat() {
+ if (kIsDebugBuild || (RUNNING_ON_VALGRIND != 0)) {
+ delete runtime_; // See field declaration for why this is manual.
}
- }
-#endif
-
- original_argc = argc;
- original_argv = argv;
-
- TimingLogger timings("compiler", false, false);
- CumulativeLogger compiler_phases_timings("compilation times");
-
- InitLogging(argv);
-
- // Skip over argv[0].
- argv++;
- argc--;
-
- if (argc == 0) {
- Usage("No arguments specified");
+ LogCompletionTime();
}
- std::vector<const char*> dex_filenames;
- std::vector<const char*> dex_locations;
- int zip_fd = -1;
- std::string zip_location;
- std::string oat_filename;
- std::string oat_symbols;
- std::string oat_location;
- int oat_fd = -1;
- std::string bitcode_filename;
- const char* image_classes_zip_filename = nullptr;
- const char* image_classes_filename = nullptr;
- std::string image_filename;
- std::string boot_image_filename;
- uintptr_t image_base = 0;
- std::string android_root;
- std::vector<const char*> runtime_args;
- int thread_count = sysconf(_SC_NPROCESSORS_CONF);
- Compiler::Kind compiler_kind = kUsePortableCompiler
- ? Compiler::kPortable
- : Compiler::kQuick;
- const char* compiler_filter_string = nullptr;
- bool compile_pic = false;
- int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold;
- int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold;
- int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold;
- int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold;
- int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold;
+ // Parse the arguments from the command line. In case of an unrecognized option or impossible
+ // values/combinations, a usage error will be displayed and exit() is called. Thus, if the method
+ // returns, arguments have been successfully parsed.
+ void ParseArgs(int argc, char** argv) {
+ original_argc = argc;
+ original_argv = argv;
- // Initialize ISA and ISA features to default values.
- InstructionSet instruction_set = kRuntimeISA;
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromFeatureString(kNone, "default", &error_msg));
- CHECK(instruction_set_features.get() != nullptr) << error_msg;
+ InitLogging(argv);
- // Profile file to use
- std::string profile_file;
- double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
+ // Skip over argv[0].
+ argv++;
+ argc--;
- bool is_host = false;
- bool dump_stats = false;
- bool dump_timing = false;
- bool dump_passes = false;
- bool print_pass_options = false;
- bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
- bool include_debug_symbols = kIsDebugBuild;
- bool dump_slow_timing = kIsDebugBuild;
- bool watch_dog_enabled = true;
- bool generate_gdb_information = kIsDebugBuild;
-
- // Checks are all explicit until we know the architecture.
- bool implicit_null_checks = false;
- bool implicit_so_checks = false;
- bool implicit_suspend_checks = false;
-
- for (int i = 0; i < argc; i++) {
- const StringPiece option(argv[i]);
- const bool log_options = false;
- if (log_options) {
- LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
+ if (argc == 0) {
+ Usage("No arguments specified");
}
- if (option.starts_with("--dex-file=")) {
- dex_filenames.push_back(option.substr(strlen("--dex-file=")).data());
- } else if (option.starts_with("--dex-location=")) {
- dex_locations.push_back(option.substr(strlen("--dex-location=")).data());
- } else if (option.starts_with("--zip-fd=")) {
- const char* zip_fd_str = option.substr(strlen("--zip-fd=")).data();
- if (!ParseInt(zip_fd_str, &zip_fd)) {
- Usage("Failed to parse --zip-fd argument '%s' as an integer", zip_fd_str);
- }
- if (zip_fd < 0) {
- Usage("--zip-fd passed a negative value %d", zip_fd);
- }
- } else if (option.starts_with("--zip-location=")) {
- zip_location = option.substr(strlen("--zip-location=")).data();
- } else if (option.starts_with("--oat-file=")) {
- oat_filename = option.substr(strlen("--oat-file=")).data();
- } else if (option.starts_with("--oat-symbols=")) {
- oat_symbols = option.substr(strlen("--oat-symbols=")).data();
- } else if (option.starts_with("--oat-fd=")) {
- const char* oat_fd_str = option.substr(strlen("--oat-fd=")).data();
- if (!ParseInt(oat_fd_str, &oat_fd)) {
- Usage("Failed to parse --oat-fd argument '%s' as an integer", oat_fd_str);
- }
- if (oat_fd < 0) {
- Usage("--oat-fd passed a negative value %d", oat_fd);
- }
- } else if (option == "--watch-dog") {
- watch_dog_enabled = true;
- } else if (option == "--no-watch-dog") {
- watch_dog_enabled = false;
- } else if (option == "--gen-gdb-info") {
- generate_gdb_information = true;
- // Debug symbols are needed for gdb information.
- include_debug_symbols = true;
- } else if (option == "--no-gen-gdb-info") {
- generate_gdb_information = false;
- } else if (option.starts_with("-j")) {
- const char* thread_count_str = option.substr(strlen("-j")).data();
- if (!ParseInt(thread_count_str, &thread_count)) {
- Usage("Failed to parse -j argument '%s' as an integer", thread_count_str);
- }
- } else if (option.starts_with("--oat-location=")) {
- oat_location = option.substr(strlen("--oat-location=")).data();
- } else if (option.starts_with("--bitcode=")) {
- bitcode_filename = option.substr(strlen("--bitcode=")).data();
- } else if (option.starts_with("--image=")) {
- image_filename = option.substr(strlen("--image=")).data();
- } else if (option.starts_with("--image-classes=")) {
- image_classes_filename = option.substr(strlen("--image-classes=")).data();
- } else if (option.starts_with("--image-classes-zip=")) {
- image_classes_zip_filename = option.substr(strlen("--image-classes-zip=")).data();
- } else if (option.starts_with("--base=")) {
- const char* image_base_str = option.substr(strlen("--base=")).data();
- char* end;
- image_base = strtoul(image_base_str, &end, 16);
- if (end == image_base_str || *end != '\0') {
- Usage("Failed to parse hexadecimal value for option %s", option.data());
- }
- } else if (option.starts_with("--boot-image=")) {
- boot_image_filename = option.substr(strlen("--boot-image=")).data();
- } else if (option.starts_with("--android-root=")) {
- android_root = option.substr(strlen("--android-root=")).data();
- } else if (option.starts_with("--instruction-set=")) {
- StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
- if (instruction_set_str == "arm") {
- instruction_set = kThumb2;
- } else if (instruction_set_str == "arm64") {
- instruction_set = kArm64;
- } else if (instruction_set_str == "mips") {
- instruction_set = kMips;
- } else if (instruction_set_str == "x86") {
- instruction_set = kX86;
- } else if (instruction_set_str == "x86_64") {
- instruction_set = kX86_64;
- }
- } else if (option.starts_with("--instruction-set-variant=")) {
- StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
- instruction_set_features.reset(
- InstructionSetFeatures::FromVariant(instruction_set, str.as_string(), &error_msg));
- if (instruction_set_features.get() == nullptr) {
- Usage("%s", error_msg.c_str());
- }
- } else if (option.starts_with("--instruction-set-features=")) {
- StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
- instruction_set_features.reset(
- InstructionSetFeatures::FromFeatureString(instruction_set, str.as_string(), &error_msg));
- if (instruction_set_features.get() == nullptr) {
- Usage("%s", error_msg.c_str());
- }
- } else if (option.starts_with("--compiler-backend=")) {
- StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
- if (backend_str == "Quick") {
- compiler_kind = Compiler::kQuick;
- } else if (backend_str == "Optimizing") {
- compiler_kind = Compiler::kOptimizing;
- } else if (backend_str == "Portable") {
- compiler_kind = Compiler::kPortable;
- }
- } else if (option.starts_with("--compiler-filter=")) {
- compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
- } else if (option == "--compile-pic") {
- compile_pic = true;
- } else if (option.starts_with("--huge-method-max=")) {
- const char* threshold = option.substr(strlen("--huge-method-max=")).data();
- if (!ParseInt(threshold, &huge_method_threshold)) {
- Usage("Failed to parse --huge-method-max '%s' as an integer", threshold);
- }
- if (huge_method_threshold < 0) {
- Usage("--huge-method-max passed a negative value %s", huge_method_threshold);
- }
- } else if (option.starts_with("--large-method-max=")) {
- const char* threshold = option.substr(strlen("--large-method-max=")).data();
- if (!ParseInt(threshold, &large_method_threshold)) {
- Usage("Failed to parse --large-method-max '%s' as an integer", threshold);
- }
- if (large_method_threshold < 0) {
- Usage("--large-method-max passed a negative value %s", large_method_threshold);
- }
- } else if (option.starts_with("--small-method-max=")) {
- const char* threshold = option.substr(strlen("--small-method-max=")).data();
- if (!ParseInt(threshold, &small_method_threshold)) {
- Usage("Failed to parse --small-method-max '%s' as an integer", threshold);
- }
- if (small_method_threshold < 0) {
- Usage("--small-method-max passed a negative value %s", small_method_threshold);
- }
- } else if (option.starts_with("--tiny-method-max=")) {
- const char* threshold = option.substr(strlen("--tiny-method-max=")).data();
- if (!ParseInt(threshold, &tiny_method_threshold)) {
- Usage("Failed to parse --tiny-method-max '%s' as an integer", threshold);
- }
- if (tiny_method_threshold < 0) {
- Usage("--tiny-method-max passed a negative value %s", tiny_method_threshold);
- }
- } else if (option.starts_with("--num-dex-methods=")) {
- const char* threshold = option.substr(strlen("--num-dex-methods=")).data();
- if (!ParseInt(threshold, &num_dex_methods_threshold)) {
- Usage("Failed to parse --num-dex-methods '%s' as an integer", threshold);
- }
- if (num_dex_methods_threshold < 0) {
- Usage("--num-dex-methods passed a negative value %s", num_dex_methods_threshold);
- }
- } else if (option == "--host") {
- is_host = true;
- } else if (option == "--runtime-arg") {
- if (++i >= argc) {
- Usage("Missing required argument for --runtime-arg");
- }
+
+ std::string oat_symbols;
+ std::string boot_image_filename;
+ const char* compiler_filter_string = nullptr;
+ bool compile_pic = false;
+ int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold;
+ int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold;
+ int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold;
+ int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold;
+ int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold;
+
+ // Profile file to use
+ double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
+
+ bool print_pass_options = false;
+ bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
+ bool include_debug_symbols = kIsDebugBuild;
+ bool watch_dog_enabled = true;
+ bool generate_gdb_information = kIsDebugBuild;
+
+ std::string error_msg;
+
+ for (int i = 0; i < argc; i++) {
+ const StringPiece option(argv[i]);
+ const bool log_options = false;
if (log_options) {
LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
}
- runtime_args.push_back(argv[i]);
- } else if (option == "--dump-timing") {
- dump_timing = true;
- } else if (option == "--dump-passes") {
- dump_passes = true;
- } else if (option == "--dump-stats") {
- dump_stats = true;
- } else if (option == "--include-debug-symbols" || option == "--no-strip-symbols") {
- include_debug_symbols = true;
- } else if (option == "--no-include-debug-symbols" || option == "--strip-symbols") {
- include_debug_symbols = false;
- generate_gdb_information = false; // Depends on debug symbols, see above.
- } else if (option.starts_with("--profile-file=")) {
- profile_file = option.substr(strlen("--profile-file=")).data();
- VLOG(compiler) << "dex2oat: profile file is " << profile_file;
- } else if (option == "--no-profile-file") {
- // No profile
- } else if (option.starts_with("--top-k-profile-threshold=")) {
- ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold);
- } else if (option == "--print-pass-names") {
- PassDriverMEOpts::PrintPassNames();
- } else if (option.starts_with("--disable-passes=")) {
- std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
- PassDriverMEOpts::CreateDefaultPassList(disable_passes);
- } else if (option.starts_with("--print-passes=")) {
- std::string print_passes = option.substr(strlen("--print-passes=")).data();
- PassDriverMEOpts::SetPrintPassList(print_passes);
- } else if (option == "--print-all-passes") {
- PassDriverMEOpts::SetPrintAllPasses();
- } else if (option.starts_with("--dump-cfg-passes=")) {
- std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
- PassDriverMEOpts::SetDumpPassList(dump_passes);
- } else if (option == "--print-pass-options") {
- print_pass_options = true;
- } else if (option.starts_with("--pass-options=")) {
- std::string options = option.substr(strlen("--pass-options=")).data();
- PassDriverMEOpts::SetOverriddenPassOptions(options);
- } else if (option == "--include-patch-information") {
- include_patch_information = true;
- } else if (option == "--no-include-patch-information") {
- include_patch_information = false;
- } else {
- Usage("Unknown argument %s", option.data());
- }
- }
-
- if (oat_filename.empty() && oat_fd == -1) {
- Usage("Output must be supplied with either --oat-file or --oat-fd");
- }
-
- if (!oat_filename.empty() && oat_fd != -1) {
- Usage("--oat-file should not be used with --oat-fd");
- }
-
- if (!oat_symbols.empty() && oat_fd != -1) {
- Usage("--oat-symbols should not be used with --oat-fd");
- }
-
- if (!oat_symbols.empty() && is_host) {
- Usage("--oat-symbols should not be used with --host");
- }
-
- if (oat_fd != -1 && !image_filename.empty()) {
- Usage("--oat-fd should not be used with --image");
- }
-
- if (android_root.empty()) {
- const char* android_root_env_var = getenv("ANDROID_ROOT");
- if (android_root_env_var == nullptr) {
- Usage("--android-root unspecified and ANDROID_ROOT not set");
- }
- android_root += android_root_env_var;
- }
-
- bool image = (!image_filename.empty());
- if (!image && boot_image_filename.empty()) {
- boot_image_filename += android_root;
- boot_image_filename += "/framework/boot.art";
- }
- std::string boot_image_option;
- if (!boot_image_filename.empty()) {
- boot_image_option += "-Ximage:";
- boot_image_option += boot_image_filename;
- }
-
- if (image_classes_filename != nullptr && !image) {
- Usage("--image-classes should only be used with --image");
- }
-
- if (image_classes_filename != nullptr && !boot_image_option.empty()) {
- Usage("--image-classes should not be used with --boot-image");
- }
-
- if (image_classes_zip_filename != nullptr && image_classes_filename == nullptr) {
- Usage("--image-classes-zip should be used with --image-classes");
- }
-
- if (dex_filenames.empty() && zip_fd == -1) {
- Usage("Input must be supplied with either --dex-file or --zip-fd");
- }
-
- if (!dex_filenames.empty() && zip_fd != -1) {
- Usage("--dex-file should not be used with --zip-fd");
- }
-
- if (!dex_filenames.empty() && !zip_location.empty()) {
- Usage("--dex-file should not be used with --zip-location");
- }
-
- if (dex_locations.empty()) {
- for (size_t i = 0; i < dex_filenames.size(); i++) {
- dex_locations.push_back(dex_filenames[i]);
- }
- } else if (dex_locations.size() != dex_filenames.size()) {
- Usage("--dex-location arguments do not match --dex-file arguments");
- }
-
- if (zip_fd != -1 && zip_location.empty()) {
- Usage("--zip-location should be supplied with --zip-fd");
- }
-
- if (boot_image_option.empty()) {
- if (image_base == 0) {
- Usage("Non-zero --base not specified");
- }
- }
-
- std::string oat_stripped(oat_filename);
- std::string oat_unstripped;
- if (!oat_symbols.empty()) {
- oat_unstripped += oat_symbols;
- } else {
- oat_unstripped += oat_filename;
- }
-
- // If no instruction set feature was given, use the default one for the target
- // instruction set.
- if (instruction_set_features->GetInstructionSet() == kNone) {
- instruction_set_features.reset(
- InstructionSetFeatures::FromFeatureString(instruction_set, "default", &error_msg));
- }
-
- if (compiler_filter_string == nullptr) {
- if (instruction_set == kMips64) {
- // TODO: fix compiler for Mips64.
- compiler_filter_string = "interpret-only";
- } else if (image) {
- compiler_filter_string = "speed";
- } else {
-#if ART_SMALL_MODE
- compiler_filter_string = "interpret-only";
-#else
- compiler_filter_string = "speed";
-#endif
- }
- }
- CHECK(compiler_filter_string != nullptr);
- CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
- if (strcmp(compiler_filter_string, "verify-none") == 0) {
- compiler_filter = CompilerOptions::kVerifyNone;
- } else if (strcmp(compiler_filter_string, "interpret-only") == 0) {
- compiler_filter = CompilerOptions::kInterpretOnly;
- } else if (strcmp(compiler_filter_string, "space") == 0) {
- compiler_filter = CompilerOptions::kSpace;
- } else if (strcmp(compiler_filter_string, "balanced") == 0) {
- compiler_filter = CompilerOptions::kBalanced;
- } else if (strcmp(compiler_filter_string, "speed") == 0) {
- compiler_filter = CompilerOptions::kSpeed;
- } else if (strcmp(compiler_filter_string, "everything") == 0) {
- compiler_filter = CompilerOptions::kEverything;
- } else if (strcmp(compiler_filter_string, "time") == 0) {
- compiler_filter = CompilerOptions::kTime;
- } else {
- Usage("Unknown --compiler-filter value %s", compiler_filter_string);
- }
-
- // Set the compilation target's implicit checks options.
- switch (instruction_set) {
- case kArm:
- case kThumb2:
- case kArm64:
- case kX86:
- case kX86_64:
- implicit_null_checks = true;
- implicit_so_checks = true;
- break;
-
- default:
- // Defaults are correct.
- break;
- }
-
- if (print_pass_options) {
- PassDriverMEOpts::PrintPassOptions();
- }
-
- std::unique_ptr<CompilerOptions> compiler_options(new CompilerOptions(compiler_filter,
- huge_method_threshold,
- large_method_threshold,
- small_method_threshold,
- tiny_method_threshold,
- num_dex_methods_threshold,
- generate_gdb_information,
- include_patch_information,
- top_k_profile_threshold,
- include_debug_symbols,
- implicit_null_checks,
- implicit_so_checks,
- implicit_suspend_checks,
- compile_pic
-#ifdef ART_SEA_IR_MODE
- , compiler_options.sea_ir_ =
- true;
-#endif
- )); // NOLINT(whitespace/parens)
-
- // Done with usage checks, enable watchdog if requested
- WatchDog watch_dog(watch_dog_enabled);
-
- // Check early that the result of compilation can be written
- std::unique_ptr<File> oat_file;
- bool create_file = !oat_unstripped.empty(); // as opposed to using open file descriptor
- if (create_file) {
- oat_file.reset(OS::CreateEmptyFile(oat_unstripped.c_str()));
- if (oat_location.empty()) {
- oat_location = oat_filename;
- }
- } else {
- oat_file.reset(new File(oat_fd, oat_location));
- oat_file->DisableAutoClose();
- oat_file->SetLength(0);
- }
- if (oat_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to create oat file: " << oat_location;
- return EXIT_FAILURE;
- }
- if (create_file && fchmod(oat_file->Fd(), 0644) != 0) {
- PLOG(ERROR) << "Failed to make oat file world readable: " << oat_location;
- return EXIT_FAILURE;
- }
-
- timings.StartTiming("dex2oat Setup");
- LOG(INFO) << CommandLine();
-
- RuntimeOptions runtime_options;
- std::vector<const DexFile*> boot_class_path;
- art::MemMap::Init(); // For ZipEntry::ExtractToMemMap.
- if (boot_image_option.empty()) {
- size_t failure_count = OpenDexFiles(dex_filenames, dex_locations, boot_class_path);
- if (failure_count > 0) {
- LOG(ERROR) << "Failed to open some dex files: " << failure_count;
- return EXIT_FAILURE;
- }
- runtime_options.push_back(std::make_pair("bootclasspath", &boot_class_path));
- } else {
- runtime_options.push_back(std::make_pair(boot_image_option.c_str(), nullptr));
- }
- for (size_t i = 0; i < runtime_args.size(); i++) {
- runtime_options.push_back(std::make_pair(runtime_args[i], nullptr));
- }
-
- std::unique_ptr<VerificationResults> verification_results(new VerificationResults(
- compiler_options.get()));
- DexFileToMethodInlinerMap method_inliner_map;
- QuickCompilerCallbacks callbacks(verification_results.get(), &method_inliner_map);
- runtime_options.push_back(std::make_pair("compilercallbacks", &callbacks));
- runtime_options.push_back(
- std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set)));
-
- Dex2Oat* p_dex2oat;
- if (!Dex2Oat::Create(&p_dex2oat,
- runtime_options,
- *compiler_options,
- compiler_kind,
- instruction_set,
- instruction_set_features.get(),
- verification_results.get(),
- &method_inliner_map,
- thread_count)) {
- LOG(ERROR) << "Failed to create dex2oat";
- return EXIT_FAILURE;
- }
- std::unique_ptr<Dex2Oat> dex2oat(p_dex2oat);
-
- // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
- // give it away now so that we don't starve GC.
- Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kNative);
- // If we're doing the image, override the compiler filter to force full compilation. Must be
- // done ahead of WellKnownClasses::Init that causes verification. Note: doesn't force
- // compilation of class initializers.
- // Whilst we're in native take the opportunity to initialize well known classes.
- WellKnownClasses::Init(self->GetJniEnv());
-
- // If --image-classes was specified, calculate the full list of classes to include in the image
- std::unique_ptr<std::set<std::string>> image_classes(nullptr);
- if (image_classes_filename != nullptr) {
- std::string error_msg;
- if (image_classes_zip_filename != nullptr) {
- image_classes.reset(dex2oat->ReadImageClassesFromZip(image_classes_zip_filename,
- image_classes_filename,
- &error_msg));
- } else {
- image_classes.reset(dex2oat->ReadImageClassesFromFile(image_classes_filename));
- }
- if (image_classes.get() == nullptr) {
- LOG(ERROR) << "Failed to create list of image classes from '" << image_classes_filename <<
- "': " << error_msg;
- return EXIT_FAILURE;
- }
- } else if (image) {
- image_classes.reset(new std::set<std::string>);
- }
-
- std::vector<const DexFile*> dex_files;
- if (boot_image_option.empty()) {
- dex_files = Runtime::Current()->GetClassLinker()->GetBootClassPath();
- } else {
- if (dex_filenames.empty()) {
- ATRACE_BEGIN("Opening zip archive from file descriptor");
- std::string error_msg;
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd, zip_location.c_str(),
- &error_msg));
- if (zip_archive.get() == nullptr) {
- LOG(ERROR) << "Failed to open zip from file descriptor for '" << zip_location << "': "
- << error_msg;
- return EXIT_FAILURE;
+ if (option.starts_with("--dex-file=")) {
+ dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data());
+ } else if (option.starts_with("--dex-location=")) {
+ dex_locations_.push_back(option.substr(strlen("--dex-location=")).data());
+ } else if (option.starts_with("--zip-fd=")) {
+ const char* zip_fd_str = option.substr(strlen("--zip-fd=")).data();
+ if (!ParseInt(zip_fd_str, &zip_fd_)) {
+ Usage("Failed to parse --zip-fd argument '%s' as an integer", zip_fd_str);
+ }
+ if (zip_fd_ < 0) {
+ Usage("--zip-fd passed a negative value %d", zip_fd_);
+ }
+ } else if (option.starts_with("--zip-location=")) {
+ zip_location_ = option.substr(strlen("--zip-location=")).data();
+ } else if (option.starts_with("--oat-file=")) {
+ oat_filename_ = option.substr(strlen("--oat-file=")).data();
+ } else if (option.starts_with("--oat-symbols=")) {
+ oat_symbols = option.substr(strlen("--oat-symbols=")).data();
+ } else if (option.starts_with("--oat-fd=")) {
+ const char* oat_fd_str = option.substr(strlen("--oat-fd=")).data();
+ if (!ParseInt(oat_fd_str, &oat_fd_)) {
+ Usage("Failed to parse --oat-fd argument '%s' as an integer", oat_fd_str);
+ }
+ if (oat_fd_ < 0) {
+ Usage("--oat-fd passed a negative value %d", oat_fd_);
+ }
+ } else if (option == "--watch-dog") {
+ watch_dog_enabled = true;
+ } else if (option == "--no-watch-dog") {
+ watch_dog_enabled = false;
+ } else if (option == "--gen-gdb-info") {
+ generate_gdb_information = true;
+ // Debug symbols are needed for gdb information.
+ include_debug_symbols = true;
+ } else if (option == "--no-gen-gdb-info") {
+ generate_gdb_information = false;
+ } else if (option.starts_with("-j")) {
+ const char* thread_count_str = option.substr(strlen("-j")).data();
+ if (!ParseUint(thread_count_str, &thread_count_)) {
+ Usage("Failed to parse -j argument '%s' as an integer", thread_count_str);
+ }
+ } else if (option.starts_with("--oat-location=")) {
+ oat_location_ = option.substr(strlen("--oat-location=")).data();
+ } else if (option.starts_with("--bitcode=")) {
+ bitcode_filename_ = option.substr(strlen("--bitcode=")).data();
+ } else if (option.starts_with("--image=")) {
+ image_filename_ = option.substr(strlen("--image=")).data();
+ } else if (option.starts_with("--image-classes=")) {
+ image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
+ } else if (option.starts_with("--image-classes-zip=")) {
+ image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data();
+ } else if (option.starts_with("--compiled-classes=")) {
+ compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
+ } else if (option.starts_with("--compiled-classes-zip=")) {
+ compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
+ } else if (option.starts_with("--base=")) {
+ const char* image_base_str = option.substr(strlen("--base=")).data();
+ char* end;
+ image_base_ = strtoul(image_base_str, &end, 16);
+ if (end == image_base_str || *end != '\0') {
+ Usage("Failed to parse hexadecimal value for option %s", option.data());
+ }
+ } else if (option.starts_with("--boot-image=")) {
+ boot_image_filename = option.substr(strlen("--boot-image=")).data();
+ } else if (option.starts_with("--android-root=")) {
+ android_root_ = option.substr(strlen("--android-root=")).data();
+ } else if (option.starts_with("--instruction-set=")) {
+ StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
+ // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it.
+      std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]);
+ strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length());
+ buf.get()[instruction_set_str.length()] = 0;
+ instruction_set_ = GetInstructionSetFromString(buf.get());
+ // arm actually means thumb2.
+ if (instruction_set_ == InstructionSet::kArm) {
+ instruction_set_ = InstructionSet::kThumb2;
+ }
+ } else if (option.starts_with("--instruction-set-variant=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(instruction_set_, str.as_string(), &error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("%s", error_msg.c_str());
+ }
+ } else if (option.starts_with("--instruction-set-features=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ if (instruction_set_features_.get() == nullptr) {
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Problem initializing default instruction set features variant: %s",
+ error_msg.c_str());
+ }
+ }
+ instruction_set_features_.reset(
+ instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Error parsing '%s': %s", option.data(), error_msg.c_str());
+ }
+ } else if (option.starts_with("--compiler-backend=")) {
+ StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
+ if (backend_str == "Quick") {
+ compiler_kind_ = Compiler::kQuick;
+ } else if (backend_str == "Optimizing") {
+ compiler_kind_ = Compiler::kOptimizing;
+ compile_pic = true;
+ } else if (backend_str == "Portable") {
+ compiler_kind_ = Compiler::kPortable;
+ } else {
+ Usage("Unknown compiler backend: %s", backend_str.data());
+ }
+ } else if (option.starts_with("--compiler-filter=")) {
+ compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
+ } else if (option == "--compile-pic") {
+ compile_pic = true;
+ } else if (option.starts_with("--huge-method-max=")) {
+ const char* threshold = option.substr(strlen("--huge-method-max=")).data();
+ if (!ParseInt(threshold, &huge_method_threshold)) {
+ Usage("Failed to parse --huge-method-max '%s' as an integer", threshold);
+ }
+ if (huge_method_threshold < 0) {
+        Usage("--huge-method-max passed a negative value %d", huge_method_threshold);
+ }
+ } else if (option.starts_with("--large-method-max=")) {
+ const char* threshold = option.substr(strlen("--large-method-max=")).data();
+ if (!ParseInt(threshold, &large_method_threshold)) {
+ Usage("Failed to parse --large-method-max '%s' as an integer", threshold);
+ }
+ if (large_method_threshold < 0) {
+        Usage("--large-method-max passed a negative value %d", large_method_threshold);
+ }
+ } else if (option.starts_with("--small-method-max=")) {
+ const char* threshold = option.substr(strlen("--small-method-max=")).data();
+ if (!ParseInt(threshold, &small_method_threshold)) {
+ Usage("Failed to parse --small-method-max '%s' as an integer", threshold);
+ }
+ if (small_method_threshold < 0) {
+        Usage("--small-method-max passed a negative value %d", small_method_threshold);
+ }
+ } else if (option.starts_with("--tiny-method-max=")) {
+ const char* threshold = option.substr(strlen("--tiny-method-max=")).data();
+ if (!ParseInt(threshold, &tiny_method_threshold)) {
+ Usage("Failed to parse --tiny-method-max '%s' as an integer", threshold);
+ }
+ if (tiny_method_threshold < 0) {
+        Usage("--tiny-method-max passed a negative value %d", tiny_method_threshold);
+ }
+ } else if (option.starts_with("--num-dex-methods=")) {
+ const char* threshold = option.substr(strlen("--num-dex-methods=")).data();
+ if (!ParseInt(threshold, &num_dex_methods_threshold)) {
+ Usage("Failed to parse --num-dex-methods '%s' as an integer", threshold);
+ }
+ if (num_dex_methods_threshold < 0) {
+        Usage("--num-dex-methods passed a negative value %d", num_dex_methods_threshold);
+ }
+ } else if (option == "--host") {
+ is_host_ = true;
+ } else if (option == "--runtime-arg") {
+ if (++i >= argc) {
+ Usage("Missing required argument for --runtime-arg");
+ }
+ if (log_options) {
+ LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
+ }
+ runtime_args_.push_back(argv[i]);
+ } else if (option == "--dump-timing") {
+ dump_timing_ = true;
+ } else if (option == "--dump-passes") {
+ dump_passes_ = true;
+ } else if (option == "--dump-stats") {
+ dump_stats_ = true;
+ } else if (option == "--include-debug-symbols" || option == "--no-strip-symbols") {
+ include_debug_symbols = true;
+ } else if (option == "--no-include-debug-symbols" || option == "--strip-symbols") {
+ include_debug_symbols = false;
+ generate_gdb_information = false; // Depends on debug symbols, see above.
+ } else if (option.starts_with("--profile-file=")) {
+ profile_file_ = option.substr(strlen("--profile-file=")).data();
+ VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
+ } else if (option == "--no-profile-file") {
+ // No profile
+ } else if (option.starts_with("--top-k-profile-threshold=")) {
+ ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold);
+ } else if (option == "--print-pass-names") {
+ PassDriverMEOpts::PrintPassNames();
+ } else if (option.starts_with("--disable-passes=")) {
+ std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
+ PassDriverMEOpts::CreateDefaultPassList(disable_passes);
+ } else if (option.starts_with("--print-passes=")) {
+ std::string print_passes = option.substr(strlen("--print-passes=")).data();
+ PassDriverMEOpts::SetPrintPassList(print_passes);
+ } else if (option == "--print-all-passes") {
+ PassDriverMEOpts::SetPrintAllPasses();
+ } else if (option.starts_with("--dump-cfg-passes=")) {
+ std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data();
+ PassDriverMEOpts::SetDumpPassList(dump_passes_string);
+ } else if (option == "--print-pass-options") {
+ print_pass_options = true;
+ } else if (option.starts_with("--pass-options=")) {
+ std::string options = option.substr(strlen("--pass-options=")).data();
+ PassDriverMEOpts::SetOverriddenPassOptions(options);
+ } else if (option == "--include-patch-information") {
+ include_patch_information = true;
+ } else if (option == "--no-include-patch-information") {
+ include_patch_information = false;
+ } else if (option.starts_with("--verbose-methods=")) {
+ // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages conditional
+      //       on having verbose methods.
+ gLogVerbosity.compiler = false;
+ Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
+ } else {
+ Usage("Unknown argument %s", option.data());
}
- if (!DexFile::OpenFromZip(*zip_archive.get(), zip_location, &error_msg, &dex_files)) {
- LOG(ERROR) << "Failed to open dex from file descriptor for zip file '" << zip_location
- << "': " << error_msg;
- return EXIT_FAILURE;
+ }
+
+ if (oat_filename_.empty() && oat_fd_ == -1) {
+ Usage("Output must be supplied with either --oat-file or --oat-fd");
+ }
+
+ if (!oat_filename_.empty() && oat_fd_ != -1) {
+ Usage("--oat-file should not be used with --oat-fd");
+ }
+
+ if (!oat_symbols.empty() && oat_fd_ != -1) {
+ Usage("--oat-symbols should not be used with --oat-fd");
+ }
+
+ if (!oat_symbols.empty() && is_host_) {
+ Usage("--oat-symbols should not be used with --host");
+ }
+
+ if (oat_fd_ != -1 && !image_filename_.empty()) {
+ Usage("--oat-fd should not be used with --image");
+ }
+
+ if (android_root_.empty()) {
+ const char* android_root_env_var = getenv("ANDROID_ROOT");
+ if (android_root_env_var == nullptr) {
+ Usage("--android-root unspecified and ANDROID_ROOT not set");
}
- ATRACE_END();
+ android_root_ += android_root_env_var;
+ }
+
+ image_ = (!image_filename_.empty());
+ if (!image_ && boot_image_filename.empty()) {
+ boot_image_filename += android_root_;
+ boot_image_filename += "/framework/boot.art";
+ }
+ if (!boot_image_filename.empty()) {
+ boot_image_option_ += "-Ximage:";
+ boot_image_option_ += boot_image_filename;
+ }
+
+ if (image_classes_filename_ != nullptr && !image_) {
+ Usage("--image-classes should only be used with --image");
+ }
+
+ if (image_classes_filename_ != nullptr && !boot_image_option_.empty()) {
+ Usage("--image-classes should not be used with --boot-image");
+ }
+
+ if (image_classes_zip_filename_ != nullptr && image_classes_filename_ == nullptr) {
+ Usage("--image-classes-zip should be used with --image-classes");
+ }
+
+ if (compiled_classes_filename_ != nullptr && !image_) {
+ Usage("--compiled-classes should only be used with --image");
+ }
+
+ if (compiled_classes_filename_ != nullptr && !boot_image_option_.empty()) {
+ Usage("--compiled-classes should not be used with --boot-image");
+ }
+
+ if (compiled_classes_zip_filename_ != nullptr && compiled_classes_filename_ == nullptr) {
+ Usage("--compiled-classes-zip should be used with --compiled-classes");
+ }
+
+ if (dex_filenames_.empty() && zip_fd_ == -1) {
+ Usage("Input must be supplied with either --dex-file or --zip-fd");
+ }
+
+ if (!dex_filenames_.empty() && zip_fd_ != -1) {
+ Usage("--dex-file should not be used with --zip-fd");
+ }
+
+ if (!dex_filenames_.empty() && !zip_location_.empty()) {
+ Usage("--dex-file should not be used with --zip-location");
+ }
+
+ if (dex_locations_.empty()) {
+ for (const char* dex_file_name : dex_filenames_) {
+ dex_locations_.push_back(dex_file_name);
+ }
+ } else if (dex_locations_.size() != dex_filenames_.size()) {
+ Usage("--dex-location arguments do not match --dex-file arguments");
+ }
+
+ if (zip_fd_ != -1 && zip_location_.empty()) {
+ Usage("--zip-location should be supplied with --zip-fd");
+ }
+
+ if (boot_image_option_.empty()) {
+ if (image_base_ == 0) {
+ Usage("Non-zero --base not specified");
+ }
+ }
+
+ oat_stripped_ = oat_filename_;
+ if (!oat_symbols.empty()) {
+ oat_unstripped_ = oat_symbols;
} else {
- size_t failure_count = OpenDexFiles(dex_filenames, dex_locations, dex_files);
+ oat_unstripped_ = oat_filename_;
+ }
+
+ // If no instruction set feature was given, use the default one for the target
+ // instruction set.
+ if (instruction_set_features_.get() == nullptr) {
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Problem initializing default instruction set features variant: %s",
+ error_msg.c_str());
+ }
+ }
+
+ if (instruction_set_ == kRuntimeISA) {
+ std::unique_ptr<const InstructionSetFeatures> runtime_features(
+ InstructionSetFeatures::FromCppDefines());
+ if (!instruction_set_features_->Equals(runtime_features.get())) {
+ LOG(WARNING) << "Mismatch between dex2oat instruction set features ("
+ << *instruction_set_features_ << ") and those of dex2oat executable ("
+ << *runtime_features <<") for the command line:\n"
+ << CommandLine();
+ }
+ }
+
+ if (compiler_filter_string == nullptr) {
+ if (instruction_set_ == kMips64) {
+ // TODO: fix compiler for Mips64.
+ compiler_filter_string = "interpret-only";
+ } else if (image_) {
+ compiler_filter_string = "speed";
+ } else {
+ // TODO: Migrate SMALL mode to command line option.
+ #if ART_SMALL_MODE
+ compiler_filter_string = "interpret-only";
+ #else
+ compiler_filter_string = "speed";
+ #endif
+ }
+ }
+ CHECK(compiler_filter_string != nullptr);
+ CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
+ if (strcmp(compiler_filter_string, "verify-none") == 0) {
+ compiler_filter = CompilerOptions::kVerifyNone;
+ } else if (strcmp(compiler_filter_string, "interpret-only") == 0) {
+ compiler_filter = CompilerOptions::kInterpretOnly;
+ } else if (strcmp(compiler_filter_string, "space") == 0) {
+ compiler_filter = CompilerOptions::kSpace;
+ } else if (strcmp(compiler_filter_string, "balanced") == 0) {
+ compiler_filter = CompilerOptions::kBalanced;
+ } else if (strcmp(compiler_filter_string, "speed") == 0) {
+ compiler_filter = CompilerOptions::kSpeed;
+ } else if (strcmp(compiler_filter_string, "everything") == 0) {
+ compiler_filter = CompilerOptions::kEverything;
+ } else if (strcmp(compiler_filter_string, "time") == 0) {
+ compiler_filter = CompilerOptions::kTime;
+ } else {
+ Usage("Unknown --compiler-filter value %s", compiler_filter_string);
+ }
+
+ // Checks are all explicit until we know the architecture.
+ bool implicit_null_checks = false;
+ bool implicit_so_checks = false;
+ bool implicit_suspend_checks = false;
+ // Set the compilation target's implicit checks options.
+ switch (instruction_set_) {
+ case kArm:
+ case kThumb2:
+ case kArm64:
+ case kX86:
+ case kX86_64:
+ implicit_null_checks = true;
+ implicit_so_checks = true;
+ break;
+
+ default:
+ // Defaults are correct.
+ break;
+ }
+
+ if (print_pass_options) {
+ PassDriverMEOpts::PrintPassOptions();
+ }
+
+ compiler_options_.reset(new CompilerOptions(compiler_filter,
+ huge_method_threshold,
+ large_method_threshold,
+ small_method_threshold,
+ tiny_method_threshold,
+ num_dex_methods_threshold,
+ generate_gdb_information,
+ include_patch_information,
+ top_k_profile_threshold,
+ include_debug_symbols,
+ implicit_null_checks,
+ implicit_so_checks,
+ implicit_suspend_checks,
+ compile_pic,
+ #ifdef ART_SEA_IR_MODE
+ true,
+ #endif
+ verbose_methods_.empty() ?
+ nullptr :
+ &verbose_methods_));
+
+ // Done with usage checks, enable watchdog if requested
+ if (watch_dog_enabled) {
+ watchdog_.reset(new WatchDog(true));
+ }
+
+ // Fill some values into the key-value store for the oat header.
+ key_value_store_.reset(new SafeMap<std::string, std::string>());
+
+ // Insert some compiler things.
+ {
+ std::ostringstream oss;
+ for (int i = 0; i < argc; ++i) {
+ if (i > 0) {
+ oss << ' ';
+ }
+ oss << argv[i];
+ }
+ key_value_store_->Put(OatHeader::kDex2OatCmdLineKey, oss.str());
+ oss.str(""); // Reset.
+ oss << kRuntimeISA;
+ key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
+ key_value_store_->Put(OatHeader::kPicKey, compile_pic ? "true" : "false");
+ }
+ }
+
+ // Check whether the oat output file is writable, and open it for later.
+ bool OpenFile() {
+ bool create_file = !oat_unstripped_.empty(); // as opposed to using open file descriptor
+ if (create_file) {
+ oat_file_.reset(OS::CreateEmptyFile(oat_unstripped_.c_str()));
+ if (oat_location_.empty()) {
+ oat_location_ = oat_filename_;
+ }
+ } else {
+ oat_file_.reset(new File(oat_fd_, oat_location_, true));
+ oat_file_->DisableAutoClose();
+ if (oat_file_->SetLength(0) != 0) {
+ PLOG(WARNING) << "Truncating oat file " << oat_location_ << " failed.";
+ }
+ }
+ if (oat_file_.get() == nullptr) {
+ PLOG(ERROR) << "Failed to create oat file: " << oat_location_;
+ return false;
+ }
+ if (create_file && fchmod(oat_file_->Fd(), 0644) != 0) {
+ PLOG(ERROR) << "Failed to make oat file world readable: " << oat_location_;
+ oat_file_->Erase();
+ return false;
+ }
+ return true;
+ }
+
+ // Set up the environment for compilation. Includes starting the runtime and loading/opening the
+ // boot class path.
+ bool Setup() {
+ TimingLogger::ScopedTiming t("dex2oat Setup", timings_);
+ RuntimeOptions runtime_options;
+ std::vector<const DexFile*> boot_class_path;
+ art::MemMap::Init(); // For ZipEntry::ExtractToMemMap.
+ if (boot_image_option_.empty()) {
+ size_t failure_count = OpenDexFiles(dex_filenames_, dex_locations_, boot_class_path);
if (failure_count > 0) {
LOG(ERROR) << "Failed to open some dex files: " << failure_count;
- return EXIT_FAILURE;
+ return false;
}
+ runtime_options.push_back(std::make_pair("bootclasspath", &boot_class_path));
+ } else {
+ runtime_options.push_back(std::make_pair(boot_image_option_.c_str(), nullptr));
+ }
+ for (size_t i = 0; i < runtime_args_.size(); i++) {
+ runtime_options.push_back(std::make_pair(runtime_args_[i], nullptr));
}
- const bool kSaveDexInput = false;
- if (kSaveDexInput) {
- for (size_t i = 0; i < dex_files.size(); ++i) {
- const DexFile* dex_file = dex_files[i];
- std::string tmp_file_name(StringPrintf("/data/local/tmp/dex2oat.%d.%zd.dex", getpid(), i));
- std::unique_ptr<File> tmp_file(OS::CreateEmptyFile(tmp_file_name.c_str()));
- if (tmp_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to open file " << tmp_file_name
- << ". Try: adb shell chmod 777 /data/local/tmp";
- continue;
+ verification_results_.reset(new VerificationResults(compiler_options_.get()));
+ callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(), &method_inliner_map_));
+ runtime_options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+ runtime_options.push_back(
+ std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set_)));
+
+ if (!CreateRuntime(runtime_options)) {
+ return false;
+ }
+
+ // Runtime::Create acquired the mutator_lock_ that is normally given away when we
+ // Runtime::Start, give it away now so that we don't starve GC.
+ Thread* self = Thread::Current();
+ self->TransitionFromRunnableToSuspended(kNative);
+ // If we're doing the image, override the compiler filter to force full compilation. Must be
+ // done ahead of WellKnownClasses::Init that causes verification. Note: doesn't force
+ // compilation of class initializers.
+ // Whilst we're in native take the opportunity to initialize well known classes.
+ WellKnownClasses::Init(self->GetJniEnv());
+
+ // If --image-classes was specified, calculate the full list of classes to include in the image
+ if (image_classes_filename_ != nullptr) {
+ std::string error_msg;
+ if (image_classes_zip_filename_ != nullptr) {
+ image_classes_.reset(ReadImageClassesFromZip(image_classes_zip_filename_,
+ image_classes_filename_,
+ &error_msg));
+ } else {
+ image_classes_.reset(ReadImageClassesFromFile(image_classes_filename_));
+ }
+ if (image_classes_.get() == nullptr) {
+ LOG(ERROR) << "Failed to create list of image classes from '" << image_classes_filename_ <<
+ "': " << error_msg;
+ return false;
+ }
+ } else if (image_) {
+ image_classes_.reset(new std::set<std::string>);
+ }
+ // If --compiled-classes was specified, calculate the full list of classes to compile in the
+ // image.
+ if (compiled_classes_filename_ != nullptr) {
+ std::string error_msg;
+ if (compiled_classes_zip_filename_ != nullptr) {
+ compiled_classes_.reset(ReadImageClassesFromZip(compiled_classes_zip_filename_,
+ compiled_classes_filename_,
+ &error_msg));
+ } else {
+ compiled_classes_.reset(ReadImageClassesFromFile(compiled_classes_filename_));
+ }
+ if (compiled_classes_.get() == nullptr) {
+ LOG(ERROR) << "Failed to create list of compiled classes from '"
+ << compiled_classes_filename_ << "': " << error_msg;
+ return false;
+ }
+ } else if (image_) {
+ compiled_classes_.reset(nullptr); // By default compile everything.
+ }
+
+ if (boot_image_option_.empty()) {
+ dex_files_ = Runtime::Current()->GetClassLinker()->GetBootClassPath();
+ } else {
+ if (dex_filenames_.empty()) {
+ ATRACE_BEGIN("Opening zip archive from file descriptor");
+ std::string error_msg;
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd_,
+ zip_location_.c_str(),
+ &error_msg));
+ if (zip_archive.get() == nullptr) {
+ LOG(ERROR) << "Failed to open zip from file descriptor for '" << zip_location_ << "': "
+ << error_msg;
+ return false;
}
- tmp_file->WriteFully(dex_file->Begin(), dex_file->Size());
- LOG(INFO) << "Wrote input to " << tmp_file_name;
+ if (!DexFile::OpenFromZip(*zip_archive.get(), zip_location_, &error_msg, &dex_files_)) {
+ LOG(ERROR) << "Failed to open dex from file descriptor for zip file '" << zip_location_
+ << "': " << error_msg;
+ return false;
+ }
+ ATRACE_END();
+ } else {
+ size_t failure_count = OpenDexFiles(dex_filenames_, dex_locations_, dex_files_);
+ if (failure_count > 0) {
+ LOG(ERROR) << "Failed to open some dex files: " << failure_count;
+ return false;
+ }
+ }
+
+ constexpr bool kSaveDexInput = false;
+ if (kSaveDexInput) {
+ for (size_t i = 0; i < dex_files_.size(); ++i) {
+ const DexFile* dex_file = dex_files_[i];
+ std::string tmp_file_name(StringPrintf("/data/local/tmp/dex2oat.%d.%zd.dex", getpid(), i));
+ std::unique_ptr<File> tmp_file(OS::CreateEmptyFile(tmp_file_name.c_str()));
+ if (tmp_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to open file " << tmp_file_name
+ << ". Try: adb shell chmod 777 /data/local/tmp";
+ continue;
+ }
+ // This is just dumping files for debugging. Ignore errors, and leave remnants.
+ UNUSED(tmp_file->WriteFully(dex_file->Begin(), dex_file->Size()));
+ UNUSED(tmp_file->Flush());
+ UNUSED(tmp_file->Close());
+ LOG(INFO) << "Wrote input to " << tmp_file_name;
+ }
}
}
- }
- // Ensure opened dex files are writable for dex-to-dex transformations.
- for (const auto& dex_file : dex_files) {
- if (!dex_file->EnableWrite()) {
- PLOG(ERROR) << "Failed to make .dex file writeable '" << dex_file->GetLocation() << "'\n";
+ // Ensure opened dex files are writable for dex-to-dex transformations.
+ for (const auto& dex_file : dex_files_) {
+ if (!dex_file->EnableWrite()) {
+ PLOG(ERROR) << "Failed to make .dex file writeable '" << dex_file->GetLocation() << "'\n";
+ }
}
- }
- /*
- * If we're not in interpret-only or verify-none mode, go ahead and compile small applications.
- * Don't bother to check if we're doing the image.
- */
- if (!image && compiler_options->IsCompilationEnabled() && compiler_kind == Compiler::kQuick) {
- size_t num_methods = 0;
- for (size_t i = 0; i != dex_files.size(); ++i) {
- const DexFile* dex_file = dex_files[i];
- CHECK(dex_file != nullptr);
- num_methods += dex_file->NumMethodIds();
+ /*
+ * If we're not in interpret-only or verify-none mode, go ahead and compile small applications.
+ * Don't bother to check if we're doing the image.
+ */
+ if (!image_ && compiler_options_->IsCompilationEnabled() && compiler_kind_ == Compiler::kQuick) {
+ size_t num_methods = 0;
+ for (size_t i = 0; i != dex_files_.size(); ++i) {
+ const DexFile* dex_file = dex_files_[i];
+ CHECK(dex_file != nullptr);
+ num_methods += dex_file->NumMethodIds();
+ }
+ if (num_methods <= compiler_options_->GetNumDexMethodsThreshold()) {
+ compiler_options_->SetCompilerFilter(CompilerOptions::kSpeed);
+ VLOG(compiler) << "Below method threshold, compiling anyways";
+ }
}
- if (num_methods <= compiler_options->GetNumDexMethodsThreshold()) {
- compiler_options->SetCompilerFilter(CompilerOptions::kSpeed);
- VLOG(compiler) << "Below method threshold, compiling anyways";
+
+ return true;
+ }
+
+ // Create and invoke the compiler driver. This will compile all the dex files.
+ void Compile() {
+ TimingLogger::ScopedTiming t("dex2oat Compile", timings_);
+ compiler_phases_timings_.reset(new CumulativeLogger("compilation times"));
+
+ // Handle and ClassLoader creation needs to come after Runtime::Create
+ jobject class_loader = nullptr;
+ Thread* self = Thread::Current();
+ if (!boot_image_option_.empty()) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ std::vector<const DexFile*> class_path_files(dex_files_);
+ OpenClassPathFiles(runtime_->GetClassPathString(), class_path_files);
+ ScopedObjectAccess soa(self);
+ for (size_t i = 0; i < class_path_files.size(); i++) {
+ class_linker->RegisterDexFile(*class_path_files[i]);
+ }
+ soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader);
+ ScopedLocalRef<jobject> class_loader_local(soa.Env(),
+ soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
+ class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
+ Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path_files);
}
+
+ driver_.reset(new CompilerDriver(compiler_options_.get(),
+ verification_results_.get(),
+ &method_inliner_map_,
+ compiler_kind_,
+ instruction_set_,
+ instruction_set_features_.get(),
+ image_,
+ image_classes_.release(),
+ compiled_classes_.release(),
+ thread_count_,
+ dump_stats_,
+ dump_passes_,
+ compiler_phases_timings_.get(),
+ profile_file_));
+
+ driver_->GetCompiler()->SetBitcodeFileName(*driver_, bitcode_filename_);
+
+ driver_->CompileAll(class_loader, dex_files_, timings_);
}
- // Fill some values into the key-value store for the oat header.
- std::unique_ptr<SafeMap<std::string, std::string> > key_value_store(
- new SafeMap<std::string, std::string>());
-
- // Insert some compiler things.
- std::ostringstream oss;
- for (int i = 0; i < argc; ++i) {
- if (i > 0) {
- oss << ' ';
- }
- oss << argv[i];
- }
- key_value_store->Put(OatHeader::kDex2OatCmdLineKey, oss.str());
- oss.str(""); // Reset.
- oss << kRuntimeISA;
- key_value_store->Put(OatHeader::kDex2OatHostKey, oss.str());
-
- dex2oat->Compile(boot_image_option,
- dex_files,
- bitcode_filename,
- image,
- image_classes,
- dump_stats,
- dump_passes,
- &timings,
- &compiler_phases_timings,
- profile_file);
-
- if (image) {
- dex2oat->PrepareImageWriter(image_base);
- }
-
- if (!dex2oat->CreateOatFile(dex_files,
- android_root,
- is_host,
- oat_file.get(),
- oat_location,
- &timings,
- key_value_store.get())) {
- LOG(ERROR) << "Failed to create oat file: " << oat_location;
- return EXIT_FAILURE;
- }
-
- VLOG(compiler) << "Oat file written successfully (unstripped): " << oat_location;
-
// Notes on the interleaving of creating the image and oat file to
// ensure the references between the two are correct.
//
@@ -1504,87 +1216,581 @@
//
// Steps 1.-3. are done by the CreateOatFile() above, steps 4.-5.
// are done by the CreateImageFile() below.
- //
- if (image) {
- TimingLogger::ScopedTiming t("dex2oat ImageWriter", &timings);
- bool image_creation_success = dex2oat->CreateImageFile(image_filename,
- oat_unstripped,
- oat_location);
- if (!image_creation_success) {
- return EXIT_FAILURE;
+
+
+ // Write out the generated code part. Calls the OatWriter and ElfBuilder. Also prepares the
+ // ImageWriter, if necessary.
+ // Note: Flushing (and closing) the file is the caller's responsibility, except for the failure
+ // case (when the file will be explicitly erased).
+ bool CreateOatFile() {
+ CHECK(key_value_store_.get() != nullptr);
+
+ TimingLogger::ScopedTiming t("dex2oat Oat", timings_);
+
+ std::unique_ptr<OatWriter> oat_writer;
+ {
+ TimingLogger::ScopedTiming t2("dex2oat OatWriter", timings_);
+ std::string image_file_location;
+ uint32_t image_file_location_oat_checksum = 0;
+ uintptr_t image_file_location_oat_data_begin = 0;
+ int32_t image_patch_delta = 0;
+ if (image_) {
+ PrepareImageWriter(image_base_);
+ } else {
+ TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
+ image_file_location_oat_data_begin =
+ reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatDataBegin());
+ image_file_location = image_space->GetImageFilename();
+ image_patch_delta = image_space->GetImageHeader().GetPatchDelta();
+ }
+
+ if (!image_file_location.empty()) {
+ key_value_store_->Put(OatHeader::kImageLocationKey, image_file_location);
+ }
+
+ oat_writer.reset(new OatWriter(dex_files_, image_file_location_oat_checksum,
+ image_file_location_oat_data_begin,
+ image_patch_delta,
+ driver_.get(),
+ image_writer_.get(),
+ timings_,
+ key_value_store_.get()));
}
- VLOG(compiler) << "Image written successfully: " << image_filename;
+
+ if (image_) {
+ // The OatWriter constructor has already updated offsets in methods and we need to
+ // prepare method offsets in the image address space for direct method patching.
+ TimingLogger::ScopedTiming t2("dex2oat Prepare image address space", timings_);
+ if (!image_writer_->PrepareImageAddressSpace()) {
+ LOG(ERROR) << "Failed to prepare image address space.";
+ return false;
+ }
+ }
+
+ {
+ TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
+ if (!driver_->WriteElf(android_root_, is_host_, dex_files_, oat_writer.get(),
+ oat_file_.get())) {
+ LOG(ERROR) << "Failed to write ELF file " << oat_file_->GetPath();
+ oat_file_->Erase();
+ return false;
+ }
+ }
+
+ VLOG(compiler) << "Oat file written successfully (unstripped): " << oat_location_;
+ return true;
}
- if (is_host) {
- timings.EndTiming();
- if (dump_timing || (dump_slow_timing && timings.GetTotalNs() > MsToNs(1000))) {
- LOG(INFO) << Dumpable<TimingLogger>(timings);
+ // If we are compiling an image, invoke the image creation routine. Else just skip.
+ bool HandleImage() {
+ if (image_) {
+ TimingLogger::ScopedTiming t("dex2oat ImageWriter", timings_);
+ if (!CreateImageFile()) {
+ return false;
+ }
+ VLOG(compiler) << "Image written successfully: " << image_filename_;
}
- if (dump_passes) {
- LOG(INFO) << Dumpable<CumulativeLogger>(compiler_phases_timings);
+ return true;
+ }
+
+ // Create a copy from unstripped to stripped.
+ bool CopyUnstrippedToStripped() {
+ // If we don't want to strip in place, copy from unstripped location to stripped location.
+ // We need to strip after image creation because FixupElf needs to use .strtab.
+ if (oat_unstripped_ != oat_stripped_) {
+ // If the oat file is still open, flush it.
+ if (oat_file_.get() != nullptr && oat_file_->IsOpened()) {
+ if (!FlushCloseOatFile()) {
+ return false;
+ }
+ }
+
+ TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
+ std::unique_ptr<File> in(OS::OpenFileForReading(oat_unstripped_.c_str()));
+ std::unique_ptr<File> out(OS::CreateEmptyFile(oat_stripped_.c_str()));
+ size_t buffer_size = 8192;
+      std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+ while (true) {
+ int bytes_read = TEMP_FAILURE_RETRY(read(in->Fd(), buffer.get(), buffer_size));
+ if (bytes_read <= 0) {
+ break;
+ }
+ bool write_ok = out->WriteFully(buffer.get(), bytes_read);
+ CHECK(write_ok);
+ }
+ if (kUsePortableCompiler) {
+ oat_file_.reset(out.release());
+ } else {
+ if (out->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_stripped_;
+ return false;
+ }
+ }
+ VLOG(compiler) << "Oat file copied successfully (stripped): " << oat_stripped_;
}
+ return true;
+ }
+
+ // Run the ElfStripper. Currently only relevant for the portable compiler.
+ bool Strip() {
+ if (kUsePortableCompiler) {
+ // Portable includes debug symbols unconditionally. If we are not supposed to create them,
+ // strip them now. Quick generates debug symbols only when the flag(s) are set.
+ if (!compiler_options_->GetIncludeDebugSymbols()) {
+ CHECK(oat_file_.get() != nullptr && oat_file_->IsOpened());
+
+ TimingLogger::ScopedTiming t("dex2oat ElfStripper", timings_);
+ // Strip unneeded sections for target
+ off_t seek_actual = lseek(oat_file_->Fd(), 0, SEEK_SET);
+ CHECK_EQ(0, seek_actual);
+ std::string error_msg;
+ if (!ElfFile::Strip(oat_file_.get(), &error_msg)) {
+ LOG(ERROR) << "Failed to strip elf file: " << error_msg;
+ oat_file_->Erase();
+ return false;
+ }
+
+ if (!FlushCloseOatFile()) {
+ return false;
+ }
+
+ // We wrote the oat file successfully, and want to keep it.
+ VLOG(compiler) << "Oat file written successfully (stripped): " << oat_location_;
+ } else {
+ VLOG(compiler) << "Oat file written successfully without stripping: " << oat_location_;
+ }
+ }
+
+ return true;
+ }
+
+ bool FlushOatFile() {
+ if (oat_file_.get() != nullptr) {
+ TimingLogger::ScopedTiming t2("dex2oat Flush ELF", timings_);
+ if (oat_file_->Flush() != 0) {
+ PLOG(ERROR) << "Failed to flush oat file: " << oat_location_ << " / "
+ << oat_filename_;
+ oat_file_->Erase();
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool FlushCloseOatFile() {
+ if (oat_file_.get() != nullptr) {
+ std::unique_ptr<File> tmp(oat_file_.release());
+ if (tmp->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close oat file: " << oat_location_ << " / "
+ << oat_filename_;
+ return false;
+ }
+ }
+ return true;
+ }
+
+ void DumpTiming() {
+ if (dump_timing_ || (dump_slow_timing_ && timings_->GetTotalNs() > MsToNs(1000))) {
+ LOG(INFO) << Dumpable<TimingLogger>(*timings_);
+ }
+ if (dump_passes_) {
+ LOG(INFO) << Dumpable<CumulativeLogger>(*driver_->GetTimingsLogger());
+ }
+ }
+
+ CompilerOptions* GetCompilerOptions() const {
+ return compiler_options_.get();
+ }
+
+ bool IsImage() const {
+ return image_;
+ }
+
+ bool IsHost() const {
+ return is_host_;
+ }
+
+ private:
+ static size_t OpenDexFiles(const std::vector<const char*>& dex_filenames,
+ const std::vector<const char*>& dex_locations,
+ std::vector<const DexFile*>& dex_files) {
+ size_t failure_count = 0;
+ for (size_t i = 0; i < dex_filenames.size(); i++) {
+ const char* dex_filename = dex_filenames[i];
+ const char* dex_location = dex_locations[i];
+ ATRACE_BEGIN(StringPrintf("Opening dex file '%s'", dex_filenames[i]).c_str());
+ std::string error_msg;
+ if (!OS::FileExists(dex_filename)) {
+ LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
+ continue;
+ }
+ if (!DexFile::Open(dex_filename, dex_location, &error_msg, &dex_files)) {
+ LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
+ ++failure_count;
+ }
+ ATRACE_END();
+ }
+ return failure_count;
+ }
+
+ // Returns true if dex_files has a dex with the named location.
+ static bool DexFilesContains(const std::vector<const DexFile*>& dex_files,
+ const std::string& location) {
+ for (size_t i = 0; i < dex_files.size(); ++i) {
+ if (dex_files[i]->GetLocation() == location) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Appends to dex_files any elements of class_path that it doesn't already
+ // contain. This will open those dex files as necessary.
+ static void OpenClassPathFiles(const std::string& class_path,
+ std::vector<const DexFile*>& dex_files) {
+ std::vector<std::string> parsed;
+ Split(class_path, ':', &parsed);
+ // Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained.
+ ScopedObjectAccess soa(Thread::Current());
+ for (size_t i = 0; i < parsed.size(); ++i) {
+ if (DexFilesContains(dex_files, parsed[i])) {
+ continue;
+ }
+ std::string error_msg;
+ if (!DexFile::Open(parsed[i].c_str(), parsed[i].c_str(), &error_msg, &dex_files)) {
+ LOG(WARNING) << "Failed to open dex file '" << parsed[i] << "': " << error_msg;
+ }
+ }
+ }
+
+ // Create a runtime necessary for compilation.
+ bool CreateRuntime(const RuntimeOptions& runtime_options)
+ SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) {
+ if (!Runtime::Create(runtime_options, false)) {
+ LOG(ERROR) << "Failed to create runtime";
+ return false;
+ }
+ Runtime* runtime = Runtime::Current();
+ runtime->SetInstructionSet(instruction_set_);
+ for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
+ Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+ if (!runtime->HasCalleeSaveMethod(type)) {
+ runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(), type);
+ }
+ }
+ runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
+ runtime->GetClassLinker()->RunRootClinits();
+ runtime_ = runtime;
+ return true;
+ }
+
+ void PrepareImageWriter(uintptr_t image_base) {
+ image_writer_.reset(new ImageWriter(*driver_, image_base, compiler_options_->GetCompilePic()));
+ }
+
+ // Let the ImageWriter write the image file. If we do not compile PIC, also fix up the oat file.
+ bool CreateImageFile()
+ LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ CHECK(image_writer_ != nullptr);
+ if (!image_writer_->Write(image_filename_, oat_unstripped_, oat_location_)) {
+ LOG(ERROR) << "Failed to create image file " << image_filename_;
+ return false;
+ }
+ uintptr_t oat_data_begin = image_writer_->GetOatDataBegin();
+
+ // Destroy ImageWriter before doing FixupElf.
+ image_writer_.reset();
+
+ // Do not fix up the ELF file if we are --compile-pic
+ if (!compiler_options_->GetCompilePic()) {
+ std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str()));
+ if (oat_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_;
+ return false;
+ }
+
+ if (!ElfWriter::Fixup(oat_file.get(), oat_data_begin)) {
+ oat_file->Erase();
+ LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
+ return false;
+ }
+
+      if (oat_file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close fixed ELF file " << oat_file->GetPath();
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
+ static std::set<std::string>* ReadImageClassesFromFile(const char* image_classes_filename) {
+ std::unique_ptr<std::ifstream> image_classes_file(new std::ifstream(image_classes_filename,
+ std::ifstream::in));
+    if (!image_classes_file->is_open()) {
+ LOG(ERROR) << "Failed to open image classes file " << image_classes_filename;
+ return nullptr;
+ }
+ std::unique_ptr<std::set<std::string>> result(ReadImageClasses(*image_classes_file));
+ image_classes_file->close();
+ return result.release();
+ }
+
+ static std::set<std::string>* ReadImageClasses(std::istream& image_classes_stream) {
+ std::unique_ptr<std::set<std::string>> image_classes(new std::set<std::string>);
+ while (image_classes_stream.good()) {
+ std::string dot;
+ std::getline(image_classes_stream, dot);
+ if (StartsWith(dot, "#") || dot.empty()) {
+ continue;
+ }
+ std::string descriptor(DotToDescriptor(dot.c_str()));
+ image_classes->insert(descriptor);
+ }
+ return image_classes.release();
+ }
+
+ // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
+ static std::set<std::string>* ReadImageClassesFromZip(const char* zip_filename,
+ const char* image_classes_filename,
+ std::string* error_msg) {
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename, error_msg));
+ if (zip_archive.get() == nullptr) {
+ return nullptr;
+ }
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename, error_msg));
+ if (zip_entry.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", image_classes_filename,
+ zip_filename, error_msg->c_str());
+ return nullptr;
+ }
+ std::unique_ptr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(zip_filename,
+ image_classes_filename,
+ error_msg));
+ if (image_classes_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", image_classes_filename,
+ zip_filename, error_msg->c_str());
+ return nullptr;
+ }
+ const std::string image_classes_string(reinterpret_cast<char*>(image_classes_file->Begin()),
+ image_classes_file->Size());
+ std::istringstream image_classes_stream(image_classes_string);
+ return ReadImageClasses(image_classes_stream);
+ }
+
+ void LogCompletionTime() const {
+ LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
+ << " (threads: " << thread_count_ << ")";
+ }
+
+ std::unique_ptr<CompilerOptions> compiler_options_;
+ Compiler::Kind compiler_kind_;
+
+ InstructionSet instruction_set_;
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+
+ std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
+
+ std::unique_ptr<VerificationResults> verification_results_;
+ DexFileToMethodInlinerMap method_inliner_map_;
+ std::unique_ptr<QuickCompilerCallbacks> callbacks_;
+
+ // Not a unique_ptr as we want to just exit on non-debug builds, not bringing the runtime down
+ // in an orderly fashion. The destructor takes care of deleting this.
+ Runtime* runtime_;
+
+ size_t thread_count_;
+ uint64_t start_ns_;
+ std::unique_ptr<WatchDog> watchdog_;
+ std::unique_ptr<File> oat_file_;
+ std::string oat_stripped_;
+ std::string oat_unstripped_;
+ std::string oat_location_;
+ std::string oat_filename_;
+ int oat_fd_;
+ std::string bitcode_filename_;
+ std::vector<const char*> dex_filenames_;
+ std::vector<const char*> dex_locations_;
+ int zip_fd_;
+ std::string zip_location_;
+ std::string boot_image_option_;
+ std::vector<const char*> runtime_args_;
+ std::string image_filename_;
+ uintptr_t image_base_;
+ const char* image_classes_zip_filename_;
+ const char* image_classes_filename_;
+ const char* compiled_classes_zip_filename_;
+ const char* compiled_classes_filename_;
+ std::unique_ptr<std::set<std::string>> image_classes_;
+ std::unique_ptr<std::set<std::string>> compiled_classes_;
+ bool image_;
+ std::unique_ptr<ImageWriter> image_writer_;
+ bool is_host_;
+ std::string android_root_;
+ std::vector<const DexFile*> dex_files_;
+ std::unique_ptr<CompilerDriver> driver_;
+ std::vector<std::string> verbose_methods_;
+ bool dump_stats_;
+ bool dump_passes_;
+ bool dump_timing_;
+ bool dump_slow_timing_;
+ std::string profile_file_; // Profile file to use
+ TimingLogger* timings_;
+ std::unique_ptr<CumulativeLogger> compiler_phases_timings_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
+};
+
+const unsigned int WatchDog::kWatchDogWarningSeconds;
+const unsigned int WatchDog::kWatchDogTimeoutSeconds;
+
+static void b13564922() {
+#if defined(__linux__) && defined(__arm__)
+ int major, minor;
+ struct utsname uts;
+ if (uname(&uts) != -1 &&
+ sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
+ ((major < 3) || ((major == 3) && (minor < 4)))) {
+ // Kernels before 3.4 don't handle the ASLR well and we can run out of address
+ // space (http://b/13564922). Work around the issue by inhibiting further mmap() randomization.
+ int old_personality = personality(0xffffffff);
+ if ((old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ int new_personality = personality(old_personality | ADDR_NO_RANDOMIZE);
+ if (new_personality == -1) {
+ LOG(WARNING) << "personality(. | ADDR_NO_RANDOMIZE) failed.";
+ }
+ }
+ }
+#endif
+}
+
+static int CompileImage(Dex2Oat& dex2oat) {
+ dex2oat.Compile();
+
+ // Create the boot.oat.
+ if (!dex2oat.CreateOatFile()) {
+ return EXIT_FAILURE;
+ }
+
+ // Flush and close the boot.oat. We always expect the output file by name, and it will be
+ // re-opened from the unstripped name.
+ if (!dex2oat.FlushCloseOatFile()) {
+ return EXIT_FAILURE;
+ }
+
+ // Creates the boot.art and patches the boot.oat.
+ if (!dex2oat.HandleImage()) {
+ return EXIT_FAILURE;
+ }
+
+ // When given --host, finish early without stripping.
+ if (dex2oat.IsHost()) {
+ dex2oat.DumpTiming();
return EXIT_SUCCESS;
}
- // If we don't want to strip in place, copy from unstripped location to stripped location.
- // We need to strip after image creation because FixupElf needs to use .strtab.
- if (oat_unstripped != oat_stripped) {
- TimingLogger::ScopedTiming t("dex2oat OatFile copy", &timings);
- oat_file.reset();
- std::unique_ptr<File> in(OS::OpenFileForReading(oat_unstripped.c_str()));
- std::unique_ptr<File> out(OS::CreateEmptyFile(oat_stripped.c_str()));
- size_t buffer_size = 8192;
- std::unique_ptr<uint8_t> buffer(new uint8_t[buffer_size]);
- while (true) {
- int bytes_read = TEMP_FAILURE_RETRY(read(in->Fd(), buffer.get(), buffer_size));
- if (bytes_read <= 0) {
- break;
- }
- bool write_ok = out->WriteFully(buffer.get(), bytes_read);
- CHECK(write_ok);
- }
- oat_file.reset(out.release());
- VLOG(compiler) << "Oat file copied successfully (stripped): " << oat_stripped;
+ // Copy unstripped to stripped location, if necessary.
+ if (!dex2oat.CopyUnstrippedToStripped()) {
+ return EXIT_FAILURE;
}
-#if ART_USE_PORTABLE_COMPILER // We currently only generate symbols on Portable
- if (!compiler_options.GetIncludeDebugSymbols()) {
- timings.NewSplit("dex2oat ElfStripper");
- // Strip unneeded sections for target
- off_t seek_actual = lseek(oat_file->Fd(), 0, SEEK_SET);
- CHECK_EQ(0, seek_actual);
- std::string error_msg;
- CHECK(ElfStripper::Strip(oat_file.get(), &error_msg)) << error_msg;
-
-
- // We wrote the oat file successfully, and want to keep it.
- VLOG(compiler) << "Oat file written successfully (stripped): " << oat_location;
- } else {
- VLOG(compiler) << "Oat file written successfully without stripping: " << oat_location;
- }
-#endif // ART_USE_PORTABLE_COMPILER
-
- timings.EndTiming();
-
- if (dump_timing || (dump_slow_timing && timings.GetTotalNs() > MsToNs(1000))) {
- LOG(INFO) << Dumpable<TimingLogger>(timings);
- }
- if (dump_passes) {
- LOG(INFO) << Dumpable<CumulativeLogger>(compiler_phases_timings);
+ // Strip, if necessary.
+ if (!dex2oat.Strip()) {
+ return EXIT_FAILURE;
}
- // Everything was successfully written, do an explicit exit here to avoid running Runtime
- // destructors that take time (bug 10645725) unless we're a debug build or running on valgrind.
- if (!kIsDebugBuild && (RUNNING_ON_VALGRIND == 0)) {
- dex2oat->LogCompletionTime();
- exit(EXIT_SUCCESS);
+ // FlushClose again, as stripping might have re-opened the oat file.
+ if (!dex2oat.FlushCloseOatFile()) {
+ return EXIT_FAILURE;
}
+ dex2oat.DumpTiming();
return EXIT_SUCCESS;
-} // NOLINT(readability/fn_size)
+}
+
+static int CompileApp(Dex2Oat& dex2oat) {
+ dex2oat.Compile();
+
+ // Create the app oat.
+ if (!dex2oat.CreateOatFile()) {
+ return EXIT_FAILURE;
+ }
+
+  // Do not close the oat file here. We might have gotten the output file by file descriptor,
+ // which we would lose.
+ if (!dex2oat.FlushOatFile()) {
+ return EXIT_FAILURE;
+ }
+
+ // When given --host, finish early without stripping.
+ if (dex2oat.IsHost()) {
+ if (!dex2oat.FlushCloseOatFile()) {
+ return EXIT_FAILURE;
+ }
+
+ dex2oat.DumpTiming();
+ return EXIT_SUCCESS;
+ }
+
+ // Copy unstripped to stripped location, if necessary. This will implicitly flush & close the
+ // unstripped version. If this is given, we expect to be able to open writable files by name.
+ if (!dex2oat.CopyUnstrippedToStripped()) {
+ return EXIT_FAILURE;
+ }
+
+ // Strip, if necessary.
+ if (!dex2oat.Strip()) {
+ return EXIT_FAILURE;
+ }
+
+ // Flush and close the file.
+ if (!dex2oat.FlushCloseOatFile()) {
+ return EXIT_FAILURE;
+ }
+
+ dex2oat.DumpTiming();
+ return EXIT_SUCCESS;
+}
+
+static int dex2oat(int argc, char** argv) {
+ b13564922();
+
+ TimingLogger timings("compiler", false, false);
+
+ Dex2Oat dex2oat(&timings);
+
+ // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
+ dex2oat.ParseArgs(argc, argv);
+
+ // Check early that the result of compilation can be written
+ if (!dex2oat.OpenFile()) {
+ return EXIT_FAILURE;
+ }
+
+ LOG(INFO) << CommandLine();
+
+ if (!dex2oat.Setup()) {
+ return EXIT_FAILURE;
+ }
+
+ if (dex2oat.IsImage()) {
+ return CompileImage(dex2oat);
+ } else {
+ return CompileApp(dex2oat);
+ }
+}
} // namespace art
int main(int argc, char** argv) {
- return art::dex2oat(argc, argv);
+ int result = art::dex2oat(argc, argv);
+ // Everything was done, do an explicit exit here to avoid running Runtime destructors that take
+ // time (bug 10645725) unless we're a debug build or running on valgrind. Note: The Dex2Oat class
+ // should not destruct the runtime in this case.
+ if (!art::kIsDebugBuild && (RUNNING_ON_VALGRIND == 0)) {
+ exit(result);
+ }
+ return result;
}
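
For reference, the --image-classes / --compiled-classes list format consumed by ReadImageClasses() above is one dot-separated class name per line, with blank lines and '#' comments ignored, each name turned into a JNI-style descriptor. The following is a standalone sketch, not ART code: DotToDescriptorSketch only approximates ART's DotToDescriptor() for plain (non-array) class names, and the helper names are illustrative.

#include <algorithm>
#include <iostream>
#include <set>
#include <sstream>
#include <string>

// Approximation of DotToDescriptor() for plain class names:
// "java.lang.Object" -> "Ljava/lang/Object;".
static std::string DotToDescriptorSketch(std::string dot) {
  std::replace(dot.begin(), dot.end(), '.', '/');
  return "L" + dot + ";";
}

// Mirrors the parsing loop in ReadImageClasses(): skip blanks and '#' comments,
// collect the descriptors of the remaining lines.
static std::set<std::string> ReadImageClassesSketch(std::istream& in) {
  std::set<std::string> classes;
  std::string line;
  while (std::getline(in, line)) {
    if (line.empty() || line[0] == '#') {
      continue;
    }
    classes.insert(DotToDescriptorSketch(line));
  }
  return classes;
}

int main() {
  std::istringstream input("# preloaded classes\njava.lang.Object\njava.lang.String\n");
  for (const std::string& descriptor : ReadImageClassesSketch(input)) {
    std::cout << descriptor << "\n";  // Prints Ljava/lang/Object; and Ljava/lang/String;.
  }
  return 0;
}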
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index eb3b024..f2dd1ee 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -84,11 +84,11 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
include external/libcxx/libcxx.mk
+ # For disassembler_arm64.
+ LOCAL_SHARED_LIBRARIES += libvixl
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libvixl
include $(BUILD_SHARED_LIBRARY)
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
include $(BUILD_HOST_SHARED_LIBRARY)
endif
endef
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index c97bf64..bf68204 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -16,7 +16,7 @@
#include "disassembler.h"
-#include <iostream>
+#include <ostream>
#include "base/logging.h"
#include "base/stringprintf.h"
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index 487f433..966ee3a 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -21,8 +21,8 @@
#include <iosfwd>
+#include "arch/instruction_set.h"
#include "base/macros.h"
-#include "instruction_set.h"
namespace art {
@@ -34,8 +34,14 @@
  // Base address for calculating relative code offsets when absolute_addresses_ is false.
const uint8_t* const base_address_;
- DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address)
- : absolute_addresses_(absolute_addresses), base_address_(base_address) {}
+ // If set, the disassembler is allowed to look at load targets in literal
+ // pools.
+ const bool can_read_literals_;
+
+ DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address,
+ bool can_read_literals)
+ : absolute_addresses_(absolute_addresses), base_address_(base_address),
+ can_read_literals_(can_read_literals) {}
private:
DISALLOW_COPY_AND_ASSIGN(DisassemblerOptions);
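
With this change, callers have to pass the new third argument explicitly when building the options. A minimal sketch under that assumption (the function name is illustrative; heap allocation sidesteps DISALLOW_COPY_AND_ASSIGN, and passing false for can_read_literals is the conservative choice when the disassembled code is not mapped in the current process):

#include <cstdint>
#include <memory>

#include "disassembler.h"

namespace art {

std::unique_ptr<DisassemblerOptions> MakeDisassemblerOptionsSketch(const uint8_t* base_address) {
  // Relative addresses, caller-supplied base, and no literal-pool reads.
  return std::unique_ptr<DisassemblerOptions>(new DisassemblerOptions(
      /* absolute_addresses */ false,
      /* base_address */ base_address,
      /* can_read_literals */ false));
}

}  // namespace art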
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index ac883fe..9243b1a 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -18,7 +18,8 @@
#include <inttypes.h>
-#include <iostream>
+#include <ostream>
+#include <sstream>
#include "base/logging.h"
#include "base/stringprintf.h"
@@ -124,8 +125,10 @@
};
struct ArmRegister {
- explicit ArmRegister(uint32_t r) : r(r) { CHECK_LE(r, 15U); }
- ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) { CHECK_LE(r, 15U); }
+ explicit ArmRegister(uint32_t r_in) : r(r_in) { CHECK_LE(r_in, 15U); }
+ ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) {
+ CHECK_LE(r, 15U);
+ }
uint32_t r;
};
std::ostream& operator<<(std::ostream& os, const ArmRegister& r) {
@@ -389,7 +392,7 @@
return (bit_a << 31) | ((1 << 30) - (bit_b << 25)) | (slice << 19);
}
-uint64_t VFPExpand64(uint32_t imm8) {
+static uint64_t VFPExpand64(uint32_t imm8) {
CHECK_EQ(imm8 & 0xffu, imm8);
uint64_t bit_a = (imm8 >> 7) & 1;
uint64_t bit_b = (imm8 >> 6) & 1;
@@ -397,45 +400,6 @@
return (bit_a << 31) | ((UINT64_C(1) << 62) - (bit_b << 54)) | (slice << 48);
}
-uint64_t AdvSIMDExpand(uint32_t op, uint32_t cmode, uint32_t imm8) {
- CHECK_EQ(op & 1, op);
- CHECK_EQ(cmode & 0xf, cmode);
- CHECK_EQ(imm8 & 0xff, imm8);
- int32_t cmode321 = cmode >> 1;
- if (imm8 == 0 && cmode321 != 0 && cmode321 != 4 && cmode321 != 7) {
- return INT64_C(0x00000000deadbeef); // UNPREDICTABLE
- }
- uint64_t imm = imm8;
- switch (cmode321) {
- case 3: imm <<= 8; FALLTHROUGH_INTENDED;
- case 2: imm <<= 8; FALLTHROUGH_INTENDED;
- case 1: imm <<= 8; FALLTHROUGH_INTENDED;
- case 0: return static_cast<int64_t>((imm << 32) | imm);
- case 5: imm <<= 8; FALLTHROUGH_INTENDED;
- case 4: return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm);
- case 6:
- imm = ((imm + 1u) << ((cmode & 1) != 0 ? 16 : 8)) - 1u; // Add 8 or 16 ones.
- return static_cast<int64_t>((imm << 32) | imm);
- default:
- CHECK_EQ(cmode321, 7);
- if ((cmode & 1) == 0 && op == 0) {
- imm = (imm << 8) | imm;
- return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm);
- } else if ((cmode & 1) == 0 && op != 0) {
- for (int i = 1; i != 8; ++i) {
- imm |= ((imm >> i) & UINT64_C(1)) << (i * 8);
- }
- imm = imm & ~UINT64_C(0xfe);
- return static_cast<int64_t>((imm << 8) - imm);
- } else if ((cmode & 1) != 0 && op == 0) {
- imm = static_cast<uint32_t>(VFPExpand32(imm8));
- return static_cast<int64_t>((imm << 32) | imm);
- } else {
- return INT64_C(0xdeadbeef00000000); // UNDEFINED
- }
- }
-}
-
size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) {
uint32_t instr = (ReadU16(instr_ptr) << 16) | ReadU16(instr_ptr + 2);
// |111|1 1|1000000|0000|1111110000000000|
@@ -1358,8 +1322,6 @@
}
} else {
// STR Rt, [Rn, Rm, LSL #imm2] - 111 11 000 010 0 nnnn tttt 000000iimmmm
- ArmRegister Rn(instr, 16);
- ArmRegister Rt(instr, 12);
ArmRegister Rm(instr, 0);
uint32_t imm2 = (instr >> 4) & 3;
opcode << "str.w";
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index fc1065a..fe50421 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -18,7 +18,7 @@
#include <inttypes.h>
-#include <iostream>
+#include <ostream>
#include "base/logging.h"
#include "base/stringprintf.h"
@@ -27,10 +27,88 @@
namespace art {
namespace arm64 {
+void CustomDisassembler::AppendRegisterNameToOutput(
+ const vixl::Instruction* instr,
+ const vixl::CPURegister& reg) {
+ USE(instr);
+ if (reg.IsRegister()) {
+ // This enumeration should mirror the declarations in
+ // runtime/arch/arm64/registers_arm64.h. We do not include that file to
+ // avoid a dependency on libart.
+ enum {
+ TR = 18,
+ ETR = 21,
+ IP0 = 16,
+ IP1 = 17,
+ FP = 29,
+ LR = 30
+ };
+ switch (reg.code()) {
+ case IP0: AppendToOutput(reg.Is64Bits() ? "ip0" : "wip0"); return;
+ case IP1: AppendToOutput(reg.Is64Bits() ? "ip1" : "wip1"); return;
+ case TR: AppendToOutput(reg.Is64Bits() ? "tr" : "w18"); return;
+ case ETR: AppendToOutput(reg.Is64Bits() ? "etr" : "w21"); return;
+ case FP: AppendToOutput(reg.Is64Bits() ? "fp" : "w29"); return;
+ case LR: AppendToOutput(reg.Is64Bits() ? "lr" : "w30"); return;
+ default:
+ // Fall through.
+ break;
+ }
+ }
+ // Print other register names as usual.
+ Disassembler::AppendRegisterNameToOutput(instr, reg);
+}
+
+void CustomDisassembler::VisitLoadLiteral(const vixl::Instruction* instr) {
+ Disassembler::VisitLoadLiteral(instr);
+
+ if (!read_literals_) {
+ return;
+ }
+
+ char* buffer = buffer_;
+ char* buffer_end = buffer_ + buffer_size_;
+
+ // Find the end position in the buffer.
+ while ((*buffer != 0) && (buffer < buffer_end)) {
+ ++buffer;
+ }
+
+ void* data_address = instr->LiteralAddress();
+ ptrdiff_t buf_size_remaining = buffer_end - buffer;
+ vixl::Instr op = instr->Mask(vixl::LoadLiteralMask);
+
+ switch (op) {
+ case vixl::LDR_w_lit:
+ case vixl::LDR_x_lit:
+ case vixl::LDRSW_x_lit: {
+ int64_t data = op == vixl::LDR_x_lit ? *reinterpret_cast<int64_t*>(data_address)
+ : *reinterpret_cast<int32_t*>(data_address);
+ snprintf(buffer, buf_size_remaining, " (0x%" PRIx64 " / %" PRId64 ")", data, data);
+ break;
+ }
+ case vixl::LDR_s_lit:
+ case vixl::LDR_d_lit: {
+ double data = (op == vixl::LDR_s_lit) ? *reinterpret_cast<float*>(data_address)
+ : *reinterpret_cast<double*>(data_address);
+ snprintf(buffer, buf_size_remaining, " (%g)", data);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
const vixl::Instruction* instr = reinterpret_cast<const vixl::Instruction*>(begin);
decoder.Decode(instr);
- os << FormatInstructionPointer(begin)
+ // TODO: Use FormatInstructionPointer() once VIXL provides the appropriate
+ // features.
+ // VIXL does not yet allow remapping the addresses it disassembles. Using
+ // FormatInstructionPointer() would show inconsistencies between the instruction
+ // location addresses and the target addresses disassembled by VIXL (e.g. for
+ // branch instructions).
+ os << StringPrintf("%p", instr)
<< StringPrintf(": %08x\t%s\n", instr->InstructionBits(), disasm.GetOutput());
return vixl::kInstructionSize;
}
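
For context, a minimal usage sketch of the new literal-reading option as threaded through DisassemblerOptions in this patch (the wrapper function and variable names are hypothetical; it assumes ART's disassembler headers are available):

    // Sketch: dump ARM64 code with literal-load annotations enabled.
    #include <memory>
    #include <ostream>
    #include "disassembler.h"

    void DumpArm64Code(std::ostream& os, const uint8_t* begin, const uint8_t* end) {
      std::unique_ptr<art::Disassembler> disasm(art::Disassembler::Create(
          art::kArm64,
          new art::DisassemblerOptions(/* absolute_addresses */ false,
                                       begin,
                                       /* can_read_literals */ true)));
      // With can_read_literals == true, VisitLoadLiteral above appends the pooled
      // value, e.g. "(3.40282e+38)", to each literal load.
      disasm->Dump(os, begin, end);
    }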
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index ad20c70..a370b8d 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -19,15 +19,44 @@
#include "disassembler.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"
+#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
+class CustomDisassembler FINAL : public vixl::Disassembler {
+ public:
+ explicit CustomDisassembler(bool read_literals) :
+ vixl::Disassembler(), read_literals_(read_literals) {}
+
+ // Use register aliases in the disassembly.
+ virtual void AppendRegisterNameToOutput(const vixl::Instruction* instr,
+ const vixl::CPURegister& reg) OVERRIDE;
+
+ // Improve the disassembly of literal load instructions.
+ virtual void VisitLoadLiteral(const vixl::Instruction* instr) OVERRIDE;
+
+ private:
+ // Indicates whether the disassembler should read data loaded from literal pools.
+ // This should only be enabled if reading the target of literal loads is safe.
+ // Example outputs with the option off and on:
+ // read_literals_ | disassembly
+ // false | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0)
+ // true | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0) (3.40282e+38)
+ const bool read_literals_;
+};
+
class DisassemblerArm64 FINAL : public Disassembler {
public:
- explicit DisassemblerArm64(DisassemblerOptions* options) : Disassembler(options) {
+ // TODO: Update this code once VIXL provides the ability to map the addresses of
+ // the code being disassembled to different addresses (the way
+ // FormatInstructionPointer() does).
+ explicit DisassemblerArm64(DisassemblerOptions* options) :
+ Disassembler(options), disasm(options->can_read_literals_) {
decoder.AppendVisitor(&disasm);
}
@@ -36,7 +65,7 @@
private:
vixl::Decoder decoder;
- vixl::Disassembler disasm;
+ CustomDisassembler disasm;
DISALLOW_COPY_AND_ASSIGN(DisassemblerArm64);
};
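
For illustration (hypothetical instruction operands), the AppendRegisterNameToOutput override above makes ART's reserved ARM64 registers read as their runtime aliases in the output, while all other registers keep VIXL's default names:

    mov x0, x18         ->   mov x0, tr
    mov x16, x21        ->   mov ip0, etr
    ldr x30, [x29, #8]  ->   ldr lr, [fp, #8]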
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index bd5fac7..97c06f1 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -16,7 +16,8 @@
#include "disassembler_mips.h"
-#include <iostream>
+#include <ostream>
+#include <sstream>
#include "base/logging.h"
#include "base/stringprintf.h"
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 63a74c7..b3af8a6 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -16,12 +16,14 @@
#include "disassembler_x86.h"
-#include <iostream>
+#include <inttypes.h>
+
+#include <ostream>
+#include <sstream>
#include "base/logging.h"
#include "base/stringprintf.h"
#include "thread.h"
-#include <inttypes.h>
namespace art {
namespace x86 {
@@ -157,7 +159,6 @@
const uint8_t* begin_instr = instr;
bool have_prefixes = true;
uint8_t prefix[4] = {0, 0, 0, 0};
- const char** modrm_opcodes = NULL;
do {
switch (*instr) {
// Group 1 - lock and repeat prefixes:
@@ -195,6 +196,7 @@
if (rex != 0) {
instr++;
}
+ const char** modrm_opcodes = nullptr;
bool has_modrm = false;
bool reg_is_opcode = false;
size_t immediate_bytes = 0;
@@ -203,7 +205,9 @@
bool store = false; // stores to memory (ie rm is on the left)
bool load = false; // loads from memory (ie rm is on the right)
bool byte_operand = false; // true when the opcode is dealing with byte operands
- bool byte_second_operand = false; // true when the source operand is a byte register but the target register isn't (ie movsxb/movzxb).
+ // true when the source operand is a byte register but the target register isn't
+ // (ie movsxb/movzxb).
+ bool byte_second_operand = false;
bool target_specific = false; // register name depends on target (64 vs 32 bits).
bool ax = false; // implicit use of ax
bool cx = false; // implicit use of cx
@@ -506,7 +510,7 @@
case 0x5D: opcode << "min"; break;
case 0x5E: opcode << "div"; break;
case 0x5F: opcode << "max"; break;
- default: LOG(FATAL) << "Unreachable";
+ default: LOG(FATAL) << "Unreachable"; UNREACHABLE();
}
if (prefix[2] == 0x66) {
opcode << "pd";
@@ -627,7 +631,9 @@
} else {
dst_reg_file = MMX;
}
- static const char* x71_opcodes[] = {"unknown-71", "unknown-71", "psrlw", "unknown-71", "psraw", "unknown-71", "psllw", "unknown-71"};
+ static const char* x71_opcodes[] = {
+ "unknown-71", "unknown-71", "psrlw", "unknown-71",
+ "psraw", "unknown-71", "psllw", "unknown-71"};
modrm_opcodes = x71_opcodes;
reg_is_opcode = true;
has_modrm = true;
@@ -641,7 +647,9 @@
} else {
dst_reg_file = MMX;
}
- static const char* x72_opcodes[] = {"unknown-72", "unknown-72", "psrld", "unknown-72", "psrad", "unknown-72", "pslld", "unknown-72"};
+ static const char* x72_opcodes[] = {
+ "unknown-72", "unknown-72", "psrld", "unknown-72",
+ "psrad", "unknown-72", "pslld", "unknown-72"};
modrm_opcodes = x72_opcodes;
reg_is_opcode = true;
has_modrm = true;
@@ -655,7 +663,9 @@
} else {
dst_reg_file = MMX;
}
- static const char* x73_opcodes[] = {"unknown-73", "unknown-73", "psrlq", "psrldq", "unknown-73", "unknown-73", "psllq", "unknown-73"};
+ static const char* x73_opcodes[] = {
+ "unknown-73", "unknown-73", "psrlq", "psrldq",
+ "unknown-73", "unknown-73", "psllq", "unknown-73"};
modrm_opcodes = x73_opcodes;
reg_is_opcode = true;
has_modrm = true;
@@ -696,7 +706,7 @@
case 0x90: case 0x91: case 0x92: case 0x93: case 0x94: case 0x95: case 0x96: case 0x97:
case 0x98: case 0x99: case 0x9A: case 0x9B: case 0x9C: case 0x9D: case 0x9E: case 0x9F:
opcode << "set" << condition_codes[*instr & 0xF];
- modrm_opcodes = NULL;
+ modrm_opcodes = nullptr;
reg_is_opcode = true;
has_modrm = true;
store = true;
@@ -728,7 +738,9 @@
case 0xAE:
if (prefix[0] == 0xF3) {
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
- static const char* xAE_opcodes[] = {"rdfsbase", "rdgsbase", "wrfsbase", "wrgsbase", "unknown-AE", "unknown-AE", "unknown-AE", "unknown-AE"};
+ static const char* xAE_opcodes[] = {
+ "rdfsbase", "rdgsbase", "wrfsbase", "wrgsbase",
+ "unknown-AE", "unknown-AE", "unknown-AE", "unknown-AE"};
modrm_opcodes = xAE_opcodes;
reg_is_opcode = true;
has_modrm = true;
@@ -755,7 +767,9 @@
break;
}
} else {
- static const char* xAE_opcodes[] = {"unknown-AE", "unknown-AE", "unknown-AE", "unknown-AE", "unknown-AE", "lfence", "mfence", "sfence"};
+ static const char* xAE_opcodes[] = {
+ "unknown-AE", "unknown-AE", "unknown-AE", "unknown-AE",
+ "unknown-AE", "lfence", "mfence", "sfence"};
modrm_opcodes = xAE_opcodes;
reg_is_opcode = true;
has_modrm = true;
@@ -763,13 +777,44 @@
no_ops = true;
}
break;
- case 0xAF: opcode << "imul"; has_modrm = true; load = true; break;
- case 0xB1: opcode << "cmpxchg"; has_modrm = true; store = true; break;
- case 0xB6: opcode << "movzxb"; has_modrm = true; load = true; byte_second_operand = true; break;
- case 0xB7: opcode << "movzxw"; has_modrm = true; load = true; break;
- case 0xBE: opcode << "movsxb"; has_modrm = true; load = true; byte_second_operand = true; rex |= (rex == 0 ? 0 : REX_W); break;
- case 0xBF: opcode << "movsxw"; has_modrm = true; load = true; break;
- case 0xC3: opcode << "movnti"; store = true; has_modrm = true; break;
+ case 0xAF:
+ opcode << "imul";
+ has_modrm = true;
+ load = true;
+ break;
+ case 0xB1:
+ opcode << "cmpxchg";
+ has_modrm = true;
+ store = true;
+ break;
+ case 0xB6:
+ opcode << "movzxb";
+ has_modrm = true;
+ load = true;
+ byte_second_operand = true;
+ break;
+ case 0xB7:
+ opcode << "movzxw";
+ has_modrm = true;
+ load = true;
+ break;
+ case 0xBE:
+ opcode << "movsxb";
+ has_modrm = true;
+ load = true;
+ byte_second_operand = true;
+ rex |= (rex == 0 ? 0 : REX_W);
+ break;
+ case 0xBF:
+ opcode << "movsxw";
+ has_modrm = true;
+ load = true;
+ break;
+ case 0xC3:
+ opcode << "movnti";
+ store = true;
+ has_modrm = true;
+ break;
case 0xC5:
if (prefix[2] == 0x66) {
opcode << "pextrw";
@@ -795,7 +840,9 @@
immediate_bytes = 1;
break;
case 0xC7:
- static const char* x0FxC7_opcodes[] = { "unknown-0f-c7", "cmpxchg8b", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7" };
+ static const char* x0FxC7_opcodes[] = {
+ "unknown-0f-c7", "cmpxchg8b", "unknown-0f-c7", "unknown-0f-c7",
+ "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7"};
modrm_opcodes = x0FxC7_opcodes;
has_modrm = true;
reg_is_opcode = true;
@@ -971,7 +1018,9 @@
break;
case 0xC3: opcode << "ret"; break;
case 0xC6:
- static const char* c6_opcodes[] = {"mov", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6"};
+ static const char* c6_opcodes[] = {"mov", "unknown-c6", "unknown-c6",
+ "unknown-c6", "unknown-c6", "unknown-c6",
+ "unknown-c6", "unknown-c6"};
modrm_opcodes = c6_opcodes;
store = true;
immediate_bytes = 1;
@@ -980,7 +1029,9 @@
byte_operand = true;
break;
case 0xC7:
- static const char* c7_opcodes[] = {"mov", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7"};
+ static const char* c7_opcodes[] = {"mov", "unknown-c7", "unknown-c7",
+ "unknown-c7", "unknown-c7", "unknown-c7",
+ "unknown-c7", "unknown-c7"};
modrm_opcodes = c7_opcodes;
store = true;
immediate_bytes = 4;
@@ -1010,21 +1061,27 @@
}
break;
case 0xDB:
- static const char* db_opcodes[] = {"fildl", "unknown-db", "unknown-db", "unknown-db", "unknown-db", "unknown-db", "unknown-db", "unknown-db"};
+ static const char* db_opcodes[] = {"fildl", "unknown-db", "unknown-db",
+ "unknown-db", "unknown-db", "unknown-db",
+ "unknown-db", "unknown-db"};
modrm_opcodes = db_opcodes;
load = true;
has_modrm = true;
reg_is_opcode = true;
break;
case 0xDD:
- static const char* dd_opcodes[] = {"fldl", "fisttp", "fstl", "fstpl", "frstor", "unknown-dd", "fnsave", "fnstsw"};
+ static const char* dd_opcodes[] = {"fldl", "fisttp", "fstl",
+ "fstpl", "frstor", "unknown-dd",
+ "fnsave", "fnstsw"};
modrm_opcodes = dd_opcodes;
store = true;
has_modrm = true;
reg_is_opcode = true;
break;
case 0xDF:
- static const char* df_opcodes[] = {"fild", "unknown-df", "unknown-df", "unknown-df", "unknown-df", "fildll", "unknown-df", "unknown-df"};
+ static const char* df_opcodes[] = {"fild", "unknown-df", "unknown-df",
+ "unknown-df", "unknown-df", "fildll",
+ "unknown-df", "unknown-df"};
modrm_opcodes = df_opcodes;
load = true;
has_modrm = true;
@@ -1036,7 +1093,10 @@
case 0xEB: opcode << "jmp"; branch_bytes = 1; break;
case 0xF5: opcode << "cmc"; break;
case 0xF6: case 0xF7:
- static const char* f7_opcodes[] = {"test", "unknown-f7", "not", "neg", "mul edx:eax, eax *", "imul edx:eax, eax *", "div edx:eax, edx:eax /", "idiv edx:eax, edx:eax /"};
+ static const char* f7_opcodes[] = {
+ "test", "unknown-f7", "not", "neg", "mul edx:eax, eax *",
+ "imul edx:eax, eax *", "div edx:eax, edx:eax /",
+ "idiv edx:eax, edx:eax /"};
modrm_opcodes = f7_opcodes;
has_modrm = true;
reg_is_opcode = true;
@@ -1045,7 +1105,9 @@
break;
case 0xFF:
{
- static const char* ff_opcodes[] = {"inc", "dec", "call", "call", "jmp", "jmp", "push", "unknown-ff"};
+ static const char* ff_opcodes[] = {
+ "inc", "dec", "call", "call",
+ "jmp", "jmp", "push", "unknown-ff"};
modrm_opcodes = ff_opcodes;
has_modrm = true;
reg_is_opcode = true;
@@ -1146,7 +1208,7 @@
}
}
- if (reg_is_opcode && modrm_opcodes != NULL) {
+ if (reg_is_opcode && modrm_opcodes != nullptr) {
opcode << modrm_opcodes[reg_or_opcode];
}
@@ -1237,7 +1299,7 @@
case 0xF2: prefixed_opcode << "repne "; break;
case 0xF3: prefixed_opcode << "repe "; break;
case 0: break;
- default: LOG(FATAL) << "Unreachable";
+ default: LOG(FATAL) << "Unreachable"; UNREACHABLE();
}
prefixed_opcode << opcode.str();
os << FormatInstructionPointer(begin_instr)
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index 25d10bd..b8a3b49 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -25,14 +25,14 @@
$(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libart-disassembler libart-compiler,art/disassembler art/compiler,target,ndebug))
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler libart-compiler,art/disassembler art/compiler,target,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler libartd-compiler,art/disassembler art/compiler,target,debug))
endif
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler libart-compiler,art/disassembler art/compiler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler libart-compiler,art/disassembler art/compiler,host,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler libartd-compiler,art/disassembler art/compiler,host,debug))
endif
########################################################################
@@ -53,14 +53,14 @@
.PHONY: dump-oat-core-host
ifeq ($(ART_BUILD_HOST),true)
-dump-oat-core-host: $(HOST_CORE_IMG_OUT) $(OATDUMP)
+dump-oat-core-host: $(HOST_CORE_IMG_OUTS) $(OATDUMP)
$(OATDUMP) --image=$(HOST_CORE_IMG_LOCATION) --output=$(ART_DUMP_OAT_PATH)/core.host.oatdump.txt
@echo Output in $(ART_DUMP_OAT_PATH)/core.host.oatdump.txt
endif
.PHONY: dump-oat-core-target
ifeq ($(ART_BUILD_TARGET),true)
-dump-oat-core-target: $(TARGET_CORE_IMG_OUT) $(OATDUMP)
+dump-oat-core-target: $(TARGET_CORE_IMAGE_default_no-pic_32) $(OATDUMP)
$(OATDUMP) --image=$(TARGET_CORE_IMG_LOCATION) \
--output=$(ART_DUMP_OAT_PATH)/core.target.oatdump.txt --instruction-set=$(TARGET_ARCH)
@echo Output in $(ART_DUMP_OAT_PATH)/core.target.oatdump.txt
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 14accac..08352de 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -23,6 +23,7 @@
#include <unordered_map>
#include <vector>
+#include "arch/instruction_set_features.h"
#include "base/stringpiece.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
@@ -107,11 +108,16 @@
" --no-disassemble may be used to disable disassembly.\n"
" Example: --no-disassemble\n"
"\n");
+ fprintf(stderr,
+ " --method-filter=<method name>: only dumps methods that contain the filter.\n"
+ " Example: --method-filter=foo\n"
+ "\n");
}
const char* image_roots_descriptions_[] = {
"kResolutionMethod",
"kImtConflictMethod",
+ "kImtUnimplementedMethod",
"kDefaultImt",
"kCalleeSaveMethod",
"kRefsOnlySaveMethod",
@@ -176,8 +182,8 @@
bool result = builder_->Write();
- elf_output_->Flush();
- elf_output_->Close();
+ // Ignore I/O errors.
+ UNUSED(elf_output_->FlushClose());
return result;
}
@@ -262,10 +268,13 @@
method_access_flags);
}
- void RegisterForDedup(const DexFile::ClassDef& class_def, uint32_t class_method_index,
- const OatFile::OatMethod& oat_method, const DexFile& dex_file,
- uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ void RegisterForDedup(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED,
+ uint32_t class_method_index ATTRIBUTE_UNUSED,
+ const OatFile::OatMethod& oat_method,
+ const DexFile& dex_file ATTRIBUTE_UNUSED,
+ uint32_t dex_method_idx ATTRIBUTE_UNUSED,
+ const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t method_access_flags ATTRIBUTE_UNUSED) {
state_[oat_method.GetCodeOffset()]++;
}
@@ -293,10 +302,13 @@
return DedupState::kDeduplicatedFirst;
}
- void AddSymbol(const DexFile::ClassDef& class_def, uint32_t class_method_index,
- const OatFile::OatMethod& oat_method, const DexFile& dex_file,
- uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ void AddSymbol(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED,
+ uint32_t class_method_index ATTRIBUTE_UNUSED,
+ const OatFile::OatMethod& oat_method,
+ const DexFile& dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t method_access_flags ATTRIBUTE_UNUSED) {
DedupState dedup = IsDuplicated(oat_method.GetCodeOffset());
if (dedup != DedupState::kDeduplicatedOther) {
std::string pretty_name = PrettyMethod(dex_method_idx, dex_file, true);
@@ -315,7 +327,7 @@
}
// Set oat data offset. Required by ElfBuilder/CodeOutput.
- void SetCodeOffset(size_t offset) {
+ void SetCodeOffset(size_t offset ATTRIBUTE_UNUSED) {
// Nothing to do.
}
@@ -349,12 +361,14 @@
bool dump_vmap,
bool disassemble_code,
bool absolute_addresses,
+ const char* method_filter,
Handle<mirror::ClassLoader>* class_loader)
: dump_raw_mapping_table_(dump_raw_mapping_table),
dump_raw_gc_map_(dump_raw_gc_map),
dump_vmap_(dump_vmap),
disassemble_code_(disassemble_code),
absolute_addresses_(absolute_addresses),
+ method_filter_(method_filter),
class_loader_(class_loader) {}
const bool dump_raw_mapping_table_;
@@ -362,6 +376,7 @@
const bool dump_vmap_;
const bool disassemble_code_;
const bool absolute_addresses_;
+ const char* const method_filter_;
Handle<mirror::ClassLoader>* class_loader_;
};
@@ -371,9 +386,11 @@
: oat_file_(oat_file),
oat_dex_files_(oat_file.GetOatDexFiles()),
options_(options),
- disassembler_(Disassembler::Create(oat_file_.GetOatHeader().GetInstructionSet(),
+ instruction_set_(oat_file_.GetOatHeader().GetInstructionSet()),
+ disassembler_(Disassembler::Create(instruction_set_,
new DisassemblerOptions(options_->absolute_addresses_,
- oat_file.Begin()))) {
+ oat_file.Begin(),
+ true /* can_read_literals_ */))) {
CHECK(options_->class_loader_ != nullptr);
AddAllOffsets();
}
@@ -383,6 +400,10 @@
delete disassembler_;
}
+ InstructionSet GetInstructionSet() {
+ return instruction_set_;
+ }
+
bool Dump(std::ostream& os) {
bool success = true;
const OatHeader& oat_header = oat_file_.GetOatHeader();
@@ -499,7 +520,7 @@
return end_offset - begin_offset;
}
- InstructionSet GetInstructionSet() {
+ InstructionSet GetOatInstructionSet() {
return oat_file_.GetOatHeader().GetInstructionSet();
}
@@ -513,8 +534,9 @@
LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation()
<< "': " << error_msg;
} else {
+ const char* descriptor = m->GetDeclaringClassDescriptor();
const DexFile::ClassDef* class_def =
- dex_file->FindClassDef(m->GetDeclaringClassDescriptor());
+ dex_file->FindClassDef(descriptor, ComputeModifiedUtf8Hash(descriptor));
if (class_def != nullptr) {
uint16_t class_def_index = dex_file->GetIndexForClassDef(*class_def);
const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
@@ -678,8 +700,13 @@
uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
uint32_t method_access_flags) {
bool success = true;
+ std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
+ if (pretty_method.find(options_->method_filter_) == std::string::npos) {
+ return success;
+ }
+
os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
- class_method_index, PrettyMethod(dex_method_idx, dex_file, true).c_str(),
+ class_method_index, pretty_method.c_str(),
dex_method_idx);
Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::unique_ptr<std::ostream> indent1_os(new std::ostream(&indent1_filter));
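
A condensed sketch of the filter semantics introduced above (the default method_filter_ of "" matches every method, so output is unchanged unless --method-filter is passed); the helper name is hypothetical:

    // Equivalent to the early-return check added above: a plain substring match on
    // the pretty method name.
    static bool MethodMatchesFilter(const std::string& pretty_method, const char* filter) {
      return pretty_method.find(filter) != std::string::npos;
    }
    // e.g. --method-filter=toString keeps "java.lang.String java.lang.Object.toString()".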
@@ -1238,6 +1265,7 @@
const OatFile& oat_file_;
const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
const OatDumperOptions* options_;
+ InstructionSet instruction_set_;
std::set<uintptr_t> offsets_;
Disassembler* disassembler_;
};
@@ -1272,6 +1300,8 @@
os << "PATCH DELTA:" << image_header_.GetPatchDelta() << "\n\n";
+ os << "COMPILE PIC: " << (image_header_.CompilePic() ? "yes" : "no") << "\n\n";
+
{
os << "ROOTS: " << reinterpret_cast<void*>(image_header_.GetImageRoots()) << "\n";
Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
@@ -1287,26 +1317,26 @@
std::ostream indent2_os(&indent2_filter);
mirror::ObjectArray<mirror::Object>* image_root_object_array
= image_root_object->AsObjectArray<mirror::Object>();
- for (int i = 0; i < image_root_object_array->GetLength(); i++) {
- mirror::Object* value = image_root_object_array->Get(i);
+ for (int j = 0; j < image_root_object_array->GetLength(); j++) {
+ mirror::Object* value = image_root_object_array->Get(j);
size_t run = 0;
- for (int32_t j = i + 1; j < image_root_object_array->GetLength(); j++) {
- if (value == image_root_object_array->Get(j)) {
+ for (int32_t k = j + 1; k < image_root_object_array->GetLength(); k++) {
+ if (value == image_root_object_array->Get(k)) {
run++;
} else {
break;
}
}
if (run == 0) {
- indent2_os << StringPrintf("%d: ", i);
+ indent2_os << StringPrintf("%d: ", j);
} else {
- indent2_os << StringPrintf("%d to %zd: ", i, i + run);
- i = i + run;
+ indent2_os << StringPrintf("%d to %zd: ", j, j + run);
+ j = j + run;
}
if (value != nullptr) {
PrettyObjectValue(indent2_os, value->GetClass(), value);
} else {
- indent2_os << i << ": null\n";
+ indent2_os << j << ": null\n";
}
}
}
@@ -1322,7 +1352,7 @@
std::string error_msg;
const OatFile* oat_file = class_linker->FindOpenedOatFileFromOatLocation(oat_location);
if (oat_file == nullptr) {
- oat_file = OatFile::Open(oat_location, oat_location, nullptr, false, &error_msg);
+ oat_file = OatFile::Open(oat_location, oat_location, nullptr, nullptr, false, &error_msg);
if (oat_file == nullptr) {
os << "NOT FOUND: " << error_msg << "\n";
return false;
@@ -1497,7 +1527,8 @@
const void* GetQuickOatCodeBegin(mirror::ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const void* quick_code = m->GetEntryPointFromQuickCompiledCode();
+ const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet()));
if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
quick_code = oat_dumper_->GetQuickOatCode(m);
}
@@ -1598,11 +1629,14 @@
}
}
} else if (obj->IsArtMethod()) {
+ const size_t image_pointer_size = InstructionSetPointerSize(
+ state->oat_dumper_->GetOatInstructionSet());
mirror::ArtMethod* method = obj->AsArtMethod();
if (method->IsNative()) {
// TODO: portable dumping.
- DCHECK(method->GetNativeGcMap() == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable() == nullptr) << PrettyMethod(method);
+ DCHECK(method->GetNativeGcMapPtrSize(image_pointer_size) == nullptr)
+ << PrettyMethod(method);
+ DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
bool first_occurrence;
const void* quick_oat_code = state->GetQuickOatCodeBegin(method);
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
@@ -1610,33 +1644,36 @@
if (first_occurrence) {
state->stats_.native_to_managed_code_bytes += quick_oat_code_size;
}
- if (quick_oat_code != method->GetEntryPointFromQuickCompiledCode()) {
+ if (quick_oat_code != method->GetEntryPointFromQuickCompiledCodePtrSize(
+ image_pointer_size)) {
indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code);
}
} else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
method->IsResolutionMethod() || method->IsImtConflictMethod() ||
- method->IsClassInitializer()) {
- DCHECK(method->GetNativeGcMap() == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable() == nullptr) << PrettyMethod(method);
+ method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
+ DCHECK(method->GetNativeGcMapPtrSize(image_pointer_size) == nullptr)
+ << PrettyMethod(method);
+ DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
state->stats_.dex_instruction_bytes += dex_instruction_bytes;
bool first_occurrence;
- size_t gc_map_bytes = state->ComputeOatSize(method->GetNativeGcMap(), &first_occurrence);
+ size_t gc_map_bytes = state->ComputeOatSize(
+ method->GetNativeGcMapPtrSize(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.gc_map_bytes += gc_map_bytes;
}
size_t pc_mapping_table_bytes =
- state->ComputeOatSize(method->GetMappingTable(), &first_occurrence);
+ state->ComputeOatSize(method->GetMappingTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes =
- state->ComputeOatSize(method->GetVmapTable(), &first_occurrence);
+ state->ComputeOatSize(method->GetVmapTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
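
The switch to the PtrSize accessors matters because oatdump can run with a pointer size different from the image it inspects (for example a 64-bit host tool over a 32-bit target image), so ArtMethod's pointer-sized fields must be read with the image's pointer width. A hedged sketch of the pattern used above, assuming the surrounding oat_dumper_ and method variables:

    const size_t image_pointer_size =
        InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet());
    const void* quick_code =
        method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size);
    const auto* native_gc_map = method->GetNativeGcMapPtrSize(image_pointer_size);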
@@ -1738,20 +1775,20 @@
dex_instruction_bytes(0) {}
struct SizeAndCount {
- SizeAndCount(size_t bytes, size_t count) : bytes(bytes), count(count) {}
+ SizeAndCount(size_t bytes_in, size_t count_in) : bytes(bytes_in), count(count_in) {}
size_t bytes;
size_t count;
};
typedef SafeMap<std::string, SizeAndCount> SizeAndCountTable;
SizeAndCountTable sizes_and_counts;
- void Update(const char* descriptor, size_t object_bytes) {
+ void Update(const char* descriptor, size_t object_bytes_in) {
SizeAndCountTable::iterator it = sizes_and_counts.find(descriptor);
if (it != sizes_and_counts.end()) {
- it->second.bytes += object_bytes;
+ it->second.bytes += object_bytes_in;
it->second.count += 1;
} else {
- sizes_and_counts.Put(descriptor, SizeAndCount(object_bytes, 1));
+ sizes_and_counts.Put(descriptor, SizeAndCount(object_bytes_in, 1));
}
}
@@ -2086,7 +2123,7 @@
static int DumpOat(Runtime* runtime, const char* oat_filename, OatDumperOptions* options,
std::ostream* os) {
std::string error_msg;
- OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, false, &error_msg);
+ OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false, &error_msg);
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
@@ -2101,7 +2138,7 @@
static int SymbolizeOat(const char* oat_filename, std::string& output_name) {
std::string error_msg;
- OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, false, &error_msg);
+ OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false, &error_msg);
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
@@ -2169,6 +2206,8 @@
} else if (option.starts_with("--symbolize=")) {
oat_filename_ = option.substr(strlen("--symbolize=")).data();
symbolize_ = true;
+ } else if (option.starts_with("--method-filter=")) {
+ method_filter_ = option.substr(strlen("--method-filter=")).data();
} else {
fprintf(stderr, "Unknown argument %s\n", option.data());
usage();
@@ -2190,6 +2229,7 @@
}
const char* oat_filename_ = nullptr;
+ const char* method_filter_ = "";
const char* image_location_ = nullptr;
const char* boot_image_location_ = nullptr;
InstructionSet instruction_set_ = kRuntimeISA;
@@ -2221,6 +2261,7 @@
args.dump_vmap_,
args.disassemble_code_,
absolute_addresses,
+ args.method_filter_,
nullptr));
std::unique_ptr<Runtime> runtime;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index c0c96e5..b15c712 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -24,6 +24,7 @@
#include <string>
#include <vector>
+#include "base/dumpable.h"
#include "base/scoped_flock.h"
#include "base/stringpiece.h"
#include "base/stringprintf.h"
@@ -33,12 +34,8 @@
#include "elf_file_impl.h"
#include "gc/space/image_space.h"
#include "image.h"
-#include "instruction_set.h"
-#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/reference.h"
#include "noop_compiler_callbacks.h"
@@ -73,7 +70,7 @@
bool has_system = false;
bool has_cache = false;
// image_location = /system/framework/boot.art
- // system_image_location = /system/framework/<image_isa>/boot.art
+ // system_image_filename = /system/framework/<image_isa>/boot.art
std::string system_filename(GetSystemImageFilename(location.c_str(), isa));
if (OS::FileExists(system_filename.c_str())) {
has_system = true;
@@ -132,6 +129,7 @@
<< " for location " << image_location;
return false;
}
+
int64_t image_len = input_image->GetLength();
if (image_len < 0) {
LOG(ERROR) << "Error while getting image length";
@@ -144,6 +142,10 @@
return false;
}
+ /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
+ // Nothing special to do right now since the image always needs to get patched.
+ // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
+
// Set up the runtime
RuntimeOptions options;
NoopCompilerCallbacks callbacks;
@@ -173,7 +175,7 @@
}
gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
- PatchOat p(image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
delta, timings);
t.NewTiming("Patching files");
if (!p.PatchImage()) {
@@ -188,9 +190,11 @@
return true;
}
-bool PatchOat::Patch(const File* input_oat, const std::string& image_location, off_t delta,
+bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t delta,
File* output_oat, File* output_image, InstructionSet isa,
- TimingLogger* timings) {
+ TimingLogger* timings,
+ bool output_oat_opened_from_fd,
+ bool new_oat_out) {
CHECK(Runtime::Current() == nullptr);
CHECK(output_image != nullptr);
CHECK_GE(output_image->Fd(), 0);
@@ -233,6 +237,10 @@
LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
}
+ /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
+ // Nothing special to do right now since the image always needs to get patched.
+ // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
+
// Set up the runtime
RuntimeOptions options;
NoopCompilerCallbacks callbacks;
@@ -262,17 +270,37 @@
}
gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
- std::unique_ptr<ElfFile> elf(ElfFile::Open(const_cast<File*>(input_oat),
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat,
PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
if (elf.get() == nullptr) {
LOG(ERROR) << "unable to open oat file " << input_oat->GetPath() << " : " << error_msg;
return false;
}
- PatchOat p(elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ bool skip_patching_oat = false;
+ MaybePic is_oat_pic = IsOatPic(elf.get());
+ if (is_oat_pic >= ERROR_FIRST) {
+ // Error logged by IsOatPic
+ return false;
+ } else if (is_oat_pic == PIC) {
+ // Do not need to do ELF-file patching. Create a symlink and skip the ELF patching.
+ if (!ReplaceOatFileWithSymlink(input_oat->GetPath(),
+ output_oat->GetPath(),
+ output_oat_opened_from_fd,
+ new_oat_out)) {
+ // Errors already logged by above call.
+ return false;
+ }
+ // Don't patch the OAT, since we just symlinked it. Image still needs patching.
+ skip_patching_oat = true;
+ } else {
+ CHECK(is_oat_pic == NOT_PIC);
+ }
+
+ PatchOat p(isa, elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
delta, timings);
t.NewTiming("Patching files");
- if (!p.PatchElf()) {
+ if (!skip_patching_oat && !p.PatchElf()) {
LOG(ERROR) << "Failed to patch oat file " << input_oat->GetPath();
return false;
}
@@ -282,10 +310,12 @@
}
t.NewTiming("Writing files");
- if (!p.WriteElf(output_oat)) {
+ if (!skip_patching_oat && !p.WriteElf(output_oat)) {
+ LOG(ERROR) << "Failed to write oat file " << input_oat->GetPath();
return false;
}
if (!p.WriteImage(output_image)) {
+ LOG(ERROR) << "Failed to write image file " << input_image->GetPath();
return false;
}
return true;
@@ -325,6 +355,83 @@
}
}
+bool PatchOat::IsImagePic(const ImageHeader& image_header, const std::string& image_path) {
+ if (!image_header.CompilePic()) {
+ if (kIsDebugBuild) {
+ LOG(INFO) << "image at location " << image_path << " was *not* compiled pic";
+ }
+ return false;
+ }
+
+ if (kIsDebugBuild) {
+ LOG(INFO) << "image at location " << image_path << " was compiled PIC";
+ }
+
+ return true;
+}
+
+PatchOat::MaybePic PatchOat::IsOatPic(const ElfFile* oat_in) {
+ if (oat_in == nullptr) {
+ LOG(ERROR) << "No ELF input oat fie available";
+ return ERROR_OAT_FILE;
+ }
+
+ const std::string& file_path = oat_in->GetFile().GetPath();
+
+ const OatHeader* oat_header = GetOatHeader(oat_in);
+ if (oat_header == nullptr) {
+ LOG(ERROR) << "Failed to find oat header in oat file " << file_path;
+ return ERROR_OAT_FILE;
+ }
+
+ if (!oat_header->IsValid()) {
+ LOG(ERROR) << "Elf file " << file_path << " has an invalid oat header";
+ return ERROR_OAT_FILE;
+ }
+
+ bool is_pic = oat_header->IsPic();
+ if (kIsDebugBuild) {
+ LOG(INFO) << "Oat file at " << file_path << " is " << (is_pic ? "PIC" : "not pic");
+ }
+
+ return is_pic ? PIC : NOT_PIC;
+}
+
+bool PatchOat::ReplaceOatFileWithSymlink(const std::string& input_oat_filename,
+ const std::string& output_oat_filename,
+ bool output_oat_opened_from_fd,
+ bool new_oat_out) {
+ // Need a file when we are PIC, since we symlink over it. Refusing to symlink into FD.
+ if (output_oat_opened_from_fd) {
+ // TODO: installd uses --output-oat-fd. Should we change class linking logic for PIC?
+ LOG(ERROR) << "No output oat filename specified, needs filename for when we are PIC";
+ return false;
+ }
+
+ // Image was PIC. Create symlink where the oat is supposed to go.
+ if (!new_oat_out) {
+ LOG(ERROR) << "Oat file " << output_oat_filename << " already exists, refusing to overwrite";
+ return false;
+ }
+
+ // Delete the original file, since we won't need it.
+ TEMP_FAILURE_RETRY(unlink(output_oat_filename.c_str()));
+
+ // Create a symlink from the old oat to the new oat
+ if (symlink(input_oat_filename.c_str(), output_oat_filename.c_str()) < 0) {
+ int err = errno;
+ LOG(ERROR) << "Failed to create symlink at " << output_oat_filename
+ << " error(" << err << "): " << strerror(err);
+ return false;
+ }
+
+ if (kIsDebugBuild) {
+ LOG(INFO) << "Created symlink " << output_oat_filename << " -> " << input_oat_filename;
+ }
+
+ return true;
+}
+
bool PatchOat::PatchImage() {
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
CHECK_GT(image_->Size(), sizeof(ImageHeader));
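
Putting the new helpers together, the intended control flow for the oat side is sketched below (it mirrors the PatchOat::Patch changes in this file; variable names follow the surrounding code):

    switch (IsOatPic(elf.get())) {
      case PIC:
        // PIC oat: no ELF patching needed, just point the output at the input.
        return ReplaceOatFileWithSymlink(input_oat->GetPath(), output_oat->GetPath(),
                                         output_oat_opened_from_fd, new_oat_out);
      case NOT_PIC:
        break;  // fall through to the usual PatchElf()/WriteElf() path
      default:
        return false;  // ERROR_OAT_FILE and later values; already logged by IsOatPic
    }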
@@ -355,14 +462,15 @@
}
void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
- bool is_static_unused) const {
+ bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
mirror::Object* moved_object = patcher_->RelocatedAddressOf(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-void PatchOat::PatchVisitor::operator() (mirror::Class* cls, mirror::Reference* ref) const {
+void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED,
+ mirror::Reference* ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
@@ -390,6 +498,25 @@
}
}
+const OatHeader* PatchOat::GetOatHeader(const ElfFile* elf_file) {
+ if (elf_file->Is64Bit()) {
+ return GetOatHeader<ElfFileImpl64>(elf_file->GetImpl64());
+ } else {
+ return GetOatHeader<ElfFileImpl32>(elf_file->GetImpl32());
+ }
+}
+
+template <typename ElfFileImpl>
+const OatHeader* PatchOat::GetOatHeader(const ElfFileImpl* elf_file) {
+ auto rodata_sec = elf_file->FindSectionByName(".rodata");
+ if (rodata_sec == nullptr) {
+ return nullptr;
+ }
+
+ OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + rodata_sec->sh_offset);
+ return oat_header;
+}
+
// Called by BitmapCallback
void PatchOat::VisitObject(mirror::Object* object) {
mirror::Object* copy = RelocatedCopyOf(object);
@@ -405,43 +532,49 @@
PatchOat::PatchVisitor visitor(this, copy);
object->VisitReferences<true, kVerifyNone>(visitor, visitor);
if (object->IsArtMethod<kVerifyNone>()) {
- FixupMethod(static_cast<mirror::ArtMethod*>(object),
- static_cast<mirror::ArtMethod*>(copy));
+ FixupMethod(down_cast<mirror::ArtMethod*>(object), down_cast<mirror::ArtMethod*>(copy));
}
}
void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) {
+ const size_t pointer_size = InstructionSetPointerSize(isa_);
// Just update the entry points if it looks like we should.
// TODO: sanity check all the pointers' values
uintptr_t portable = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromPortableCompiledCode<kVerifyNone>());
+ object->GetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(pointer_size));
if (portable != 0) {
- copy->SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(portable + delta_));
+ copy->SetEntryPointFromPortableCompiledCodePtrSize(reinterpret_cast<void*>(portable + delta_),
+ pointer_size);
}
uintptr_t quick= reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromQuickCompiledCode<kVerifyNone>());
+ object->GetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(pointer_size));
if (quick != 0) {
- copy->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(quick + delta_));
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast<void*>(quick + delta_),
+ pointer_size);
}
uintptr_t interpreter = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromInterpreter<kVerifyNone>());
+ object->GetEntryPointFromInterpreterPtrSize<kVerifyNone>(pointer_size));
if (interpreter != 0) {
- copy->SetEntryPointFromInterpreter(
- reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_));
+ copy->SetEntryPointFromInterpreterPtrSize(
+ reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_), pointer_size);
}
- uintptr_t native_method = reinterpret_cast<uintptr_t>(object->GetNativeMethod());
+ uintptr_t native_method = reinterpret_cast<uintptr_t>(
+ object->GetEntryPointFromJniPtrSize(pointer_size));
if (native_method != 0) {
- copy->SetNativeMethod(reinterpret_cast<void*>(native_method + delta_));
+ copy->SetEntryPointFromJniPtrSize(reinterpret_cast<void*>(native_method + delta_),
+ pointer_size);
}
- uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(object->GetNativeGcMap());
+ uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(
+ object->GetNativeGcMapPtrSize(pointer_size));
if (native_gc_map != 0) {
- copy->SetNativeGcMap(reinterpret_cast<uint8_t*>(native_gc_map + delta_));
+ copy->SetNativeGcMapPtrSize(reinterpret_cast<uint8_t*>(native_gc_map + delta_), pointer_size);
}
}
-bool PatchOat::Patch(File* input_oat, off_t delta, File* output_oat, TimingLogger* timings) {
+bool PatchOat::Patch(File* input_oat, off_t delta, File* output_oat, TimingLogger* timings,
+ bool output_oat_opened_from_fd, bool new_oat_out) {
CHECK(input_oat != nullptr);
CHECK(output_oat != nullptr);
CHECK_GE(input_oat->Fd(), 0);
@@ -449,13 +582,28 @@
TimingLogger::ScopedTiming t("Setup Oat File Patching", timings);
std::string error_msg;
- std::unique_ptr<ElfFile> elf(ElfFile::Open(const_cast<File*>(input_oat),
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat,
PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
if (elf.get() == nullptr) {
LOG(ERROR) << "unable to open oat file " << input_oat->GetPath() << " : " << error_msg;
return false;
}
+ MaybePic is_oat_pic = IsOatPic(elf.get());
+ if (is_oat_pic >= ERROR_FIRST) {
+ // Error logged by IsOatPic
+ return false;
+ } else if (is_oat_pic == PIC) {
+ // Do not need to do ELF-file patching. Create a symlink and skip the rest.
+ // Any errors will be logged by the function call.
+ return ReplaceOatFileWithSymlink(input_oat->GetPath(),
+ output_oat->GetPath(),
+ output_oat_opened_from_fd,
+ new_oat_out);
+ } else {
+ CHECK(is_oat_pic == NOT_PIC);
+ }
+
PatchOat p(elf.release(), delta, timings);
t.NewTiming("Patch Oat file");
if (!p.PatchElf()) {
@@ -756,6 +904,20 @@
}
}
+// Either try to close the file (close=true), or erase it.
+static bool FinishFile(File* file, bool close) {
+ if (close) {
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close file.";
+ return false;
+ }
+ return true;
+ } else {
+ file->Erase();
+ return false;
+ }
+}
+
static int patchoat(int argc, char **argv) {
InitLogging(argv);
MemMap::Init();
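
FinishFile centralizes the flush-or-erase decision for the output files. Later in this patch it is chained onto the patching result, so a run is only reported as successful if the outputs also flush and close cleanly; a sketch of that composition (DoPatch is a hypothetical stand-in for the PatchOat::Patch call):

    bool ret = DoPatch();
    // close == true  -> FlushCloseOrErase(), returns whether that succeeded;
    // close == false -> Erase(), always returns false.
    ret = ret && FinishFile(output_oat.get(), ret);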
@@ -1027,7 +1189,7 @@
if (output_image_filename.empty()) {
output_image_filename = "output-image-file";
}
- output_image.reset(new File(output_image_fd, output_image_filename));
+ output_image.reset(new File(output_image_fd, output_image_filename, true));
} else {
CHECK(!output_image_filename.empty());
output_image.reset(CreateOrOpen(output_image_filename.c_str(), &new_image_out));
@@ -1041,12 +1203,18 @@
if (input_oat_filename.empty()) {
input_oat_filename = "input-oat-file";
}
- input_oat.reset(new File(input_oat_fd, input_oat_filename));
+ input_oat.reset(new File(input_oat_fd, input_oat_filename, false));
+ if (input_oat == nullptr) {
+ // Unlikely, but ensure exhaustive logging in the non-zero exit code case.
+ LOG(ERROR) << "Failed to open input oat file by its FD " << input_oat_fd;
+ }
} else {
CHECK(!input_oat_filename.empty());
input_oat.reset(OS::OpenFileForReading(input_oat_filename.c_str()));
- if (input_oat.get() == nullptr) {
- LOG(ERROR) << "Could not open input oat file: " << strerror(errno);
+ if (input_oat == nullptr) {
+ int err = errno;
+ LOG(ERROR) << "Failed to open input oat file " << input_oat_filename
+ << ": " << strerror(err) << "(" << err << ")";
}
}
@@ -1054,13 +1222,23 @@
if (output_oat_filename.empty()) {
output_oat_filename = "output-oat-file";
}
- output_oat.reset(new File(output_oat_fd, output_oat_filename));
+ output_oat.reset(new File(output_oat_fd, output_oat_filename, true));
+ if (output_oat == nullptr) {
+ // Unlikely, but ensure exhaustive logging in the non-zero exit code case.
+ LOG(ERROR) << "Failed to open output oat file by its FD " << output_oat_fd;
+ }
} else {
CHECK(!output_oat_filename.empty());
output_oat.reset(CreateOrOpen(output_oat_filename.c_str(), &new_oat_out));
+ if (output_oat == nullptr) {
+ int err = errno;
+ LOG(ERROR) << "Failed to open output oat file " << output_oat_filename
+ << ": " << strerror(err) << "(" << err << ")";
+ }
}
}
+ // TODO: get rid of this.
auto cleanup = [&output_image_filename, &output_oat_filename,
&new_oat_out, &new_image_out, &timings, &dump_timings](bool success) {
timings.EndTiming();
@@ -1077,14 +1255,29 @@
if (dump_timings) {
LOG(INFO) << Dumpable<TimingLogger>(timings);
}
+
+ if (kIsDebugBuild) {
+ LOG(INFO) << "Cleaning up.. success? " << success;
+ }
};
- if ((have_oat_files && (input_oat.get() == nullptr || output_oat.get() == nullptr)) ||
- (have_image_files && output_image.get() == nullptr)) {
+ if (have_oat_files && (input_oat.get() == nullptr || output_oat.get() == nullptr)) {
+ LOG(ERROR) << "Failed to open input/output oat files";
+ cleanup(false);
+ return EXIT_FAILURE;
+ } else if (have_image_files && output_image.get() == nullptr) {
+ LOG(ERROR) << "Failed to open output image file";
cleanup(false);
return EXIT_FAILURE;
}
+ if (debug) {
+ LOG(INFO) << "moving offset by " << base_delta
+ << " (0x" << std::hex << base_delta << ") bytes or "
+ << std::dec << (base_delta/kPageSize) << " pages.";
+ }
+
+ // TODO: is it going to be problematic to unlink a file that was flock-ed?
ScopedFlock output_oat_lock;
if (lock_output) {
std::string error_msg;
@@ -1095,24 +1288,34 @@
}
}
- if (debug) {
- LOG(INFO) << "moving offset by " << base_delta
- << " (0x" << std::hex << base_delta << ") bytes or "
- << std::dec << (base_delta/kPageSize) << " pages.";
- }
-
bool ret;
if (have_image_files && have_oat_files) {
TimingLogger::ScopedTiming pt("patch image and oat", &timings);
ret = PatchOat::Patch(input_oat.get(), input_image_location, base_delta,
- output_oat.get(), output_image.get(), isa, &timings);
+ output_oat.get(), output_image.get(), isa, &timings,
+ output_oat_fd >= 0, // was it opened from FD?
+ new_oat_out);
+ // The order here doesn't matter. If the first one is successfully saved and the second one
+ // erased, ImageSpace will still detect a problem and not use the files.
+ ret = ret && FinishFile(output_image.get(), ret);
+ ret = ret && FinishFile(output_oat.get(), ret);
} else if (have_oat_files) {
TimingLogger::ScopedTiming pt("patch oat", &timings);
- ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings);
- } else {
+ ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings,
+ output_oat_fd >= 0, // was it opened from FD?
+ new_oat_out);
+ ret = ret && FinishFile(output_oat.get(), ret);
+ } else if (have_image_files) {
TimingLogger::ScopedTiming pt("patch image", &timings);
- CHECK(have_image_files);
ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings);
+ ret = ret && FinishFile(output_image.get(), ret);
+ } else {
+ CHECK(false);
+ ret = true;
+ }
+
+ if (kIsDebugBuild) {
+ LOG(INFO) << "Exiting with return ... " << ret;
}
cleanup(ret);
return (ret) ? EXIT_SUCCESS : EXIT_FAILURE;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index fd36ad5..578df3a 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -17,19 +17,20 @@
#ifndef ART_PATCHOAT_PATCHOAT_H_
#define ART_PATCHOAT_PATCHOAT_H_
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "instruction_set.h"
-#include "os.h"
#include "elf_file.h"
#include "elf_utils.h"
#include "gc/accounting/space_bitmap.h"
#include "gc/heap.h"
+#include "os.h"
#include "utils.h"
namespace art {
class ImageHeader;
+class OatHeader;
namespace mirror {
class Object;
@@ -40,30 +41,58 @@
class PatchOat {
public:
- static bool Patch(File* oat_in, off_t delta, File* oat_out, TimingLogger* timings);
+ // Patch only the oat file
+ static bool Patch(File* oat_in, off_t delta, File* oat_out, TimingLogger* timings,
+ bool output_oat_opened_from_fd, // Was this using --output-oat-fd ?
+ bool new_oat_out); // Output oat was a new file created by us?
+ // Patch only the image (art file)
static bool Patch(const std::string& art_location, off_t delta, File* art_out, InstructionSet isa,
TimingLogger* timings);
- static bool Patch(const File* oat_in, const std::string& art_location,
+ // Patch both the image and the oat file
+ static bool Patch(File* oat_in, const std::string& art_location,
off_t delta, File* oat_out, File* art_out, InstructionSet isa,
- TimingLogger* timings);
+ TimingLogger* timings,
+ bool output_oat_opened_from_fd, // Was this using --output-oat-fd ?
+ bool new_oat_out); // Output oat was a new file created by us?
private:
// Takes ownership only of the ElfFile. All other pointers are only borrowed.
PatchOat(ElfFile* oat_file, off_t delta, TimingLogger* timings)
: oat_file_(oat_file), image_(nullptr), bitmap_(nullptr), heap_(nullptr), delta_(delta),
- timings_(timings) {}
- PatchOat(MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
+ isa_(kNone), timings_(timings) {}
+ PatchOat(InstructionSet isa, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
MemMap* heap, off_t delta, TimingLogger* timings)
: image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), timings_(timings) {}
- PatchOat(ElfFile* oat_file, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
- MemMap* heap, off_t delta, TimingLogger* timings)
+ delta_(delta), isa_(isa), timings_(timings) {}
+ PatchOat(InstructionSet isa, ElfFile* oat_file, MemMap* image,
+ gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
+ TimingLogger* timings)
: oat_file_(oat_file), image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), timings_(timings) {}
+ delta_(delta), isa_(isa), timings_(timings) {}
~PatchOat() {}
+ // Was the .art image at image_path made with --compile-pic ?
+ static bool IsImagePic(const ImageHeader& image_header, const std::string& image_path);
+
+ enum MaybePic {
+ NOT_PIC, // Code not pic. Patch as usual.
+ PIC, // Code was pic. Create symlink; skip OAT patching.
+ ERROR_OAT_FILE, // Failed to symlink oat file
+ ERROR_FIRST = ERROR_OAT_FILE,
+ };
+
+ // Was the .oat image at oat_in made with --compile-pic ?
+ static MaybePic IsOatPic(const ElfFile* oat_in);
+
+ // Attempt to replace the file with a symlink
+ // Returns false if it fails
+ static bool ReplaceOatFileWithSymlink(const std::string& input_oat_filename,
+ const std::string& output_oat_filename,
+ bool output_oat_opened_from_fd,
+ bool new_oat_out); // Output oat was newly created?
+
static void BitmapCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
@@ -95,6 +124,13 @@
mirror::Object* RelocatedCopyOf(mirror::Object*);
mirror::Object* RelocatedAddressOf(mirror::Object* obj);
+ // Look up the oat header from any elf file.
+ static const OatHeader* GetOatHeader(const ElfFile* elf_file);
+
+ // Templatized version to actually look up the oat header
+ template <typename ElfFileImpl>
+ static const OatHeader* GetOatHeader(const ElfFileImpl* elf_file);
+
// Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
// change the heap.
class PatchVisitor {
@@ -121,8 +157,10 @@
const MemMap* const heap_;
// The amount we are changing the offset by.
const off_t delta_;
- // Timing splits.
- TimingLogger* const timings_;
+ // Active instruction set, used to know the entrypoint size.
+ const InstructionSet isa_;
+
+ TimingLogger* timings_;
DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
};
diff --git a/runtime/Android.mk b/runtime/Android.mk
index d9b4139..087c0ea 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -31,7 +31,6 @@
base/stringprintf.cc \
base/timing_logger.cc \
base/unix_file/fd_file.cc \
- base/unix_file/mapped_file.cc \
base/unix_file/null_file.cc \
base/unix_file/random_access_file_utils.cc \
base/unix_file/string_file.cc \
@@ -74,7 +73,6 @@
hprof/hprof.cc \
image.cc \
indirect_reference_table.cc \
- instruction_set.cc \
instrumentation.cc \
intern_table.cc \
interpreter/interpreter.cc \
@@ -166,11 +164,18 @@
LIBART_COMMON_SRC_FILES += \
arch/context.cc \
+ arch/instruction_set.cc \
+ arch/instruction_set_features.cc \
arch/memcmp16.cc \
+ arch/arm/instruction_set_features_arm.cc \
arch/arm/registers_arm.cc \
+ arch/arm64/instruction_set_features_arm64.cc \
arch/arm64/registers_arm64.cc \
- arch/x86/registers_x86.cc \
+ arch/mips/instruction_set_features_mips.cc \
arch/mips/registers_mips.cc \
+ arch/x86/instruction_set_features_x86.cc \
+ arch/x86/registers_x86.cc \
+ arch/x86_64/registers_x86_64.cc \
entrypoints/entrypoint_utils.cc \
entrypoints/interpreter/interpreter_entrypoints.cc \
entrypoints/jni/jni_entrypoints.cc \
@@ -209,7 +214,6 @@
LIBART_TARGET_SRC_FILES := \
$(LIBART_COMMON_SRC_FILES) \
- base/logging_android.cc \
jdwp/jdwp_adb.cc \
monitor_android.cc \
runtime_android.cc \
@@ -218,11 +222,12 @@
LIBART_TARGET_SRC_FILES_arm := \
arch/arm/context_arm.cc.arm \
arch/arm/entrypoints_init_arm.cc \
- arch/arm/instruction_set_features_arm.S \
+ arch/arm/instruction_set_features_assembly_tests.S \
arch/arm/jni_entrypoints_arm.S \
arch/arm/memcmp16_arm.S \
arch/arm/portable_entrypoints_arm.S \
arch/arm/quick_entrypoints_arm.S \
+ arch/arm/quick_entrypoints_cc_arm.cc \
arch/arm/thread_arm.cc \
arch/arm/fault_handler_arm.cc
@@ -282,7 +287,6 @@
LIBART_HOST_SRC_FILES := \
$(LIBART_COMMON_SRC_FILES) \
- base/logging_linux.cc \
monitor_linux.cc \
runtime_linux.cc \
thread_linux.cc
@@ -294,17 +298,21 @@
$(LIBART_SRC_FILES_x86_64)
LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
- arch/x86_64/registers_x86_64.h \
+ arch/instruction_set.h \
base/allocator.h \
base/mutex.h \
+ debugger.h \
+ base/unix_file/fd_file.h \
dex_file.h \
dex_instruction.h \
+ gc/allocator/rosalloc.h \
gc/collector/gc_type.h \
+ gc/allocator_type.h \
gc/collector_type.h \
gc/space/space.h \
gc/heap.h \
+ instrumentation.h \
indirect_reference_table.h \
- instruction_set.h \
invoke_type.h \
jdwp/jdwp.h \
jdwp/jdwp_constants.h \
@@ -312,7 +320,10 @@
mirror/class.h \
oat.h \
object_callbacks.h \
+ profiler_options.h \
quick/inline_method_analyser.h \
+ runtime.h \
+ stack.h \
thread.h \
thread_state.h \
verifier/method_verifier.h
@@ -334,7 +345,7 @@
2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := default
ifeq ($(DEX2OAT_TARGET_ARCH),arm)
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a15 krait denver))
- LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := lpae,div
+ LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := atomic_ldrd_strd,div
else
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a7))
LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := div
@@ -343,7 +354,7 @@
endif
ifeq ($(2ND_DEX2OAT_TARGET_ARCH),arm)
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a15 krait denver))
- 2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := lpae,div
+ 2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := atomic_ldrd_strd,div
else
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a7))
2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := div
@@ -379,7 +390,9 @@
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart
- LOCAL_FDO_SUPPORT := true
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_FDO_SUPPORT := true
+ endif
else # debug
LOCAL_MODULE := libartd
endif
@@ -458,15 +471,21 @@
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES)
LOCAL_C_INCLUDES += art/sigchainlib
- LOCAL_SHARED_LIBRARIES += liblog libnativehelper libnativebridge
+ LOCAL_SHARED_LIBRARIES := libnativehelper libnativebridge libsigchain
include external/libcxx/libcxx.mk
- LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
+ LOCAL_SHARED_LIBRARIES += libbacktrace
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libdl libutils libsigchain
+ LOCAL_SHARED_LIBRARIES += libdl
+ # ZipArchive support, the order matters here to get all symbols.
LOCAL_STATIC_LIBRARIES := libziparchive libz
+ # For android::FileMap used by libziparchive.
+ LOCAL_SHARED_LIBRARIES += libutils
+ # For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
+ LOCAL_SHARED_LIBRARIES += libcutils
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
- LOCAL_SHARED_LIBRARIES += libsigchain
+ LOCAL_SHARED_LIBRARIES += libziparchive-host
+ # For ashmem_create_region.
+ LOCAL_STATIC_LIBRARIES += libcutils
endif
ifeq ($$(ART_USE_PORTABLE_COMPILER),true)
include $$(LLVM_GEN_INTRINSICS_MK)
@@ -477,7 +496,7 @@
endif
endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
-# LOCAL_ADDITIONAL_DEPENDENCIES += $$(LOCAL_PATH)/Android.mk
+ LOCAL_ADDITIONAL_DEPENDENCIES += $$(LOCAL_PATH)/Android.mk
ifeq ($$(art_target_or_host),target)
LOCAL_MODULE_TARGET_ARCH := $$(ART_TARGET_SUPPORTED_ARCH)
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 5220dc3..cac500c 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -32,7 +32,7 @@
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
@@ -43,398 +43,98 @@
}
};
+// Common tests are declared next to the constants.
+#define ADD_TEST_EQ(x, y) EXPECT_EQ(x, y);
+#include "asm_support.h"
-TEST_F(ArchTest, ARM) {
+TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
+ CheckAsmSupportOffsetsAndSizes();
+}
+
+// Grab architecture specific constants.
+namespace arm {
#include "arch/arm/asm_support_arm.h"
-#undef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
-
-
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kArm, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for SaveAll";
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kArm, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsOnly";
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kArm, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsAndArgs";
-#endif
-
-
-#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef THREAD_SELF_OFFSET
-#undef THREAD_SELF_OFFSET
-#endif
-#ifdef THREAD_CARD_TABLE_OFFSET
-#undef THREAD_CARD_TABLE_OFFSET
-#endif
-#ifdef THREAD_EXCEPTION_OFFSET
-#undef THREAD_EXCEPTION_OFFSET
-#endif
-#ifdef THREAD_ID_OFFSET
-#undef THREAD_ID_OFFSET
-#endif
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#endif
-#ifdef HEAP_REFERENCE_SIZE
-#undef HEAP_REFERENCE_SIZE
-#endif
+}
+
+namespace arm64 {
+#include "arch/arm64/asm_support_arm64.h"
+static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+}
+
+namespace mips {
+#include "arch/mips/asm_support_mips.h"
+static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+}
+
+namespace x86 {
+#include "arch/x86/asm_support_x86.h"
+static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+}
+
+namespace x86_64 {
+#include "arch/x86_64/asm_support_x86_64.h"
+static constexpr size_t kFrameSizeSaveAllCalleeSave = FRAME_SIZE_SAVE_ALL_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsOnlyCalleeSave = FRAME_SIZE_REFS_ONLY_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+}
+
+// Check that the architecture-specific constants are sound.
+TEST_F(ArchTest, ARM) {
+ CheckFrameSize(InstructionSet::kArm, Runtime::kSaveAll, arm::kFrameSizeSaveAllCalleeSave);
+ CheckFrameSize(InstructionSet::kArm, Runtime::kRefsOnly, arm::kFrameSizeRefsOnlyCalleeSave);
+ CheckFrameSize(InstructionSet::kArm, Runtime::kRefsAndArgs, arm::kFrameSizeRefsAndArgsCalleeSave);
}
TEST_F(ArchTest, ARM64) {
-#include "arch/arm64/asm_support_arm64.h"
-#undef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
-
-
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kArm64, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for SaveAll";
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsOnly";
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsAndArgs";
-#endif
-
-
-#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef THREAD_SELF_OFFSET
-#undef THREAD_SELF_OFFSET
-#endif
-#ifdef THREAD_CARD_TABLE_OFFSET
-#undef THREAD_CARD_TABLE_OFFSET
-#endif
-#ifdef THREAD_EXCEPTION_OFFSET
-#undef THREAD_EXCEPTION_OFFSET
-#endif
-#ifdef THREAD_ID_OFFSET
-#undef THREAD_ID_OFFSET
-#endif
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#endif
-#ifdef HEAP_REFERENCE_SIZE
-#undef HEAP_REFERENCE_SIZE
-#endif
+ CheckFrameSize(InstructionSet::kArm64, Runtime::kSaveAll, arm64::kFrameSizeSaveAllCalleeSave);
+ CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsOnly, arm64::kFrameSizeRefsOnlyCalleeSave);
+ CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsAndArgs,
+ arm64::kFrameSizeRefsAndArgsCalleeSave);
}
-
TEST_F(ArchTest, MIPS) {
-#include "arch/mips/asm_support_mips.h"
-#undef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
-
-
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kMips, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for SaveAll";
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kMips, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsOnly";
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kMips, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsAndArgs";
-#endif
-
-
-#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef THREAD_SELF_OFFSET
-#undef THREAD_SELF_OFFSET
-#endif
-#ifdef THREAD_CARD_TABLE_OFFSET
-#undef THREAD_CARD_TABLE_OFFSET
-#endif
-#ifdef THREAD_EXCEPTION_OFFSET
-#undef THREAD_EXCEPTION_OFFSET
-#endif
-#ifdef THREAD_ID_OFFSET
-#undef THREAD_ID_OFFSET
-#endif
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#endif
-#ifdef HEAP_REFERENCE_SIZE
-#undef HEAP_REFERENCE_SIZE
-#endif
+ CheckFrameSize(InstructionSet::kMips, Runtime::kSaveAll, mips::kFrameSizeSaveAllCalleeSave);
+ CheckFrameSize(InstructionSet::kMips, Runtime::kRefsOnly, mips::kFrameSizeRefsOnlyCalleeSave);
+ CheckFrameSize(InstructionSet::kMips, Runtime::kRefsAndArgs,
+ mips::kFrameSizeRefsAndArgsCalleeSave);
}
-
TEST_F(ArchTest, X86) {
-#include "arch/x86/asm_support_x86.h"
-#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
-
-
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kX86, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for SaveAll";
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kX86, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsOnly";
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kX86, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsAndArgs";
-#endif
-
-
-#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef THREAD_SELF_OFFSET
-#undef THREAD_SELF_OFFSET
-#endif
-#ifdef THREAD_CARD_TABLE_OFFSET
-#undef THREAD_CARD_TABLE_OFFSET
-#endif
-#ifdef THREAD_EXCEPTION_OFFSET
-#undef THREAD_EXCEPTION_OFFSET
-#endif
-#ifdef THREAD_ID_OFFSET
-#undef THREAD_ID_OFFSET
-#endif
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#endif
-#ifdef HEAP_REFERENCE_SIZE
-#undef HEAP_REFERENCE_SIZE
-#endif
+ CheckFrameSize(InstructionSet::kX86, Runtime::kSaveAll, x86::kFrameSizeSaveAllCalleeSave);
+ CheckFrameSize(InstructionSet::kX86, Runtime::kRefsOnly, x86::kFrameSizeRefsOnlyCalleeSave);
+ CheckFrameSize(InstructionSet::kX86, Runtime::kRefsAndArgs, x86::kFrameSizeRefsAndArgsCalleeSave);
}
-
TEST_F(ArchTest, X86_64) {
-#include "arch/x86_64/asm_support_x86_64.h"
-#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
-
-
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kX86_64, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for SaveAll";
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsOnly";
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
- CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
-#else
- LOG(WARNING) << "No frame size for RefsAndArgs";
-#endif
-
-
-#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
-#endif
-#ifdef THREAD_SELF_OFFSET
-#undef THREAD_SELF_OFFSET
-#endif
-#ifdef THREAD_CARD_TABLE_OFFSET
-#undef THREAD_CARD_TABLE_OFFSET
-#endif
-#ifdef THREAD_EXCEPTION_OFFSET
-#undef THREAD_EXCEPTION_OFFSET
-#endif
-#ifdef THREAD_ID_OFFSET
-#undef THREAD_ID_OFFSET
-#endif
-#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
-#endif
-#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-#endif
-#ifdef HEAP_REFERENCE_SIZE
-#undef HEAP_REFERENCE_SIZE
-#endif
-}
-
-
-// The following tests are all for the running architecture. So we get away
-// with just including it and not undefining it every time.
-
-#if defined(__arm__)
-#include "arch/arm/asm_support_arm.h"
-#elif defined(__aarch64__)
-#include "arch/arm64/asm_support_arm64.h"
-#elif defined(__mips__)
-#include "arch/mips/asm_support_mips.h"
-#elif defined(__i386__)
-#include "arch/x86/asm_support_x86.h"
-#elif defined(__x86_64__)
-#include "arch/x86_64/asm_support_x86_64.h"
-#else
- // This happens for the host test.
-#ifdef __LP64__
-#include "arch/x86_64/asm_support_x86_64.h"
-#else
-#include "arch/x86/asm_support_x86.h"
-#endif
-#endif
-
-
-TEST_F(ArchTest, ThreadOffsets) {
- // Ugly hack, change when possible.
-#ifdef __LP64__
-#define POINTER_SIZE 8
-#else
-#define POINTER_SIZE 4
-#endif
-
-#if defined(THREAD_SELF_OFFSET)
- ThreadOffset<POINTER_SIZE> self_offset = Thread::SelfOffset<POINTER_SIZE>();
- EXPECT_EQ(self_offset.Int32Value(), THREAD_SELF_OFFSET);
-#else
- LOG(INFO) << "No Thread Self Offset found.";
-#endif
-
-#if defined(THREAD_CARD_TABLE_OFFSET)
- ThreadOffset<POINTER_SIZE> card_offset = Thread::CardTableOffset<POINTER_SIZE>();
- EXPECT_EQ(card_offset.Int32Value(), THREAD_CARD_TABLE_OFFSET);
-#else
- LOG(INFO) << "No Thread Card Table Offset found.";
-#endif
-
-#if defined(THREAD_EXCEPTION_OFFSET)
- ThreadOffset<POINTER_SIZE> exc_offset = Thread::ExceptionOffset<POINTER_SIZE>();
- EXPECT_EQ(exc_offset.Int32Value(), THREAD_EXCEPTION_OFFSET);
-#else
- LOG(INFO) << "No Thread Exception Offset found.";
-#endif
-
-#if defined(THREAD_ID_OFFSET)
- ThreadOffset<POINTER_SIZE> id_offset = Thread::ThinLockIdOffset<POINTER_SIZE>();
- EXPECT_EQ(id_offset.Int32Value(), THREAD_ID_OFFSET);
-#else
- LOG(INFO) << "No Thread ID Offset found.";
-#endif
-}
-
-
-TEST_F(ArchTest, CalleeSaveMethodOffsets) {
-#if defined(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET)
- EXPECT_EQ(Runtime::GetCalleeSaveMethodOffset(Runtime::kSaveAll),
- static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET));
-#else
- LOG(INFO) << "No Runtime Save-all Offset found.";
-#endif
-
-#if defined(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET)
- EXPECT_EQ(Runtime::GetCalleeSaveMethodOffset(Runtime::kRefsOnly),
- static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET));
-#else
- LOG(INFO) << "No Runtime Refs-only Offset found.";
-#endif
-
-#if defined(RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET)
- EXPECT_EQ(Runtime::GetCalleeSaveMethodOffset(Runtime::kRefsAndArgs),
- static_cast<size_t>(RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET));
-#else
- LOG(INFO) << "No Runtime Refs-and-Args Offset found.";
-#endif
-}
-
-
-TEST_F(ArchTest, HeapReferenceSize) {
-#if defined(HEAP_REFERENCE_SIZE)
- EXPECT_EQ(sizeof(mirror::HeapReference<mirror::Object>),
- static_cast<size_t>(HEAP_REFERENCE_SIZE));
-#else
- LOG(INFO) << "No expected HeapReference Size found.";
-#endif
-}
-
-TEST_F(ArchTest, StackReferenceSize) {
-#if defined(STACK_REFERENCE_SIZE)
- EXPECT_EQ(sizeof(StackReference<mirror::Object>),
- static_cast<size_t>(STACK_REFERENCE_SIZE));
-#else
- LOG(INFO) << "No expected StackReference Size #define found.";
-#endif
+ CheckFrameSize(InstructionSet::kX86_64, Runtime::kSaveAll, x86_64::kFrameSizeSaveAllCalleeSave);
+ CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsOnly, x86_64::kFrameSizeRefsOnlyCalleeSave);
+ CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsAndArgs,
+ x86_64::kFrameSizeRefsAndArgsCalleeSave);
}
} // namespace art
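
The rewritten arch_test.cc above relies on a small trick: each per-architecture asm_support header is included inside its own namespace, its FRAME_SIZE_* macros are captured as typed constexpr values, and the macros are immediately #undef'd so the next include does not clash. A minimal self-contained sketch of the same pattern (FRAME_SIZE_EXAMPLE and the namespace names are illustrative, not from the source):

    #include <cstddef>

    // In the real test each define comes from including an architecture-specific
    // asm_support header; plain defines keep this sketch self-contained.
    namespace arch_a {
    #define FRAME_SIZE_EXAMPLE 112
    static constexpr std::size_t kFrameSizeExample = FRAME_SIZE_EXAMPLE;  // capture the macro as a typed constant
    #undef FRAME_SIZE_EXAMPLE                                             // so the next "header" can redefine it
    }  // namespace arch_a

    namespace arch_b {
    #define FRAME_SIZE_EXAMPLE 96
    static constexpr std::size_t kFrameSizeExample = FRAME_SIZE_EXAMPLE;
    #undef FRAME_SIZE_EXAMPLE
    }  // namespace arch_b

    static_assert(arch_a::kFrameSizeExample != arch_b::kFrameSizeExample,
                  "per-arch constants coexist under distinct namespaces");
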
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index fb6458c..2af636e 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -30,38 +30,92 @@
.arch armv7-a
.thumb
+// Macro to generate the value of Runtime::Current into rDest, clobbering rTemp. Because it uses
+// labels, those labels need to be unique; we bind them to the function name in the ENTRY macros.
+.macro RUNTIME_CURRENT name, num, rDest, rTemp
+ .if .Lruntime_current\num\()_used
+ .error
+ .endif
+ .set .Lruntime_current\num\()_used, 1
+ ldr \rDest, .Lgot_\name\()_\num @ Load offset of the GOT.
+ ldr \rTemp, .Lruntime_instance_\name\()_\num @ Load GOT offset of Runtime::instance_.
+.Lload_got_\name\()_\num\():
+ add \rDest, pc @ Fixup GOT address.
+ ldr \rDest, [\rDest, \rTemp] @ Load address of Runtime::instance_.
+ ldr \rDest, [\rDest] @ Load Runtime::instance_.
+.endm
+
+// Common ENTRY declaration code for ARM and Thumb; an ENTRY should always be paired with an END.
+// Declares the RUNTIME_CURRENT[123] macros that can be used within an ENTRY and will have literals
+// generated at END.
+.macro DEF_ENTRY thumb_or_arm, name
+ \thumb_or_arm
+ .type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ // Cache alignment for function entry.
+ .balign 16
+\name:
+ .cfi_startproc
+ .fnstart
+ // Track whether RUNTIME_CURRENT was used.
+ .set .Lruntime_current1_used, 0
+ .set .Lruntime_current2_used, 0
+ .set .Lruntime_current3_used, 0
+ // Define the RUNTIME_CURRENT macros, bound to the \name argument of DEF_ENTRY so that
+ // label names are unique.
+ .macro RUNTIME_CURRENT1 rDest, rTemp
+ RUNTIME_CURRENT \name, 1, \rDest, \rTemp
+ .endm
+ .macro RUNTIME_CURRENT2 rDest, rTemp
+ RUNTIME_CURRENT \name, 2, \rDest, \rTemp
+ .endm
+ .macro RUNTIME_CURRENT3 rDest, rTemp
+ RUNTIME_CURRENT \name, 3, \rDest, \rTemp
+ .endm
+.endm
+
+// A Thumb2-style ENTRY.
.macro ENTRY name
- .thumb_func
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
- .fnstart
+ DEF_ENTRY .thumb_func, \name
.endm
+// An ARM-style ENTRY.
.macro ARM_ENTRY name
- .arm
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
- /* Ensure we get a sane starting CFA. */
- .cfi_def_cfa sp,0
- .fnstart
+ DEF_ENTRY .arm, \name
.endm
+// Terminate an ENTRY and generate GOT references.
.macro END name
+ // Generate offsets of GOT and Runtime::instance_ used in RUNTIME_CURRENT.
+ .if .Lruntime_current1_used
+ .Lgot_\name\()_1:
+ .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_1+4)
+ .Lruntime_instance_\name\()_1:
+ .word _ZN3art7Runtime9instance_E(GOT)
+ .endif
+ .if .Lruntime_current2_used
+ .Lgot_\name\()_2:
+ .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_2+4)
+ .Lruntime_instance_\name\()_2:
+ .word _ZN3art7Runtime9instance_E(GOT)
+ .endif
+ .if .Lruntime_current3_used
+ .Lgot_\name\()_3:
+ .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_3+4)
+ .Lruntime_instance_\name\()_3:
+ .word _ZN3art7Runtime9instance_E(GOT)
+ .endif
+ // Remove the RUNTIME_CURRENTx macros so they get rebound in the next function entry.
+ .purgem RUNTIME_CURRENT1
+ .purgem RUNTIME_CURRENT2
+ .purgem RUNTIME_CURRENT3
.fnend
.cfi_endproc
.size \name, .-\name
.endm
+// Declare an unimplemented ENTRY that will halt in a debugger.
.macro UNIMPLEMENTED name
ENTRY \name
bkpt
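
The RUNTIME_CURRENT macros above exist because libart is position independent: the address of the static Runtime::instance_ has to be reached through a PC-relative GOT entry rather than an absolute literal, hence the three chained loads. Conceptually they materialize what the following simplified C++ model expresses in one line; the class and field names follow ART's Runtime, everything else is illustrative:

    // Simplified model (not the ART source) of what RUNTIME_CURRENT puts in a register.
    namespace art {
    class Runtime {
     public:
      static Runtime* Current() { return instance_; }  // the asm loads instance_ via a PC-relative GOT entry
     private:
      static Runtime* instance_;  // mangled as _ZN3art7Runtime9instance_E in the literal pool above
    };
    Runtime* Runtime::instance_ = nullptr;
    }  // namespace art
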
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 330924e..1fa566b 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -19,22 +19,11 @@
#include "asm_support.h"
-// Offset of field Thread::tls32_.state_and_flags verified in InitCpu
-#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::tls32_.thin_lock_thread_id verified in InitCpu
-#define THREAD_ID_OFFSET 12
-// Offset of field Thread::tlsPtr_.card_table verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 120
-// Offset of field Thread::tlsPtr_.exception verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 124
-
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 112
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 48
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 112
-// Expected size of a heap reference
-#define HEAP_REFERENCE_SIZE 4
// Flag for enabling R4 optimization in arm runtime
-#define ARM_R4_SUSPEND_FLAG
+// #define ARM_R4_SUSPEND_FLAG
#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
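
The new ARM frame sizes are not arbitrary: SaveAll and RefsAndArgs both come out at 112 bytes, matching the register lists pushed in quick_entrypoints_arm.S later in this change. A stand-alone arithmetic check, written as a hedged sketch (the word counts mirror the preprocessor asserts in that file):

    // Sanity arithmetic for the ARM callee-save frame sizes defined above.
    static_assert(9 * 4 + 16 * 4 + 3 * 4 == 112,   // SaveAll: r4-r11 + lr, s16-s31, 3 words incl. Method*
                  "SAVE_ALL_CALLEE_SAVE frame size");
    static_assert(7 * 4 + 1 * 4 == 32,             // RefsOnly: r5-r8, r10, r11, lr + Method* word
                  "REFS_ONLY_CALLEE_SAVE frame size");
    static_assert(10 * 4 + 16 * 4 + 2 * 4 == 112,  // RefsAndArgs: r1-r3, r5-r8, r10, r11, lr, s0-s15, 2 words
                  "REFS_AND_ARGS_CALLEE_SAVE frame size");
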
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 96ffc93..9e8d282 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -17,10 +17,8 @@
#include "context_arm.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
-#include "thread.h"
+#include "utils.h"
namespace art {
namespace arm {
@@ -97,6 +95,23 @@
gprs_[R1] = const_cast<uint32_t*>(&gZero);
gprs_[R2] = nullptr;
gprs_[R3] = nullptr;
+
+ fprs_[S0] = nullptr;
+ fprs_[S1] = nullptr;
+ fprs_[S2] = nullptr;
+ fprs_[S3] = nullptr;
+ fprs_[S4] = nullptr;
+ fprs_[S5] = nullptr;
+ fprs_[S6] = nullptr;
+ fprs_[S7] = nullptr;
+ fprs_[S8] = nullptr;
+ fprs_[S9] = nullptr;
+ fprs_[S10] = nullptr;
+ fprs_[S11] = nullptr;
+ fprs_[S12] = nullptr;
+ fprs_[S13] = nullptr;
+ fprs_[S14] = nullptr;
+ fprs_[S15] = nullptr;
}
extern "C" void art_quick_do_long_jump(uint32_t*, uint32_t*);
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index ff0eb4a..85a0dd2 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -18,6 +18,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -26,112 +27,26 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
-
-// Math entrypoints.
-extern int32_t CmpgDouble(double a, double b);
-extern int32_t CmplDouble(double a, double b);
-extern int32_t CmpgFloat(float a, float b);
-extern int32_t CmplFloat(float a, float b);
-
-// Math conversions.
-extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT
-extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT
-extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT
-extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE
-
+// Used by soft float.
// Single-precision FP arithmetics.
-extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
-
+extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
// Double-precision FP arithmetics.
-extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
+extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
+
+// Used by hard float.
+extern "C" float art_quick_fmodf(float a, float b); // REM_FLOAT[_2ADDR]
+extern "C" double art_quick_fmod(double a, double b); // REM_DOUBLE[_2ADDR]
// Integer arithmetics.
extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16]
// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
-extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
@@ -205,25 +120,24 @@
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- qpoints->pCmpgDouble = CmpgDouble;
- qpoints->pCmpgFloat = CmpgFloat;
- qpoints->pCmplDouble = CmplDouble;
- qpoints->pCmplFloat = CmplFloat;
- qpoints->pFmod = fmod;
- qpoints->pL2d = __aeabi_l2d;
- qpoints->pFmodf = fmodf;
- qpoints->pL2f = __aeabi_l2f;
- qpoints->pD2iz = __aeabi_d2iz;
- qpoints->pF2iz = __aeabi_f2iz;
qpoints->pIdivmod = __aeabi_idivmod;
- qpoints->pD2l = art_d2l;
- qpoints->pF2l = art_f2l;
qpoints->pLdiv = __aeabi_ldivmod;
qpoints->pLmod = __aeabi_ldivmod; // result returned in r2:r3
qpoints->pLmul = art_quick_mul_long;
qpoints->pShlLong = art_quick_shl_long;
qpoints->pShrLong = art_quick_shr_long;
qpoints->pUshrLong = art_quick_ushr_long;
+ if (kArm32QuickCodeUseSoftFloat) {
+ qpoints->pFmod = fmod;
+ qpoints->pFmodf = fmodf;
+ qpoints->pD2l = art_d2l;
+ qpoints->pF2l = art_f2l;
+ } else {
+ qpoints->pFmod = art_quick_fmod;
+ qpoints->pFmodf = art_quick_fmodf;
+ qpoints->pD2l = art_quick_d2l;
+ qpoints->pF2l = art_quick_f2l;
+ }
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
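
The hunk above switches the Fmod/Fmodf/D2l/F2l entry points between libm/soft-float helpers and hard-float assembly stubs based on kArm32QuickCodeUseSoftFloat. Compiled code never calls fmod() directly; it indirects through the entry point table. A hedged stand-in for that wiring (the struct and the two helper functions are illustrative; only the pFmod field name comes from the diff):

    #include <cmath>

    // Stand-ins for the real helpers; ART installs fmod/fmodf or the hard-float
    // art_quick_fmod/art_quick_fmodf assembly stubs depending on kArm32QuickCodeUseSoftFloat.
    static double SoftFmod(double a, double b) { return std::fmod(a, b); }
    static double HardFmod(double a, double b) { return std::fmod(a, b); }  // placeholder for the asm stub

    struct QuickEntryPointsSketch {
      double (*pFmod)(double, double);  // field name mirrors the table filled in above
    };

    void InitMathEntryPoints(QuickEntryPointsSketch* qpoints, bool use_soft_float) {
      qpoints->pFmod = use_soft_float ? SoftFmod : HardFmod;
    }
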
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 564fcba..325b283 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -23,7 +23,6 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
-#include "instruction_set.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
#include "thread.h"
@@ -47,7 +46,8 @@
return instr_size;
}
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// Note that in this handler we set up the registers and return to
// longjmp directly rather than going through an assembly language stub. The
// reason for this is that longjmp is (currently) in ARM mode and that would
@@ -64,7 +64,7 @@
VLOG(signals) << "longjmp address: " << reinterpret_cast<void*>(sc->arm_pc);
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
@@ -100,7 +100,8 @@
*out_return_pc = (sc->arm_pc + instr_size) | 1;
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// The code that looks for the catch location needs to know the value of the
// ARM PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault. However the mapping table has
@@ -127,7 +128,8 @@
// The offset from r9 is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault (at PC-4 and PC).
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// These are the instructions to check for. The first one is the ldr r0,[r9,#xxx]
// where xxx is the offset of the suspend trigger.
uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
@@ -196,7 +198,8 @@
// If we determine this is a stack overflow we need to move the stack pointer
// to the overflow region below the protected region.
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
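
The fault handler signatures above gain ATTRIBUTE_UNUSED annotations so unused-parameter warnings stay quiet while the (int, siginfo_t*, void*) shape required by sigaction is preserved. A hedged sketch of the idea; the macro definition shown is the usual GCC/Clang spelling and is assumed here, the real one lives in ART's base/macros.h:

    #include <signal.h>

    #define ATTRIBUTE_UNUSED __attribute__((__unused__))  // assumed definition

    // The handler keeps the full sa_sigaction signature but only touches the context argument.
    static void Handler(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED, void* context) {
      (void)context;  // a real handler would inspect the ucontext here
    }
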
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
new file mode 100644
index 0000000..f8590d3
--- /dev/null
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm.h"
+
+#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+#include <sys/auxv.h>
+#include <asm/hwcap.h>
+#endif
+
+#include "signal.h"
+#include <fstream>
+
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+#if defined(__arm__)
+extern "C" bool artCheckForArmSdivInstruction();
+#endif
+
+namespace art {
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
+ const std::string& variant, std::string* error_msg) {
+ // Assume all ARM processors are SMP.
+ // TODO: set the SMP support based on variant.
+ const bool smp = true;
+
+ // Look for variants that have divide support.
+ static const char* arm_variants_with_div[] = {
+ "cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57",
+ "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
+ "cyclone", "denver", "krait", "swift"};
+
+ bool has_div = FindVariantInArray(arm_variants_with_div, arraysize(arm_variants_with_div),
+ variant);
+
+ // Look for variants that have LPAE support.
+ static const char* arm_variants_with_lpae[] = {
+ "cortex-a7", "cortex-a15", "krait", "denver"
+ };
+ bool has_lpae = FindVariantInArray(arm_variants_with_lpae, arraysize(arm_variants_with_lpae),
+ variant);
+
+ if (has_div == false && has_lpae == false) {
+ // Avoid unsupported variants.
+ static const char* unsupported_arm_variants[] = {
+ // ARM processors that aren't ARMv7 compatible aren't supported.
+ "arm2", "arm250", "arm3", "arm6", "arm60", "arm600", "arm610", "arm620",
+ "cortex-m0", "cortex-m0plus", "cortex-m1",
+ "fa526", "fa626", "fa606te", "fa626te", "fmp626", "fa726te",
+ "iwmmxt", "iwmmxt2",
+ "strongarm", "strongarm110", "strongarm1100", "strongarm1110",
+ "xscale"
+ };
+ if (FindVariantInArray(unsupported_arm_variants, arraysize(unsupported_arm_variants),
+ variant)) {
+ *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str());
+ return nullptr;
+ }
+ // Warn if the variant is unknown.
+ // TODO: some of the variants below may have feature support, but that support is currently
+ // unknown so we'll choose conservative (sub-optimal) defaults without warning.
+ // TODO: some of the architectures may not support all features required by ART and should be
+ // moved to unsupported_arm_variants[] above.
+ static const char* arm_variants_without_known_features[] = {
+ "default",
+ "arm7", "arm7m", "arm7d", "arm7dm", "arm7di", "arm7dmi", "arm70", "arm700", "arm700i",
+ "arm710", "arm710c", "arm7100", "arm720", "arm7500", "arm7500fe", "arm7tdmi", "arm7tdmi-s",
+ "arm710t", "arm720t", "arm740t",
+ "arm8", "arm810",
+ "arm9", "arm9e", "arm920", "arm920t", "arm922t", "arm946e-s", "arm966e-s", "arm968e-s",
+ "arm926ej-s", "arm940t", "arm9tdmi",
+ "arm10tdmi", "arm1020t", "arm1026ej-s", "arm10e", "arm1020e", "arm1022e",
+ "arm1136j-s", "arm1136jf-s",
+ "arm1156t2-s", "arm1156t2f-s", "arm1176jz-s", "arm1176jzf-s",
+ "cortex-a5", "cortex-a8", "cortex-a9", "cortex-a9-mp", "cortex-r4f",
+ "marvell-pj4", "mpcore", "mpcorenovfp"
+ };
+ if (!FindVariantInArray(arm_variants_without_known_features,
+ arraysize(arm_variants_without_known_features),
+ variant)) {
+ LOG(WARNING) << "Unknown instruction set features for ARM CPU variant (" << variant
+ << ") using conservative defaults";
+ }
+ }
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool has_div = (bitmap & kDivBitfield) != 0;
+ bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0;
+ return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
+ const bool smp = true;
+#if defined(__ARM_ARCH_EXT_IDIV__)
+ const bool has_div = true;
+#else
+ const bool has_div = false;
+#endif
+#if defined(__ARM_FEATURE_LPAE)
+ const bool has_lpae = true;
+#else
+ const bool has_lpae = false;
+#endif
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+ bool has_lpae = false;
+ bool has_div = false;
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("Features") != std::string::npos) {
+ LOG(INFO) << "found features";
+ if (line.find("idivt") != std::string::npos) {
+ // We always expect both ARM and Thumb divide instructions to be available or not
+ // available.
+ CHECK_NE(line.find("idiva"), std::string::npos);
+ has_div = true;
+ }
+ if (line.find("lpae") != std::string::npos) {
+ has_lpae = true;
+ }
+ } else if (line.find("processor") != std::string::npos &&
+ line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
+ bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
+
+ bool has_div = false;
+ bool has_lpae = false;
+
+#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+ uint64_t hwcaps = getauxval(AT_HWCAP);
+ LOG(INFO) << "hwcaps=" << hwcaps;
+ if ((hwcaps & HWCAP_IDIVT) != 0) {
+ // We always expect both ARM and Thumb divide instructions to be available or not
+ // available.
+ CHECK_NE(hwcaps & HWCAP_IDIVA, 0U);
+ has_div = true;
+ }
+ if ((hwcaps & HWCAP_LPAE) != 0) {
+ has_lpae = true;
+ }
+#endif
+
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+// A signal handler invoked when an illegal instruction faults. We record the failure in r0
+// and then advance the PC in the signal context so execution resumes at the next instruction. We
+// know the offending instruction is an sdiv (4 bytes long).
+static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
+ void* data) {
+#if defined(__arm__)
+ struct ucontext *uc = (struct ucontext *)data;
+ struct sigcontext *sc = &uc->uc_mcontext;
+ sc->arm_r0 = 0; // Set R0 to #0 to signal error.
+ sc->arm_pc += 4; // Skip offending instruction.
+#else
+ UNUSED(data);
+#endif
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
+ const bool smp = true;
+
+ // See if we have an sdiv instruction. Register a signal handler and try to execute an sdiv
+ // instruction. If we get a SIGILL then it's not supported.
+ struct sigaction sa, osa;
+ sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
+ sa.sa_sigaction = bad_divide_inst_handle;
+ sigaction(SIGILL, &sa, &osa);
+
+ bool has_div = false;
+#if defined(__arm__)
+ if (artCheckForArmSdivInstruction()) {
+ has_div = true;
+ }
+#endif
+
+ // Restore the signal handler.
+ sigaction(SIGILL, &osa, nullptr);
+
+ // Use compile time features to "detect" LPAE support.
+ // TODO: write an assembly LPAE support test.
+#if defined(__ARM_FEATURE_LPAE)
+ const bool has_lpae = true;
+#else
+ const bool has_lpae = false;
+#endif
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (kArm != other->GetInstructionSet()) {
+ return false;
+ }
+ const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
+ return IsSmp() == other_as_arm->IsSmp() &&
+ has_div_ == other_as_arm->has_div_ &&
+ has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_;
+}
+
+uint32_t ArmInstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) |
+ (has_div_ ? kDivBitfield : 0) |
+ (has_atomic_ldrd_strd_ ? kAtomicLdrdStrdBitfield : 0);
+}
+
+std::string ArmInstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (has_div_) {
+ result += ",div";
+ } else {
+ result += ",-div";
+ }
+ if (has_atomic_ldrd_strd_) {
+ result += ",atomic_ldrd_strd";
+ } else {
+ result += ",-atomic_ldrd_strd";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* ArmInstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
+ bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
+ bool has_div = has_div_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "div") {
+ has_div = true;
+ } else if (feature == "-div") {
+ has_div = false;
+ } else if (feature == "atomic_ldrd_strd") {
+ has_atomic_ldrd_strd = true;
+ } else if (feature == "-atomic_ldrd_strd") {
+ has_atomic_ldrd_strd = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+}
+
+} // namespace art
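
AsBitmap() and FromBitmap() above round-trip three booleans through the bit values 1, 2 and 4 (kSmpBitfield, kDivBitfield, kAtomicLdrdStrdBitfield, declared in the header that follows), which is why the krait/denver tests further down expect a bitmap of 7. A small hedged sketch of that encoding, using local names rather than the real class:

    #include <assert.h>
    #include <stdint.h>

    // Bit values follow kSmpBitfield, kDivBitfield and kAtomicLdrdStrdBitfield.
    enum : uint32_t { kSmpBit = 1, kDivBit = 2, kAtomicLdrdStrdBit = 4 };

    uint32_t Encode(bool smp, bool has_div, bool has_atomic_ldrd_strd) {
      return (smp ? kSmpBit : 0) | (has_div ? kDivBit : 0) | (has_atomic_ldrd_strd ? kAtomicLdrdStrdBit : 0);
    }

    int main() {
      uint32_t bitmap = Encode(true, true, true);
      assert(bitmap == 7u);             // matches the AsBitmap() expectation for krait/denver in the tests
      assert((bitmap & kDivBit) != 0);  // FromBitmap() recovers each flag with the same masks
      return 0;
    }
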
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
new file mode 100644
index 0000000..221bf1f
--- /dev/null
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_INSTRUCTION_SET_FEATURES_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_INSTRUCTION_SET_FEATURES_ARM_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the ARM architecture.
+class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const ArmInstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromHwcap();
+
+ // Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const ArmInstructionSetFeatures* FromAssembly();
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kArm;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+ // Return a string of the form "smp,div,atomic_ldrd_strd", with a "-" prefix for absent features.
+ std::string GetFeatureString() const OVERRIDE;
+
+ // Is the divide instruction feature enabled?
+ bool HasDivideInstruction() const {
+ return has_div_;
+ }
+
+ // Are the ldrd and strd instructions atomic? This is commonly true when the Large Physical
+ // Address Extension (LPAE) is present.
+ bool HasAtomicLdrdAndStrd() const {
+ return has_atomic_ldrd_strd_;
+ }
+
+ virtual ~ArmInstructionSetFeatures() {}
+
+ protected:
+ // Parse a vector of features such as "div" or "atomic_ldrd_strd", adding them to a new ArmInstructionSetFeatures.
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE;
+
+ private:
+ ArmInstructionSetFeatures(bool smp, bool has_div, bool has_atomic_ldrd_strd)
+ : InstructionSetFeatures(smp),
+ has_div_(has_div), has_atomic_ldrd_strd_(has_atomic_ldrd_strd) {
+ }
+
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kDivBitfield = 2,
+ kAtomicLdrdStrdBitfield = 4,
+ };
+
+ const bool has_div_;
+ const bool has_atomic_ldrd_strd_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArmInstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_ARM_INSTRUCTION_SET_FEATURES_ARM_H_
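
As a usage sketch for the factory methods declared above: a caller typically asks for features by CPU variant name and falls back to compile-time defines when the variant is rejected. The include path and the "krait" variant string are illustrative assumptions; error handling is deliberately minimal:

    #include <memory>
    #include <string>

    #include "arch/arm/instruction_set_features_arm.h"  // path assumed relative to runtime/

    std::unique_ptr<const art::ArmInstructionSetFeatures> DetectArmFeatures() {
      std::string error_msg;
      std::unique_ptr<const art::ArmInstructionSetFeatures> features(
          art::ArmInstructionSetFeatures::FromVariant("krait", &error_msg));
      if (features == nullptr) {
        // Unknown or unsupported variant: fall back to what the compiler was configured for.
        features.reset(art::ArmInstructionSetFeatures::FromCppDefines());
      }
      return features;
    }
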
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
new file mode 100644
index 0000000..44b1640
--- /dev/null
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
+ // Build features for a 32-bit ARM krait processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> krait_features(
+ InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
+ ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(krait_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_EQ(krait_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit ARM denver processor.
+ std::unique_ptr<const InstructionSetFeatures> denver_features(
+ InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
+ ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(denver_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit ARMv7 processor.
+ std::unique_ptr<const InstructionSetFeatures> arm7_features(
+ InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
+ ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str());
+ EXPECT_EQ(arm7_features->AsBitmap(), 1U);
+
+ // ARM6 is not a supported architecture variant.
+ std::unique_ptr<const InstructionSetFeatures> arm6_features(
+ InstructionSetFeatures::FromVariant(kArm, "arm6", &error_msg));
+ EXPECT_TRUE(arm6_features.get() == nullptr);
+ EXPECT_NE(error_msg.size(), 0U);
+}
+
+TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> base_features(
+ InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
+ ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
+
+ // Build features for a 32-bit ARM with LPAE and div processor.
+ std::unique_ptr<const InstructionSetFeatures> krait_features(
+ base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
+ ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(krait_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_EQ(krait_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit ARM processor with LPAE and div flipped.
+ std::unique_ptr<const InstructionSetFeatures> denver_features(
+ base_features->AddFeaturesFromString("div,atomic_ldrd_strd", &error_msg));
+ ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(denver_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit default ARM processor.
+ std::unique_ptr<const InstructionSetFeatures> arm7_features(
+ base_features->AddFeaturesFromString("default", &error_msg));
+ ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str());
+ EXPECT_EQ(arm7_features->AsBitmap(), 1U);
+}
+
+} // namespace art
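
The AddFeaturesFromString test above exercises the comma-separated form ("atomic_ldrd_strd,div"). The split into individual tokens happens in the shared InstructionSetFeatures base class before AddFeaturesFromSplitString() is reached; a hedged sketch of that preprocessing step, independent of the real implementation:

    #include <sstream>
    #include <string>
    #include <vector>

    // Split "atomic_ldrd_strd,div" into the vector AddFeaturesFromSplitString() consumes.
    std::vector<std::string> SplitFeatures(const std::string& spec) {
      std::vector<std::string> features;
      std::stringstream ss(spec);
      std::string feature;
      while (std::getline(ss, feature, ',')) {
        features.push_back(feature);  // each token is later Trim()med and matched, e.g. "div" or "-div"
      }
      return features;
    }
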
diff --git a/runtime/arch/arm/instruction_set_features_arm.S b/runtime/arch/arm/instruction_set_features_assembly_tests.S
similarity index 94%
rename from runtime/arch/arm/instruction_set_features_arm.S
rename to runtime/arch/arm/instruction_set_features_assembly_tests.S
index c26f2cd..c1086df 100644
--- a/runtime/arch/arm/instruction_set_features_arm.S
+++ b/runtime/arch/arm/instruction_set_features_assembly_tests.S
@@ -23,7 +23,7 @@
// caller must arrange for the signal handler to set the r0
// register to 0 and move the pc forward by 4 bytes (to skip
// the invalid instruction).
-ENTRY artCheckForARMSDIVInstruction
+ENTRY artCheckForArmSdivInstruction
mov r1,#1
// depending on the architecture, the assembler will not allow an
// sdiv instruction, so we will have to output the bytes directly.
@@ -35,4 +35,4 @@
// It will have 0 otherwise (set by the signal handler)
// the value is just returned from this function.
bx lr
-END artCheckForARMSDIVInstruction
+END artCheckForArmSdivInstruction
diff --git a/runtime/arch/arm/memcmp16_arm.S b/runtime/arch/arm/memcmp16_arm.S
index 3762194..b623a2a 100644
--- a/runtime/arch/arm/memcmp16_arm.S
+++ b/runtime/arch/arm/memcmp16_arm.S
@@ -65,7 +65,7 @@
/* save registers */
-0: stmfd sp!, {r4, lr}
+0: push {r4, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset lr, 4
@@ -79,7 +79,7 @@
sub r2, r2, #1
subs r0, r0, ip
/* restore registers and return */
- ldmnefd sp!, {r4, lr}
+ popne {r4, lr}
bxne lr
@@ -110,25 +110,25 @@
eors r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
ldreq r0, [r3], #4
ldreq lr, [r1, #4]!
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
ldreq r0, [r3], #4
ldreq ip, [r1, #4]!
- eoreqs r0, r0, lr
+ eorseq r0, r0, lr
bne 2f
subs r2, r2, #16
bhs 0b
@@ -150,18 +150,24 @@
bne 8f
/* restore registers and return */
mov r0, #0
- ldmfd sp!, {r4, lr}
+ pop {r4, lr}
+ .cfi_restore r4
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
bx lr
2: /* the last 2 words are different, restart them */
ldrh r0, [r3, #-4]
ldrh ip, [r1, #-4]
subs r0, r0, ip
- ldreqh r0, [r3, #-2]
- ldreqh ip, [r1, #-2]
- subeqs r0, r0, ip
+ ldrheq r0, [r3, #-2]
+ ldrheq ip, [r1, #-2]
+ subseq r0, r0, ip
/* restore registers and return */
- ldmfd sp!, {r4, lr}
+ pop {r4, lr}
+ .cfi_restore r4
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
bx lr
/* process the last few words */
@@ -173,7 +179,10 @@
bne 8b
9: /* restore registers and return */
- ldmfd sp!, {r4, lr}
+ pop {r4, lr}
+ .cfi_restore r4
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
bx lr
@@ -196,17 +205,17 @@
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
moveq ip, lr, lsr #16
ldreq lr, [r1], #4
ldreq r0, [r3], #4
orreq ip, ip, lr, lsl #16
- eoreqs r0, r0, ip
+ eorseq r0, r0, ip
bne 7f
subs r2, r2, #8
bhs 6b
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index a34db6c..89ac1f7 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -53,7 +53,7 @@
mov ip, #0 @ set ip to 0
str ip, [sp] @ store NULL for method* at bottom of frame
add sp, #16 @ first 4 args are not passed on stack for portable
- ldr ip, [r0, #METHOD_PORTABLE_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
ldr ip, [sp, #24] @ load the result pointer
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 3d619be..1782db5 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -27,8 +27,8 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- push {r4-r11, lr} @ 9 words of callee saves
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2
+ push {r4-r11, lr} @ 9 words (36 bytes) of callee saves.
.save {r4-r11, lr}
.cfi_adjust_cfa_offset 36
.cfi_rel_offset r4, 0
@@ -40,15 +40,20 @@
.cfi_rel_offset r10, 24
.cfi_rel_offset r11, 28
.cfi_rel_offset lr, 32
- vpush {s0-s31}
- .pad #128
- .cfi_adjust_cfa_offset 128
- sub sp, #12 @ 3 words of space, bottom word will hold Method*
+ vpush {s16-s31} @ 16 words (64 bytes) of floats.
+ .pad #64
+ .cfi_adjust_cfa_offset 64
+ sub sp, #12 @ 3 words of space, bottom word will hold Method*
.pad #12
.cfi_adjust_cfa_offset 12
+ RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
+ THIS_LOAD_REQUIRES_READ_BARRIER
+ ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*.
+ str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 36 + 128 + 12)
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 36 + 64 + 12)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM) size not as expected."
#endif
.endm
@@ -57,8 +62,8 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
-.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- push {r5-r8, r10-r11, lr} @ 7 words of callee saves
+.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME rTemp1, rTemp2
+ push {r5-r8, r10-r11, lr} @ 7 words of callee saves
.save {r5-r8, r10-r11, lr}
.cfi_adjust_cfa_offset 28
.cfi_rel_offset r5, 0
@@ -68,9 +73,14 @@
.cfi_rel_offset r10, 16
.cfi_rel_offset r11, 20
.cfi_rel_offset lr, 24
- sub sp, #4 @ bottom word will hold Method*
+ sub sp, #4 @ bottom word will hold Method*
.pad #4
.cfi_adjust_cfa_offset 4
+ RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
+ THIS_LOAD_REQUIRES_READ_BARRIER
+ ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
+ str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 28 + 4)
@@ -78,7 +88,7 @@
#endif
.endm
-.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
add sp, #4 @ bottom word holds Method*
pop {r5-r8, r10-r11, lr} @ 7 words of callee saves
.cfi_restore r5
@@ -87,19 +97,11 @@
.cfi_restore r8
.cfi_restore r10
.cfi_restore r11
- .cfi_adjust_cfa_offset -32
+ .cfi_adjust_cfa_offset -FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
.endm
-.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
- add sp, #4 @ bottom word holds Method*
- pop {r5-r8, r10-r11, lr} @ 7 words of callee saves
- .cfi_restore r5
- .cfi_restore r6
- .cfi_restore r7
- .cfi_restore r8
- .cfi_restore r10
- .cfi_restore r11
- .cfi_adjust_cfa_offset -32
+.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
bx lr @ return
.endm
@@ -107,9 +109,10 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
*/
-.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+ push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves and args.
.save {r1-r3, r5-r8, r10-r11, lr}
+ .cfi_adjust_cfa_offset 40
.cfi_rel_offset r1, 0
.cfi_rel_offset r2, 4
.cfi_rel_offset r3, 8
@@ -120,19 +123,39 @@
.cfi_rel_offset r10, 28
.cfi_rel_offset r11, 32
.cfi_rel_offset lr, 36
- .cfi_adjust_cfa_offset 40
- sub sp, #8 @ 2 words of space, bottom word will hold Method*
+ vpush {s0-s15} @ 16 words of float args.
+ .pad #64
+ .cfi_adjust_cfa_offset 64
+ sub sp, #8 @ 2 words of space, bottom word will hold Method*
.pad #8
.cfi_adjust_cfa_offset 8
-
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 40 + 8)
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 40 + 64 + 8)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM) size not as expected."
#endif
.endm
-.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+ RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
+ THIS_LOAD_REQUIRES_READ_BARRIER
+ @ rTemp1 is kRefsAndArgs Method*.
+ ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
+ str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+.endm
+
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+ str r0, [sp, #0] @ Store ArtMethod* to bottom of stack.
+ str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+.endm
+
+.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
add sp, #8 @ rewind sp
+ .cfi_adjust_cfa_offset -8
+ vpop {s0-s15}
+ .cfi_adjust_cfa_offset -64
pop {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
.cfi_restore r1
.cfi_restore r2
@@ -143,9 +166,10 @@
.cfi_restore r8
.cfi_restore r10
.cfi_restore r11
- .cfi_adjust_cfa_offset -48
+ .cfi_adjust_cfa_offset -40
.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
cbnz r0, 1f @ result non-zero branch over
bx lr @ return
@@ -165,41 +189,35 @@
.macro DELIVER_PENDING_EXCEPTION
.fnend
.fnstart
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME @ save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 @ save callee saves for throw
mov r0, r9 @ pass Thread::Current
- mov r1, sp @ pass SP
- b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*, SP)
+ b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 // save all registers as basis for long jump context
mov r0, r9 @ pass Thread::Current
- mov r1, sp @ pass SP
- b \cxx_name @ \cxx_name(Thread*, SP)
+ b \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r1, r2 // save all registers as basis for long jump context
mov r1, r9 @ pass Thread::Current
- mov r2, sp @ pass SP
- b \cxx_name @ \cxx_name(Thread*, SP)
- bkpt
+ b \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- b \cxx_name @ \cxx_name(Thread*, SP)
- bkpt
+ b \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
@@ -224,12 +242,11 @@
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- ldr r1, [sp, #32] @ pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
+ ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
@@ -237,17 +254,11 @@
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- ldr r2, [sp, #32] @ pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- bl \entrypoint @ (field_idx, Object*, referrer, Thread*, SP)
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl \entrypoint @ (field_idx, Object*, referrer, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
@@ -255,21 +266,15 @@
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- ldr r3, [sp, #32] @ pass referrer
- mov r12, sp @ save SP
- sub sp, #8 @ grow frame for alignment with stack args
- .pad #8
- .cfi_adjust_cfa_offset 8
- push {r9, r12} @ pass Thread::Current and SP
- .save {r9, r12}
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset r9, 0
- .cfi_rel_offset r12, 4
- bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
+ ldr r3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
\return
END \name
.endm
@@ -325,8 +330,8 @@
.macro INVOKE_TRAMPOLINE c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME @ save callee saves in case allocation triggers GC
- ldr r2, [sp, #48] @ pass caller Method*
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case allocation triggers GC
+ ldr r2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE] @ pass caller Method*
mov r3, r9 @ pass Thread::Current
mov r12, sp
str r12, [sp, #-16]! @ expand the frame and pass SP
@@ -336,7 +341,7 @@
add sp, #16 @ strip the extra frame
.cfi_adjust_cfa_offset -16
mov r12, r1 @ save Method*->code_
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
cbz r0, 1f @ did we find the target? if not go to exception delivery
bx r12 @ tail call to target
1:
@@ -353,60 +358,91 @@
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
- * Quick invocation stub.
+ * Quick invocation stub internal.
* On entry:
* r0 = method pointer
* r1 = argument array or NULL for no argument methods
* r2 = size of argument array in bytes
* r3 = (managed) thread pointer
* [sp] = JValue* result
- * [sp + 4] = shorty
+ * [sp + 4] = result_in_float
+ * [sp + 8] = core register argument array
+ * [sp + 12] = fp register argument array
+ * +-------------------------+
+ * | uint32_t* fp_reg_args |
+ * | uint32_t* core_reg_args |
+ * | result_in_float | <- Caller frame
+ * | JValue* result |
+ * +-------------------------+
+ * | lr |
+ * | r11 |
+ * | r9 |
+ * | r4 | <- r11
+ * +-------------------------+
+ * | uint32_t out[n-1] |
+ * | : : | Outs
+ * | uint32_t out[0] |
+ * | StackRef<ArtMethod> | <- SP value=null
+ * +-------------------------+
*/
-ENTRY art_quick_invoke_stub
- push {r0, r4, r5, r9, r11, lr} @ spill regs
- .save {r0, r4, r5, r9, r11, lr}
- .pad #24
- .cfi_adjust_cfa_offset 24
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r9, 12
- .cfi_rel_offset r11, 16
- .cfi_rel_offset lr, 20
+ENTRY art_quick_invoke_stub_internal
+ push {r4, r9, r11, lr} @ spill regs
+ .save {r4, r9, r11, lr}
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset r4, 0
+ .cfi_rel_offset r9, 4
+ .cfi_rel_offset r11, 8
+ .cfi_rel_offset lr, 12
mov r11, sp @ save the stack pointer
.cfi_def_cfa_register r11
+
mov r9, r3 @ move managed thread pointer into r9
+
+ add r4, r2, #4 @ create space for method pointer in frame
+ sub r4, sp, r4 @ reserve & align *stack* to 16 bytes: native calling
+ and r4, #0xFFFFFFF0 @ convention only aligns to 8B, so we have to ensure ART
+ mov sp, r4 @ 16B alignment ourselves.
+
+ mov r4, r0 @ save method*
+ add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
+ bl memcpy @ memcpy (dest, src, bytes)
+ mov ip, #0 @ set ip to 0
+ str ip, [sp] @ store NULL for method* at bottom of frame
+
+ ldr ip, [r11, #28] @ load fp register argument array pointer
+ vldm ip, {s0-s15} @ copy s0 - s15
+
+ ldr ip, [r11, #24] @ load core register argument array pointer
+ mov r0, r4 @ restore method*
+ add ip, ip, #4 @ skip r0
+ ldm ip, {r1-r3} @ copy r1 - r3
+
#ifdef ARM_R4_SUSPEND_FLAG
mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
#endif
- add r5, r2, #4 @ create space for method pointer in frame
- sub r5, sp, r5 @ reserve & align *stack* to 16 bytes: native calling
- and r5, #0xFFFFFFF0 @ convention only aligns to 8B, so we have to ensure ART
- mov sp, r5 @ 16B alignment ourselves.
-
- add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
- bl memcpy @ memcpy (dest, src, bytes)
- ldr r0, [r11] @ restore method*
- ldr r1, [sp, #4] @ copy arg value for r1
- ldr r2, [sp, #8] @ copy arg value for r2
- ldr r3, [sp, #12] @ copy arg value for r3
- mov ip, #0 @ set ip to 0
- str ip, [sp] @ store NULL for method* at bottom of frame
- ldr ip, [r0, #METHOD_QUICK_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
+
mov sp, r11 @ restore the stack pointer
- ldr ip, [sp, #24] @ load the result pointer
- strd r0, [ip] @ store r0/r1 into result pointer
- pop {r0, r4, r5, r9, r11, lr} @ restore spill regs
- .cfi_restore r0
+ .cfi_def_cfa_register sp
+
+ ldr r4, [sp, #20] @ load result_in_float
+ ldr r9, [sp, #16] @ load the result pointer
+ cmp r4, #0
+ ite eq
+ strdeq r0, [r9] @ store r0/r1 into result pointer
+ vstrne d0, [r9] @ store s0-s1/d0 into result pointer
+
+ pop {r4, r9, r11, lr} @ restore spill regs
.cfi_restore r4
- .cfi_restore r5
.cfi_restore r9
+ .cfi_restore r11
.cfi_restore lr
- .cfi_adjust_cfa_offset -24
+ .cfi_adjust_cfa_offset -16
bx lr
-END art_quick_invoke_stub
+END art_quick_invoke_stub_internal
/*
* On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_
@@ -437,10 +473,10 @@
cbz r0, .Lslow_lock
.Lretry_lock:
ldr r2, [r9, #THREAD_ID_OFFSET]
- ldrex r1, [r0, #LOCK_WORD_OFFSET]
+ ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
cbnz r1, .Lnot_unlocked @ already thin locked
@ unlocked case - r2 holds thread id with count of 0
- strex r3, r2, [r0, #LOCK_WORD_OFFSET]
+ strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
cbnz r3, .Lstrex_fail @ store failed, retry
dmb ish @ full (LoadLoad|LoadStore) memory barrier
bx lr
@@ -456,14 +492,13 @@
add r2, r1, #65536 @ increment count in lock word placing in r2 for storing
lsr r1, r2, 30 @ if either of the top two bits are set, we overflowed.
cbnz r1, .Lslow_lock @ if we overflow the count go slow path
- str r2, [r0, #LOCK_WORD_OFFSET] @ no need for strex as we hold the lock
+ str r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ no need for strex as we hold the lock
bx lr
.Lslow_lock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case we block
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
- mov r2, sp @ pass SP
- bl artLockObjectFromCode @ (Object* obj, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl artLockObjectFromCode @ (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_lock_object
@@ -475,7 +510,7 @@
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
cbz r0, .Lslow_unlock
- ldr r1, [r0, #LOCK_WORD_OFFSET]
+ ldr r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
lsr r2, r1, 30
cbnz r2, .Lslow_unlock @ if either of the top two bits are set, go slow path
ldr r2, [r9, #THREAD_ID_OFFSET]
@@ -486,18 +521,18 @@
bpl .Lrecursive_thin_unlock
@ transition to unlocked, r3 holds 0
dmb ish @ full (LoadStore|StoreStore) memory barrier
- str r3, [r0, #LOCK_WORD_OFFSET]
+ str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
bx lr
.Lrecursive_thin_unlock:
sub r1, r1, #65536
- str r1, [r0, #LOCK_WORD_OFFSET]
+ str r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
bx lr
.Lslow_unlock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC
+ @ save callee saves in case exception allocation triggers GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
mov r1, r9 @ pass Thread::Current
- mov r2, sp @ pass SP
- bl artUnlockObjectFromCode @ (Object* obj, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl artUnlockObjectFromCode @ (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_unlock_object
@@ -528,10 +563,9 @@
pop {r0-r1, lr}
.cfi_restore r0
.cfi_restore r1
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- b artThrowClassCastException @ (Class*, Class*, Thread*, SP)
+ b artThrowClassCastException @ (Class*, Class*, Thread*)
bkpt
END art_quick_check_cast
@@ -548,7 +582,7 @@
.hidden art_quick_aput_obj_with_bound_check
ENTRY art_quick_aput_obj_with_bound_check
- ldr r3, [r0, #ARRAY_LENGTH_OFFSET]
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]
cmp r3, r1
bhi art_quick_aput_obj
mov r0, r1
@@ -559,20 +593,20 @@
.hidden art_quick_aput_obj
ENTRY art_quick_aput_obj
cbz r2, .Ldo_aput_null
- ldr r3, [r0, #CLASS_OFFSET]
- ldr ip, [r2, #CLASS_OFFSET]
- ldr r3, [r3, #CLASS_COMPONENT_TYPE_OFFSET]
+ ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET]
+ ldr ip, [r2, #MIRROR_OBJECT_CLASS_OFFSET]
+ ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
cmp r3, ip @ value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
- add r3, r0, #OBJECT_ARRAY_DATA_OFFSET
+ add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
str r2, [r3, r1, lsl #2]
ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #7
strb r3, [r3, r0]
blx lr
.Ldo_aput_null:
- add r3, r0, #OBJECT_ARRAY_DATA_OFFSET
+ add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
str r2, [r3, r1, lsl #2]
blx lr
.Lcheck_assignability:
@@ -593,7 +627,7 @@
.cfi_restore r2
.cfi_restore lr
.cfi_adjust_cfa_offset -16
- add r3, r0, #OBJECT_ARRAY_DATA_OFFSET
+ add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
str r2, [r3, r1, lsl #2]
ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #7
@@ -606,12 +640,11 @@
.cfi_restore r2
.cfi_restore lr
.cfi_adjust_cfa_offset -16
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r3, ip
mov r1, r2
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- b artThrowArrayStoreException @ (Class*, Class*, Thread*, SP)
- bkpt @ unreached
+ mov r2, r9 @ pass Thread::Current
+ b artThrowArrayStoreException @ (Class*, Class*, Thread*)
+ bkpt @ unreached
END art_quick_aput_obj
/*
@@ -621,12 +654,11 @@
*/
.extern artInitializeStaticStorageFromCode
ENTRY art_quick_initialize_static_storage
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- @ artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+ @ artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*)
bl artInitializeStaticStorageFromCode
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_initialize_static_storage
@@ -636,12 +668,11 @@
*/
.extern artInitializeTypeFromCode
ENTRY art_quick_initialize_type
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- @ artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+ @ artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
bl artInitializeTypeFromCode
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_initialize_type
@@ -652,12 +683,11 @@
*/
.extern artInitializeTypeAndVerifyAccessFromCode
ENTRY art_quick_initialize_type_and_verify_access
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- @ artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+ @ artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*)
bl artInitializeTypeAndVerifyAccessFromCode
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_initialize_type_and_verify_access
@@ -676,13 +706,12 @@
*/
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- ldr r1, [sp, #32] @ pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+ bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ success if no exception pending
bx lr @ return on success
1:
@@ -703,18 +732,12 @@
*/
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- ldr r2, [sp, #32] @ pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP)
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
+ bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ success if no exception pending
bx lr @ return on success
1:
@@ -734,22 +757,17 @@
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
mov r3, r2 @ pass one half of wide argument
mov r2, r1 @ pass other half of wide argument
- ldr r1, [sp, #32] @ pass referrer
- mov r12, sp @ save SP
- sub sp, #8 @ grow frame for alignment with stack args
- .pad #8
- .cfi_adjust_cfa_offset 8
- push {r9, r12} @ pass Thread::Current and SP
- .save {r9, r12}
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset r9, 0
- bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*, SP)
+ ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_set64_static
@@ -766,19 +784,18 @@
*/
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
- mov r12, sp @ save SP
- sub sp, #8 @ grow frame for alignment with stack args
- .pad #8
- .cfi_adjust_cfa_offset 8
- push {r9, r12} @ pass Thread::Current and SP
- .save {r9, r12}
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset r9, 0
- bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Thread*, SP)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12, lr @ save callee saves in case of GC
+ ldr r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
+ str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
+ .pad #12
+ .cfi_adjust_cfa_offset 12
+ str r12, [sp, #-4]! @ expand the frame and pass the referrer
+ .pad #4
+ .cfi_adjust_cfa_offset 4
+ bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Method* referrer, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_set64_instance
@@ -791,12 +808,11 @@
*/
.extern artResolveStringFromCode
ENTRY art_quick_resolve_string
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- @ artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, SP)
+ @ artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*)
bl artResolveStringFromCode
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_resolve_string
@@ -805,11 +821,10 @@
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
DELIVER_PENDING_EXCEPTION
END \name
@@ -819,17 +834,11 @@
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
- .pad #16
- .cfi_adjust_cfa_offset 16
- @ (uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
+ @ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
bl \entrypoint
- add sp, #16 @ strip the extra frame
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
DELIVER_PENDING_EXCEPTION
END \name
@@ -844,25 +853,24 @@
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
#ifdef ARM_R4_SUSPEND_FLAG
- ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET]
+ ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET]
mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
cbnz r0, 1f @ check Thread::Current()->suspend_count_ == 0
bx lr @ return if suspend_count_ == 0
1:
#endif
mov r0, rSELF
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves for stack crawl
- mov r1, sp
- bl artTestSuspendFromCode @ (Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for GC stack crawl
+ @ TODO: save FPRs to enable access in the debugger?
+ bl artTestSuspendFromCode @ (Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
mov r0, rSELF
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves for stack crawl
- mov r1, sp
- bl artTestSuspendFromCode @ (Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for stack crawl
+ bl artTestSuspendFromCode @ (Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend
/*
@@ -872,19 +880,19 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- str r0, [sp, #0] @ place proxy method at bottom of frame
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- add sp, #16 @ skip r1-r3, 4 bytes padding.
- .cfi_adjust_cfa_offset -16
+ // Tear down the callee-save frame. Skip arg registers.
+ add sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
+ .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ success if no exception is pending
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ vmov d0, r0, r1 @ store into fpr, for when it's a fpr return...
bx lr @ return on success
1:
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
@@ -894,25 +902,25 @@
*/
ENTRY art_quick_imt_conflict_trampoline
ldr r0, [sp, #0] @ load caller Method*
- ldr r0, [r0, #METHOD_DEX_CACHE_METHODS_OFFSET] @ load dex_cache_resolved_methods
- add r0, #OBJECT_ARRAY_DATA_OFFSET @ get starting address of data
+ ldr r0, [r0, #MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET] @ load dex_cache_resolved_methods
+ add r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET @ get starting address of data
ldr r0, [r0, r12, lsl 2] @ load the target method
b art_quick_invoke_interface_trampoline
END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
cbz r0, 1f @ is code pointer null? goto exception
mov r12, r0
ldr r0, [sp, #0] @ load resolved method in r0
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
bx r12 @ tail-call into actual code
1:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
@@ -920,8 +928,7 @@
* Called to do a generic JNI down-call
*/
ENTRY art_quick_generic_jni_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- str r0, [sp, #0] // Store native ArtMethod* to bottom of stack.
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
// Save rSELF
mov r11, rSELF
@@ -987,20 +994,13 @@
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
cbnz r2, .Lexception_in_native
- // Tear down the callee-save frame.
- add sp, #12 @ rewind sp
- // Do not pop r0 and r1, they contain the return value.
- pop {r2-r3, r5-r8, r10-r11, lr} @ 9 words of callee saves
- .cfi_restore r2
- .cfi_restore r3
- .cfi_restore r5
- .cfi_restore r6
- .cfi_restore r7
- .cfi_restore r8
- .cfi_restore r10
- .cfi_restore r11
- .cfi_adjust_cfa_offset -48
+ // Tear down the callee-save frame. Skip arg registers.
+ add sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+ .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ // store into fpr, for when it's a fpr return...
+ vmov d0, r0, r1
bx lr // ret
.Lentry_error:
@@ -1008,23 +1008,25 @@
.cfi_def_cfa_register sp
mov r9, r11
.Lexception_in_native:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1, r2
mov r1, r9 @ pass Thread::Current
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- add sp, #16 @ skip r1-r3, 4 bytes padding.
- .cfi_adjust_cfa_offset -16
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ // Tear down the callee-save frame. Skip arg registers.
+ add sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
+ .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ success if no exception is pending
- bx lr @ return on success
+ vmov d0, r0, r1 @ store into fpr, for when it's a fpr return...
+ bx lr @ return on success
1:
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
@@ -1035,30 +1037,23 @@
.extern artInstrumentationMethodEntryFromCode
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- str r0, [sp, #4] @ preserve r0
- mov r12, sp @ remember sp
- str lr, [sp, #-16]! @ expand the frame and pass LR
- .pad #16
- .cfi_adjust_cfa_offset 16
- .cfi_rel_offset lr, 0
+ @ Make stack crawlable and clobber r2 and r3 (post saving)
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
+ @ preserve r0 (not normally an arg) knowing there is a spare slot in kRefsAndArgs.
+ str r0, [sp, #4]
mov r2, r9 @ pass Thread::Current
- mov r3, r12 @ pass SP
- blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP, LR)
- add sp, #16 @ remove out argument and padding from stack
- .cfi_adjust_cfa_offset -16
+ mov r3, lr @ pass LR
+ blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, LR)
mov r12, r0 @ r12 holds reference to code
ldr r0, [sp, #4] @ restore r0
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
blx r12 @ call method with lr set to art_quick_instrumentation_exit
-END art_quick_instrumentation_entry
+@ Deliberate fall-through into art_quick_instrumentation_exit.
.type art_quick_instrumentation_exit, #function
.global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
- .cfi_startproc
- .fnstart
mov lr, #0 @ link register is to here, so clobber with 0 for later checks
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ set up frame knowing r2 and r3 must be dead on exit
mov r12, sp @ remember bottom of caller's frame
push {r0-r1} @ save return value
.save {r0-r1}
@@ -1085,7 +1080,7 @@
add sp, #32 @ remove callee save frame
.cfi_adjust_cfa_offset -32
bx r2 @ return
-END art_quick_instrumentation_exit
+END art_quick_instrumentation_entry
/*
* Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
@@ -1093,10 +1088,9 @@
*/
.extern artDeoptimize
ENTRY art_quick_deoptimize
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
mov r0, r9 @ Set up args.
- mov r1, sp
- blx artDeoptimize @ artDeoptimize(Thread*, SP)
+ blx artDeoptimize @ artDeoptimize(Thread*)
END art_quick_deoptimize
/*
@@ -1219,9 +1213,9 @@
.cfi_rel_offset r10, 4
.cfi_rel_offset r11, 8
.cfi_rel_offset lr, 12
- ldr r3, [r0, #STRING_COUNT_OFFSET]
- ldr r12, [r0, #STRING_OFFSET_OFFSET]
- ldr r0, [r0, #STRING_VALUE_OFFSET]
+ ldr r3, [r0, #MIRROR_STRING_COUNT_OFFSET]
+ ldr r12, [r0, #MIRROR_STRING_OFFSET_OFFSET]
+ ldr r0, [r0, #MIRROR_STRING_VALUE_OFFSET]
/* Clamp start to [0..count] */
cmp r2, #0
@@ -1232,7 +1226,7 @@
movgt r2, r3
/* Build a pointer to the start of string data */
- add r0, #STRING_DATA_OFFSET
+ add r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET
add r0, r0, r12, lsl #1
/* Save a copy in r12 to later compute result */
@@ -1341,12 +1335,12 @@
.cfi_rel_offset r12, 24
.cfi_rel_offset lr, 28
- ldr r4, [r2, #STRING_OFFSET_OFFSET]
- ldr r9, [r1, #STRING_OFFSET_OFFSET]
- ldr r7, [r2, #STRING_COUNT_OFFSET]
- ldr r10, [r1, #STRING_COUNT_OFFSET]
- ldr r2, [r2, #STRING_VALUE_OFFSET]
- ldr r1, [r1, #STRING_VALUE_OFFSET]
+ ldr r4, [r2, #MIRROR_STRING_OFFSET_OFFSET]
+ ldr r9, [r1, #MIRROR_STRING_OFFSET_OFFSET]
+ ldr r7, [r2, #MIRROR_STRING_COUNT_OFFSET]
+ ldr r10, [r1, #MIRROR_STRING_COUNT_OFFSET]
+ ldr r2, [r2, #MIRROR_STRING_VALUE_OFFSET]
+ ldr r1, [r1, #MIRROR_STRING_VALUE_OFFSET]
/*
* At this point, we have:
@@ -1368,8 +1362,8 @@
* Note: data pointers point to previous element so we can use pre-index
* mode with base writeback.
*/
- add r2, #STRING_DATA_OFFSET-2 @ offset to contents[-1]
- add r1, #STRING_DATA_OFFSET-2 @ offset to contents[-1]
+ add r2, #MIRROR_CHAR_ARRAY_DATA_OFFSET-2 @ offset to contents[-1]
+ add r1, #MIRROR_CHAR_ARRAY_DATA_OFFSET-2 @ offset to contents[-1]
/*
* At this point we have:
@@ -1453,3 +1447,54 @@
.Ldone:
pop {r4, r7-r12, pc}
END art_quick_string_compareto
+
+ /* Assembly routines used to handle ABI differences. */
+
+ /* double fmod(double a, double b) */
+ .extern fmod
+ENTRY art_quick_fmod
+ push {lr}
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset lr, 0
+ sub sp, #4
+ .cfi_adjust_cfa_offset 4
+ vmov r0, r1, d0
+ vmov r2, r3, d1
+ bl fmod
+ vmov d0, r0, r1
+ add sp, #4
+ .cfi_adjust_cfa_offset -4
+ pop {pc}
+ .cfi_adjust_cfa_offset -4
+END art_quick_fmod
+
+ /* float fmodf(float a, float b) */
+ .extern fmodf
+ENTRY art_quick_fmodf
+ push {lr}
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset lr, 0
+ sub sp, #4
+ .cfi_adjust_cfa_offset 4
+ vmov r0, r1, d0
+ bl fmodf
+ vmov s0, r0
+ add sp, #4
+ .cfi_adjust_cfa_offset -4
+ pop {pc}
+ .cfi_adjust_cfa_offset -4
+END art_quick_fmodf
+
+ /* int64_t art_d2l(double d) */
+ .extern art_d2l
+ENTRY art_quick_d2l
+ vmov r0, r1, d0
+ b art_d2l
+END art_quick_d2l
+
+ /* int64_t art_f2l(float f) */
+ .extern art_f2l
+ENTRY art_quick_f2l
+ vmov r0, s0
+ b art_f2l
+END art_quick_f2l
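These four shims bridge the quick managed ABI, which keeps floating-point arguments and results in VFP registers, to C helpers invoked with the platform's softfp calling convention; the assembly only shuffles values between d0/d1 and r0-r3 around the call. A minimal C-level sketch of what art_quick_fmod amounts to (illustrative only; QuickFmodShim is a hypothetical name, the real entry point is the assembly above):

#include <cmath>

// Operands arrive from quick code in d0/d1; the assembly moves them into r0-r3 so the
// softfp fmod in libm can consume them, then moves the r0/r1 result back into d0.
extern "C" double QuickFmodShim(double a, double b) {  // hypothetical stand-in
  return std::fmod(a, b);
}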
diff --git a/runtime/arch/arm/quick_entrypoints_cc_arm.cc b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
new file mode 100644
index 0000000..e21e6c1
--- /dev/null
+++ b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mirror/art_method.h"
+#include "utils.h" // For RoundUp().
+
+namespace art {
+
+// Assembly stub that does the final part of the up-call into Java.
+extern "C" void art_quick_invoke_stub_internal(mirror::ArtMethod*, uint32_t*, uint32_t,
+ Thread* self, JValue* result, uint32_t, uint32_t*,
+ uint32_t*);
+
+template <bool kIsStatic>
+static void quick_invoke_reg_setup(mirror::ArtMethod* method, uint32_t* args, uint32_t args_size,
+ Thread* self, JValue* result, const char* shorty) {
+ // Note: We do not follow the AAPCS ABI in quick code, for either softfp or hardfp.
+ uint32_t core_reg_args[4]; // r0 ~ r3
+ uint32_t fp_reg_args[16]; // s0 ~ s15 (d0 ~ d7)
+ uint32_t gpr_index = 1; // Index into core registers. Reserve r0 for mirror::ArtMethod*.
+ uint32_t fpr_index = 0; // Index into float registers.
+ uint32_t fpr_double_index = 0; // Index into float registers for doubles.
+ uint32_t arg_index = 0; // Index into argument array.
+ const uint32_t result_in_float = kArm32QuickCodeUseSoftFloat ? 0 :
+ (shorty[0] == 'F' || shorty[0] == 'D') ? 1 : 0;
+
+ if (!kIsStatic) {
+ // Copy receiver for non-static methods.
+ core_reg_args[gpr_index++] = args[arg_index++];
+ }
+
+ for (uint32_t shorty_index = 1; shorty[shorty_index] != '\0'; ++shorty_index, ++arg_index) {
+ char arg_type = shorty[shorty_index];
+ if (kArm32QuickCodeUseSoftFloat) {
+ arg_type = (arg_type == 'D') ? 'J' : arg_type; // Regard double as long.
+ arg_type = (arg_type == 'F') ? 'I' : arg_type; // Regard float as int.
+ }
+ switch (arg_type) {
+ case 'D': {
+ // Copy double argument into fp_reg_args if there are still floating point reg arguments.
+ // Double should not overlap with float.
+ fpr_double_index = std::max(fpr_double_index, RoundUp(fpr_index, 2));
+ if (fpr_double_index < arraysize(fp_reg_args)) {
+ fp_reg_args[fpr_double_index++] = args[arg_index];
+ fp_reg_args[fpr_double_index++] = args[arg_index + 1];
+ }
+ ++arg_index;
+ break;
+ }
+ case 'F':
+ // Copy float argument into fp_reg_args if there are still floating point reg arguments.
+ // If fpr_index is odd then it's pointing at a hole next to an existing float argument. If we
+ // encounter a float argument then pick it up from that hole. In the case fpr_index is even,
+ // ensure that we don't pick up an argument that overlaps with a double from
+ // fpr_double_index. In either case, take care not to go beyond the maximum number of
+ // floating point arguments.
+ if (fpr_index % 2 == 0) {
+ fpr_index = std::max(fpr_double_index, fpr_index);
+ }
+ if (fpr_index < arraysize(fp_reg_args)) {
+ fp_reg_args[fpr_index++] = args[arg_index];
+ }
+ break;
+ case 'J':
+ if (gpr_index < arraysize(core_reg_args)) {
+ core_reg_args[gpr_index++] = args[arg_index];
+ }
+ ++arg_index;
+ FALLTHROUGH_INTENDED; // Fall-through to take care of the high part.
+ default:
+ if (gpr_index < arraysize(core_reg_args)) {
+ core_reg_args[gpr_index++] = args[arg_index];
+ }
+ break;
+ }
+ }
+
+ art_quick_invoke_stub_internal(method, args, args_size, self, result, result_in_float,
+ core_reg_args, fp_reg_args);
+}
+
+// Called by art::mirror::ArtMethod::Invoke to do entry into a non-static method.
+// TODO: migrate into an assembly implementation as with ARM64.
+extern "C" void art_quick_invoke_stub(mirror::ArtMethod* method, uint32_t* args, uint32_t args_size,
+ Thread* self, JValue* result, const char* shorty) {
+ quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty);
+}
+
+// Called by art::mirror::ArtMethod::Invoke to do entry into a static method.
+// TODO: migrate into an assembly implementation as with ARM64.
+extern "C" void art_quick_invoke_static_stub(mirror::ArtMethod* method, uint32_t* args,
+ uint32_t args_size, Thread* self, JValue* result,
+ const char* shorty) {
+ quick_invoke_reg_setup<true>(method, args, args_size, self, result, shorty);
+}
+
+} // namespace art
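The float/double slotting above is the subtle part of the hardfp path: doubles are aligned to even S-register pairs and must not overlap floats, while a lone float may backfill the hole that such alignment leaves behind. The following stand-alone sketch models just that rule (my own model for illustration, assuming kArm32QuickCodeUseSoftFloat is false; it is not ART code):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Returns the first S-register index used by each 'F'/'D' argument, or -1 if it spills
// to the stack, mirroring the fpr_index/fpr_double_index bookkeeping above.
static std::vector<int> FpSlots(const std::string& fp_arg_types) {
  uint32_t fpr_index = 0;         // next single-precision slot
  uint32_t fpr_double_index = 0;  // next even slot usable by a double
  std::vector<int> slots;
  for (char c : fp_arg_types) {
    if (c == 'D') {
      fpr_double_index = std::max<uint32_t>(fpr_double_index, (fpr_index + 1) & ~1u);
      slots.push_back(fpr_double_index < 16 ? static_cast<int>(fpr_double_index) : -1);
      if (fpr_double_index < 16) fpr_double_index += 2;
    } else if (c == 'F') {
      if (fpr_index % 2 == 0) fpr_index = std::max(fpr_double_index, fpr_index);
      slots.push_back(fpr_index < 16 ? static_cast<int>(fpr_index) : -1);
      if (fpr_index < 16) ++fpr_index;
    }
  }
  return slots;
}

int main() {
  // float -> s0, double -> s2/s3 (d1), second float backfills the s1 hole.
  std::vector<int> s = FpSlots("FDF");
  assert(s[0] == 0 && s[1] == 2 && s[2] == 1);
  return 0;
}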
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 7595e94..c1f3fc2 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -25,6 +25,8 @@
namespace art {
namespace arm {
+static constexpr uint32_t kArmCalleeSaveAlwaysSpills =
+ (1 << art::arm::LR);
static constexpr uint32_t kArmCalleeSaveRefSpills =
(1 << art::arm::R5) | (1 << art::arm::R6) | (1 << art::arm::R7) | (1 << art::arm::R8) |
(1 << art::arm::R10) | (1 << art::arm::R11);
@@ -32,23 +34,30 @@
(1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
static constexpr uint32_t kArmCalleeSaveAllSpills =
(1 << art::arm::R4) | (1 << art::arm::R9);
-static constexpr uint32_t kArmCalleeSaveFpAllSpills =
+
+static constexpr uint32_t kArmCalleeSaveFpAlwaysSpills = 0;
+static constexpr uint32_t kArmCalleeSaveFpRefSpills = 0;
+static constexpr uint32_t kArmCalleeSaveFpArgSpills =
(1 << art::arm::S0) | (1 << art::arm::S1) | (1 << art::arm::S2) | (1 << art::arm::S3) |
(1 << art::arm::S4) | (1 << art::arm::S5) | (1 << art::arm::S6) | (1 << art::arm::S7) |
(1 << art::arm::S8) | (1 << art::arm::S9) | (1 << art::arm::S10) | (1 << art::arm::S11) |
- (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15) |
+ (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15);
+static constexpr uint32_t kArmCalleeSaveFpAllSpills =
(1 << art::arm::S16) | (1 << art::arm::S17) | (1 << art::arm::S18) | (1 << art::arm::S19) |
(1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
(1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
(1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
constexpr uint32_t ArmCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kArmCalleeSaveRefSpills | (type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0) | (1 << art::arm::LR);
+ return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
+ (type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0);
}
constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
- return type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0;
+ return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
+ (type == Runtime::kRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
+ (type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0);
}
constexpr uint32_t ArmCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
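The hunk ends before the body of ArmCalleeSaveFrameSize, but the constants it must produce are pinned down by the compile-time checks added to quick_entrypoints_arm.S: 28 + 4 for kRefsOnly, 40 + 64 + 8 for kRefsAndArgs, and 36 + 64 + 12 for kSaveAll. A hedged sketch of that arithmetic, assuming the usual popcount-and-round-to-16 formula and that each Rn/Sn enum maps to bit n:

#include <bitset>
#include <cassert>
#include <cstdint>

// Frame = (spilled GPRs + spilled FPRs + one Method* slot) * 4 bytes, 16-byte aligned.
static uint32_t FrameSize(uint32_t core_spills, uint32_t fp_spills) {
  uint32_t words = static_cast<uint32_t>(std::bitset<32>(core_spills).count() +
                                         std::bitset<32>(fp_spills).count()) + 1;
  return (words * 4 + 15u) & ~15u;
}

int main() {
  const uint32_t lr      = 1u << 14;                   // kArmCalleeSaveAlwaysSpills
  const uint32_t refs    = 0b110111100000u;            // r5-r8, r10, r11
  const uint32_t args    = 0b1110u;                    // r1-r3
  const uint32_t all     = (1u << 4) | (1u << 9);      // r4, r9
  const uint32_t s0_s15  = 0x0000FFFFu;                // FP arg spills
  const uint32_t s16_s31 = 0xFFFF0000u;                // FP all spills
  assert(FrameSize(lr | refs, 0) == 32);               // kRefsOnly:    28 + 4
  assert(FrameSize(lr | refs | args, s0_s15) == 112);  // kRefsAndArgs: 40 + 64 + 8
  assert(FrameSize(lr | refs | all, s16_s31) == 112);  // kSaveAll:     36 + 64 + 12
  return 0;
}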
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index a926449..989ecc6 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -19,30 +19,8 @@
#include "asm_support.h"
-// Note: these callee save methods loads require read barriers.
-// Offset of field Runtime::callee_save_methods_[kSaveAll] verified in InitCpu
-#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
-// Offset of field Runtime::callee_save_methods_[kRefsOnly] verified in InitCpu
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
-// Offset of field Runtime::callee_save_methods_[kRefsAndArgs] verified in InitCpu
-#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
-
-// Offset of field Thread::suspend_count_
-#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::card_table_
-#define THREAD_CARD_TABLE_OFFSET 120
-// Offset of field Thread::exception_
-#define THREAD_EXCEPTION_OFFSET 128
-// Offset of field Thread::thin_lock_thread_id_
-#define THREAD_ID_OFFSET 12
-
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 96
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 224
-// Expected size of a heap reference
-#define HEAP_REFERENCE_SIZE 4
-// Expected size of a stack reference
-#define STACK_REFERENCE_SIZE 4
-
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 6aacda4..0a31480 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -19,11 +19,8 @@
#include "context_arm64.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
-#include "thread.h"
-
+#include "utils.h"
namespace art {
namespace arm64 {
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 7b6aac9..d9a433b 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -34,7 +34,7 @@
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE;
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
bool success = SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 871e1d1..2d26c03 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -17,6 +17,8 @@
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -25,95 +27,16 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
// Single-precision FP arithmetics.
extern "C" float art_quick_fmodf(float a, float b); // REM_FLOAT[_2ADDR]
// Double-precision FP arithmetics.
-extern "C" double art_quick_fmod(double a, double b); // REM_DOUBLE[_2ADDR]
+extern "C" double art_quick_fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-// Memcpy
-extern "C" void* art_quick_memcpy(void* __restrict, const void* __restrict, size_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 687d232..c914d85 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -37,7 +37,8 @@
namespace art {
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// To match the case used in ARM we return directly to the longjmp function
// rather than through a trivial assembly language stub.
@@ -51,7 +52,7 @@
sc->pc = reinterpret_cast<uintptr_t>(longjmp);
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
@@ -82,7 +83,8 @@
*out_return_pc = sc->pc + 4;
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// The code that looks for the catch location needs to know the value of the
// PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault.
@@ -105,7 +107,8 @@
// The offset from r18 is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault (at PC-4 and PC).
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// These are the instructions to check for. The first one is the ldr x0,[r18,#xxx]
// where xxx is the offset of the suspend trigger.
uint32_t checkinst1 = 0xf9400240 | (Thread::ThreadSuspendTriggerOffset<8>().Int32Value() << 7);
@@ -155,7 +158,8 @@
return false;
}
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
new file mode 100644
index 0000000..a1270dc
--- /dev/null
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm64.h"
+
+#include <fstream>
+#include <sstream>
+
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+namespace art {
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
+ const std::string& variant, std::string* error_msg) {
+ const bool smp = true; // Conservative default.
+
+ // Look for variants that need a fix for a53 erratum 835769.
+ static const char* arm64_variants_with_a53_835769_bug[] = {
+ "default", "generic" // Pessimistically assume all generic ARM64s are A53s.
+ };
+ bool needs_a53_835769_fix = FindVariantInArray(arm64_variants_with_a53_835769_bug,
+ arraysize(arm64_variants_with_a53_835769_bug),
+ variant);
+
+ if (!needs_a53_835769_fix) {
+ // Check to see if this is an expected variant.
+ static const char* arm64_known_variants[] = {
+ "denver64"
+ };
+ if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) {
+ std::ostringstream os;
+ os << "Unexpected CPU variant for Arm64: " << variant;
+ *error_msg = os.str();
+ return nullptr;
+ }
+ }
+ return new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool is_a53 = (bitmap & kA53Bitfield) != 0;
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCppDefines() {
+ const bool smp = true;
+ const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCpuInfo() {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+ const bool is_a53 = true; // Conservative default.
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("processor") != std::string::npos && line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromHwcap() {
+ bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
+ const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromAssembly() {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines();
+}
+
+bool Arm64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (kArm64 != other->GetInstructionSet()) {
+ return false;
+ }
+ const Arm64InstructionSetFeatures* other_as_arm = other->AsArm64InstructionSetFeatures();
+ return fix_cortex_a53_835769_ == other_as_arm->fix_cortex_a53_835769_;
+}
+
+uint32_t Arm64InstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) | (fix_cortex_a53_835769_ ? kA53Bitfield : 0);
+}
+
+std::string Arm64InstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (fix_cortex_a53_835769_) {
+ result += ",a53";
+ } else {
+ result += ",-a53";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
+ bool is_a53 = fix_cortex_a53_835769_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "a53") {
+ is_a53 = true;
+ } else if (feature == "-a53") {
+ is_a53 = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+} // namespace art
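
A minimal usage sketch of the new ARM64 feature class (not part of the patch itself; the wrapper function name and include path are illustrative), relying only on the factory and accessors declared in the header that follows:

    #include <memory>
    #include <string>

    #include "arch/arm64/instruction_set_features_arm64.h"  // Illustrative include path.

    bool Arm64NeedsErratumFix() {
      std::string error_msg;
      // "default" is in the pessimistic list above, so the A53 835769 fix is enabled.
      std::unique_ptr<const art::Arm64InstructionSetFeatures> features(
          art::Arm64InstructionSetFeatures::FromVariant("default", &error_msg));
      if (features.get() == nullptr) {
        // Unknown variants (anything other than "default", "generic" or "denver64") fail here.
        LOG(ERROR) << error_msg;
        return false;
      }
      // For "default": GetFeatureString() == "smp,a53" and AsBitmap() == 3 (kSmpBitfield | kA53Bitfield).
      return features->NeedFixCortexA53_835769();
    }
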
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
new file mode 100644
index 0000000..b0c66b3
--- /dev/null
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM64_INSTRUCTION_SET_FEATURES_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_INSTRUCTION_SET_FEATURES_ARM64_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the ARM64 architecture.
+class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "default" or "denver64" and create InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const Arm64InstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromHwcap();
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const Arm64InstructionSetFeatures* FromAssembly();
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kArm64;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+ // Return a string of the form "smp,a53" or "smp,-a53".
+ std::string GetFeatureString() const OVERRIDE;
+
+ // Generate code addressing Cortex-A53 erratum 835769?
+ bool NeedFixCortexA53_835769() const {
+ return fix_cortex_a53_835769_;
+ }
+
+ virtual ~Arm64InstructionSetFeatures() {}
+
+ protected:
+ // Parse a vector of the form "a53" adding these to a new Arm64InstructionSetFeatures.
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE;
+
+ private:
+ explicit Arm64InstructionSetFeatures(bool smp, bool needs_a53_835769_fix)
+ : InstructionSetFeatures(smp), fix_cortex_a53_835769_(needs_a53_835769_fix) {
+ }
+
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kA53Bitfield = 2,
+ };
+
+ const bool fix_cortex_a53_835769_;
+
+ DISALLOW_COPY_AND_ASSIGN(Arm64InstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_ARM64_INSTRUCTION_SET_FEATURES_ARM64_H_
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
new file mode 100644
index 0000000..027e59c
--- /dev/null
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm64.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(Arm64InstructionSetFeaturesTest, Arm64Features) {
+ // Build features for an ARM64 processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> arm64_features(
+ InstructionSetFeatures::FromVariant(kArm64, "default", &error_msg));
+ ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(arm64_features->GetInstructionSet(), kArm64);
+ EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
+ EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
+ EXPECT_EQ(arm64_features->AsBitmap(), 3U);
+}
+
+} // namespace art
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ab9bf2d..4415935 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -79,13 +79,16 @@
// Loads appropriate callee-save-method
str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ // Place sp in Thread::Current()->top_quick_frame.
+ mov xIP0, sp
+ str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
-.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
@@ -133,11 +136,14 @@
mov xETR, xSELF
// Loads appropriate callee-save-method
- str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly]
+ // Place sp in Thread::Current()->top_quick_frame.
+ mov xIP0, sp
+ str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
// TODO: Probably no need to restore registers preserved by aapcs64.
-.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
// Restore xSELF.
mov xSELF, xETR
@@ -170,7 +176,7 @@
.cfi_adjust_cfa_offset -96
.endm
-.macro POP_REF_ONLY_CALLEE_SAVE_FRAME
+.macro POP_REFS_ONLY_CALLEE_SAVE_FRAME
// Restore xSELF as it might be scratched.
mov xSELF, xETR
// ETR
@@ -181,13 +187,13 @@
.cfi_adjust_cfa_offset -96
.endm
-.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
ret
.endm
-.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
sub sp, sp, #224
.cfi_adjust_cfa_offset 224
@@ -251,7 +257,7 @@
*
* TODO This is probably too conservative - saving FP & LR.
*/
-.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
@@ -260,15 +266,26 @@
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr xIP0, [xIP0, RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ // Place sp in Thread::Current()->top_quick_frame.
+ mov xIP0, sp
+ str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
+.endm
+
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+ str x0, [sp, #0] // Store ArtMethod* to bottom of stack.
+ // Place sp in Thread::Current()->top_quick_frame.
+ mov xIP0, sp
+ str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
// TODO: Probably no need to restore registers preserved by aapcs64.
-.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
// Restore xSELF.
mov xSELF, xETR
@@ -340,10 +357,9 @@
.macro DELIVER_PENDING_EXCEPTION
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
mov x0, xSELF
- mov x1, sp
// Point of no return.
- b artDeliverPendingExceptionFromCode // artDeliverPendingExceptionFromCode(Thread*, SP)
+ b artDeliverPendingExceptionFromCode // artDeliverPendingExceptionFromCode(Thread*)
brk 0 // Unreached
.endm
@@ -376,8 +392,7 @@
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov x0, xSELF // pass Thread::Current
- mov x1, sp // pass SP
- b \cxx_name // \cxx_name(Thread*, SP)
+ b \cxx_name // \cxx_name(Thread*)
END \c_name
.endm
@@ -386,8 +401,7 @@
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context.
mov x1, xSELF // pass Thread::Current.
- mov x2, sp // pass SP.
- b \cxx_name // \cxx_name(arg, Thread*, SP).
+ b \cxx_name // \cxx_name(arg, Thread*).
brk 0
END \c_name
.endm
@@ -397,8 +411,7 @@
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
- mov x3, sp // pass SP
- b \cxx_name // \cxx_name(arg1, arg2, Thread*, SP)
+ b \cxx_name // \cxx_name(arg1, arg2, Thread*)
brk 0
END \c_name
.endm
@@ -458,7 +471,7 @@
.macro INVOKE_TRAMPOLINE c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
@@ -467,7 +480,7 @@
mov x4, sp
bl \cxx_name // (method_idx, this, caller, Thread*, SP)
mov xIP0, x1 // save Method*->code_
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
cbz x0, 1f // did we find the target? if not go to exception delivery
br xIP0 // tail call to target
1:
@@ -551,7 +564,7 @@
.macro INVOKE_STUB_CALL_AND_RETURN
// load method-> METHOD_QUICK_CODE_OFFSET
- ldr x9, [x0 , #METHOD_QUICK_CODE_OFFSET]
+ ldr x9, [x0 , #MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64]
// Branch to method.
blr x9
@@ -945,7 +958,7 @@
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
cbz w0, .Lslow_lock
- add x4, x0, #LOCK_WORD_OFFSET // exclusive load/store had no immediate anymore
+ add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
.Lretry_lock:
ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
ldxr w1, [x4]
@@ -966,14 +979,13 @@
add w2, w1, #65536 // increment count in lock word placing in w2 for storing
lsr w1, w2, 30 // if either of the top two bits are set, we overflowed.
cbnz w1, .Lslow_lock // if we overflow the count go slow path
- str w2, [x0, #LOCK_WORD_OFFSET]// no need for stxr as we hold the lock
+ str w2, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] // no need for stxr as we hold the lock
ret
.Lslow_lock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block
mov x1, xSELF // pass Thread::Current
- mov x2, sp // pass SP
- bl artLockObjectFromCode // (Object* obj, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl artLockObjectFromCode // (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object
@@ -986,7 +998,7 @@
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
cbz x0, .Lslow_unlock
- ldr w1, [x0, #LOCK_WORD_OFFSET]
+ ldr w1, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
lsr w2, w1, 30
cbnz w2, .Lslow_unlock // if either of the top two bits are set, go slow path
ldr w2, [xSELF, #THREAD_ID_OFFSET]
@@ -997,18 +1009,17 @@
bpl .Lrecursive_thin_unlock
// transition to unlocked, w3 holds 0
dmb ish // full (LoadStore|StoreStore) memory barrier
- str w3, [x0, #LOCK_WORD_OFFSET]
+ str w3, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
ret
.Lrecursive_thin_unlock:
sub w1, w1, #65536
- str w1, [x0, #LOCK_WORD_OFFSET]
+ str w1, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
ret
.Lslow_unlock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
- mov x2, sp // pass SP
- bl artUnlockObjectFromCode // (Object* obj, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl artUnlockObjectFromCode // (Object* obj, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object
@@ -1058,8 +1069,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
- mov x3, sp // pass SP
- b artThrowClassCastException // (Class*, Class*, Thread*, SP)
+ b artThrowClassCastException // (Class*, Class*, Thread*)
brk 0 // We should not return here...
END art_quick_check_cast
@@ -1082,7 +1092,7 @@
END art_quick_aput_obj_with_null_and_bound_check
ENTRY art_quick_aput_obj_with_bound_check
- ldr w3, [x0, #ARRAY_LENGTH_OFFSET]
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]
cmp w3, w1
bhi art_quick_aput_obj
mov x0, x1
@@ -1092,16 +1102,16 @@
ENTRY art_quick_aput_obj
cbz x2, .Ldo_aput_null
- ldr w3, [x0, #CLASS_OFFSET] // Heap reference = 32b
+ ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
// This also zero-extends to x3
- ldr w4, [x2, #CLASS_OFFSET] // Heap reference = 32b
+ ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
// This also zero-extends to x4
- ldr w3, [x3, #CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b
+ ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b
// This also zero-extends to x3
cmp w3, w4 // value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
- add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
+ add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
str w2, [x3, x1, lsl #2] // Heap reference = 32b
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
@@ -1109,7 +1119,7 @@
strb w3, [x3, x0]
ret
.Ldo_aput_null:
- add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
+ add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
str w2, [x3, x1, lsl #2] // Heap reference = 32b
ret
@@ -1146,7 +1156,7 @@
add sp, sp, #48
.cfi_adjust_cfa_offset -48
- add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
+ add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
str w2, [x3, x1, lsl #2] // Heap reference = 32b
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
@@ -1168,8 +1178,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
mov x1, x2 // Pass value.
mov x2, xSELF // Pass Thread::Current.
- mov x3, sp // Pass SP.
- b artThrowArrayStoreException // (Object*, Object*, Thread*, SP).
+ b artThrowArrayStoreException // (Object*, Object*, Thread*).
brk 0 // Unreached.
END art_quick_aput_obj
@@ -1177,11 +1186,10 @@
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x2, xSELF // pass Thread::Current
- mov x3, sp // pass SP
- bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
DELIVER_PENDING_EXCEPTION
END \name
@@ -1191,11 +1199,10 @@
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x3, xSELF // pass Thread::Current
- mov x4, sp // pass SP
bl \entrypoint
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
DELIVER_PENDING_EXCEPTION
END \name
@@ -1205,12 +1212,11 @@
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, xSELF // pass Thread::Current
- mov x3, sp // pass SP
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
@@ -1218,12 +1224,11 @@
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
ldr w2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x3, xSELF // pass Thread::Current
- mov x4, sp // pass SP
bl \entrypoint
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
@@ -1231,12 +1236,11 @@
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
ldr w3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x4, xSELF // pass Thread::Current
- mov x5, sp // pass SP
bl \entrypoint
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
@@ -1281,20 +1285,19 @@
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
// This is separated out as the argument order is different.
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x3, x1 // Store value
ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, x3 // Put value param
mov x3, xSELF // pass Thread::Current
- mov x4, sp // pass SP
bl artSet64StaticFromCode
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static
@@ -1320,18 +1323,16 @@
ret // return if flags == 0
.Lneed_suspend:
mov x0, xSELF
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
- mov x1, sp
- bl artTestSuspendFromCode // (Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
+ bl artTestSuspendFromCode // (Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
mov x0, xSELF
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
- mov x1, sp
- bl artTestSuspendFromCode // (Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
+ bl artTestSuspendFromCode // (Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend
/*
@@ -1341,19 +1342,18 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- str x0, [sp, #0] // place proxy method at bottom of frame
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
// Use xETR as xSELF might be scratched by native function above.
ldr x2, [xETR, THREAD_EXCEPTION_OFFSET]
cbnz x2, .Lexception_in_proxy // success if no exception is pending
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
fmov d0, x0 // Store result in d0 in case it was float or double
ret // return on success
.Lexception_in_proxy:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
@@ -1363,24 +1363,24 @@
*/
ENTRY art_quick_imt_conflict_trampoline
ldr w0, [sp, #0] // load caller Method*
- ldr w0, [x0, #METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
- add x0, x0, #OBJECT_ARRAY_DATA_OFFSET // get starting address of data
+ ldr w0, [x0, #MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
+ add x0, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET // get starting address of data
ldr w0, [x0, xIP1, lsl 2] // load the target method
b art_quick_invoke_interface_trampoline
END art_quick_imt_conflict_trampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
mov x2, xSELF
mov x3, sp
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f
mov xIP0, x0 // Remember returned code pointer in xIP0.
ldr w0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
br xIP0
1:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
@@ -1439,8 +1439,7 @@
* Called to do a generic JNI down-call
*/
ENTRY art_quick_generic_jni_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
- str x0, [sp, #0] // Store native ArtMethod* to bottom of stack.
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
// Save SP , so we can have static CFI info.
mov x28, sp
@@ -1513,7 +1512,7 @@
cbnz x1, .Lexception_in_native
// Tear down the callee-save frame.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
// store into fpr, for when it's a fpr return...
fmov d0, x0
@@ -1523,7 +1522,7 @@
mov sp, x28
.cfi_def_cfa_register sp
.Lexception_in_native:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
@@ -1535,7 +1534,7 @@
* x1..x7, d0..d7 = arguments to that method.
*/
ENTRY art_quick_to_interpreter_bridge
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
// x0 will contain mirror::ArtMethod* method.
mov x1, xSELF // How to get Thread::Current() ???
@@ -1545,7 +1544,7 @@
// mirror::ArtMethod** sp)
bl artQuickToInterpreterBridge
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
fmov d0, x0
@@ -1558,19 +1557,18 @@
//
.extern artInstrumentationMethodEntryFromCode
ENTRY art_quick_instrumentation_entry
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
mov x20, x0 // Preserve method reference in a callee-save.
mov x2, xSELF
- mov x3, sp
- mov x4, xLR
- bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, SP, LR)
+ mov x3, xLR
+ bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, LR)
mov xIP0, x0 // x0 = result of call.
mov x0, x20 // Reload method reference.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // Note: will restore xSELF
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Note: will restore xSELF
adr xLR, art_quick_instrumentation_exit
br xIP0 // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry
@@ -1579,7 +1577,7 @@
ENTRY art_quick_instrumentation_exit
mov xLR, #0 // Clobber LR for later checks.
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// We need to save x0 and d0. We could use a callee-save from SETUP_REF_ONLY, but then
// we would need to fully restore it. As there are a lot of callee-save registers, it seems
@@ -1602,7 +1600,7 @@
ldr x0, [sp], 16 // Restore integer result, and drop stack area.
.cfi_adjust_cfa_offset 16
- POP_REF_ONLY_CALLEE_SAVE_FRAME
+ POP_REFS_ONLY_CALLEE_SAVE_FRAME
br xIP0 // Tail-call out.
END art_quick_instrumentation_exit
@@ -1615,8 +1613,7 @@
ENTRY art_quick_deoptimize
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
mov x0, xSELF // Pass thread.
- mov x1, sp // Pass SP.
- bl artDeoptimize // artDeoptimize(Thread*, SP)
+ bl artDeoptimize // artDeoptimize(Thread*)
brk 0
END art_quick_deoptimize
@@ -1631,9 +1628,9 @@
* w2: Starting offset in string data
*/
ENTRY art_quick_indexof
- ldr w3, [x0, #STRING_COUNT_OFFSET]
- ldr w4, [x0, #STRING_OFFSET_OFFSET]
- ldr w0, [x0, #STRING_VALUE_OFFSET] // x0 ?
+ ldr w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
+ ldr w4, [x0, #MIRROR_STRING_OFFSET_OFFSET]
+ ldr w0, [x0, #MIRROR_STRING_VALUE_OFFSET] // x0 ?
/* Clamp start to [0..count] */
cmp w2, #0
@@ -1642,7 +1639,7 @@
csel w2, w3, w2, gt
/* Build a pointer to the start of the string data */
- add x0, x0, #STRING_DATA_OFFSET
+ add x0, x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET
add x0, x0, x4, lsl #1
/* Save a copy to compute result */
@@ -1736,12 +1733,12 @@
ret
1: // Different string objects.
- ldr w6, [x2, #STRING_OFFSET_OFFSET]
- ldr w5, [x1, #STRING_OFFSET_OFFSET]
- ldr w4, [x2, #STRING_COUNT_OFFSET]
- ldr w3, [x1, #STRING_COUNT_OFFSET]
- ldr w2, [x2, #STRING_VALUE_OFFSET]
- ldr w1, [x1, #STRING_VALUE_OFFSET]
+ ldr w6, [x2, #MIRROR_STRING_OFFSET_OFFSET]
+ ldr w5, [x1, #MIRROR_STRING_OFFSET_OFFSET]
+ ldr w4, [x2, #MIRROR_STRING_COUNT_OFFSET]
+ ldr w3, [x1, #MIRROR_STRING_COUNT_OFFSET]
+ ldr w2, [x2, #MIRROR_STRING_VALUE_OFFSET]
+ ldr w1, [x1, #MIRROR_STRING_VALUE_OFFSET]
/*
* Now: CharArray* Offset Count
@@ -1761,8 +1758,8 @@
add x1, x1, w5, sxtw #1
// Add offset in CharArray to array.
- add x2, x2, #STRING_DATA_OFFSET
- add x1, x1, #STRING_DATA_OFFSET
+ add x2, x2, #MIRROR_CHAR_ARRAY_DATA_OFFSET
+ add x1, x1, #MIRROR_CHAR_ARRAY_DATA_OFFSET
// TODO: Tune this value.
// Check for long string, do memcmp16 for them.
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index 15c6c07..0e1e32b 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -54,7 +54,7 @@
(1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
(1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
(1 << art::arm64::D6) | (1 << art::arm64::D7);
-static constexpr uint32_t kArm64FpAllSpills =
+static constexpr uint32_t kArm64CalleeSaveFpAllSpills =
(1 << art::arm64::D8) | (1 << art::arm64::D9) | (1 << art::arm64::D10) |
(1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
(1 << art::arm64::D14) | (1 << art::arm64::D15);
@@ -68,7 +68,7 @@
constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kArm64FpAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveFpAllSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/instruction_set.cc b/runtime/arch/instruction_set.cc
new file mode 100644
index 0000000..92fa727
--- /dev/null
+++ b/runtime/arch/instruction_set.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set.h"
+
+#include "globals.h"
+
+namespace art {
+
+const char* GetInstructionSetString(const InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ return "arm";
+ case kArm64:
+ return "arm64";
+ case kX86:
+ return "x86";
+ case kX86_64:
+ return "x86_64";
+ case kMips:
+ return "mips";
+ case kMips64:
+ return "mips64";
+ case kNone:
+ return "none";
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+InstructionSet GetInstructionSetFromString(const char* isa_str) {
+ CHECK(isa_str != nullptr);
+
+ if (strcmp("arm", isa_str) == 0) {
+ return kArm;
+ } else if (strcmp("arm64", isa_str) == 0) {
+ return kArm64;
+ } else if (strcmp("x86", isa_str) == 0) {
+ return kX86;
+ } else if (strcmp("x86_64", isa_str) == 0) {
+ return kX86_64;
+ } else if (strcmp("mips", isa_str) == 0) {
+ return kMips;
+ } else if (strcmp("mips64", isa_str) == 0) {
+ return kMips64;
+ }
+
+ return kNone;
+}
+
+size_t GetInstructionSetAlignment(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return kArmAlignment;
+ case kArm64:
+ return kArm64Alignment;
+ case kX86:
+ // Fall-through.
+ case kX86_64:
+ return kX86Alignment;
+ case kMips:
+ return kMipsAlignment;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have alignment.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
+static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
+
+static constexpr size_t kArmStackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kArm64StackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kX86StackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kX86_64StackOverflowReservedBytes = 8 * KB;
+
+size_t GetStackOverflowReservedBytes(InstructionSet isa) {
+ switch (isa) {
+ case kArm: // Intentional fall-through.
+ case kThumb2:
+ return kArmStackOverflowReservedBytes;
+
+ case kArm64:
+ return kArm64StackOverflowReservedBytes;
+
+ case kMips:
+ return kMipsStackOverflowReservedBytes;
+
+ case kX86:
+ return kX86StackOverflowReservedBytes;
+
+ case kX86_64:
+ return kX86_64StackOverflowReservedBytes;
+
+ case kNone:
+ LOG(FATAL) << "kNone has no stack overflow size";
+ UNREACHABLE();
+
+ default:
+ LOG(FATAL) << "Unknown instruction set" << isa;
+ UNREACHABLE();
+ }
+}
+
+} // namespace art
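
A short sketch of how these lookup helpers combine; the values in the comments follow from the switches above and the constants in the header below, and the wrapper function is illustrative:

    #include "arch/instruction_set.h"  // Illustrative include path.

    void DescribeIsa() {
      art::InstructionSet isa = art::GetInstructionSetFromString("arm64");  // kArm64; unknown names give kNone.
      const char* name = art::GetInstructionSetString(isa);                 // "arm64" (kThumb2 also prints "arm").
      size_t pointer_size = art::GetInstructionSetPointerSize(isa);         // 8.
      size_t alignment = art::GetInstructionSetAlignment(isa);              // kArm64Alignment == 16.
      size_t reserved = art::GetStackOverflowReservedBytes(isa);            // 8 * KB.
      LOG(INFO) << name << ": pointer " << pointer_size << ", alignment " << alignment
                << ", stack overflow gap " << reserved;
    }
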
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
new file mode 100644
index 0000000..e413880
--- /dev/null
+++ b/runtime/arch/instruction_set.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
+#define ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
+
+#include <iosfwd>
+#include <string>
+
+#include "base/logging.h" // Logging is required for FATAL in the helper functions.
+
+namespace art {
+
+enum InstructionSet {
+ kNone,
+ kArm,
+ kArm64,
+ kThumb2,
+ kX86,
+ kX86_64,
+ kMips,
+ kMips64
+};
+std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
+
+#if defined(__arm__)
+static constexpr InstructionSet kRuntimeISA = kArm;
+#elif defined(__aarch64__)
+static constexpr InstructionSet kRuntimeISA = kArm64;
+#elif defined(__mips__)
+static constexpr InstructionSet kRuntimeISA = kMips;
+#elif defined(__i386__)
+static constexpr InstructionSet kRuntimeISA = kX86;
+#elif defined(__x86_64__)
+static constexpr InstructionSet kRuntimeISA = kX86_64;
+#else
+static constexpr InstructionSet kRuntimeISA = kNone;
+#endif
+
+// Architecture-specific pointer sizes
+static constexpr size_t kArmPointerSize = 4;
+static constexpr size_t kArm64PointerSize = 8;
+static constexpr size_t kMipsPointerSize = 4;
+static constexpr size_t kMips64PointerSize = 8;
+static constexpr size_t kX86PointerSize = 4;
+static constexpr size_t kX86_64PointerSize = 8;
+
+// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
+// but ARM ELF requires 8.
+static constexpr size_t kArmAlignment = 8;
+
+// ARM64 instruction alignment. This is the recommended alignment for maximum performance.
+static constexpr size_t kArm64Alignment = 16;
+
+// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned.
+// TODO: Can this be 4?
+static constexpr size_t kMipsAlignment = 8;
+
+// X86 instruction alignment. This is the recommended alignment for maximum performance.
+static constexpr size_t kX86Alignment = 16;
+
+
+const char* GetInstructionSetString(InstructionSet isa);
+
+// Note: Returns kNone when the string cannot be parsed to a known value.
+InstructionSet GetInstructionSetFromString(const char* instruction_set);
+
+static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return kArmPointerSize;
+ case kArm64:
+ return kArm64PointerSize;
+ case kX86:
+ return kX86PointerSize;
+ case kX86_64:
+ return kX86_64PointerSize;
+ case kMips:
+ return kMipsPointerSize;
+ case kMips64:
+ return kMips64PointerSize;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have pointer size.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+size_t GetInstructionSetAlignment(InstructionSet isa);
+
+static inline bool Is64BitInstructionSet(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ case kMips:
+ return false;
+
+ case kArm64:
+ case kX86_64:
+ case kMips64:
+ return true;
+
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have bit width.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+static inline size_t InstructionSetPointerSize(InstructionSet isa) {
+ return Is64BitInstructionSet(isa) ? 8U : 4U;
+}
+
+static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return 4;
+ case kArm64:
+ return 8;
+ case kX86:
+ return 4;
+ case kX86_64:
+ return 8;
+ case kMips:
+ return 4;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have spills.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+static inline size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return 4;
+ case kArm64:
+ return 8;
+ case kX86:
+ return 8;
+ case kX86_64:
+ return 8;
+ case kMips:
+ return 4;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have spills.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+size_t GetStackOverflowReservedBytes(InstructionSet isa);
+
+// The following definitions create return types for two word-sized entities that will be passed
+// in registers so that memory operations for the interface trampolines can be avoided. The entities
+// are the resolved method and the pointer to the code to be invoked.
+//
+// On x86, ARM32 and MIPS, this is given for a *scalar* 64bit value. The definition thus *must* be
+// uint64_t or long long int.
+//
+// On x86_64 and ARM64, structs are decomposed for allocation, so we can create a struct of two
+// size_t-sized values.
+//
+// We need two operations:
+//
+// 1) A flag value that signals failure. The assembly stubs expect the lower part to be "0".
+// GetTwoWordFailureValue() will return a value that has lower part == 0.
+//
+// 2) A value that combines two word-sized values.
+// GetTwoWordSuccessValue() constructs this.
+//
+// IMPORTANT: If you use this to transfer object pointers, it is your responsibility to ensure
+// that the object does not move or the value is updated. Simple use of this is NOT SAFE
+// when the garbage collector can move objects concurrently. Ensure that required locks
+// are held when using!
+
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+typedef uint64_t TwoWordReturn;
+
+// Encodes method_ptr==nullptr and code_ptr==nullptr
+static inline constexpr TwoWordReturn GetTwoWordFailureValue() {
+ return 0;
+}
+
+// Use the lower 32b for the method pointer and the upper 32b for the code pointer.
+static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
+ static_assert(sizeof(uint32_t) == sizeof(uintptr_t), "Unexpected size difference");
+ uint32_t lo32 = lo;
+ uint64_t hi64 = static_cast<uint64_t>(hi);
+ return ((hi64 << 32) | lo32);
+}
+
+#elif defined(__x86_64__) || defined(__aarch64__)
+struct TwoWordReturn {
+ uintptr_t lo;
+ uintptr_t hi;
+};
+
+// Encodes method_ptr==nullptr. Leaves random value in code pointer.
+static inline TwoWordReturn GetTwoWordFailureValue() {
+ TwoWordReturn ret;
+ ret.lo = 0;
+ return ret;
+}
+
+// Write values into their respective members.
+static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
+ TwoWordReturn ret;
+ ret.lo = lo;
+ ret.hi = hi;
+ return ret;
+}
+#else
+#error "Unsupported architecture"
+#endif
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
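
The TwoWordReturn machinery above is easiest to read with a sketch of a caller; artResolveForStub is a hypothetical trampoline helper, not something this patch adds:

    #include "arch/instruction_set.h"  // Illustrative include path.

    extern "C" art::TwoWordReturn artResolveForStub(void* method, void* code) {
      if (method == nullptr) {
        // Lower word is 0, which the assembly stubs treat as failure.
        return art::GetTwoWordFailureValue();
      }
      // Lower word (or .lo) carries the method, upper word (or .hi) the code pointer,
      // matching the packing described in the comment above.
      return art::GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
                                         reinterpret_cast<uintptr_t>(method));
    }
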
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
new file mode 100644
index 0000000..1072562
--- /dev/null
+++ b/runtime/arch/instruction_set_features.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features.h"
+
+#include "base/casts.h"
+#include "utils.h"
+
+
+#include "arm/instruction_set_features_arm.h"
+#include "arm64/instruction_set_features_arm64.h"
+#include "mips/instruction_set_features_mips.h"
+#include "x86/instruction_set_features_x86.h"
+#include "x86_64/instruction_set_features_x86_64.h"
+
+namespace art {
+
+const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
+ const std::string& variant,
+ std::string* error_msg) {
+ const InstructionSetFeatures* result;
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << isa;
+ UNREACHABLE();
+ }
+ CHECK_EQ(result == nullptr, error_msg->size() != 0);
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
+ uint32_t bitmap) {
+ const InstructionSetFeatures* result;
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << isa;
+ UNREACHABLE();
+ }
+ CHECK_EQ(bitmap, result->AsBitmap());
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromCppDefines();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromCppDefines();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromCppDefines();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromCppDefines();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromCppDefines();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+
+const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromCpuInfo();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromCpuInfo();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromCpuInfo();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromCpuInfo();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromCpuInfo();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromHwcap();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromHwcap();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromHwcap();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromHwcap();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromHwcap();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromAssembly();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromAssembly();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromAssembly();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromAssembly();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromAssembly();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::AddFeaturesFromString(
+ const std::string& feature_list, std::string* error_msg) const {
+ if (feature_list.empty()) {
+ *error_msg = "No instruction set features specified";
+ return nullptr;
+ }
+ std::vector<std::string> features;
+ Split(feature_list, ',', &features);
+ bool smp = smp_;
+ bool use_default = false; // Have we seen the 'default' feature?
+ bool first = false; // Have we processed the first feature yet?
+ for (auto it = features.begin(); it != features.end();) {
+ if (use_default) {
+ *error_msg = "Unexpected instruction set features after 'default'";
+ return nullptr;
+ }
+ std::string feature = Trim(*it);
+ bool erase = false;
+ if (feature == "default") {
+ if (!first) {
+ use_default = true;
+ erase = true;
+ } else {
+ *error_msg = "Unexpected instruction set features before 'default'";
+ return nullptr;
+ }
+ } else if (feature == "smp") {
+ smp = true;
+ erase = true;
+ } else if (feature == "-smp") {
+ smp = false;
+ erase = true;
+ }
+ // Erase the smp feature once processed.
+ if (!erase) {
+ ++it;
+ } else {
+ it = features.erase(it);
+ }
+ first = true;
+ }
+ DCHECK_EQ(use_default, features.empty());
+ return AddFeaturesFromSplitString(smp, features, error_msg);
+}
+
+const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
+ DCHECK_EQ(kArm, GetInstructionSet());
+ return down_cast<const ArmInstructionSetFeatures*>(this);
+}
+
+const Arm64InstructionSetFeatures* InstructionSetFeatures::AsArm64InstructionSetFeatures() const {
+ DCHECK_EQ(kArm64, GetInstructionSet());
+ return down_cast<const Arm64InstructionSetFeatures*>(this);
+}
+
+const MipsInstructionSetFeatures* InstructionSetFeatures::AsMipsInstructionSetFeatures() const {
+ DCHECK_EQ(kMips, GetInstructionSet());
+ return down_cast<const MipsInstructionSetFeatures*>(this);
+}
+
+const X86InstructionSetFeatures* InstructionSetFeatures::AsX86InstructionSetFeatures() const {
+ DCHECK(kX86 == GetInstructionSet() || kX86_64 == GetInstructionSet());
+ return down_cast<const X86InstructionSetFeatures*>(this);
+}
+
+const X86_64InstructionSetFeatures* InstructionSetFeatures::AsX86_64InstructionSetFeatures() const {
+ DCHECK_EQ(kX86_64, GetInstructionSet());
+ return down_cast<const X86_64InstructionSetFeatures*>(this);
+}
+
+bool InstructionSetFeatures::FindVariantInArray(const char* variants[], size_t num_variants,
+ const std::string& variant) {
+ const char** begin = variants;
+ const char** end = begin + num_variants;
+ return std::find(begin, end, variant) != end;
+}
+
+std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs) {
+ os << "ISA: " << rhs.GetInstructionSet() << " Feature string: " << rhs.GetFeatureString();
+ return os;
+}
+
+} // namespace art
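
The feature-string grammar accepted by AddFeaturesFromString above, sketched for an ARM64 base (the wrapper and variable names are illustrative): "smp"/"-smp" and a leading "default" are consumed generically, everything else is handed to the ISA-specific parser.

    #include <memory>
    #include <string>

    #include "arch/instruction_set_features.h"  // Illustrative include path.

    const art::InstructionSetFeatures* TunedArm64Features() {
      std::string error_msg;
      std::unique_ptr<const art::InstructionSetFeatures> base(
          art::InstructionSetFeatures::FromVariant(art::kArm64, "default", &error_msg));
      // "smp" is consumed by the generic parser; "-a53" reaches
      // Arm64InstructionSetFeatures::AddFeaturesFromSplitString and disables the
      // Cortex-A53 835769 workaround in the returned copy. "default" on its own would
      // keep the base features; an unknown name returns nullptr with error_msg set.
      return base->AddFeaturesFromString("smp,-a53", &error_msg);
    }
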
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
new file mode 100644
index 0000000..2c6e699
--- /dev/null
+++ b/runtime/arch/instruction_set_features.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
+#define ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
+
+#include <ostream>
+#include <vector>
+
+#include "base/macros.h"
+#include "instruction_set.h"
+
+namespace art {
+
+class ArmInstructionSetFeatures;
+class Arm64InstructionSetFeatures;
+class MipsInstructionSetFeatures;
+class X86InstructionSetFeatures;
+class X86_64InstructionSetFeatures;
+
+// Abstraction used to describe the features of different instruction sets.
+class InstructionSetFeatures {
+ public:
+ // Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
+ static const InstructionSetFeatures* FromVariant(InstructionSet isa,
+ const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap for the given isa and create an InstructionSetFeatures.
+ static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
+ static const InstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const InstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const InstructionSetFeatures* FromHwcap();
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const InstructionSetFeatures* FromAssembly();
+
+ // Parse a string of the form "div,-atomic_ldrd_strd" adding and removing these features to
+ // create a new InstructionSetFeatures.
+ const InstructionSetFeatures* AddFeaturesFromString(const std::string& feature_list,
+ std::string* error_msg) const WARN_UNUSED;
+
+ // Are these features the same as the other given features?
+ virtual bool Equals(const InstructionSetFeatures* other) const = 0;
+
+ // Return the ISA these features relate to.
+ virtual InstructionSet GetInstructionSet() const = 0;
+
+ // Return a bitmap that represents the features. ISA specific.
+ virtual uint32_t AsBitmap() const = 0;
+
+ // Return a string of the form "div,lpae" or "none".
+ virtual std::string GetFeatureString() const = 0;
+
+ // Does the instruction set variant require instructions for correctness with SMP?
+ bool IsSmp() const {
+ return smp_;
+ }
+
+ // Down cast this to an ArmInstructionSetFeatures.
+ const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const;
+
+ // Down cast this to an Arm64InstructionSetFeatures.
+ const Arm64InstructionSetFeatures* AsArm64InstructionSetFeatures() const;
+
+ // Down cast this to a MipsInstructionSetFeatures.
+ const MipsInstructionSetFeatures* AsMipsInstructionSetFeatures() const;
+
+ // Down cast this to an X86InstructionSetFeatures.
+ const X86InstructionSetFeatures* AsX86InstructionSetFeatures() const;
+
+ // Down cast this to an X86_64InstructionSetFeatures.
+ const X86_64InstructionSetFeatures* AsX86_64InstructionSetFeatures() const;
+
+ virtual ~InstructionSetFeatures() {}
+
+ protected:
+ explicit InstructionSetFeatures(bool smp) : smp_(smp) {}
+
+ // Returns true if variant appears in the array variants.
+ static bool FindVariantInArray(const char* variants[], size_t num_variants,
+ const std::string& variant);
+
+ // Add architecture specific features in sub-classes.
+ virtual const InstructionSetFeatures*
+ AddFeaturesFromSplitString(bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const = 0;
+
+ private:
+ const bool smp_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
+};
+std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs);
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
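
A sketch of the polymorphic use declared above, assuming an ARM64 runtime (kRuntimeISA == kArm64); the wrapper function and include paths are illustrative:

    #include <memory>

    #include "arch/arm64/instruction_set_features_arm64.h"  // For NeedFixCortexA53_835769().
    #include "arch/instruction_set_features.h"

    bool RuntimeNeedsA53Fix() {
      std::unique_ptr<const art::InstructionSetFeatures> features(
          art::InstructionSetFeatures::FromCppDefines());
      if (features->GetInstructionSet() != art::kArm64) {
        return false;
      }
      // The As*InstructionSetFeatures() helpers are DCHECKed down-casts; call them
      // only after checking the ISA.
      return features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769();
    }
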
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
new file mode 100644
index 0000000..e6f4e7a
--- /dev/null
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features.h"
+
+#include <gtest/gtest.h>
+
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+#endif
+
+#include "base/stringprintf.h"
+
+namespace art {
+
+#ifdef HAVE_ANDROID_OS
+#if defined(__aarch64__)
+TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromSystemPropertyVariant) {
+ LOG(WARNING) << "Test disabled due to no CPP define for A53 erratum 835769";
+#else
+TEST(InstructionSetFeaturesTest, FeaturesFromSystemPropertyVariant) {
+#endif
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Read the variant property.
+ std::string key = StringPrintf("dalvik.vm.isa.%s.variant", GetInstructionSetString(kRuntimeISA));
+ char dex2oat_isa_variant[PROPERTY_VALUE_MAX];
+ if (property_get(key.c_str(), dex2oat_isa_variant, nullptr) > 0) {
+ // Use features from property to build InstructionSetFeatures and check against build's
+ // features.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> property_features(
+ InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
+ ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ << "System property features: " << *property_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+ }
+}
+
+#if defined(__aarch64__)
+TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromSystemPropertyString) {
+ LOG(WARNING) << "Test disabled due to no CPP define for A53 erratum 835769";
+#else
+TEST(InstructionSetFeaturesTest, FeaturesFromSystemPropertyString) {
+#endif
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Read the variant property.
+ std::string variant_key = StringPrintf("dalvik.vm.isa.%s.variant",
+ GetInstructionSetString(kRuntimeISA));
+ char dex2oat_isa_variant[PROPERTY_VALUE_MAX];
+ if (property_get(variant_key.c_str(), dex2oat_isa_variant, nullptr) > 0) {
+ // Read the features property.
+ std::string features_key = StringPrintf("dalvik.vm.isa.%s.features",
+ GetInstructionSetString(kRuntimeISA));
+ char dex2oat_isa_features[PROPERTY_VALUE_MAX];
+ if (property_get(features_key.c_str(), dex2oat_isa_features, nullptr) > 0) {
+ // Use features from property to build InstructionSetFeatures and check against build's
+ // features.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> base_features(
+ InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
+ ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
+
+ std::unique_ptr<const InstructionSetFeatures> property_features(
+ base_features->AddFeaturesFromString(dex2oat_isa_features, &error_msg));
+ ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ << "System property features: " << *property_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+ }
+ }
+}
+
+#if defined(__arm__)
+TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromCpuInfo) {
+ LOG(WARNING) << "Test disabled due to buggy ARM kernels";
+#else
+TEST(InstructionSetFeaturesTest, FeaturesFromCpuInfo) {
+#endif
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Check we get the same instruction set features using /proc/cpuinfo.
+ std::unique_ptr<const InstructionSetFeatures> cpuinfo_features(
+ InstructionSetFeatures::FromCpuInfo());
+ EXPECT_TRUE(cpuinfo_features->Equals(instruction_set_features.get()))
+ << "CPU Info features: " << *cpuinfo_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+}
+#endif
+
+#ifndef HAVE_ANDROID_OS
+TEST(InstructionSetFeaturesTest, HostFeaturesFromCppDefines) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> default_features(
+ InstructionSetFeatures::FromVariant(kRuntimeISA, "default", &error_msg));
+ ASSERT_TRUE(error_msg.empty());
+
+ std::unique_ptr<const InstructionSetFeatures> cpp_features(
+ InstructionSetFeatures::FromCppDefines());
+ EXPECT_TRUE(default_features->Equals(cpp_features.get()))
+ << "Default variant features: " << *default_features.get()
+ << "\nFeatures from build: " << *cpp_features.get();
+}
+#endif
+
+#if defined(__arm__)
+TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromHwcap) {
+ LOG(WARNING) << "Test disabled due to buggy ARM kernels";
+#else
+TEST(InstructionSetFeaturesTest, FeaturesFromHwcap) {
+#endif
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Check we get the same instruction set features using AT_HWCAP.
+ std::unique_ptr<const InstructionSetFeatures> hwcap_features(
+ InstructionSetFeatures::FromHwcap());
+ EXPECT_TRUE(hwcap_features->Equals(instruction_set_features.get()))
+ << "Hwcap features: " << *hwcap_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+}
+
+TEST(InstructionSetFeaturesTest, FeaturesFromAssembly) {
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Check we get the same instruction set features using assembly tests.
+ std::unique_ptr<const InstructionSetFeatures> assembly_features(
+ InstructionSetFeatures::FromAssembly());
+ EXPECT_TRUE(assembly_features->Equals(instruction_set_features.get()))
+ << "Assembly features: " << *assembly_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+}
+
+} // namespace art
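
For orientation, here is a minimal, hypothetical usage sketch (not part of the patch) of the pattern the new test file above exercises: one feature set derived from the build's C preprocessor defines, a second derived from a variant name, and an Equals() comparison between them. The include paths and the standalone main() harness are assumptions for illustration; the ART calls themselves are the ones used in the tests.

// Hypothetical standalone sketch; mirrors instruction_set_features_test.cc above.
#include <iostream>
#include <memory>
#include <string>

#include "arch/instruction_set.h"           // assumed include path
#include "arch/instruction_set_features.h"  // assumed include path

int main() {
  using art::InstructionSetFeatures;
  // Features baked into this build via C preprocessor defines.
  std::unique_ptr<const InstructionSetFeatures> build_features(
      InstructionSetFeatures::FromCppDefines());
  // Features implied by the "default" variant of the ISA this runtime targets.
  std::string error_msg;
  std::unique_ptr<const InstructionSetFeatures> variant_features(
      InstructionSetFeatures::FromVariant(art::kRuntimeISA, "default", &error_msg));
  if (variant_features == nullptr) {
    std::cerr << "FromVariant failed: " << error_msg << std::endl;
    return 1;
  }
  std::cout << "Build features:   " << build_features->GetFeatureString() << std::endl;
  std::cout << "Variant features: " << variant_features->GetFeatureString() << std::endl;
  return build_features->Equals(variant_features.get()) ? 0 : 1;
}
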
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
new file mode 100644
index 0000000..932ef32
--- /dev/null
+++ b/runtime/arch/instruction_set_test.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set.h"
+
+#include <gtest/gtest.h>
+
+#include "base/stringprintf.h"
+
+namespace art {
+
+TEST(InstructionSetTest, GetInstructionSetFromString) {
+ EXPECT_EQ(kArm, GetInstructionSetFromString("arm"));
+ EXPECT_EQ(kArm64, GetInstructionSetFromString("arm64"));
+ EXPECT_EQ(kX86, GetInstructionSetFromString("x86"));
+ EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
+ EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
+ EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
+ EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
+}
+
+TEST(InstructionSetTest, GetInstructionSetString) {
+ EXPECT_STREQ("arm", GetInstructionSetString(kArm));
+ EXPECT_STREQ("arm", GetInstructionSetString(kThumb2));
+ EXPECT_STREQ("arm64", GetInstructionSetString(kArm64));
+ EXPECT_STREQ("x86", GetInstructionSetString(kX86));
+ EXPECT_STREQ("x86_64", GetInstructionSetString(kX86_64));
+ EXPECT_STREQ("mips", GetInstructionSetString(kMips));
+ EXPECT_STREQ("none", GetInstructionSetString(kNone));
+}
+
+TEST(InstructionSetTest, TestRoundTrip) {
+ EXPECT_EQ(kRuntimeISA, GetInstructionSetFromString(GetInstructionSetString(kRuntimeISA)));
+}
+
+TEST(InstructionSetTest, PointerSize) {
+ EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
+}
+
+} // namespace art
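
A similarly hedged sketch of the name/enum round trip the new instruction_set_test.cc checks (again assuming the include path and a standalone main()):

// Hypothetical round-trip sketch; mirrors instruction_set_test.cc above.
#include <cassert>

#include "arch/instruction_set.h"  // assumed include path

int main() {
  // String -> enum -> string round trip for the current runtime ISA.
  const char* name = art::GetInstructionSetString(art::kRuntimeISA);
  assert(art::GetInstructionSetFromString(name) == art::kRuntimeISA);
  // Unknown names map to kNone rather than aborting.
  assert(art::GetInstructionSetFromString("random-string") == art::kNone);
  // The runtime ISA's pointer size matches the build's pointer size.
  assert(art::GetInstructionSetPointerSize(art::kRuntimeISA) == sizeof(void*));
  return 0;
}
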
diff --git a/runtime/arch/memcmp16.cc b/runtime/arch/memcmp16.cc
index 5a3e73e..813df2f 100644
--- a/runtime/arch/memcmp16.cc
+++ b/runtime/arch/memcmp16.cc
@@ -19,6 +19,7 @@
// This linked against by assembly stubs, only.
#pragma GCC diagnostic ignored "-Wunused-function"
+int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count);
int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count) {
for (size_t i = 0; i < count; i++) {
if (s0[i] != s1[i]) {
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index d8ec9cd..0d18f1a 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -26,15 +26,31 @@
// Register holding Thread::Current().
#define rSELF $s1
-
- /* Cache alignment for function entry */
+ // Declare a function called name, sets up $gp.
.macro ENTRY name
.type \name, %function
.global \name
+ // Cache alignment for function entry.
.balign 16
\name:
.cfi_startproc
- /* Ensure we get a sane starting CFA. */
+ // Ensure we get a sane starting CFA.
+ .cfi_def_cfa $sp,0
+ // Load $gp. We expect that ".set noreorder" is in effect.
+ .cpload $t9
+ // Declare a local convenience label to be branched to when $gp is already set up.
+.L\name\()_gp_set:
+.endm
+
+ // Declare a function called name, doesn't set up $gp.
+.macro ENTRY_NO_GP name
+ .type \name, %function
+ .global \name
+ // Cache alignment for function entry.
+ .balign 16
+\name:
+ .cfi_startproc
+ // Ensure we get a sane starting CFA.
.cfi_def_cfa $sp,0
.endm
@@ -43,11 +59,6 @@
.size \name, .-\name
.endm
- /* Generates $gp for function calls */
-.macro GENERATE_GLOBAL_POINTER
- .cpload $t9
-.endm
-
.macro UNIMPLEMENTED name
ENTRY \name
break
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 6add93b..02c0982 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -19,18 +19,8 @@
#include "asm_support.h"
-// Offset of field Thread::tls32_.state_and_flags verified in InitCpu
-#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::tlsPtr_.card_table verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 120
-// Offset of field Thread::tlsPtr_.exception verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 124
-
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 48
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 48
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 64
-// Expected size of a heap reference
-#define HEAP_REFERENCE_SIZE 4
-
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
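
As a side note on the resized constants (illustrative only, not part of the patch): the kSaveAll and kRefsOnly frames shrink from 64 to 48 bytes because each now holds exactly 12 words — 11 callee saves ($s0-$s8, $gp, $ra) plus the Method* for kSaveAll, and 9 callee saves plus 2 padding words plus the Method* for kRefsOnly — while kRefsAndArgs stays at 16 words (64 bytes). A hypothetical compile-time restatement, assuming the header is includable from C++ under this path:

// Hypothetical sanity check of the new MIPS frame sizes (not in the patch).
#include "arch/mips/asm_support_mips.h"  // assumed include path

static_assert(FRAME_SIZE_SAVE_ALL_CALLEE_SAVE == (11 + 1) * 4,
              "kSaveAll: $s0-$s8, $gp, $ra plus one word for Method*");
static_assert(FRAME_SIZE_REFS_ONLY_CALLEE_SAVE == (9 + 2 + 1) * 4,
              "kRefsOnly: $s2-$s8, $gp, $ra, two padding words, one word for Method*");
static_assert(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE == (12 + 3 + 1) * 4,
              "kRefsAndArgs: $a1-$a3, $s2-$s8, $gp, $ra, three padding words, Method*");
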
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 789dbbb..e1f6c06 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -17,9 +17,8 @@
#include "context_mips.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
namespace art {
namespace mips {
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index db0f71f..e86aa1c 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -19,6 +19,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -27,56 +28,9 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
@@ -104,39 +58,6 @@
// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index aa6d68a..c9949d4 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -29,23 +29,29 @@
namespace art {
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
- mirror::ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED,
+ mirror::ArtMethod** out_method ATTRIBUTE_UNUSED,
+ uintptr_t* out_return_pc ATTRIBUTE_UNUSED,
+ uintptr_t* out_sp ATTRIBUTE_UNUSED) {
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
return false;
}
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
return false;
}
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
return false;
}
} // namespace art
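
The stubbed-out fault handlers above also show the ATTRIBUTE_UNUSED idiom this change applies to intentionally unused parameters. A minimal sketch of the same pattern, under the assumption that ATTRIBUTE_UNUSED expands to the GCC/Clang unused attribute as defined in ART's base/macros.h:

// Sketch of the unused-parameter idiom used above (assumed ATTRIBUTE_UNUSED definition).
#ifndef ATTRIBUTE_UNUSED
#define ATTRIBUTE_UNUSED __attribute__((__unused__))
#endif

// A handler that deliberately ignores its arguments still compiles cleanly with
// -Wunused-parameter enabled; the MIPS handlers above have no implementation yet,
// so every action simply reports failure.
bool StubAction(int sig ATTRIBUTE_UNUSED, void* context ATTRIBUTE_UNUSED) {
  return false;
}
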
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
new file mode 100644
index 0000000..11be2a8
--- /dev/null
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_mips.h"
+
+#include <fstream>
+#include <sstream>
+
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+namespace art {
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromVariant(
+    const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
+  if (variant != "default") {
+    // Only the default variant is currently recognized for MIPS; fall through to defaults.
+    LOG(WARNING) << "Unexpected CPU variant for Mips using defaults: " << variant;
+ }
+ bool smp = true; // Conservative default.
+ bool fpu_32bit = true;
+ bool mips_isa_gte2 = true;
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
+ bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
+ const bool smp = true;
+
+ // TODO: here we assume the FPU is always 32-bit.
+ const bool fpu_32bit = true;
+
+#if __mips_isa_rev >= 2
+ const bool mips_isa_gte2 = true;
+#else
+ const bool mips_isa_gte2 = false;
+#endif
+
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCpuInfo() {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+
+ // TODO: here we assume the FPU is always 32-bit.
+ const bool fpu_32bit = true;
+
+ // TODO: here we assume all MIPS processors are >= v2.
+#if __mips_isa_rev >= 2
+ const bool mips_isa_gte2 = true;
+#else
+ const bool mips_isa_gte2 = false;
+#endif
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("processor") != std::string::npos && line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromHwcap() {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines();
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromAssembly() {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines();
+}
+
+bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (kMips != other->GetInstructionSet()) {
+ return false;
+ }
+ const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures();
+ return (IsSmp() == other->IsSmp()) &&
+ (fpu_32bit_ == other_as_mips->fpu_32bit_) &&
+ (mips_isa_gte2_ == other_as_mips->mips_isa_gte2_);
+}
+
+uint32_t MipsInstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) |
+ (fpu_32bit_ ? kFpu32Bitfield : 0) |
+ (mips_isa_gte2_ ? kIsaRevGte2Bitfield : 0);
+}
+
+std::string MipsInstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (fpu_32bit_) {
+ result += ",fpu32";
+ } else {
+ result += ",-fpu32";
+ }
+ if (mips_isa_gte2_) {
+ result += ",mips2";
+ } else {
+ result += ",-mips2";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* MipsInstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
+ bool fpu_32bit = fpu_32bit_;
+ bool mips_isa_gte2 = mips_isa_gte2_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "fpu32") {
+ fpu_32bit = true;
+ } else if (feature == "-fpu32") {
+ fpu_32bit = false;
+ } else if (feature == "mips2") {
+ mips_isa_gte2 = true;
+ } else if (feature == "-mips2") {
+ mips_isa_gte2 = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+} // namespace art
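
For illustration, a hedged sketch of how the feature-string plumbing above is meant to be driven. The include paths and main() harness are assumptions; AddFeaturesFromString is the base-class entry point the new tests use, and it is assumed to split on ',' and forward to the AddFeaturesFromSplitString override defined above.

// Hypothetical usage sketch of the MIPS feature-string handling shown above.
#include <iostream>
#include <memory>
#include <string>

#include "arch/instruction_set.h"           // assumed include path
#include "arch/instruction_set_features.h"  // assumed include path

int main() {
  std::string error_msg;
  // The "default" MIPS variant: smp, 32-bit FPU, ISA revision >= 2.
  std::unique_ptr<const art::InstructionSetFeatures> base(
      art::InstructionSetFeatures::FromVariant(art::kMips, "default", &error_msg));
  if (base == nullptr) {
    std::cerr << error_msg << std::endl;
    return 1;
  }
  std::cout << base->GetFeatureString() << std::endl;  // "smp,fpu32,mips2"

  // Toggle individual features; an unknown name yields nullptr plus an error message.
  std::unique_ptr<const art::InstructionSetFeatures> tweaked(
      base->AddFeaturesFromString("-fpu32,-mips2", &error_msg));
  if (tweaked == nullptr) {
    std::cerr << error_msg << std::endl;
    return 1;
  }
  // Expected "smp,-fpu32,-mips2", assuming smp carries over from the base features.
  std::cout << tweaked->GetFeatureString() << std::endl;
  return 0;
}
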
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
new file mode 100644
index 0000000..f7c64fe
--- /dev/null
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the MIPS architecture.
+class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const MipsInstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromHwcap();
+
+  // Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const MipsInstructionSetFeatures* FromAssembly();
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kMips;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+ std::string GetFeatureString() const OVERRIDE;
+
+  // Is this an ISA revision greater than or equal to 2, opening up new opcodes.
+ bool IsMipsIsaRevGreaterThanEqual2() const {
+ return mips_isa_gte2_;
+ }
+
+  // Floating point double registers are encoded differently based on whether the Status.FR bit is
+  // set. When the FR bit is 0 the FPU is 32-bit; when it is 1, the FPU is 64-bit. Return true if
+  // the code should be generated assuming Status.FR is 0.
+ bool Is32BitFloatingPoint() const {
+ return fpu_32bit_;
+ }
+
+ virtual ~MipsInstructionSetFeatures() {}
+
+ protected:
+ // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
+ virtual const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE;
+
+ private:
+ MipsInstructionSetFeatures(bool smp, bool fpu_32bit, bool mips_isa_gte2)
+ : InstructionSetFeatures(smp), fpu_32bit_(fpu_32bit), mips_isa_gte2_(mips_isa_gte2) {
+ }
+
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kFpu32Bitfield = 2,
+ kIsaRevGte2Bitfield = 4,
+ };
+
+ const bool fpu_32bit_;
+ const bool mips_isa_gte2_;
+
+ DISALLOW_COPY_AND_ASSIGN(MipsInstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
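
The bitmap encoding above is a plain OR of the three flag bits, which is why the test file added next in this change expects AsBitmap() to be 7 for the default variant (smp = 1, fpu32 = 2, mips2 = 4). An illustrative restatement, with the constants copied locally because the real enum is private:

// Illustrative restatement of the private bitmap encoding (values copied from the header above).
#include <cstdint>

constexpr std::uint32_t kSmpBitfield = 1;
constexpr std::uint32_t kFpu32Bitfield = 2;
constexpr std::uint32_t kIsaRevGte2Bitfield = 4;

// Default MIPS variant: smp, fpu32 and mips2 are all set, so the bitmap is 1 | 2 | 4 = 7.
static_assert((kSmpBitfield | kFpu32Bitfield | kIsaRevGte2Bitfield) == 7u,
              "matches the AsBitmap() expectation in the MIPS feature test");
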
diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc
new file mode 100644
index 0000000..9b81ce2
--- /dev/null
+++ b/runtime/arch/mips/instruction_set_features_mips_test.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_mips.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(MipsInstructionSetFeaturesTest, MipsFeatures) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips_features(
+ InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ ASSERT_TRUE(mips_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips_features->GetInstructionSet(), kMips);
+ EXPECT_TRUE(mips_features->Equals(mips_features.get()));
+ EXPECT_STREQ("smp,fpu32,mips2", mips_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips_features->AsBitmap(), 7U);
+}
+
+} // namespace art
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index e5f4a79..9a79467 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -24,7 +24,6 @@
*/
.extern artFindNativeMethod
ENTRY art_jni_dlsym_lookup_stub
- GENERATE_GLOBAL_POINTER
addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra
.cfi_adjust_cfa_offset 32
sw $ra, 16($sp)
diff --git a/runtime/arch/mips/memcmp16_mips.S b/runtime/arch/mips/memcmp16_mips.S
index 0196edc..aef81af 100644
--- a/runtime/arch/mips/memcmp16_mips.S
+++ b/runtime/arch/mips/memcmp16_mips.S
@@ -20,7 +20,7 @@
#include "asm_support_mips.S"
// u4 __memcmp16(const u2*, const u2*, size_t);
-ENTRY __memcmp16
+ENTRY_NO_GP __memcmp16
li $t0,0
li $t1,0
beqz $a2,done /* 0 length string */
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index a171a1d..8d418e8 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -21,7 +21,6 @@
.extern artPortableProxyInvokeHandler
ENTRY art_portable_proxy_invoke_handler
- GENERATE_GLOBAL_POINTER
# Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
# TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
addiu $sp, $sp, -64
@@ -72,7 +71,6 @@
* [sp + 20] = result type char
*/
ENTRY art_portable_invoke_stub
- GENERATE_GLOBAL_POINTER
sw $a0, 0($sp) # save out a0
addiu $sp, $sp, -16 # spill s0, s1, fp, ra
.cfi_adjust_cfa_offset 16
@@ -87,7 +85,7 @@
move $fp, $sp # save sp in fp
.cfi_def_cfa_register 30
move $s1, $a3 # move managed thread pointer into s1
- addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval
+ addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval. TODO: unused?
addiu $t0, $a2, 16 # create space for method pointer in frame
srl $t0, $t0, 3 # shift the frame size right 3
sll $t0, $t0, 3 # shift the frame size left 3 to align to 16 bytes
@@ -100,7 +98,7 @@
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, METHOD_PORTABLE_CODE_OFFSET($a0) # get pointer to the code
+ lw $t9, MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store NULL for method* at bottom of frame
move $sp, $fp # restore the stack
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 609c65a..44feee6 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -26,134 +26,144 @@
/* Deliver an exception pending on a thread */
.extern artDeliverPendingExceptionFromCode
+#define ARG_SLOT_SIZE 32 // space for a0-a3 plus 4 more words
+
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
- * callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word padding + 4 open words for args
+ * Callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word for Method*
+ * Clobbers $t0 and $sp
+ * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
+ * Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- addiu $sp, $sp, -64
- .cfi_adjust_cfa_offset 64
+ addiu $sp, $sp, -48
+ .cfi_adjust_cfa_offset 48
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 64)
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 48)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(MIPS) size not as expected."
#endif
- sw $ra, 60($sp)
- .cfi_rel_offset 31, 60
- sw $s8, 56($sp)
- .cfi_rel_offset 30, 56
- sw $gp, 52($sp)
- .cfi_rel_offset 28, 52
- sw $s7, 48($sp)
- .cfi_rel_offset 23, 48
- sw $s6, 44($sp)
- .cfi_rel_offset 22, 44
- sw $s5, 40($sp)
- .cfi_rel_offset 21, 40
- sw $s4, 36($sp)
- .cfi_rel_offset 20, 36
- sw $s3, 32($sp)
- .cfi_rel_offset 19, 32
- sw $s2, 28($sp)
- .cfi_rel_offset 18, 28
- sw $s1, 24($sp)
- .cfi_rel_offset 17, 24
- sw $s0, 20($sp)
- .cfi_rel_offset 16, 20
- # 1 word for alignment, 4 open words for args $a0-$a3, bottom will hold Method*
+ sw $ra, 44($sp)
+ .cfi_rel_offset 31, 44
+ sw $s8, 40($sp)
+ .cfi_rel_offset 30, 40
+ sw $gp, 36($sp)
+ .cfi_rel_offset 28, 36
+ sw $s7, 32($sp)
+ .cfi_rel_offset 23, 32
+ sw $s6, 28($sp)
+ .cfi_rel_offset 22, 28
+ sw $s5, 24($sp)
+ .cfi_rel_offset 21, 24
+ sw $s4, 20($sp)
+ .cfi_rel_offset 20, 20
+ sw $s3, 16($sp)
+ .cfi_rel_offset 19, 16
+ sw $s2, 12($sp)
+ .cfi_rel_offset 18, 12
+ sw $s1, 8($sp)
+ .cfi_rel_offset 17, 8
+ sw $s0, 4($sp)
+ .cfi_rel_offset 16, 4
+ # 1 word for holding Method*
+
+ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
+ lw $t0, 0($t0)
+ THIS_LOAD_REQUIRES_READ_BARRIER
+ lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t0)
+ sw $t0, 0($sp) # Place Method* at bottom of stack.
+ sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+ addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
+ .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
* Does not include rSUSPEND or rSELF
- * callee-save: $s2-$s8 + $gp + $ra, 9 total + 3 words padding + 4 open words for args
+ * callee-save: $s2-$s8 + $gp + $ra, 9 total + 2 words padding + 1 word to hold Method*
+ * Clobbers $t0 and $sp
+ * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
+ * Reserves FRAME_SIZE_REFS_ONLY_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
*/
-.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- addiu $sp, $sp, -64
- .cfi_adjust_cfa_offset 64
+.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, -48
+ .cfi_adjust_cfa_offset 48
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 64)
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 48)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(MIPS) size not as expected."
#endif
- sw $ra, 60($sp)
- .cfi_rel_offset 31, 60
- sw $s8, 56($sp)
- .cfi_rel_offset 30, 56
- sw $gp, 52($sp)
- .cfi_rel_offset 28, 52
- sw $s7, 48($sp)
- .cfi_rel_offset 23, 48
- sw $s6, 44($sp)
- .cfi_rel_offset 22, 44
- sw $s5, 40($sp)
- .cfi_rel_offset 21, 40
- sw $s4, 36($sp)
- .cfi_rel_offset 20, 36
- sw $s3, 32($sp)
- .cfi_rel_offset 19, 32
- sw $s2, 28($sp)
- .cfi_rel_offset 18, 28
- # 3 words for alignment and extra args, 4 open words for args $a0-$a3, bottom will hold Method*
+ sw $ra, 44($sp)
+ .cfi_rel_offset 31, 44
+ sw $s8, 40($sp)
+ .cfi_rel_offset 30, 40
+ sw $gp, 36($sp)
+ .cfi_rel_offset 28, 36
+ sw $s7, 32($sp)
+ .cfi_rel_offset 23, 32
+ sw $s6, 28($sp)
+ .cfi_rel_offset 22, 28
+ sw $s5, 24($sp)
+ .cfi_rel_offset 21, 24
+ sw $s4, 20($sp)
+ .cfi_rel_offset 20, 20
+ sw $s3, 16($sp)
+ .cfi_rel_offset 19, 16
+ sw $s2, 12($sp)
+ .cfi_rel_offset 18, 12
+ # 2 words for alignment and bottom word will hold Method*
+
+ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
+ lw $t0, 0($t0)
+ THIS_LOAD_REQUIRES_READ_BARRIER
+ lw $t0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t0)
+ sw $t0, 0($sp) # Place Method* at bottom of stack.
+ sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+ addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
+ .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
-.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- lw $ra, 60($sp)
+.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
+ .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
+ lw $ra, 44($sp)
.cfi_restore 31
- lw $s8, 56($sp)
+ lw $s8, 40($sp)
.cfi_restore 30
- lw $gp, 52($sp)
+ lw $gp, 36($sp)
.cfi_restore 28
- lw $s7, 48($sp)
+ lw $s7, 32($sp)
.cfi_restore 23
- lw $s6, 44($sp)
+ lw $s6, 28($sp)
.cfi_restore 22
- lw $s5, 40($sp)
+ lw $s5, 24($sp)
.cfi_restore 21
- lw $s4, 36($sp)
+ lw $s4, 20($sp)
.cfi_restore 20
- lw $s3, 32($sp)
+ lw $s3, 16($sp)
.cfi_restore 19
- lw $s2, 28($sp)
+ lw $s2, 12($sp)
.cfi_restore 18
- addiu $sp, $sp, 64
- .cfi_adjust_cfa_offset -64
+ addiu $sp, $sp, 48
+ .cfi_adjust_cfa_offset -48
.endm
-.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
- lw $ra, 60($sp)
- .cfi_restore 31
- lw $s8, 56($sp)
- .cfi_restore 30
- lw $gp, 52($sp)
- .cfi_restore 28
- lw $s7, 48($sp)
- .cfi_restore 23
- lw $s6, 44($sp)
- .cfi_restore 22
- lw $s5, 40($sp)
- .cfi_restore 21
- lw $s4, 36($sp)
- .cfi_restore 20
- lw $s3, 32($sp)
- .cfi_restore 19
- lw $s2, 28($sp)
- .cfi_restore 18
+.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
jr $ra
- addiu $sp, $sp, 64
- .cfi_adjust_cfa_offset -64
+ nop
.endm
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
* callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
*/
-.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
addiu $sp, $sp, -64
.cfi_adjust_cfa_offset 64
@@ -189,7 +199,46 @@
# bottom will hold Method*
.endm
-.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
+ * Clobbers $t0 and $sp
+ * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
+ * Reserves FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
+ */
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
+ lw $t0, 0($t0)
+ THIS_LOAD_REQUIRES_READ_BARRIER
+ lw $t0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t0)
+ sw $t0, 0($sp) # Place Method* at bottom of stack.
+ sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+ addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
+ .cfi_adjust_cfa_offset ARG_SLOT_SIZE
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
+ * Clobbers $sp
+ * Use $a0 as the Method* and loads it into bottom of stack.
+ * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
+ * Reserves FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
+ */
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
+ sw $a0, 0($sp) # Place Method* at bottom of stack.
+ sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+ addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
+ .cfi_adjust_cfa_offset ARG_SLOT_SIZE
+.endm
+
+.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
+ .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
lw $ra, 60($sp)
.cfi_restore 31
lw $s8, 56($sp)
@@ -224,15 +273,14 @@
*/
.macro DELIVER_PENDING_EXCEPTION
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw
- move $a0, rSELF # pass Thread::Current
la $t9, artDeliverPendingExceptionFromCode
- jr $t9 # artDeliverPendingExceptionFromCode(Thread*, $sp)
- move $a1, $sp # pass $sp
+ jr $t9 # artDeliverPendingExceptionFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
.endm
.macro RETURN_IF_NO_EXCEPTION
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
bnez $t0, 1f # success if no exception is pending
nop
jr $ra
@@ -242,7 +290,7 @@
.endm
.macro RETURN_IF_ZERO
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
bnez $v0, 1f # success?
nop
jr $ra # return on success
@@ -252,7 +300,7 @@
.endm
.macro RETURN_IF_RESULT_IS_NON_ZERO
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
beqz $v0, 1f # success?
nop
jr $ra # return on success
@@ -342,12 +390,10 @@
* the bottom of the thread. On entry r0 holds Throwable*
*/
ENTRY art_quick_deliver_exception
- GENERATE_GLOBAL_POINTER
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a1, rSELF # pass Thread::Current
la $t9, artDeliverExceptionFromCode
- jr $t9 # artDeliverExceptionFromCode(Throwable*, Thread*, $sp)
- move $a2, $sp # pass $sp
+ jr $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
+ move $a1, rSELF # pass Thread::Current
END art_quick_deliver_exception
/*
@@ -355,13 +401,10 @@
*/
.extern artThrowNullPointerExceptionFromCode
ENTRY art_quick_throw_null_pointer_exception
- GENERATE_GLOBAL_POINTER
-.Lart_quick_throw_null_pointer_exception_gp_set:
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a0, rSELF # pass Thread::Current
la $t9, artThrowNullPointerExceptionFromCode
- jr $t9 # artThrowNullPointerExceptionFromCode(Thread*, $sp)
- move $a1, $sp # pass $sp
+ jr $t9 # artThrowNullPointerExceptionFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
END art_quick_throw_null_pointer_exception
/*
@@ -369,12 +412,10 @@
*/
.extern artThrowDivZeroFromCode
ENTRY art_quick_throw_div_zero
- GENERATE_GLOBAL_POINTER
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a0, rSELF # pass Thread::Current
la $t9, artThrowDivZeroFromCode
- jr $t9 # artThrowDivZeroFromCode(Thread*, $sp)
- move $a1, $sp # pass $sp
+ jr $t9 # artThrowDivZeroFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
END art_quick_throw_div_zero
/*
@@ -382,13 +423,10 @@
*/
.extern artThrowArrayBoundsFromCode
ENTRY art_quick_throw_array_bounds
- GENERATE_GLOBAL_POINTER
-.Lart_quick_throw_array_bounds_gp_set:
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a2, rSELF # pass Thread::Current
la $t9, artThrowArrayBoundsFromCode
- jr $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*, $sp)
- move $a3, $sp # pass $sp
+ jr $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
+ move $a2, rSELF # pass Thread::Current
END art_quick_throw_array_bounds
/*
@@ -396,12 +434,10 @@
*/
.extern artThrowStackOverflowFromCode
ENTRY art_quick_throw_stack_overflow
- GENERATE_GLOBAL_POINTER
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a0, rSELF # pass Thread::Current
la $t9, artThrowStackOverflowFromCode
- jr $t9 # artThrowStackOverflowFromCode(Thread*, $sp)
- move $a1, $sp # pass $sp
+ jr $t9 # artThrowStackOverflowFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
END art_quick_throw_stack_overflow
/*
@@ -409,12 +445,10 @@
*/
.extern artThrowNoSuchMethodFromCode
ENTRY art_quick_throw_no_such_method
- GENERATE_GLOBAL_POINTER
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a1, rSELF # pass Thread::Current
la $t9, artThrowNoSuchMethodFromCode
- jr $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*, $sp)
- move $a2, $sp # pass $sp
+ jr $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
END art_quick_throw_no_such_method
/*
@@ -436,23 +470,16 @@
.macro INVOKE_TRAMPOLINE c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
- lw $a2, 64($sp) # pass caller Method*
- move $t0, $sp # save $sp
- addiu $sp, $sp, -32 # make space for extra args
- .cfi_adjust_cfa_offset 32
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
+ lw $a2, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE+ARG_SLOT_SIZE($sp) # pass caller Method*
+ addiu $t0, $sp, ARG_SLOT_SIZE # save $sp (remove arg slots)
move $a3, rSELF # pass Thread::Current
- .cfi_rel_offset 28, 12
jal \cxx_name # (method_idx, this, caller, Thread*, $sp)
sw $t0, 16($sp) # pass $sp
- addiu $sp, $sp, 32 # release out args
- .cfi_adjust_cfa_offset -32
move $a0, $v0 # save target Method*
- move $t9, $v1 # save $v0->code_
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
beqz $v0, 1f
- nop
+ move $t9, $v1 # save $v0->code_
jr $t9
nop
1:
@@ -479,7 +506,6 @@
* [sp + 20] = shorty
*/
ENTRY art_quick_invoke_stub
- GENERATE_GLOBAL_POINTER
sw $a0, 0($sp) # save out a0
addiu $sp, $sp, -16 # spill s0, s1, fp, ra
.cfi_adjust_cfa_offset 16
@@ -495,10 +521,10 @@
.cfi_def_cfa_register 30
move $s1, $a3 # move managed thread pointer into s1
addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval
- addiu $t0, $a2, 16 # create space for method pointer in frame
- srl $t0, $t0, 4 # shift the frame size right 4
- sll $t0, $t0, 4 # shift the frame size left 4 to align to 16 bytes
- subu $sp, $sp, $t0 # reserve stack space for argument array
+ addiu $t0, $a2, 4 # create space for method pointer in frame.
+ subu $t0, $sp, $t0 # reserve & align *stack* to 16 bytes:
+ srl $t0, $t0, 4 # native calling convention only aligns to 8B,
+ sll $sp, $t0, 4 # so we have to ensure ART 16B alignment ourselves.
addiu $a0, $sp, 4 # pass stack pointer + method ptr as dest for memcpy
jal memcpy # (dest, src, bytes)
addiu $sp, $sp, -16 # make space for argument slots for memcpy
@@ -507,7 +533,7 @@
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, METHOD_QUICK_CODE_OFFSET($a0) # get pointer to the code
+ lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store NULL for method* at bottom of frame
move $sp, $fp # restore the stack
@@ -543,12 +569,10 @@
*/
.extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_handle_fill_data
@@ -557,13 +581,11 @@
*/
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
- GENERATE_GLOBAL_POINTER
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ jal artLockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
- jal artLockObjectFromCode # (Object* obj, Thread*, $sp)
- move $a2, $sp # pass $sp
RETURN_IF_ZERO
END art_quick_lock_object
@@ -572,13 +594,11 @@
*/
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
- GENERATE_GLOBAL_POINTER
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ jal artUnlockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
- jal artUnlockObjectFromCode # (Object* obj, Thread*, $sp)
- move $a2, $sp # pass $sp
RETURN_IF_ZERO
END art_quick_unlock_object
@@ -587,7 +607,6 @@
*/
.extern artThrowClassCastException
ENTRY art_quick_check_cast
- GENERATE_GLOBAL_POINTER
addiu $sp, $sp, -16
.cfi_adjust_cfa_offset 16
sw $ra, 12($sp)
@@ -596,7 +615,8 @@
sw $a1, 4($sp)
sw $a0, 0($sp)
jal artIsAssignableFromCode
- nop
+ addiu $sp, $sp, -16 # reserve argument slots on the stack
+ addiu $sp, $sp, 16
beqz $v0, .Lthrow_class_cast_exception
lw $ra, 12($sp)
jr $ra
@@ -609,10 +629,9 @@
addiu $sp, $sp, 16
.cfi_adjust_cfa_offset -16
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a2, rSELF # pass Thread::Current
la $t9, artThrowClassCastException
- jr $t9 # artThrowClassCastException (Class*, Class*, Thread*, SP)
- move $a3, $sp # pass $sp
+ jr $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
END art_quick_check_cast
/*
@@ -621,7 +640,6 @@
* a0 = array, a1 = index, a2 = value
*/
ENTRY art_quick_aput_obj_with_null_and_bound_check
- GENERATE_GLOBAL_POINTER
bnez $a0, .Lart_quick_aput_obj_with_bound_check_gp_set
nop
b .Lart_quick_throw_null_pointer_exception_gp_set
@@ -629,9 +647,7 @@
END art_quick_aput_obj_with_null_and_bound_check
ENTRY art_quick_aput_obj_with_bound_check
- GENERATE_GLOBAL_POINTER
-.Lart_quick_aput_obj_with_bound_check_gp_set:
- lw $t0, ARRAY_LENGTH_OFFSET($a0)
+ lw $t0, MIRROR_ARRAY_LENGTH_OFFSET($a0)
sltu $t1, $a1, $t0
bnez $t1, .Lart_quick_aput_obj_gp_set
nop
@@ -641,19 +657,17 @@
END art_quick_aput_obj_with_bound_check
ENTRY art_quick_aput_obj
- GENERATE_GLOBAL_POINTER
-.Lart_quick_aput_obj_gp_set:
beqz $a2, .Ldo_aput_null
nop
- lw $t0, CLASS_OFFSET($a0)
- lw $t1, CLASS_OFFSET($a2)
- lw $t0, CLASS_COMPONENT_TYPE_OFFSET($t0)
+ lw $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
+ lw $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
+ lw $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
nop
.Ldo_aput:
sll $a1, $a1, 2
add $t0, $a0, $a1
- sw $a2, OBJECT_ARRAY_DATA_OFFSET($t0)
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
lw $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
srl $t1, $a0, 7
add $t1, $t1, $t0
@@ -663,7 +677,7 @@
.Ldo_aput_null:
sll $a1, $a1, 2
add $t0, $a0, $a1
- sw $a2, OBJECT_ARRAY_DATA_OFFSET($t0)
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
jr $ra
nop
.Lcheck_assignability:
@@ -678,7 +692,8 @@
move $a1, $t1
move $a0, $t0
jal artIsAssignableFromCode # (Class*, Class*)
- nop
+ addiu $sp, $sp, -16 # reserve argument slots on the stack
+ addiu $sp, $sp, 16
lw $ra, 28($sp)
lw $t9, 12($sp)
lw $a2, 8($sp)
@@ -690,10 +705,9 @@
nop
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
move $a1, $a2
- move $a2, rSELF # pass Thread::Current
la $t9, artThrowArrayStoreException
- jr $t9 # artThrowArrayStoreException(Class*, Class*, Thread*, SP)
- move $a3, $sp # pass $sp
+ jr $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
END art_quick_aput_obj
/*
@@ -703,12 +717,10 @@
*/
.extern artInitializeStaticStorageFromCode
ENTRY art_quick_initialize_static_storage
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a2, rSELF # pass Thread::Current
- # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*)
jal artInitializeStaticStorageFromCode
- move $a3, $sp # pass $sp
+ move $a2, rSELF # pass Thread::Current
RETURN_IF_RESULT_IS_NON_ZERO
END art_quick_initialize_static_storage
@@ -717,12 +729,10 @@
*/
.extern artInitializeTypeFromCode
ENTRY art_quick_initialize_type
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a2, rSELF # pass Thread::Current
- # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
jal artInitializeTypeFromCode
- move $a3, $sp # pass $sp
+ move $a2, rSELF # pass Thread::Current
RETURN_IF_RESULT_IS_NON_ZERO
END art_quick_initialize_type
@@ -732,12 +742,10 @@
*/
.extern artInitializeTypeAndVerifyAccessFromCode
ENTRY art_quick_initialize_type_and_verify_access
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a2, rSELF # pass Thread::Current
- # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
jal artInitializeTypeAndVerifyAccessFromCode
- move $a3, $sp # pass $sp
+ move $a2, rSELF # pass Thread::Current
RETURN_IF_RESULT_IS_NON_ZERO
END art_quick_initialize_type_and_verify_access
/*
@@ -745,12 +753,10 @@
*/
.extern artGetBooleanStaticFromCode
ENTRY art_quick_get_boolean_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
- jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
- move $a3, $sp # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_boolean_static
/*
@@ -758,12 +764,10 @@
*/
.extern artGetByteStaticFromCode
ENTRY art_quick_get_byte_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
- jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
- move $a3, $sp # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_byte_static
@@ -772,12 +776,10 @@
*/
.extern artGetCharStaticFromCode
ENTRY art_quick_get_char_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
- jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
- move $a3, $sp # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_char_static
/*
@@ -785,12 +787,10 @@
*/
.extern artGetShortStaticFromCode
ENTRY art_quick_get_short_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
- jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
- move $a3, $sp # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_short_static
@@ -799,12 +799,10 @@
*/
.extern artGet32StaticFromCode
ENTRY art_quick_get32_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
- jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
- move $a3, $sp # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get32_static
@@ -813,12 +811,10 @@
*/
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
- jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
- move $a3, $sp # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get64_static
@@ -827,12 +823,10 @@
*/
.extern artGetObjStaticFromCode
ENTRY art_quick_get_obj_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
move $a2, rSELF # pass Thread::Current
- jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
- move $a3, $sp # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_obj_static
@@ -841,12 +835,10 @@
*/
.extern artGetBooleanInstanceFromCode
ENTRY art_quick_get_boolean_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_boolean_instance
/*
@@ -854,12 +846,10 @@
*/
.extern artGetByteInstanceFromCode
ENTRY art_quick_get_byte_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_byte_instance
@@ -868,12 +858,10 @@
*/
.extern artGetCharInstanceFromCode
ENTRY art_quick_get_char_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_char_instance
/*
@@ -881,12 +869,10 @@
*/
.extern artGetShortInstanceFromCode
ENTRY art_quick_get_short_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_short_instance
@@ -895,12 +881,10 @@
*/
.extern artGet32InstanceFromCode
ENTRY art_quick_get32_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get32_instance
@@ -909,12 +893,10 @@
*/
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get64_instance
@@ -923,12 +905,10 @@
*/
.extern artGetObjInstanceFromCode
ENTRY art_quick_get_obj_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_NO_EXCEPTION
END art_quick_get_obj_instance
@@ -937,12 +917,10 @@
*/
.extern artSet8StaticFromCode
ENTRY art_quick_set8_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_set8_static
@@ -951,12 +929,10 @@
*/
.extern artSet16StaticFromCode
ENTRY art_quick_set16_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
- move $a3, rSELF # pass Thread::Current
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
+ move $a3, rSELF # pass Thread::Current
RETURN_IF_ZERO
END art_quick_set16_static
@@ -965,12 +941,10 @@
*/
.extern artSet32StaticFromCode
ENTRY art_quick_set32_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
- jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_set32_static
@@ -979,12 +953,10 @@
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a1, 64($sp) # pass referrer's Method*
+ lw $a1, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
- jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*, $sp)
- sw $sp, 20($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_set64_static
@@ -993,12 +965,10 @@
*/
.extern artSetObjStaticFromCode
ENTRY art_quick_set_obj_static
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a2, 64($sp) # pass referrer's Method*
+ lw $a2, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
move $a3, rSELF # pass Thread::Current
- jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*, $sp)
- sw $sp, 16($sp) # pass $sp
+ jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
RETURN_IF_ZERO
END art_quick_set_obj_static
@@ -1007,12 +977,10 @@
*/
.extern artSet8InstanceFromCode
ENTRY art_quick_set8_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a3, 64($sp) # pass referrer's Method*
+ lw $a3, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
- jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp)
- sw $sp, 20($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_set8_instance
@@ -1021,12 +989,10 @@
*/
.extern artSet16InstanceFromCode
ENTRY art_quick_set16_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a3, 64($sp) # pass referrer's Method*
+ lw $a3, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
- jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp)
- sw $sp, 20($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_set16_instance
@@ -1035,12 +1001,10 @@
*/
.extern artSet32InstanceFromCode
ENTRY art_quick_set32_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a3, 64($sp) # pass referrer's Method*
+ lw $a3, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
- jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp)
- sw $sp, 20($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_set32_instance
@@ -1049,11 +1013,11 @@
*/
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- sw rSELF, 16($sp) # pass Thread::Current
- jal artSet64InstanceFromCode # (field_idx, Object*, new_val, Thread*, $sp)
- sw $sp, 20($sp) # pass $sp
+ lw $t1, 0($sp) # load referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ sw rSELF, 20($sp) # pass Thread::Current
+ jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ sw $t1, 16($sp) # pass referrer's Method*
RETURN_IF_ZERO
END art_quick_set64_instance
@@ -1062,12 +1026,10 @@
*/
.extern artSetObjInstanceFromCode
ENTRY art_quick_set_obj_instance
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- lw $a3, 64($sp) # pass referrer's Method*
+ lw $a3, 0($sp) # pass referrer's Method*
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
sw rSELF, 16($sp) # pass Thread::Current
- jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp)
- sw $sp, 20($sp) # pass $sp
RETURN_IF_ZERO
END art_quick_set_obj_instance
@@ -1079,12 +1041,10 @@
*/
.extern artResolveStringFromCode
ENTRY art_quick_resolve_string
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a2, rSELF # pass Thread::Current
- # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*)
jal artResolveStringFromCode
- move $a3, $sp # pass $sp
+ move $a2, rSELF # pass Thread::Current
RETURN_IF_RESULT_IS_NON_ZERO
END art_quick_resolve_string
@@ -1093,11 +1053,9 @@
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a2, rSELF # pass Thread::Current
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
jal \entrypoint
- move $a3, $sp # pass $sp
+ move $a2, rSELF # pass Thread::Current
\return
END \name
.endm
@@ -1105,11 +1063,9 @@
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- GENERATE_GLOBAL_POINTER
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a3, rSELF # pass Thread::Current
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
jal \entrypoint
- sw $sp, 16($sp) # pass $sp
+ move $a3, rSELF # pass Thread::Current
\return
END \name
.endm
@@ -1122,18 +1078,16 @@
*/
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
- GENERATE_GLOBAL_POINTER
lh $a0, THREAD_FLAGS_OFFSET(rSELF)
bnez $a0, 1f
addi rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
jr $ra
nop
1:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ jal artTestSuspendFromCode # (Thread*)
move $a0, rSELF
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
- jal artTestSuspendFromCode # (Thread*, $sp)
- move $a1, $sp
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
/*
@@ -1142,14 +1096,12 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- sw $a0, 0($sp) # place proxy method at bottom of frame
- move $a2, rSELF # pass Thread::Current
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ move $a2, rSELF # pass Thread::Current
jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
- move $a3, $sp # pass $sp
+ addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
bnez $t0, 1f
mtc1 $v0, $f0 # place return value to FP return value
jr $ra
@@ -1163,45 +1115,101 @@
* dex method index.
*/
ENTRY art_quick_imt_conflict_trampoline
- GENERATE_GLOBAL_POINTER
lw $a0, 0($sp) # load caller Method*
- lw $a0, METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
+ lw $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
sll $t0, 2 # convert target method offset to bytes
add $a0, $t0 # get address of target method
- lw $a0, OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
+ lw $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
la $t9, art_quick_invoke_interface_trampoline
jr $t9
END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- move $a2, rSELF # pass Thread::Current
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ move $a2, rSELF # pass Thread::Current
jal artQuickResolutionTrampoline # (Method* called, receiver, Thread*, SP)
- move $a3, $sp # pass $sp
+ addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
beqz $v0, 1f
- lw $a0, 0($sp) # load resolved method to $a0
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ lw $a0, ARG_SLOT_SIZE($sp) # load resolved method to $a0
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
jr $v0 # tail call to method
nop
1:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
-UNIMPLEMENTED art_quick_generic_jni_trampoline
+ .extern artQuickGenericJniTrampoline
+ .extern artQuickGenericJniEndTrampoline
+ENTRY art_quick_generic_jni_trampoline
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ move $s8, $sp # save $sp to $s8
+ move $s3, $gp # save $gp to $s3
+
+ # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
+ move $a0, rSELF # pass Thread::Current
+ addiu $a1, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
+ jal artQuickGenericJniTrampoline # (Thread*, SP)
+ addiu $sp, $sp, -5120 # reserve space on the stack
+
+ # The C call will have registered the complete save-frame on success.
+ # The result of the call is:
+ # v0: ptr to native code, 0 on error.
+ # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
+ beq $v0, $zero, 1f # check entry error
+ move $t9, $v0 # save the code ptr
+ move $sp, $v1 # release part of the alloca
+
+ # Load parameters from stack into registers
+ lw $a0, 0($sp)
+ lw $a1, 4($sp)
+ lw $a2, 8($sp)
+
+ # Load FPRs the same as GPRs. Look at BuildNativeCallFrameStateMachine.
+ jalr $t9 # native call
+ lw $a3, 12($sp)
+ addiu $sp, $sp, 16 # remove arg slots
+
+ move $gp, $s3 # restore $gp from $s3
+
+ # result sign extension is handled in C code
+ # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
+ move $a0, rSELF # pass Thread::Current
+ move $a2, $v0 # pass result
+ move $a3, $v1
+ addiu $sp, $sp, -24 # reserve arg slots
+ jal artQuickGenericJniEndTrampoline
+ s.d $f0, 16($sp) # pass result_f
+ addiu $sp, $sp, 24 # remove arg slots
+
+ lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
+ bne $t0, $zero, 2f # check for pending exceptions
+ move $sp, $s8 # tear down the alloca
+
+ # tear down the callee-save frame
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+
+ mtc1 $v0, $f0 # place return value to FP return value
+ jr $ra
+ mtc1 $v1, $f1 # place return value to FP return value
+
+1:
+ move $sp, $s8 # tear down the alloca
+2:
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ DELIVER_PENDING_EXCEPTION
+END art_quick_generic_jni_trampoline
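The new generic JNI trampoline leans on two runtime helpers whose C-side contract is only implied by the comments above. A minimal sketch of the declarations as inferred from those comments; the exact parameter and return types are assumptions, not the runtime's actual headers:

    #include <cstdint>
    class Thread;  // runtime type, forward-declared for the sketch

    // The assembly expects two word-sized results from the first call: the native
    // code pointer in $v0 (0 on error) and the bottom of the used alloca area in
    // $v1. Packing them into a 64-bit return value is one way to model that on a
    // 32-bit target (assumption).
    extern "C" uint64_t artQuickGenericJniTrampoline(Thread* self, void* managed_sp);

    // Called after the native method returns: the GPR result goes in $a2/$a3 and
    // the raw FP result bits are spilled to the out-args area; the (possibly
    // sign-extended) final result comes back in $v0/$v1.
    extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
                                                        uint64_t gpr_result,
                                                        uint64_t fpr_result);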
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- move $a1, rSELF # pass Thread::Current
- jal artQuickToInterpreterBridge # (Method* method, Thread*, SP)
- move $a2, $sp # pass $sp
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ move $a1, rSELF # pass Thread::Current
+ jal artQuickToInterpreterBridge # (Method* method, Thread*, SP)
+ addiu $a2, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
bnez $t0, 1f
mtc1 $v0, $f0 # place return value to FP return value
jr $ra
@@ -1216,21 +1224,14 @@
.extern artInstrumentationMethodEntryFromCode
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- move $t0, $sp # remember bottom of caller's frame
- addiu $sp, $sp, -32 # space for args, pad (3 words), arguments (5 words)
- .cfi_adjust_cfa_offset 32
- sw $a0, 28($sp) # save arg0
- sw $ra, 16($sp) # pass $ra
- move $a3, $t0 # pass $sp
- jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, SP, LR)
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ sw $a0, 28($sp) # save arg0 in free arg slot
+ move $a3, $ra # pass $ra
+ jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, LR)
move $a2, rSELF # pass Thread::Current
move $t9, $v0 # $t9 holds reference to code
- lw $a0, 28($sp) # restore arg0
- addiu $sp, $sp, 32 # remove args
- .cfi_adjust_cfa_offset -32
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ lw $a0, 28($sp) # restore arg0 from free arg slot
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
jalr $t9 # call method
nop
END art_quick_instrumentation_entry
@@ -1239,34 +1240,35 @@
art_quick_instrumentation_exit:
.cfi_startproc
addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp
- GENERATE_GLOBAL_POINTER
+ .cpload $t9
move $ra, $zero # link register is to here, so clobber with 0 for later checks
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- move $t0, $sp # remember bottom of caller's frame
- addiu $sp, $sp, -48 # save return values and set up args
- .cfi_adjust_cfa_offset 48
- sw $v0, 32($sp)
+
+ addiu $sp, $sp, -16 # allocate temp storage on the stack
+ .cfi_adjust_cfa_offset 16
+ sw $v0, 12($sp)
.cfi_rel_offset 2, 32
- sw $v1, 36($sp)
+ sw $v1, 8($sp)
.cfi_rel_offset 3, 36
- s.s $f0, 40($sp)
- s.s $f1, 44($sp)
+ s.s $f0, 4($sp)
+ s.s $f1, 0($sp)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
s.s $f0, 16($sp) # pass fpr result
s.s $f1, 20($sp)
move $a2, $v0 # pass gpr result
move $a3, $v1
- move $a1, $t0 # pass $sp
+ addiu $a1, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res)
move $a0, rSELF # pass Thread::Current
move $t0, $v0 # set aside returned link register
move $ra, $v1 # set link register for deoptimization
- lw $v0, 32($sp) # restore return values
- lw $v1, 36($sp)
- l.s $f0, 40($sp)
- l.s $f1, 44($sp)
+ addiu $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE # arg slots + refs_only callee save frame
+ lw $v0, 12($sp) # restore return values
+ lw $v1, 8($sp)
+ l.s $f0, 4($sp)
+ l.s $f1, 0($sp)
jr $t0 # return
- addiu $sp, $sp, 112 # 48 bytes of args + 64 bytes of callee save frame
- .cfi_adjust_cfa_offset -112
+ addiu $sp, $sp, 16 # remove temp storage from stack
+ .cfi_adjust_cfa_offset -16
END art_quick_instrumentation_exit
/*
@@ -1274,14 +1276,11 @@
* will long jump to the upcall with a special exception of -1.
*/
.extern artDeoptimize
- .extern artEnterInterpreterFromDeoptimize
ENTRY art_quick_deoptimize
- GENERATE_GLOBAL_POINTER
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- move $a0, rSELF # pass Thread::current
- jal artDeoptimize # artDeoptimize(Thread*, SP)
+ jal artDeoptimize # artDeoptimize(Thread*)
# Returns caller method's frame size.
- move $a1, $sp # pass $sp
+ move $a0, rSELF # pass Thread::current
END art_quick_deoptimize
/*
@@ -1294,7 +1293,7 @@
* $a1: high word
* $a2: shift count
*/
-ENTRY art_quick_shl_long
+ENTRY_NO_GP art_quick_shl_long
/* shl-long vAA, vBB, vCC */
sll $v0, $a0, $a2 # rlo<- alo << (shift&31)
not $v1, $a2 # rhi<- 31-shift (shift is 5b)
@@ -1318,8 +1317,7 @@
* $a1: high word
* $a2: shift count
*/
- .global art_quick_shr_long
-ENTRY art_quick_shr_long
+ENTRY_NO_GP art_quick_shr_long
sra $v1, $a1, $a2 # rhi<- ahi >> (shift&31)
srl $v0, $a0, $a2 # rlo<- alo >> (shift&31)
sra $a3, $a1, 31 # $a3<- sign(ah)
@@ -1344,8 +1342,7 @@
* r2: shift count
*/
/* ushr-long vAA, vBB, vCC */
- .global art_quick_ushr_long
-ENTRY art_quick_ushr_long
+ENTRY_NO_GP art_quick_ushr_long
srl $v1, $a1, $a2 # rhi<- ahi >> (shift&31)
srl $v0, $a0, $a2 # rlo<- alo >> (shift&31)
not $a0, $a2 # alo<- 31-shift (shift is 5b)
@@ -1358,12 +1355,5 @@
movn $v1, $zero, $a2 # rhi<- 0 (if shift&0x20)
END art_quick_ushr_long
-ENTRY art_quick_indexof
- jr $ra
- nop
-END art_quick_indexof
-
-ENTRY art_quick_string_compareto
- jr $ra
- nop
-END art_quick_string_compareto
+UNIMPLEMENTED art_quick_indexof
+UNIMPLEMENTED art_quick_string_compareto
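Across the hunks above, the quick field-access entrypoints stop forwarding the caller's $sp: the referrer's Method* is read from the caller frame at 0($sp) before the callee-save frame is set up, and the C helpers are called without the trailing stack-pointer argument. A rough sketch of the resulting C-side signatures, inferred only from the comments in this file (the types are assumptions):

    #include <cstdint>
    class Thread;  // runtime types, forward-declared for the sketch
    namespace mirror { class ArtMethod; class Object; }

    // Previously these helpers also took the caller's SP as a final argument.
    extern "C" int32_t artGet32StaticFromCode(uint32_t field_idx,
                                              mirror::ArtMethod* referrer,
                                              Thread* self);
    // Setters return 0 on success, matching the RETURN_IF_ZERO macro above.
    extern "C" int artSet32InstanceFromCode(uint32_t field_idx,
                                            mirror::Object* obj,
                                            int32_t new_value,
                                            mirror::ArtMethod* referrer,
                                            Thread* self);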
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index 2a8bcf0..5fbffbc 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -40,8 +40,7 @@
constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
- (type == Runtime::kRefsAndArgs ? 0 : 3) + 1 /* Method* */) *
- kMipsPointerSize, kStackAlignment);
+ 1 /* Method* */) * kMipsPointerSize, kStackAlignment);
}
constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
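With the kRefsAndArgs special case removed, the frame size is just the spilled core registers plus the Method* slot, rounded up to the stack alignment. As a worked example under assumed inputs (a save type spilling 8 core registers, 4-byte MIPS pointers, 16-byte stack alignment): RoundUp((8 + 1) * 4, 16) = RoundUp(36, 16) = 48 bytes. The actual per-type spill counts come from MipsCalleeSaveCoreSpills() and are not shown in this hunk.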
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c9b9f04..0fcd297 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -41,7 +41,7 @@
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
}
@@ -530,18 +530,6 @@
#endif
}
- // Method with 32b arg0, 32b arg1, 64b arg2
- size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
- Thread* self, mirror::ArtMethod* referrer) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
- // Just pass through.
- return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
-#else
- // TODO: Needs 4-param invoke.
- return 0;
-#endif
- }
-
static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
int32_t offset;
#ifdef __LP64__
@@ -744,17 +732,17 @@
EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
}
} else {
- bool lock; // Whether to lock or unlock in this step.
+ bool take_lock; // Whether to lock or unlock in this step.
if (counts[index] == 0) {
- lock = true;
+ take_lock = true;
} else if (counts[index] == kThinLockLoops) {
- lock = false;
+ take_lock = false;
} else {
// Randomly.
- lock = r.next() % 2 == 0;
+ take_lock = r.next() % 2 == 0;
}
- if (lock) {
+ if (take_lock) {
test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
self);
counts[index]++;
@@ -1148,7 +1136,7 @@
// For some reason this does not work, as the type_idx is artificial and outside what the
// resolved types of c_obj allow...
- if (false) {
+ if ((false)) {
// Use an arbitrary method from c to use as referrer
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)), // arbitrary
@@ -1221,13 +1209,12 @@
// Use array so we can index into it and use a matrix for expected results
// Setup: The first half is standard. The second half uses a non-zero offset.
// TODO: Shared backing arrays.
- static constexpr size_t kBaseStringCount = 8;
- const char* c[kBaseStringCount] = { "", "", "a", "aa", "ab",
+ const char* c[] = { "", "", "a", "aa", "ab",
"aacaacaacaacaacaac", // This one's under the default limit to go to __memcmp16.
"aacaacaacaacaacaacaacaacaacaacaacaac", // This one's over.
"aacaacaacaacaacaacaacaacaacaacaacaaca" }; // As is this one. We need a separate one to
// defeat object-equal optimizations.
-
+ static constexpr size_t kBaseStringCount = arraysize(c);
static constexpr size_t kStringCount = 2 * kBaseStringCount;
StackHandleScope<kStringCount> hs(self);
@@ -1304,8 +1291,8 @@
}
-static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetBooleanStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
@@ -1333,14 +1320,13 @@
std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetByteStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 5;
- int8_t values[num_values] = { -128, -64, 0, 64, 127 };
+ int8_t values[] = { -128, -64, 0, 64, 127 };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
@@ -1364,13 +1350,12 @@
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 5;
- uint8_t values[num_values] = { 0, true, 2, 128, 0xFF };
+ uint8_t values[] = { 0, true, 2, 128, 0xFF };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
@@ -1401,10 +1386,9 @@
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 5;
- int8_t values[num_values] = { -128, -64, 0, 64, 127 };
+ int8_t values[] = { -128, -64, 0, 64, 127 };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
@@ -1431,14 +1415,13 @@
#endif
}
-static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetCharStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 6;
- uint16_t values[num_values] = { 0, 1, 2, 255, 32768, 0xFFFF };
+ uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
@@ -1460,14 +1443,13 @@
std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetShortStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetShortStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 6;
- int16_t values[num_values] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
+ int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
@@ -1494,10 +1476,9 @@
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 6;
- uint16_t values[num_values] = { 0, 1, 2, 255, 32768, 0xFFFF };
+ uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
@@ -1527,10 +1508,9 @@
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 6;
- int16_t values[num_values] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
+ int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
@@ -1557,14 +1537,13 @@
#endif
}
-static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet32Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 7;
- uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
+ uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
@@ -1592,10 +1571,9 @@
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
- constexpr size_t num_values = 7;
- uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
+ uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
@@ -1647,8 +1625,8 @@
}
#endif
-static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetObjStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1712,14 +1690,13 @@
// TODO: Complete these tests for 32b architectures.
-static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet64Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
- constexpr size_t num_values = 8;
- uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
+ uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
values[i],
StubTest::GetEntrypoint(self, kQuickSet64Static),
@@ -1735,6 +1712,7 @@
EXPECT_EQ(res, values[i]) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1746,10 +1724,9 @@
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
- constexpr size_t num_values = 8;
- uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
+ uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
- for (size_t i = 0; i < num_values; ++i) {
+ for (size_t i = 0; i < arraysize(values); ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
@@ -1772,6 +1749,7 @@
EXPECT_EQ(res, static_cast<int64_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1801,47 +1779,47 @@
Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetSFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i)));
Primitive::Type type = f->GetTypeAsPrimitiveType();
switch (type) {
case Primitive::Type::kPrimBoolean:
if (test_type == type) {
- GetSetBooleanStatic(&obj, &f, self, m.Get(), test);
+ GetSetBooleanStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimByte:
if (test_type == type) {
- GetSetByteStatic(&obj, &f, self, m.Get(), test);
+ GetSetByteStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimChar:
if (test_type == type) {
- GetSetCharStatic(&obj, &f, self, m.Get(), test);
+ GetSetCharStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimShort:
if (test_type == type) {
- GetSetShortStatic(&obj, &f, self, m.Get(), test);
+ GetSetShortStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Static(&obj, &f, self, m.Get(), test);
+ GetSet32Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Static(&obj, &f, self, m.Get(), test);
+ GetSet64Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(&obj, &f, self, m.Get(), test);
+ GetSetObjStatic(&f, self, m.Get(), test);
}
break;
@@ -1856,8 +1834,8 @@
Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetIFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i)));
Primitive::Type type = f->GetTypeAsPrimitiveType();
switch (type) {
@@ -2109,10 +2087,10 @@
// Use array so we can index into it and use a matrix for expected results
// Setup: The first half is standard. The second half uses a non-zero offset.
// TODO: Shared backing arrays.
- static constexpr size_t kStringCount = 7;
- const char* c_str[kStringCount] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
- static constexpr size_t kCharCount = 5;
- const char c_char[kCharCount] = { 'a', 'b', 'c', 'd', 'e' };
+ const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
+ static constexpr size_t kStringCount = arraysize(c_str);
+ const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
+ static constexpr size_t kCharCount = arraysize(c_char);
StackHandleScope<kStringCount> hs(self);
Handle<mirror::String> s[kStringCount];
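The stub_test.cc hunks repeatedly replace hand-maintained kFooCount constants with arraysize() over a braced initializer, so the count can never drift from the list. A minimal sketch of such a helper, assuming the usual deduced-bound idiom rather than ART's actual base/macros.h definition:

    #include <cstddef>

    // Deduces N from the array bound at compile time; never evaluated at runtime.
    template <typename T, std::size_t N>
    constexpr std::size_t arraysize(const T (&)[N]) {
      return N;
    }

    // Usage mirroring the test changes above:
    //   uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
    //   for (size_t i = 0; i < arraysize(values); ++i) { ... }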
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 78b97e5..fea16da 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -164,10 +164,14 @@
SIZE(\name, 0)
END_MACRO
-MACRO0(SETUP_GOT_NOSAVE)
+MACRO1(SETUP_GOT_NOSAVE, got_reg)
#ifndef __APPLE__
- call __x86.get_pc_thunk.bx
- addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ .ifc RAW_VAR(got_reg, 0), ebx
+ call __x86.get_pc_thunk.bx
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+ .else
+ .error "Unknown GOT register \got_reg"
+ .endif
#endif
END_MACRO
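As rewritten, SETUP_GOT_NOSAVE takes the GOT register as an explicit parameter and rejects anything other than ebx at assembly time, so call sites in this file presumably now spell the invocation as SETUP_GOT_NOSAVE ebx.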
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index c9f5a25..5a88f80 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -19,20 +19,8 @@
#include "asm_support.h"
-// Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 156
-// Offset of field Thread::card_table_ verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 120
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 124
-// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
-#define THREAD_ID_OFFSET 12
-
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 32
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 32
-// Expected size of a heap reference
-#define HEAP_REFERENCE_SIZE 4
-
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index a7beaa9..49aa326 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -17,9 +17,9 @@
#include "context_x86.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
+
namespace art {
namespace x86 {
@@ -72,6 +72,16 @@
}
}
+bool X86Context::GetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t* val ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
+bool X86Context::SetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t value ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
void X86Context::DoLongJump() {
#if defined(__i386__)
// Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index a350b25..01c8b82 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -62,15 +62,9 @@
bool SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
- bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE;
- bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
void SmashCallerSaves() OVERRIDE;
void DoLongJump() OVERRIDE;
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index f2b91cd..48d6c80 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -18,102 +18,16 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
- const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
-
-// Math entrypoints.
-extern "C" int64_t art_quick_d2l(double);
-extern "C" int64_t art_quick_f2l(float);
-extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
-extern "C" int64_t art_quick_lmod(int64_t, int64_t);
-extern "C" int64_t art_quick_lmul(int64_t, int64_t);
-extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-extern "C" void* art_quick_memcpy(void*, const void*, size_t);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+ const mirror::Class* ref_class);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 9d74ef5..ad962e2 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -231,7 +231,7 @@
return pc - startpc;
}
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
// For the Intel architectures we need to go to an assembly language
// stub. This is because the 32 bit call to longjmp is much different
// from the 64 bit ABI call and pushing things onto the stack inside this
@@ -284,7 +284,7 @@
*out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int, siginfo_t*, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
@@ -324,7 +324,7 @@
// The offset from fs is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault.
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int, siginfo_t*, void* context) {
// These are the instructions to check for. The first one is the mov eax, fs:[xxx]
// where xxx is the offset of the suspend trigger.
#if defined(__x86_64__)
@@ -398,7 +398,7 @@
// This is done before any frame is established in the method. The return
// address for the previous method is on the stack at ESP.
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int, siginfo_t* info, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
new file mode 100644
index 0000000..a12773d
--- /dev/null
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_x86.h"
+
+#include <fstream>
+#include <sstream>
+
+#include "arch/x86_64/instruction_set_features_x86_64.h"
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+namespace art {
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
+ const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
+ bool x86_64) {
+ bool known_variant = false;
+ bool smp = true; // Conservative default.
+ static const char* x86_variants_with_ssse3[] = {
+ "atom"
+ };
+ bool has_SSSE3 = FindVariantInArray(x86_variants_with_ssse3, arraysize(x86_variants_with_ssse3),
+ variant);
+ bool has_SSE4_1 = false;
+ bool has_SSE4_2 = false;
+ bool has_AVX = false;
+ bool has_AVX2 = false;
+ if (!known_variant && variant != "default") {
+ LOG(WARNING) << "Unexpected CPU variant for X86 using defaults: " << variant;
+ }
+
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromBitmap(uint32_t bitmap,
+ bool x86_64) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool has_SSSE3 = (bitmap & kSsse3Bitfield) != 0;
+ bool has_SSE4_1 = (bitmap & kSse4_1Bitfield) != 0;
+ bool has_SSE4_2 = (bitmap & kSse4_2Bitfield) != 0;
+ bool has_AVX = (bitmap & kAvxBitfield) != 0;
+ bool has_AVX2 = (bitmap & kAvx2Bitfield) != 0;
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
+ const bool smp = true;
+
+#ifndef __SSSE3__
+ const bool has_SSSE3 = false;
+#else
+ const bool has_SSSE3 = true;
+#endif
+
+#ifndef __SSE4_1__
+ const bool has_SSE4_1 = false;
+#else
+ const bool has_SSE4_1 = true;
+#endif
+
+#ifndef __SSE4_2__
+ const bool has_SSE4_2 = false;
+#else
+ const bool has_SSE4_2 = true;
+#endif
+
+#ifndef __AVX__
+ const bool has_AVX = false;
+#else
+ const bool has_AVX = true;
+#endif
+
+#ifndef __AVX2__
+ const bool has_AVX2 = false;
+#else
+ const bool has_AVX2 = true;
+#endif
+
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+ bool has_SSSE3 = false;
+ bool has_SSE4_1 = false;
+ bool has_SSE4_2 = false;
+ bool has_AVX = false;
+ bool has_AVX2 = false;
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("flags") != std::string::npos) {
+ LOG(INFO) << "found flags";
+ if (line.find("ssse3") != std::string::npos) {
+ has_SSSE3 = true;
+ }
+ if (line.find("sse4_1") != std::string::npos) {
+ has_SSE4_1 = true;
+ }
+ if (line.find("sse4_2") != std::string::npos) {
+ has_SSE4_2 = true;
+ }
+ if (line.find("avx") != std::string::npos) {
+ has_AVX = true;
+ }
+ if (line.find("avx2") != std::string::npos) {
+ has_AVX2 = true;
+ }
+ } else if (line.find("processor") != std::string::npos &&
+ line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromHwcap(bool x86_64) {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines(x86_64);
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromAssembly(bool x86_64) {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines(x86_64);
+}
+
+bool X86InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (GetInstructionSet() != other->GetInstructionSet()) {
+ return false;
+ }
+ const X86InstructionSetFeatures* other_as_x86 = other->AsX86InstructionSetFeatures();
+ return (IsSmp() == other->IsSmp()) &&
+ (has_SSSE3_ == other_as_x86->has_SSSE3_) &&
+ (has_SSE4_1_ == other_as_x86->has_SSE4_1_) &&
+ (has_SSE4_2_ == other_as_x86->has_SSE4_2_) &&
+ (has_AVX_ == other_as_x86->has_AVX_) &&
+ (has_AVX2_ == other_as_x86->has_AVX2_);
+}
+
+uint32_t X86InstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) |
+ (has_SSSE3_ ? kSsse3Bitfield : 0) |
+ (has_SSE4_1_ ? kSse4_1Bitfield : 0) |
+ (has_SSE4_2_ ? kSse4_2Bitfield : 0) |
+ (has_AVX_ ? kAvxBitfield : 0) |
+ (has_AVX2_ ? kAvx2Bitfield : 0);
+}
+
+std::string X86InstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (has_SSSE3_) {
+ result += ",ssse3";
+ } else {
+ result += ",-ssse3";
+ }
+ if (has_SSE4_1_) {
+ result += ",sse4.1";
+ } else {
+ result += ",-sse4.1";
+ }
+ if (has_SSE4_2_) {
+ result += ",sse4.2";
+ } else {
+ result += ",-sse4.2";
+ }
+ if (has_AVX_) {
+ result += ",avx";
+ } else {
+ result += ",-avx";
+ }
+ if (has_AVX2_) {
+ result += ",avx2";
+ } else {
+ result += ",-avx2";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* X86InstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, bool x86_64,
+ std::string* error_msg) const {
+ bool has_SSSE3 = has_SSSE3_;
+ bool has_SSE4_1 = has_SSE4_1_;
+ bool has_SSE4_2 = has_SSE4_2_;
+ bool has_AVX = has_AVX_;
+ bool has_AVX2 = has_AVX2_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "ssse3") {
+ has_SSSE3 = true;
+ } else if (feature == "-ssse3") {
+ has_SSSE3 = false;
+ } else if (feature == "sse4.1") {
+ has_SSE4_1 = true;
+ } else if (feature == "-sse4.1") {
+ has_SSE4_1 = false;
+ } else if (feature == "sse4.2") {
+ has_SSE4_2 = true;
+ } else if (feature == "-sse4.2") {
+ has_SSE4_2 = false;
+ } else if (feature == "avx") {
+ has_AVX = true;
+ } else if (feature == "-avx") {
+ has_AVX = false;
+ } else if (feature == "avx2") {
+ has_AVX2 = true;
+ } else if (feature == "-avx2") {
+ has_AVX2 = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+} // namespace art
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
new file mode 100644
index 0000000..926fabb
--- /dev/null
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_INSTRUCTION_SET_FEATURES_X86_H_
+#define ART_RUNTIME_ARCH_X86_INSTRUCTION_SET_FEATURES_X86_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the X86 architecture.
+class X86InstructionSetFeatures : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg,
+ bool x86_64 = false);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromBitmap(uint32_t bitmap, bool x86_64 = false);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const X86InstructionSetFeatures* FromCppDefines(bool x86_64 = false);
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromCpuInfo(bool x86_64 = false);
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromHwcap(bool x86_64 = false);
+
+ // Use assembly tests of the current runtime (i.e. kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const X86InstructionSetFeatures* FromAssembly(bool x86_64 = false);
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ return kX86;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+ std::string GetFeatureString() const OVERRIDE;
+
+ virtual ~X86InstructionSetFeatures() {}
+
+ protected:
+ // Parse a string of the form "ssse3", adding these features to a new InstructionSetFeatures.
+ virtual const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE {
+ return AddFeaturesFromSplitString(smp, features, false, error_msg);
+ }
+
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ bool x86_64, std::string* error_msg) const;
+
+ X86InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2,
+ bool has_AVX, bool has_AVX2)
+ : InstructionSetFeatures(smp), has_SSSE3_(has_SSSE3), has_SSE4_1_(has_SSE4_1),
+ has_SSE4_2_(has_SSE4_2), has_AVX_(has_AVX), has_AVX2_(has_AVX2) {
+ }
+
+ private:
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kSsse3Bitfield = 2,
+ kSse4_1Bitfield = 4,
+ kSse4_2Bitfield = 8,
+ kAvxBitfield = 16,
+ kAvx2Bitfield = 32,
+ };
+
+ const bool has_SSSE3_; // x86 128bit SIMD - Supplemental SSE.
+ const bool has_SSE4_1_; // x86 128bit SIMD SSE4.1.
+ const bool has_SSE4_2_; // x86 128bit SIMD SSE4.2.
+ const bool has_AVX_; // x86 256bit SIMD AVX.
+ const bool has_AVX2_; // x86 256bit SIMD AVX 2.0.
+
+ DISALLOW_COPY_AND_ASSIGN(X86InstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_INSTRUCTION_SET_FEATURES_X86_H_
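
The bitmap positions declared in the enum above line up with the AsBitmap() values checked by the test below (1 for the smp-only "default" variant, 3 for "atom", which adds SSSE3). The following sketch shows how such a bitmap would be composed, assuming each enabled flag contributes its enum value; AsBitmap()'s real implementation is not shown in this hunk, so this is only an illustration:

#include <cstdint>

uint32_t ComposeX86FeatureBitmap(bool smp, bool ssse3, bool sse4_1,
                                 bool sse4_2, bool avx, bool avx2) {
  uint32_t bitmap = 0;
  if (smp)    bitmap |= 1;   // kSmpBitfield
  if (ssse3)  bitmap |= 2;   // kSsse3Bitfield
  if (sse4_1) bitmap |= 4;   // kSse4_1Bitfield
  if (sse4_2) bitmap |= 8;   // kSse4_2Bitfield
  if (avx)    bitmap |= 16;  // kAvxBitfield
  if (avx2)   bitmap |= 32;  // kAvx2Bitfield
  return bitmap;  // "default" -> 1, "atom" -> 3, matching the test expectations below.
}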
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
new file mode 100644
index 0000000..d231beb
--- /dev/null
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_x86.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromDefaultVariant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_features(
+ InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 1U);
+}
+
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) {
+ // Build features for a 32-bit x86 atom processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_features(
+ InstructionSetFeatures::FromVariant(kX86, "atom", &error_msg));
+ ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+ EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 3U);
+
+ // Build features for a 32-bit x86 default processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+ InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ x86_default_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_default_features->AsBitmap(), 1U);
+
+ // Build features for a 64-bit x86-64 atom processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+ InstructionSetFeatures::FromVariant(kX86_64, "atom", &error_msg));
+ ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+ EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ x86_64_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_64_features->AsBitmap(), 3U);
+
+ EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+ EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+ EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
+}
+
+} // namespace art
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index 70c0ae2..1f0900e 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -37,7 +37,7 @@
andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
subl %ebx, %esp // reserve stack space for argument array
- SETUP_GOT_NOSAVE // reset ebx to GOT table
+ SETUP_GOT_NOSAVE ebx // reset ebx to GOT table
lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
pushl 20(%ebp) // push size of region to memcpy
pushl 16(%ebp) // push arg array as source of memcpy
@@ -46,7 +46,7 @@
addl LITERAL(12), %esp // pop arguments to memcpy
mov 12(%ebp), %eax // move method pointer into eax
mov %eax, (%esp) // push method pointer onto stack
- call *METHOD_PORTABLE_CODE_OFFSET(%eax) // call the method
+ call *MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
POP ebx // pop ebx
POP ebp // pop ebp
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index a158e6d..1ce01c4 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -22,12 +22,21 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
-MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
+MACRO2(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
- subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
- CFI_ADJUST_CFA_OFFSET(16)
+ subl MACRO_LITERAL(12), %esp // Grow stack by 3 words.
+ CFI_ADJUST_CFA_OFFSET(12)
+ SETUP_GOT_NOSAVE RAW_VAR(got_reg, 0)
+ // Load Runtime::instance_ from GOT.
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
+ movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
+ // Push save all callee-save method.
+ pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
+ CFI_ADJUST_CFA_OFFSET(4)
+ // Store esp as the top quick frame.
+ movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 3*4 + 16 + 4)
@@ -39,12 +48,21 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly)
*/
-MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
+MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
- subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
- CFI_ADJUST_CFA_OFFSET(16)
+ subl MACRO_LITERAL(12), %esp // Grow stack by 3 words.
+ CFI_ADJUST_CFA_OFFSET(12)
+ SETUP_GOT_NOSAVE VAR(got_reg, 0)
+ // Load Runtime::instance_ from GOT.
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
+ movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
+ // Push save refs-only callee-save method.
+ pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
+ CFI_ADJUST_CFA_OFFSET(4)
+ // Store esp as the top quick frame.
+ movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
@@ -53,7 +71,7 @@
#endif
END_MACRO
-MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME)
addl MACRO_LITERAL(16), %esp // Unwind stack up to saved values
CFI_ADJUST_CFA_OFFSET(-16)
POP ebp // Restore callee saves (ebx is saved/restored by the upcall)
@@ -65,14 +83,22 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
*/
-MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+MACRO2(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves
PUSH esi
PUSH ebp
PUSH ebx // Save args
PUSH edx
PUSH ecx
- PUSH eax // Align stack, eax will be clobbered by Method*
+ SETUP_GOT_NOSAVE VAR(got_reg, 0)
+ // Load Runtime::instance_ from GOT.
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
+ movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
+ // Push save refs and args callee-save method.
+ pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
+ CFI_ADJUST_CFA_OFFSET(4)
+ // Store esp as the top quick frame.
+ movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
@@ -81,7 +107,23 @@
#endif
END_MACRO
-MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs) where the method is passed in EAX.
+ */
+MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX)
+ PUSH edi // Save callee saves
+ PUSH esi
+ PUSH ebp
+ PUSH ebx // Save args
+ PUSH edx
+ PUSH ecx
+ PUSH eax // Store the ArtMethod reference at the bottom of the stack.
+ // Store esp as the top quick frame.
+ movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
+END_MACRO
+
+MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
addl MACRO_LITERAL(4), %esp // Remove padding
CFI_ADJUST_CFA_OFFSET(-4)
POP ecx // Restore args except eax
@@ -97,59 +139,54 @@
* exception is Thread::Current()->exception_.
*/
MACRO0(DELIVER_PENDING_EXCEPTION)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save callee saves for throw
- mov %esp, %ecx
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save callee saves for throw
// Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // Alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ecx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ subl MACRO_LITERAL(12), %esp // Alignment padding
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
- int3 // unreached
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
+ int3 // unreached
END_MACRO
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov %esp, %ecx
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ecx // pass SP
+ subl MACRO_LITERAL(12), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
mov %esp, %ecx
// Outgoing argument set up
- PUSH eax // alignment padding
- PUSH ecx // pass SP
+ subl MACRO_LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov %esp, %edx
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- PUSH edx // pass SP
+ PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
@@ -204,15 +241,7 @@
*/
MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
- // return address
- PUSH edi
- PUSH esi
- PUSH ebp
- PUSH ebx // Save args
- PUSH edx
- PUSH ecx
- PUSH eax // <-- callee save Method* to go here
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx
movl %esp, %edx // remember SP
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // alignment padding
@@ -224,7 +253,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
movl %edx, %edi // save code pointer in EDI
addl MACRO_LITERAL(36), %esp // Pop arguments skip eax
CFI_ADJUST_CFA_OFFSET(-36)
@@ -275,7 +304,7 @@
andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
subl %ebx, %esp // reserve stack space for argument array
- SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
+ SETUP_GOT_NOSAVE ebx // clobbers ebx (harmless here)
lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
pushl 20(%ebp) // push size of region to memcpy
pushl 16(%ebp) // push arg array as source of memcpy
@@ -287,7 +316,7 @@
mov 4(%esp), %ecx // copy arg1 into ecx
mov 8(%esp), %edx // copy arg2 into edx
mov 12(%esp), %ebx // copy arg3 into ebx
- call *METHOD_QUICK_CODE_OFFSET(%eax) // call the method
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
POP ebx // pop ebx
@@ -311,120 +340,127 @@
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %edx // remember SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // push padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH edx // pass SP
+ subl MACRO_LITERAL(12), %esp // push padding
+ CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %edx // remember SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // push padding
- PUSH edx // pass SP
+ subl MACRO_LITERAL(8), %esp // push padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %edx // remember SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH edx // pass SP
+ PUSH eax // push padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
- addl MACRO_LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %edx // remember SP
- mov 32(%esp), %ecx // get referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH edx // pass SP
+ mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
+ PUSH eax // push padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass referrer
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, referrer, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, referrer, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- mov 32(%esp), %edx // get referrer
- subl MACRO_LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %edx // get referrer
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- // Outgoing argument set up
PUSH edx // pass referrer
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, referrer, Thread*, SP)
- addl MACRO_LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, referrer, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
+MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
+ subl MACRO_LITERAL(12), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH ebx // pass referrer
+ PUSH edx // pass arg3
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, referrer, Thread*)
+ addl LITERAL(32), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-32)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
+ END_FUNCTION RAW_VAR(c_name, 0)
+END_MACRO
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
testl %eax, %eax // eax == 0 ?
@@ -443,9 +479,8 @@
END_MACRO
MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
- mov %fs:THREAD_EXCEPTION_OFFSET, %ebx // get exception field
- testl %ebx, %ebx // ebx == 0 ?
- jnz 1f // if ebx != 0 goto 1
+ cmpl MACRO_LITERAL(0),%fs:THREAD_EXCEPTION_OFFSET // exception field == 0 ?
+ jne 1f // if exception field != 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
@@ -566,7 +601,7 @@
testl %eax, %eax // null check object/eax
jz .Lslow_lock
.Lretry_lock:
- movl LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
+ movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
test LITERAL(0xC0000000), %ecx // test the 2 high bits.
jne .Lslow_lock // slow path if either of the two high bits are set.
movl %fs:THREAD_ID_OFFSET, %edx // edx := thread id
@@ -575,11 +610,11 @@
// unlocked case - %edx holds thread id with count of 0
movl %eax, %ecx // remember object in case of retry
xor %eax, %eax // eax == 0 for comparison with lock word in cmpxchg
- lock cmpxchg %edx, LOCK_WORD_OFFSET(%ecx)
+ lock cmpxchg %edx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%ecx)
jnz .Lcmpxchg_fail // cmpxchg failed retry
ret
.Lcmpxchg_fail:
- movl %ecx, %eax // restore eax
+ movl %ecx, %eax // restore eax
jmp .Lretry_lock
.Lalready_thin:
cmpw %cx, %dx // do we hold the lock already?
@@ -587,28 +622,28 @@
addl LITERAL(65536), %ecx // increment recursion count
test LITERAL(0xC0000000), %ecx // overflowed if either of top two bits are set
jne .Lslow_lock // count overflowed so go slow
- movl %ecx, LOCK_WORD_OFFSET(%eax) // update lockword, cmpxchg not necessary as we hold lock
+ // update lockword, cmpxchg not necessary as we hold lock
+ movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax)
ret
.Lslow_lock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %edx // remember SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // push padding
- PUSH edx // pass SP
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
DEFINE_FUNCTION art_quick_unlock_object
testl %eax, %eax // null check object/eax
jz .Lslow_unlock
- movl LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
+ movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
movl %fs:THREAD_ID_OFFSET, %edx // edx := thread id
test LITERAL(0xC0000000), %ecx
jnz .Lslow_unlock // lock word contains a monitor
@@ -616,25 +651,24 @@
jne .Lslow_unlock
cmpl LITERAL(65536), %ecx
jae .Lrecursive_thin_unlock
- movl LITERAL(0), LOCK_WORD_OFFSET(%eax)
+ movl LITERAL(0), MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax)
ret
.Lrecursive_thin_unlock:
subl LITERAL(65536), %ecx
- mov %ecx, LOCK_WORD_OFFSET(%eax)
+ mov %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax)
ret
.Lslow_unlock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %edx // remember SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // push padding
- PUSH edx // pass SP
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
@@ -663,15 +697,14 @@
POP ecx
addl LITERAL(4), %esp
CFI_ADJUST_CFA_OFFSET(-12)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov %esp, %edx
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- PUSH edx // pass SP
+ PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
+ call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
int3 // unreached
END_FUNCTION art_quick_check_cast
@@ -687,7 +720,7 @@
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
- movl ARRAY_LENGTH_OFFSET(%eax), %ebx
+ movl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ebx
cmpl %ebx, %ecx
jb SYMBOL(art_quick_aput_obj)
mov %ecx, %eax
@@ -698,18 +731,19 @@
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
jz .Ldo_aput_null
- movl CLASS_OFFSET(%eax), %ebx
- movl CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
- cmpl CLASS_OFFSET(%edx), %ebx // value's type == array's component type - trivial assignability
+ movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
+ // value's type == array's component type - trivial assignability
+ cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx
jne .Lcheck_assignability
.Ldo_aput:
- movl %edx, OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(7), %eax
movb %dl, (%edx, %eax)
ret
.Ldo_aput_null:
- movl %edx, OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
ret
.Lcheck_assignability:
PUSH eax // save arguments
@@ -717,7 +751,7 @@
PUSH edx
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
- pushl CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
+ pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass arg1 - component type of the array
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
@@ -728,7 +762,7 @@
POP edx
POP ecx
POP eax
- movl %edx, OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // do the aput
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // do the aput
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(7), %eax
movb %dl, (%edx, %eax)
@@ -737,20 +771,19 @@
POP edx
POP ecx
POP eax
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov %esp, %ecx
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- PUSH ecx // pass SP
+ PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg2 - value
PUSH eax // pass arg1 - array
- call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
+ call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
int3 // unreached
END_FUNCTION art_quick_aput_obj
DEFINE_FUNCTION art_quick_memcpy
- SETUP_GOT_NOSAVE // clobbers EBX
+ SETUP_GOT_NOSAVE ebx // clobbers EBX
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
@@ -856,236 +889,77 @@
ret
END_FUNCTION art_quick_lushr
-DEFINE_FUNCTION art_quick_set8_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- mov 32(%ebx), %ebx // get referrer
- PUSH ebx // pass referrer
- PUSH edx // pass new_val
- PUSH ecx // pass object
- PUSH eax // pass field_idx
- call PLT_SYMBOL(artSet8InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set8_instance
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-DEFINE_FUNCTION art_quick_set16_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- mov 32(%ebx), %ebx // get referrer
- PUSH ebx // pass referrer
- PUSH edx // pass new_val
- PUSH ecx // pass object
- PUSH eax // pass field_idx
- call PLT_SYMBOL(artSet16InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set16_instance
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-DEFINE_FUNCTION art_quick_set32_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- mov 32(%ebx), %ebx // get referrer
- PUSH ebx // pass referrer
- PUSH edx // pass new_val
- PUSH ecx // pass object
- PUSH eax // pass field_idx
- call SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set32_instance
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
+
+// Call artSet64InstanceFromCode with 4 word size arguments and the referrer.
DEFINE_FUNCTION art_quick_set64_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ movd %ebx, %xmm0
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ movd %xmm0, %ebx
+ // Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
- PUSH esp // pass SP-8
- addl LITERAL(8), (%esp) // fix SP on stack by adding 8
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
+ pushl (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+12)(%esp) // pass referrer
+ CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass high half of new_val
PUSH edx // pass low half of new_val
PUSH ecx // pass object
PUSH eax // pass field_idx
- call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP)
+ call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_instance
-DEFINE_FUNCTION art_quick_set_obj_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- mov 32(%ebx), %ebx // get referrer
- PUSH ebx // pass referrer
- PUSH edx // pass new_val
- PUSH ecx // pass object
- PUSH eax // pass field_idx
- call SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set_obj_instance
-
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-
-DEFINE_FUNCTION art_quick_get64_instance
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- mov 32(%esp), %edx // get referrer
- subl LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
- PUSH ecx // pass object
- PUSH eax // pass field_idx
- call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
-END_FUNCTION art_quick_get64_instance
-
-DEFINE_FUNCTION art_quick_set8_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- mov 32(%esp), %edx // get referrer
- subl LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
- PUSH ecx // pass new_val
- PUSH eax // pass field_idx
- call SYMBOL(artSet8StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set8_static
-
-DEFINE_FUNCTION art_quick_set16_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- mov 32(%esp), %edx // get referrer
- subl LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
- PUSH ecx // pass new_val
- PUSH eax // pass field_idx
- call SYMBOL(artSet16StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set16_static
-
-DEFINE_FUNCTION art_quick_set32_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- mov 32(%esp), %edx // get referrer
- subl LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
- PUSH ecx // pass new_val
- PUSH eax // pass field_idx
- call SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set32_static
-
+// Call artSet64StaticFromCode with 3 word size arguments, plus the referrer in the 2nd position
+// so that new_val would be aligned on even registers if we were passing arguments in registers.
DEFINE_FUNCTION art_quick_set64_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ebx // pass SP
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
+ subl LITERAL(12), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- mov 32(%ebx), %ebx // get referrer
PUSH edx // pass high half of new_val
PUSH ecx // pass low half of new_val
PUSH ebx // pass referrer
PUSH eax // pass field_idx
- call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
+ call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
-DEFINE_FUNCTION art_quick_set_obj_static
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- mov %esp, %ebx // remember SP
- mov 32(%esp), %edx // get referrer
- subl LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
- PUSH ecx // pass new_val
- PUSH eax // pass field_idx
- call SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
- addl LITERAL(32), %esp // pop arguments
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
-END_FUNCTION art_quick_set_obj_static
-
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-
DEFINE_FUNCTION art_quick_proxy_invoke_handler
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
PUSH esp // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
@@ -1107,15 +981,15 @@
DEFINE_FUNCTION art_quick_imt_conflict_trampoline
PUSH ecx
movl 8(%esp), %eax // load caller Method*
- movl METHOD_DEX_CACHE_METHODS_OFFSET(%eax), %eax // load dex_cache_resolved_methods
+ movl MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET(%eax), %eax // load dex_cache_resolved_methods
movd %xmm0, %ecx // get target method index stored in xmm0
- movl OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4), %eax // load the target method
+ movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4), %eax // load the target method
POP ecx
jmp SYMBOL(art_quick_invoke_interface_trampoline)
END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx
movl %esp, %edi
PUSH EDI // pass SP. do not just PUSH ESP; that messes up unwinding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1136,14 +1010,12 @@
xchgl 0(%esp),%edi // restore EDI and place code pointer as only value on stack
ret // tail call into method
1:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
DEFINE_FUNCTION art_quick_generic_jni_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- // This also stores the native ArtMethod reference at the bottom of the stack.
-
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
movl %esp, %ebp // save SP at callee-save frame
CFI_DEF_CFA_REGISTER(ebp)
subl LITERAL(5120), %esp
@@ -1151,7 +1023,6 @@
// (Thread*, SP)
// (esp) 4(esp) <= C calling convention
// fs:... ebp <= where they are
- // Also: PLT, so need GOT in ebx.
subl LITERAL(8), %esp // Padding for 16B alignment.
pushl %ebp // Pass SP (to ArtMethod).
@@ -1216,12 +1087,12 @@
movl %ebp, %esp
CFI_DEF_CFA_REGISTER(esp)
.Lexception_in_native:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
DEFINE_FUNCTION art_quick_to_interpreter_bridge
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx // save frame
mov %esp, %edx // remember SP
PUSH eax // alignment padding
PUSH edx // pass SP
@@ -1234,7 +1105,7 @@
punpckldq %xmm1, %xmm0
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_to_interpreter_bridge
@@ -1242,26 +1113,23 @@
* Routine that intercepts method calls and returns.
*/
DEFINE_FUNCTION art_quick_instrumentation_entry
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- movl %esp, %edx // Save SP.
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, edx
PUSH eax // Save eax which will be clobbered by the callee-save method.
- subl LITERAL(8), %esp // Align stack.
- CFI_ADJUST_CFA_OFFSET(8)
- pushl 40(%esp) // Pass LR.
+ subl LITERAL(12), %esp // Align stack.
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-4+16(%esp) // Pass LR.
CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // Pass SP.
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // Pass receiver.
PUSH eax // Pass Method*.
- call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
- SETUP_GOT_NOSAVE
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
addl LITERAL(28), %esp // Pop arguments upto saved Method*.
movl 28(%esp), %edi // Restore edi.
movl %eax, 28(%esp) // Place code* over edi, just under return pc.
movl SYMBOL(art_quick_instrumentation_exit)@GOT(%ebx), %ebx
+ // Place instrumentation exit as return pc. ebx holds the GOT computed on entry.
movl %ebx, 32(%esp)
- // Place instrumentation exit as return pc.
movl (%esp), %eax // Restore eax.
movl 8(%esp), %ecx // Restore ecx.
movl 12(%esp), %edx // Restore edx.
@@ -1274,7 +1142,7 @@
DEFINE_FUNCTION art_quick_instrumentation_exit
pushl LITERAL(0) // Push a fake return PC as there will be none on the stack.
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx
mov %esp, %ecx // Remember SP
subl LITERAL(8), %esp // Save float return value.
CFI_ADJUST_CFA_OFFSET(8)
@@ -1300,7 +1168,7 @@
movq (%esp), %xmm0 // Restore fpr return value.
addl LITERAL(8), %esp
CFI_ADJUST_CFA_OFFSET(-8)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
addl LITERAL(4), %esp // Remove fake return pc.
jmp *%ecx // Return.
END_FUNCTION art_quick_instrumentation_exit
@@ -1311,14 +1179,12 @@
*/
DEFINE_FUNCTION art_quick_deoptimize
pushl %ebx // Fake that we were called.
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- mov %esp, %ecx // Remember SP.
- subl LITERAL(8), %esp // Align stack.
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH ecx // Pass SP.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx
+ subl LITERAL(12), %esp // Align stack.
+ CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
- call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*)
int3 // Unreachable.
END_FUNCTION art_quick_deoptimize
@@ -1332,15 +1198,15 @@
DEFINE_FUNCTION art_quick_string_compareto
PUSH esi // push callee save reg
PUSH edi // push callee save reg
- mov STRING_COUNT_OFFSET(%eax), %edx
- mov STRING_COUNT_OFFSET(%ecx), %ebx
- mov STRING_VALUE_OFFSET(%eax), %esi
- mov STRING_VALUE_OFFSET(%ecx), %edi
- mov STRING_OFFSET_OFFSET(%eax), %eax
- mov STRING_OFFSET_OFFSET(%ecx), %ecx
+ mov MIRROR_STRING_COUNT_OFFSET(%eax), %edx
+ mov MIRROR_STRING_COUNT_OFFSET(%ecx), %ebx
+ mov MIRROR_STRING_VALUE_OFFSET(%eax), %esi
+ mov MIRROR_STRING_VALUE_OFFSET(%ecx), %edi
+ mov MIRROR_STRING_OFFSET_OFFSET(%eax), %eax
+ mov MIRROR_STRING_OFFSET_OFFSET(%ecx), %ecx
/* Build pointers to the start of string data */
- lea STRING_DATA_OFFSET(%esi, %eax, 2), %esi
- lea STRING_DATA_OFFSET(%edi, %ecx, 2), %edi
+ lea MIRROR_CHAR_ARRAY_DATA_OFFSET(%esi, %eax, 2), %esi
+ lea MIRROR_CHAR_ARRAY_DATA_OFFSET(%edi, %ecx, 2), %edi
/* Calculate min length and count diff */
mov %edx, %ecx
mov %edx, %eax
@@ -1375,7 +1241,7 @@
// eax: address of jmp_buf in TLS
DEFINE_FUNCTION art_nested_signal_return
- SETUP_GOT_NOSAVE // sets %ebx for call into PLT
+ SETUP_GOT_NOSAVE ebx // sets %ebx for call into PLT
movl LITERAL(1), %ecx
pushl %ecx // second arg to longjmp (1)
pushl %eax // first arg to longjmp (jmp_buf)
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 40958dc..eddd172 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -19,30 +19,8 @@
#include "asm_support.h"
-// Note: these callee save methods loads require read barriers.
-// Offset of field Runtime::callee_save_methods_[kSaveAll]
-#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
-// Offset of field Runtime::callee_save_methods_[kRefsOnly]
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
-// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
-#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
-
-// Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 192
-// Offset of field Thread::card_table_ verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 120
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 128
-// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
-#define THREAD_ID_OFFSET 12
-
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64 + 4*8
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64 + 4*8
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 176 + 4*8
-// Expected size of a heap reference
-#define HEAP_REFERENCE_SIZE 4
-// Expected size of a stack reference
-#define STACK_REFERENCE_SIZE 4
-
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 79d0666..6e9b99c 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -17,9 +17,8 @@
#include "context_x86_64.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
namespace art {
namespace x86_64 {
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index be73594..a2766f7 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -18,6 +18,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/math_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -25,100 +26,14 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
- const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
-
-// Math entrypoints.
-extern "C" int64_t art_quick_d2l(double);
-extern "C" int64_t art_quick_f2l(float);
-extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
-extern "C" int64_t art_quick_lmod(int64_t, int64_t);
-extern "C" int64_t art_quick_lmul(int64_t, int64_t);
-extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-extern "C" void* art_quick_memcpy(void*, const void*, size_t);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI entrypoint
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+ const mirror::Class* ref_class);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
#if defined(__APPLE__)
+ UNUSED(ipoints, jpoints, ppoints, qpoints);
UNIMPLEMENTED(FATAL);
#else
// Interpreter
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
new file mode 100644
index 0000000..3280177
--- /dev/null
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_INSTRUCTION_SET_FEATURES_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_INSTRUCTION_SET_FEATURES_X86_64_H_
+
+#include "arch/x86/instruction_set_features_x86.h"
+
+namespace art {
+
+// Instruction set features relevant to the X86_64 architecture.
+class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg) {
+ return X86InstructionSetFeatures::FromVariant(variant, error_msg, true)
+ ->AsX86_64InstructionSetFeatures();
+ }
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromBitmap(uint32_t bitmap) {
+ return X86InstructionSetFeatures::FromBitmap(bitmap, true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const X86_64InstructionSetFeatures* FromCppDefines() {
+ return X86InstructionSetFeatures::FromCppDefines(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromCpuInfo() {
+ return X86InstructionSetFeatures::FromCpuInfo(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromHwcap() {
+ return X86InstructionSetFeatures::FromHwcap(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const X86_64InstructionSetFeatures* FromAssembly() {
+ return X86InstructionSetFeatures::FromAssembly(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kX86_64;
+ }
+
+ virtual ~X86_64InstructionSetFeatures() {}
+
+ protected:
+ // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE {
+ return X86InstructionSetFeatures::AddFeaturesFromSplitString(smp, features, true, error_msg);
+ }
+
+ private:
+ X86_64InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2,
+ bool has_AVX, bool has_AVX2)
+ : X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2) {
+ }
+
+ friend class X86InstructionSetFeatures;
+
+ DISALLOW_COPY_AND_ASSIGN(X86_64InstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_64_INSTRUCTION_SET_FEATURES_X86_64_H_
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
new file mode 100644
index 0000000..5171080
--- /dev/null
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_x86_64.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(X86_64InstructionSetFeaturesTest, X86Features) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+ InstructionSetFeatures::FromVariant(kX86_64, "default", &error_msg));
+ ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ x86_64_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_64_features->AsBitmap(), 1U);
+}
+
+} // namespace art
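
As a usage sketch (illustrative only, not part of the change), the new x86-64 feature class is reached through the factory helpers declared in the header above; everything except those factories is made up here:

#include <memory>
#include <string>

#include "arch/x86_64/instruction_set_features_x86_64.h"

// Hypothetical caller showing the two most common construction paths.
void BuildX86_64Features() {
  std::string error_msg;
  // From a named CPU variant such as "default"; error_msg is filled on failure.
  std::unique_ptr<const art::X86_64InstructionSetFeatures> from_variant(
      art::X86_64InstructionSetFeatures::FromVariant("default", &error_msg));
  // From the pre-processor defines of the current build.
  std::unique_ptr<const art::X86_64InstructionSetFeatures> from_defines(
      art::X86_64InstructionSetFeatures::FromCppDefines());
}
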
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 648a99a..a80e7d2 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -57,25 +57,25 @@
PUSH r12 // Callee save.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
- // Create space for FPR args, plus padding for alignment
- subq LITERAL(4 * 8), %rsp
- CFI_ADJUST_CFA_OFFSET(4 * 8)
+ // Create space for FPR args, plus space for StackReference<ArtMethod>.
+ subq MACRO_LITERAL(4 * 8 + 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
// Save FPRs.
- movq %xmm12, 0(%rsp)
- movq %xmm13, 8(%rsp)
- movq %xmm14, 16(%rsp)
- movq %xmm15, 24(%rsp)
- subq MACRO_LITERAL(8), %rsp // Space for Method* (also aligns the frame).
- CFI_ADJUST_CFA_OFFSET(8)
+ movq %xmm12, 8(%rsp)
+ movq %xmm13, 16(%rsp)
+ movq %xmm14, 24(%rsp)
+ movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for save all callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 6*8 + 4*8 + 8 + 8)
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 6 * 8 + 4 * 8 + 8 + 8)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif // __APPLE__
@@ -85,7 +85,7 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly)
*/
-MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
+MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME)
#if defined(__APPLE__)
int3
int3
@@ -100,9 +100,9 @@
PUSH r12 // Callee save.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
- // Create space for FPR args, plus padding for alignment
- subq LITERAL(8 + 4*8), %rsp
- CFI_ADJUST_CFA_OFFSET(8 + 4*8)
+ // Create space for FPR args, plus space for StackReference<ArtMethod>.
+ subq LITERAL(8 + 4 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
// Save FPRs.
movq %xmm12, 8(%rsp)
movq %xmm13, 16(%rsp)
@@ -113,16 +113,18 @@
movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 6*8 + 4*8 + 8 + 8)
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 6 * 8 + 4 * 8 + 8 + 8)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif // __APPLE__
END_MACRO
-MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
+MACRO0(RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME)
movq 8(%rsp), %xmm12
movq 16(%rsp), %xmm13
movq 24(%rsp), %xmm14
@@ -142,7 +144,7 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
*/
-MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
#if defined(__APPLE__)
int3
int3
@@ -162,12 +164,13 @@
PUSH rbx // Callee save.
PUSH rdx // Quick arg 2.
PUSH rcx // Quick arg 3.
- // Create space for FPR args and create 2 slots, 1 of padding and 1 for the ArtMethod*.
+ // Create space for FPR args and create 2 slots, 1 of padding and 1 for the
+ // StackReference<ArtMethod>.
subq MACRO_LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
- movq RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Save FPRs.
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
@@ -183,16 +186,54 @@
movq %xmm15, 104(%rsp)
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11*8 + 4*8 + 80 + 8)
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11 * 8 + 4 * 8 + 80 + 8)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif // __APPLE__
END_MACRO
-MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI)
+ // Save callee and GPR args, mixed together to agree with core spills bitmap.
+ PUSH r15 // Callee save.
+ PUSH r14 // Callee save.
+ PUSH r13 // Callee save.
+ PUSH r12 // Callee save.
+ PUSH r9 // Quick arg 5.
+ PUSH r8 // Quick arg 4.
+ PUSH rsi // Quick arg 1.
+ PUSH rbp // Callee save.
+ PUSH rbx // Callee save.
+ PUSH rdx // Quick arg 2.
+ PUSH rcx // Quick arg 3.
+ // Create space for FPR args and create 2 slots, 1 of padding and 1 for the
+ // StackReference<ArtMethod>.
+ subq LITERAL(80 + 4 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
+ // Save FPRs.
+ movq %xmm0, 16(%rsp)
+ movq %xmm1, 24(%rsp)
+ movq %xmm2, 32(%rsp)
+ movq %xmm3, 40(%rsp)
+ movq %xmm4, 48(%rsp)
+ movq %xmm5, 56(%rsp)
+ movq %xmm6, 64(%rsp)
+ movq %xmm7, 72(%rsp)
+ movq %xmm12, 80(%rsp)
+ movq %xmm13, 88(%rsp)
+ movq %xmm14, 96(%rsp)
+ movq %xmm15, 104(%rsp)
+ // Store ArtMethod* to bottom of stack.
+ movq %rdi, 0(%rsp)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+END_MACRO
+
+MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
// Restore FPRs.
movq 16(%rsp), %xmm0
movq 24(%rsp), %xmm1
@@ -229,10 +270,9 @@
*/
MACRO0(DELIVER_PENDING_EXCEPTION)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save callee saves for throw
- // (Thread*, SP) setup
+ // (Thread*) setup
movq %gs:THREAD_SELF_OFFSET, %rdi
- movq %rsp, %rsi
- call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
UNREACHABLE
END_MACRO
@@ -240,9 +280,8 @@
DEFINE_FUNCTION VAR(c_name, 0)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
- movq %rsp, %rsi // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -251,9 +290,8 @@
DEFINE_FUNCTION VAR(c_name, 0)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
- movq %rsp, %rdx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -262,9 +300,8 @@
DEFINE_FUNCTION VAR(c_name, 0)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
- movq %rsp, %rcx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -321,7 +358,7 @@
*/
MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
@@ -333,7 +370,7 @@
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
testq %rdi, %rdi
jz 1f
@@ -481,7 +518,7 @@
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished
.Lgpr_setup_finished:
- call *METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
CFI_DEF_CFA_REGISTER(rsp)
POP r9 // Pop r9 - shorty*.
@@ -564,7 +601,7 @@
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
.Lgpr_setup_finished2:
- call *METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+ call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
CFI_DEF_CFA_REGISTER(rsp)
POP r9 // Pop r9 - shorty*.
@@ -639,88 +676,81 @@
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %rsp, %rsi // pass SP
- movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
+ call VAR(cxx_name, 1) // cxx_name(Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %rsp, %rdx // pass SP
- movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call VAR(cxx_name, 1) // cxx_name(arg0, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %rsp, %rcx // pass SP
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %rsp, %r8 // pass SP
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
+ movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- movl 8(%rsp), %esi // pass referrer
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- // arg0 is in rdi
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- movq %rsp, %rcx // pass SP
- call VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ movl 8(%rsp), %esi // pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ // arg0 is in rdi
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2)
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- movl 8(%rsp), %edx // pass referrer
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- // arg0 and arg1 are in rdi/rsi
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- movq %rsp, %r8 // pass SP
- call VAR(cxx_name, 1) // (arg0, arg1, referrer, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ movl 8(%rsp), %edx // pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ // arg0 and arg1 are in rdi/rsi
+ movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
+ call VAR(cxx_name, 1) // (arg0, arg1, referrer, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2)
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- movl 8(%rsp), %ecx // pass referrer
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- // arg0, arg1, and arg2 are in rdi/rsi/rdx
+ movl 8(%rsp), %ecx // pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ // arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- movq %rsp, %r9 // pass SP
- call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -864,7 +894,7 @@
testl %edi, %edi // Null check object/rdi.
jz .Lslow_lock
.Lretry_lock:
- movl LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word.
+ movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word.
test LITERAL(0xC0000000), %ecx // Test the 2 high bits.
jne .Lslow_lock // Slow path if either of the two high bits are set.
movl %gs:THREAD_ID_OFFSET, %edx // edx := thread id
@@ -872,7 +902,7 @@
jnz .Lalready_thin // Lock word contains a thin lock.
// unlocked case - %edx holds thread id with count of 0
xor %eax, %eax // eax == 0 for comparison with lock word in cmpxchg
- lock cmpxchg %edx, LOCK_WORD_OFFSET(%edi)
+ lock cmpxchg %edx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
jnz .Lretry_lock // cmpxchg failed retry
ret
.Lalready_thin:
@@ -881,21 +911,21 @@
addl LITERAL(65536), %ecx // increment recursion count
test LITERAL(0xC0000000), %ecx // overflowed if either of top two bits are set
jne .Lslow_lock // count overflowed so go slow
- movl %ecx, LOCK_WORD_OFFSET(%edi) // update lockword, cmpxchg not necessary as we hold lock
+ // update lockword, cmpxchg not necessary as we hold lock
+ movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
ret
.Lslow_lock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- movq %rsp, %rdx // pass SP
- call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
DEFINE_FUNCTION art_quick_unlock_object
testl %edi, %edi // null check object/edi
jz .Lslow_unlock
- movl LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word
+ movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word
movl %gs:THREAD_ID_OFFSET, %edx // edx := thread id
test LITERAL(0xC0000000), %ecx
jnz .Lslow_unlock // lock word contains a monitor
@@ -903,18 +933,17 @@
jne .Lslow_unlock
cmpl LITERAL(65536), %ecx
jae .Lrecursive_thin_unlock
- movl LITERAL(0), LOCK_WORD_OFFSET(%edi)
+ movl LITERAL(0), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
ret
.Lrecursive_thin_unlock:
subl LITERAL(65536), %ecx
- mov %ecx, LOCK_WORD_OFFSET(%edi)
+ mov %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
ret
.Lslow_unlock:
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- movq %rsp, %rdx // pass SP
- call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
@@ -935,9 +964,8 @@
POP rsi // Pop arguments
POP rdi
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov %rsp, %rcx // pass SP
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
+ call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
int3 // unreached
END_FUNCTION art_quick_check_cast
@@ -969,8 +997,8 @@
int3
int3
#else
- movl ARRAY_LENGTH_OFFSET(%edi), %ecx
-// movl ARRAY_LENGTH_OFFSET(%rdi), %ecx // This zero-extends, so value(%rcx)=value(%ecx)
+ movl MIRROR_ARRAY_LENGTH_OFFSET(%edi), %ecx
+// movl MIRROR_ARRAY_LENGTH_OFFSET(%rdi), %ecx // This zero-extends, so value(%rcx)=value(%ecx)
cmpl %ecx, %esi
jb art_quick_aput_obj
mov %esi, %edi
@@ -986,24 +1014,24 @@
testl %edx, %edx // store of null
// test %rdx, %rdx
jz .Ldo_aput_null
- movl CLASS_OFFSET(%edi), %ecx
-// movq CLASS_OFFSET(%rdi), %rcx
- movl CLASS_COMPONENT_TYPE_OFFSET(%ecx), %ecx
-// movq CLASS_COMPONENT_TYPE_OFFSET(%rcx), %rcx
- cmpl CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
-// cmpq CLASS_OFFSET(%rdx), %rcx
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edi), %ecx
+// movq MIRROR_OBJECT_CLASS_OFFSET(%rdi), %rcx
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ecx), %ecx
+// movq MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %rcx
+ cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
+// cmpq MIRROR_CLASS_OFFSET(%rdx), %rcx
jne .Lcheck_assignability
.Ldo_aput:
- movl %edx, OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
+// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(7), %edi
// shrl LITERAL(7), %rdi
movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
ret
.Ldo_aput_null:
- movl %edx, OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
+// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
ret
.Lcheck_assignability:
// Save arguments.
@@ -1015,7 +1043,7 @@
SETUP_FP_CALLEE_SAVE_FRAME
// "Uncompress" = do nothing, as already zero-extended on load.
- movl CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
movq %rcx, %rdi // Pass arg1 = array's component type.
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
@@ -1032,8 +1060,8 @@
POP rsi
POP rdi
- movl %edx, OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
+// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(7), %edi
// shrl LITERAL(7), %rdi
@@ -1052,12 +1080,10 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // Save all registers as basis for long jump context.
// Outgoing argument set up.
- movq %rsp, %rcx // Pass arg 4 = SP.
movq %rdx, %rsi // Pass arg 2 = value.
- movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
+ movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
// Pass arg 1 = array.
-
- call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
+ call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
int3 // unreached
END_FUNCTION art_quick_aput_obj
@@ -1079,7 +1105,7 @@
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
-THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
@@ -1105,55 +1131,25 @@
// This is singled out as the argument order is different.
DEFINE_FUNCTION art_quick_set64_static
- movq %rsi, %rdx // pass new_val
- movl 8(%rsp), %esi // pass referrer
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- // field_idx is in rdi
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- movq %rsp, %r8 // pass SP
- call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
- RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_EAX_ZERO // return or deliver exception
+ movq %rsi, %rdx // pass new_val
+ movl 8(%rsp), %esi // pass referrer
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
+ // field_idx is in rdi
+ movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
+ call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
DEFINE_FUNCTION art_quick_proxy_invoke_handler
- // Save callee and GPR args, mixed together to agree with core spills bitmap of ref. and args
- // callee save frame.
- PUSH r15 // Callee save.
- PUSH r14 // Callee save.
- PUSH r13 // Callee save.
- PUSH r12 // Callee save.
- PUSH r9 // Quick arg 5.
- PUSH r8 // Quick arg 4.
- PUSH rsi // Quick arg 1.
- PUSH rbp // Callee save.
- PUSH rbx // Callee save.
- PUSH rdx // Quick arg 2.
- PUSH rcx // Quick arg 3.
- // Create space for FPR args and create 2 slots, 1 of padding and 1 for the ArtMethod*.
- subq LITERAL(80 + 4*8), %rsp
- CFI_ADJUST_CFA_OFFSET(80 + 4*8)
- // Save FPRs.
- movq %xmm0, 16(%rsp)
- movq %xmm1, 24(%rsp)
- movq %xmm2, 32(%rsp)
- movq %xmm3, 40(%rsp)
- movq %xmm4, 48(%rsp)
- movq %xmm5, 56(%rsp)
- movq %xmm6, 64(%rsp)
- movq %xmm7, 72(%rsp)
- movq %xmm12, 80(%rsp)
- movq %xmm13, 88(%rsp)
- movq %xmm14, 96(%rsp)
- movq %xmm15, 104(%rsp)
- // Store proxy method to bottom of stack.
- movq %rdi, 0(%rsp)
- movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
- movq %rsp, %rcx // Pass SP.
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
+
+ movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
+ movq %rsp, %rcx // Pass SP.
call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
- movq %rax, %xmm0 // Copy return value in case of float returns.
- addq LITERAL(168 + 4*8), %rsp // Pop arguments.
+ movq %rax, %xmm0 // Copy return value in case of float returns.
+ addq LITERAL(168 + 4*8), %rsp // Pop arguments.
CFI_ADJUST_CFA_OFFSET(-168 - 4*8)
RETURN_OR_DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_proxy_invoke_handler
@@ -1168,20 +1164,20 @@
int3
#else
movl 8(%rsp), %edi // load caller Method*
- movl METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods
- movl OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi // load the target method
+ movl MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods
+ movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi // load the target method
jmp art_quick_invoke_interface_trampoline
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rdx
movq %rsp, %rcx
call SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
testq %r10, %r10 // If code pointer is NULL goto deliver pending exception.
jz 1f
jmp *%r10 // Tail call into method.
@@ -1267,37 +1263,11 @@
* Called to do a generic JNI down-call
*/
DEFINE_FUNCTION art_quick_generic_jni_trampoline
- // Save callee and GPR args, mixed together to agree with core spills bitmap.
- PUSH r15 // Callee save.
- PUSH r14 // Callee save.
- PUSH r13 // Callee save.
- PUSH r12 // Callee save.
- PUSH r9 // Quick arg 5.
- PUSH r8 // Quick arg 4.
- PUSH rsi // Quick arg 1.
- PUSH rbp // Callee save.
- PUSH rbx // Callee save.
- PUSH rdx // Quick arg 2.
- PUSH rcx // Quick arg 3.
- // Create space for FPR args and create 2 slots, 1 of padding and 1 for the ArtMethod*.
- subq LITERAL(80 + 4*8), %rsp
- CFI_ADJUST_CFA_OFFSET(80 + 4*8)
- // Save FPRs.
- movq %xmm0, 16(%rsp)
- movq %xmm1, 24(%rsp)
- movq %xmm2, 32(%rsp)
- movq %xmm3, 40(%rsp)
- movq %xmm4, 48(%rsp)
- movq %xmm5, 56(%rsp)
- movq %xmm6, 64(%rsp)
- movq %xmm7, 72(%rsp)
- movq %xmm12, 80(%rsp)
- movq %xmm13, 88(%rsp)
- movq %xmm14, 96(%rsp)
- movq %xmm15, 104(%rsp)
- movq %rdi, 0(%rsp) // Store native ArtMethod* to bottom of stack.
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
+
movq %rsp, %rbp // save SP at (old) callee-save frame
CFI_DEF_CFA_REGISTER(rbp)
+
//
// reserve a lot of space
//
@@ -1454,11 +1424,11 @@
* RSI, RDX, RCX, R8, R9 are arguments to that method.
*/
DEFINE_FUNCTION art_quick_to_interpreter_bridge
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
movq %rsp, %rdx // RDX := sp
call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
movq %rax, %xmm0 // Place return value also into floating point return value.
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_to_interpreter_bridge
@@ -1471,15 +1441,14 @@
int3
int3
#else
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
movq %rdi, %r12 // Preserve method pointer in a callee-save.
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass thread.
- movq %rsp, %rcx // Pass SP.
- movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp), %r8 // Pass return PC.
+ movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp), %rcx // Pass return PC.
- call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
// %rax = result of call.
movq %r12, %rdi // Reload method pointer.
@@ -1487,7 +1456,7 @@
leaq art_quick_instrumentation_exit(%rip), %r12 // Set up return through instrumentation
movq %r12, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp) // exit.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
jmp *%rax // Tail call to intended method.
#endif // __APPLE__
@@ -1496,7 +1465,7 @@
DEFINE_FUNCTION art_quick_instrumentation_exit
pushq LITERAL(0) // Push a fake return PC as there will be none on the stack.
- SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// We need to save rax and xmm0. We could use a callee-save from SETUP_REF_ONLY, but then
// we would need to fully restore it. As there are a good number of callee-save registers, it
@@ -1536,9 +1505,8 @@
pushq %rsi // Fake that we were called. Use hidden arg.
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
// Stack should be aligned now.
- movq %rsp, %rsi // Pass SP.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
- call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*)
int3 // Unreachable.
END_FUNCTION art_quick_deoptimize
@@ -1551,15 +1519,15 @@
* rsi: comp string object (known non-null)
*/
DEFINE_FUNCTION art_quick_string_compareto
- movl STRING_COUNT_OFFSET(%edi), %r8d
- movl STRING_COUNT_OFFSET(%esi), %r9d
- movl STRING_VALUE_OFFSET(%edi), %r10d
- movl STRING_VALUE_OFFSET(%esi), %r11d
- movl STRING_OFFSET_OFFSET(%edi), %eax
- movl STRING_OFFSET_OFFSET(%esi), %ecx
+ movl MIRROR_STRING_COUNT_OFFSET(%edi), %r8d
+ movl MIRROR_STRING_COUNT_OFFSET(%esi), %r9d
+ movl MIRROR_STRING_VALUE_OFFSET(%edi), %r10d
+ movl MIRROR_STRING_VALUE_OFFSET(%esi), %r11d
+ movl MIRROR_STRING_OFFSET_OFFSET(%edi), %eax
+ movl MIRROR_STRING_OFFSET_OFFSET(%esi), %ecx
/* Build pointers to the start of string data */
- leal STRING_DATA_OFFSET(%r10d, %eax, 2), %esi
- leal STRING_DATA_OFFSET(%r11d, %ecx, 2), %edi
+ leal MIRROR_CHAR_ARRAY_DATA_OFFSET(%r10d, %eax, 2), %esi
+ leal MIRROR_CHAR_ARRAY_DATA_OFFSET(%r11d, %ecx, 2), %edi
/* Calculate min length and count diff */
movl %r8d, %ecx
movl %r8d, %eax
@@ -1605,5 +1573,3 @@
call PLT_SYMBOL(longjmp)
int3 // won't get here
END_FUNCTION art_nested_signal_return
-
-
diff --git a/runtime/arch/x86_64/thread_x86_64.cc b/runtime/arch/x86_64/thread_x86_64.cc
index 6dff2b4..553b656 100644
--- a/runtime/arch/x86_64/thread_x86_64.cc
+++ b/runtime/arch/x86_64/thread_x86_64.cc
@@ -49,29 +49,16 @@
// Sanity check that reads from %gs point to this Thread*.
Thread* self_check;
- CHECK_EQ(THREAD_SELF_OFFSET, SelfOffset<8>().Int32Value());
__asm__ __volatile__("movq %%gs:(%1), %0"
: "=r"(self_check) // output
: "r"(THREAD_SELF_OFFSET) // input
:); // clobber
CHECK_EQ(self_check, this);
-
- // Sanity check other offsets.
- CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET),
- Runtime::GetCalleeSaveMethodOffset(Runtime::kSaveAll));
- CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET),
- Runtime::GetCalleeSaveMethodOffset(Runtime::kRefsOnly));
- CHECK_EQ(static_cast<size_t>(RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET),
- Runtime::GetCalleeSaveMethodOffset(Runtime::kRefsAndArgs));
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<8>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<8>().Int32Value());
- CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<8>().Int32Value());
}
void Thread::CleanupCpu() {
// Sanity check that reads from %gs point to this Thread*.
Thread* self_check;
- CHECK_EQ(THREAD_SELF_OFFSET, SelfOffset<8>().Int32Value());
__asm__ __volatile__("movq %%gs:(%1), %0"
: "=r"(self_check) // output
: "r"(THREAD_SELF_OFFSET) // input
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 62f3593..4b4c8855 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -17,56 +17,155 @@
#ifndef ART_RUNTIME_ASM_SUPPORT_H_
#define ART_RUNTIME_ASM_SUPPORT_H_
+#if defined(__cplusplus)
+#include "mirror/art_method.h"
+#include "mirror/class.h"
+#include "mirror/string.h"
+#include "runtime.h"
+#include "thread.h"
+#endif
+
#include "read_barrier_c.h"
-// Value loaded into rSUSPEND for quick. When this value is counted down to zero we do a suspend
-// check.
-#define SUSPEND_CHECK_INTERVAL (1000)
+#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+// In quick code for ARM, ARM64 and MIPS we make poor use of registers and perform frequent suspend
+// checks in the event of loop back edges. The SUSPEND_CHECK_INTERVAL constant is loaded into a
+// register at the point of an up-call or after handling a suspend check. It reduces the number of
+// loads of the TLS suspend check value by the given amount (turning it into a decrement and compare
+// of a register). This increases the time for a thread to respond to requests from GC and the
+// debugger, damaging GC performance and creating other unwanted artifacts. For example, this count
+// can make loops and Java code look cold in profilers, because where the count is reset affects
+// where samples will occur. Reducing the count as much as possible improves profiler
+// accuracy in tools like traceview.
+// TODO: get a compiler that can do a proper job of loop optimization and remove this.
+#define SUSPEND_CHECK_INTERVAL 1000
+#endif
+
+#if defined(__cplusplus)
+
+#ifndef ADD_TEST_EQ // Allow the including file to replace this with its own definition.
+#define ADD_TEST_EQ(x, y) CHECK_EQ(x, y);
+#endif
+
+static inline void CheckAsmSupportOffsetsAndSizes() {
+#else
+#define ADD_TEST_EQ(x, y)
+#endif
+
+// Size of references to the heap on the stack.
+#define STACK_REFERENCE_SIZE 4
+ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReference<art::mirror::Object>))
+
+// Note: these callee save methods loads require read barriers.
+// Offset of field Runtime::callee_save_methods_[kSaveAll]
+#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
+ADD_TEST_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET),
+ art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kSaveAll))
+
+// Offset of field Runtime::callee_save_methods_[kRefsOnly]
+#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET __SIZEOF_POINTER__
+ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET),
+ art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsOnly))
+
+// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
+#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET),
+ art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsAndArgs))
+
+// Offset of field Thread::tls32_.state_and_flags.
+#define THREAD_FLAGS_OFFSET 0
+ADD_TEST_EQ(THREAD_FLAGS_OFFSET,
+ art::Thread::ThreadFlagsOffset<__SIZEOF_POINTER__>().Int32Value())
+
+// Offset of field Thread::tls32_.thin_lock_thread_id.
+#define THREAD_ID_OFFSET 12
+ADD_TEST_EQ(THREAD_ID_OFFSET,
+ art::Thread::ThinLockIdOffset<__SIZEOF_POINTER__>().Int32Value())
+
+// Offset of field Thread::tlsPtr_.card_table.
+#define THREAD_CARD_TABLE_OFFSET 120
+ADD_TEST_EQ(THREAD_CARD_TABLE_OFFSET,
+ art::Thread::CardTableOffset<__SIZEOF_POINTER__>().Int32Value())
+
+// Offset of field Thread::tlsPtr_.exception.
+#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_EXCEPTION_OFFSET,
+ art::Thread::ExceptionOffset<__SIZEOF_POINTER__>().Int32Value())
+
+// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_.
+#define THREAD_TOP_QUICK_FRAME_OFFSET (THREAD_CARD_TABLE_OFFSET + (3 * __SIZEOF_POINTER__))
+ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET,
+ art::Thread::TopOfManagedStackOffset<__SIZEOF_POINTER__>().Int32Value())
+
+// Offset of field Thread::tlsPtr_.self.
+#define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (8 * __SIZEOF_POINTER__))
+ADD_TEST_EQ(THREAD_SELF_OFFSET,
+ art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
// Offsets within java.lang.Object.
-#define CLASS_OFFSET 0
-#define LOCK_WORD_OFFSET 4
+#define MIRROR_OBJECT_CLASS_OFFSET 0
+ADD_TEST_EQ(MIRROR_OBJECT_CLASS_OFFSET, art::mirror::Object::ClassOffset().Int32Value())
+#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
+ADD_TEST_EQ(MIRROR_OBJECT_LOCK_WORD_OFFSET, art::mirror::Object::MonitorOffset().Int32Value())
-#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
-
-// Offsets within java.lang.Class.
-#define CLASS_COMPONENT_TYPE_OFFSET 12
-
-// Array offsets.
-#define ARRAY_LENGTH_OFFSET 8
-#define OBJECT_ARRAY_DATA_OFFSET 12
-
-// Offsets within java.lang.String.
-#define STRING_VALUE_OFFSET 8
-#define STRING_COUNT_OFFSET 12
-#define STRING_OFFSET_OFFSET 20
-#define STRING_DATA_OFFSET 12
-
-// Offsets within java.lang.Method.
-#define METHOD_DEX_CACHE_METHODS_OFFSET 12
-#define METHOD_PORTABLE_CODE_OFFSET 40
-#define METHOD_QUICK_CODE_OFFSET 48
-
+#if defined(USE_BAKER_OR_BROOKS_READ_BARRIER)
+#define MIRROR_OBJECT_HEADER_SIZE 16
#else
+#define MIRROR_OBJECT_HEADER_SIZE 8
+#endif
+ADD_TEST_EQ(size_t(MIRROR_OBJECT_HEADER_SIZE), sizeof(art::mirror::Object))
// Offsets within java.lang.Class.
-#define CLASS_COMPONENT_TYPE_OFFSET 20
+#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
+ art::mirror::Class::ComponentTypeOffset().Int32Value())
// Array offsets.
-#define ARRAY_LENGTH_OFFSET 16
-#define OBJECT_ARRAY_DATA_OFFSET 20
+#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
+ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32Value())
+
+#define MIRROR_CHAR_ARRAY_DATA_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CHAR_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value())
+
+#define MIRROR_OBJECT_ARRAY_DATA_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(
+ sizeof(art::mirror::HeapReference<art::mirror::Object>)).Int32Value())
// Offsets within java.lang.String.
-#define STRING_VALUE_OFFSET 16
-#define STRING_COUNT_OFFSET 20
-#define STRING_OFFSET_OFFSET 28
-#define STRING_DATA_OFFSET 20
+#define MIRROR_STRING_VALUE_OFFSET MIRROR_OBJECT_HEADER_SIZE
+ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
-// Offsets within java.lang.Method.
-#define METHOD_DEX_CACHE_METHODS_OFFSET 20
-#define METHOD_PORTABLE_CODE_OFFSET 48
-#define METHOD_QUICK_CODE_OFFSET 56
+#define MIRROR_STRING_COUNT_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32Value())
+#define MIRROR_STRING_OFFSET_OFFSET (12 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_STRING_OFFSET_OFFSET, art::mirror::String::OffsetOffset().Int32Value())
+
+// Offsets within java.lang.reflect.ArtMethod.
+#define MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET,
+ art::mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())
+
+#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32 (48 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
+ art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value())
+
+#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32 (40 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
+ art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
+
+#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64 (64 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
+ art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value())
+
+#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64 (48 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
+ art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value())
+
+#if defined(__cplusplus)
+} // End of CheckAsmSupportOffsets.
#endif
#endif // ART_RUNTIME_ASM_SUPPORT_H_
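
For reference, the ADD_TEST_EQ pattern above is what keeps the hand-written assembly and the C++ object layouts in sync: included from a .S file the macro disappears, while included from C++ it becomes a CHECK_EQ inside CheckAsmSupportOffsetsAndSizes(). A reduced sketch of the same idiom with a hypothetical constant (EXAMPLE_FIELD_OFFSET and art::Example::FieldOffset() are illustrative, not from this patch):

#if defined(__cplusplus)
#ifndef ADD_TEST_EQ                    // Includers may substitute their own checker.
#define ADD_TEST_EQ(x, y) CHECK_EQ(x, y);
#endif
static inline void CheckExampleOffsets() {
#else
#define ADD_TEST_EQ(x, y)              // Expands to nothing when assembled.
#endif

// Constant consumed by assembly; verified against the C++ layout when compiled as C++.
#define EXAMPLE_FIELD_OFFSET 16
ADD_TEST_EQ(EXAMPLE_FIELD_OFFSET, art::Example::FieldOffset().Int32Value())

#if defined(__cplusplus)
}  // End of CheckExampleOffsets.
#endif
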
diff --git a/runtime/atomic.h b/runtime/atomic.h
index e57c0c0..cf61277 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -293,17 +293,17 @@
typedef Atomic<int32_t> AtomicInteger;
-COMPILE_ASSERT(sizeof(AtomicInteger) == sizeof(int32_t), weird_atomic_int_size);
-COMPILE_ASSERT(alignof(AtomicInteger) == alignof(int32_t),
- atomic_int_alignment_differs_from_that_of_underlying_type);
-COMPILE_ASSERT(sizeof(Atomic<int64_t>) == sizeof(int64_t), weird_atomic_int64_size);
+static_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size");
+static_assert(alignof(AtomicInteger) == alignof(int32_t),
+ "AtomicInteger alignment differs from that of underlyingtype");
+static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic<int64> size");
// Assert the alignment of 64-bit integers is 64-bit. This isn't true on certain 32-bit
// architectures (e.g. x86-32) but we know that 64-bit integers here are arranged to be 8-byte
// aligned.
#if defined(__LP64__)
- COMPILE_ASSERT(alignof(Atomic<int64_t>) == alignof(int64_t),
- atomic_int64_alignment_differs_from_that_of_underlying_type);
+ static_assert(alignof(Atomic<int64_t>) == alignof(int64_t),
+ "Atomic<int64> alignment differs from that of underlying type");
#endif
} // namespace art
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index b8edad3..5a8fbb3 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -23,7 +23,7 @@
Barrier::Barrier(int count)
: count_(count),
- lock_("GC barrier lock"),
+ lock_("GC barrier lock", kThreadSuspendCountLock),
condition_("GC barrier condition", lock_) {
}
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 167e1d6..5ca88e8 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -50,7 +50,7 @@
// Counter, when this reaches 0 all people blocked on the barrier are signalled.
int count_ GUARDED_BY(lock_);
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex lock_ ACQUIRED_AFTER(Locks::abort_lock_);
ConditionVariable condition_ GUARDED_BY(lock_);
};
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 994e235..4f2fc07 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -30,11 +30,11 @@
explicit MallocAllocator() {}
~MallocAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
return calloc(sizeof(uint8_t), size);
}
- virtual void Free(void* p) {
+ void Free(void* p) {
free(p);
}
@@ -49,13 +49,15 @@
explicit NoopAllocator() {}
~NoopAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
+ UNUSED(size);
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
- return NULL;
+ UNREACHABLE();
}
- virtual void Free(void* p) {
+ void Free(void* p) {
// Noop.
+ UNUSED(p);
}
private:
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 95dd407..5a09c96 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -101,7 +101,7 @@
// Tracking allocator for use with STL types, tracks how much memory is used.
template<class T, AllocatorTag kTag>
-class TrackingAllocatorImpl {
+class TrackingAllocatorImpl : public std::allocator<T> {
public:
typedef typename std::allocator<T>::value_type value_type;
typedef typename std::allocator<T>::size_type size_type;
@@ -114,11 +114,12 @@
// Used internally by STL data structures.
template <class U>
TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) throw() {
+ UNUSED(alloc);
}
// Used internally by STL data structures.
TrackingAllocatorImpl() throw() {
- COMPILE_ASSERT(kTag < kAllocatorTagCount, must_be_less_than_count);
+ static_assert(kTag < kAllocatorTagCount, "kTag must be less than kAllocatorTagCount");
}
// Enables an allocator for objects of one type to allocate storage for objects of another type.
@@ -129,6 +130,7 @@
};
pointer allocate(size_type n, const_pointer hint = 0) {
+ UNUSED(hint);
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
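
Because TrackingAllocatorImpl now derives from std::allocator<T>, it can be plugged straight into standard containers; a hedged sketch (kAllocatorTagExample stands in for a real AllocatorTag enumerator and is not a name from this patch):

#include <vector>

#include "base/allocator.h"

// Every allocation made through this vector is reported to TrackedAllocators
// under the chosen tag, so per-subsystem memory use can be dumped later.
using TrackedU32Vector =
    std::vector<uint32_t, art::TrackingAllocatorImpl<uint32_t, art::kAllocatorTagExample>>;

void Example() {
  TrackedU32Vector values;
  values.push_back(42u);   // Goes through TrackingAllocatorImpl::allocate.
}
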
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 3d2f0de..4390180 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -16,6 +16,9 @@
#include "bit_vector.h"
+#include <limits>
+#include <sstream>
+
#include "allocator.h"
#include "bit_vector-inl.h"
@@ -38,8 +41,8 @@
storage_size_(storage_size),
allocator_(allocator),
expandable_(expandable) {
- COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes);
- COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits);
+ static_assert(sizeof(*storage_) == kWordBytes, "word bytes");
+ static_assert(sizeof(*storage_) * 8u == kWordBits, "word bits");
if (storage_ == nullptr) {
storage_size_ = BitsToWords(start_bits);
storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * kWordBytes));
@@ -145,10 +148,7 @@
// Is the storage size smaller than src's?
if (storage_size_ < union_with_size) {
- changed = true;
-
- // Set it to reallocate.
- SetBit(highest_bit);
+ EnsureSize(highest_bit);
// Paranoid: storage size should be big enough to hold this bit now.
DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
@@ -219,13 +219,13 @@
uint32_t idx;
// We can set every storage element with -1.
for (idx = 0; idx < WordIndex(num_bits); idx++) {
- storage_[idx] = -1;
+ storage_[idx] = std::numeric_limits<uint32_t>::max();
}
// Handle the potentially last few bits.
uint32_t rem_num_bits = num_bits & 0x1f;
if (rem_num_bits != 0) {
- storage_[idx] = (1 << rem_num_bits) - 1;
+ storage_[idx] = (1U << rem_num_bits) - 1;
++idx;
}
@@ -321,7 +321,12 @@
memcpy(new_storage, storage_, storage_size_ * kWordBytes);
// Zero out the new storage words.
memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
- // TOTO: collect stats on space wasted because of resize.
+ // TODO: collect stats on space wasted because of resize.
+
+ // Free old storage.
+ allocator_->Free(storage_);
+
+ // Set fields.
storage_ = new_storage;
storage_size_ = new_size;
}
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index df5d79d..31fd0e7 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -141,4 +141,30 @@
EXPECT_EQ(64u, bv.NumSetBits());
}
+TEST(BitVector, UnionIfNotIn) {
+ {
+ BitVector first(2, true, Allocator::GetMallocAllocator());
+ BitVector second(5, true, Allocator::GetMallocAllocator());
+ BitVector third(5, true, Allocator::GetMallocAllocator());
+
+ second.SetBit(64);
+ third.SetBit(64);
+ bool changed = first.UnionIfNotIn(&second, &third);
+ EXPECT_EQ(0u, first.NumSetBits());
+ EXPECT_FALSE(changed);
+ }
+
+ {
+ BitVector first(2, true, Allocator::GetMallocAllocator());
+ BitVector second(5, true, Allocator::GetMallocAllocator());
+ BitVector third(5, true, Allocator::GetMallocAllocator());
+
+ second.SetBit(64);
+ bool changed = first.UnionIfNotIn(&second, &third);
+ EXPECT_EQ(1u, first.NumSetBits());
+ EXPECT_TRUE(changed);
+ EXPECT_TRUE(first.IsBitSet(64));
+ }
+}
+
} // namespace art
diff --git a/runtime/base/casts.h b/runtime/base/casts.h
index be94c2e..c7e39a2 100644
--- a/runtime/base/casts.h
+++ b/runtime/base/casts.h
@@ -19,6 +19,8 @@
#include <assert.h>
#include <string.h>
+#include <type_traits>
+
#include "base/macros.h"
namespace art {
@@ -65,16 +67,9 @@
template<typename To, typename From> // use like this: down_cast<T*>(foo);
inline To down_cast(From* f) { // so we only accept pointers
- // Ensures that To is a sub-type of From *. This test is here only
- // for compile-time type checking, and has no overhead in an
- // optimized build at run-time, as it will be optimized away
- // completely.
- if (false) {
- implicit_cast<From*, To>(0);
- }
+ static_assert(std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
+ "down_cast unsafe as To is not a subtype of From");
- //
- // assert(f == NULL || dynamic_cast<To>(f) != NULL); // RTTI: debug mode only!
return static_cast<To>(f);
}
@@ -82,7 +77,7 @@
inline Dest bit_cast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
- COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), verify_sizes_are_equal);
+ static_assert(sizeof(Dest) == sizeof(Source), "sizes should be equal");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
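
The static_assert above turns a bad down_cast into a compile error instead of a silently wrong pointer; a small illustrative sketch (Base, Derived and Unrelated are hypothetical types):

struct Base { virtual ~Base() {} };
struct Derived : Base {};
struct Unrelated {};

void Example(Base* b) {
  // Compiles: Derived is a subtype of Base.
  Derived* d = art::down_cast<Derived*>(b);
  // Would now fail to compile with "down_cast unsafe as To is not a subtype of From":
  // Unrelated* u = art::down_cast<Unrelated*>(b);
  (void)d;
}
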
diff --git a/runtime/base/dumpable.h b/runtime/base/dumpable.h
new file mode 100644
index 0000000..3c316cc
--- /dev/null
+++ b/runtime/base/dumpable.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_DUMPABLE_H_
+#define ART_RUNTIME_BASE_DUMPABLE_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+// A convenience to allow any class with a "Dump(std::ostream& os)" member function
+// but without an operator<< to be used as if it had an operator<<. Use like this:
+//
+// os << Dumpable<MyType>(my_type_instance);
+//
+template<typename T>
+class Dumpable FINAL {
+ public:
+ explicit Dumpable(const T& value) : value_(value) {
+ }
+
+ void Dump(std::ostream& os) const {
+ value_.Dump(os);
+ }
+
+ private:
+ const T& value_;
+
+ DISALLOW_COPY_AND_ASSIGN(Dumpable);
+};
+
+template<typename T>
+std::ostream& operator<<(std::ostream& os, const Dumpable<T>& rhs) {
+ rhs.Dump(os);
+ return os;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_DUMPABLE_H_
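
A short sketch, outside this change, of how the new Dumpable<T> adapter is used. The Stats type is hypothetical, standing in for any class with a Dump(std::ostream&) member but no operator<<; because the adapter holds a const reference, it also covers what ConstDumpable did in the old logging.h:

    #include <iostream>
    #include "base/dumpable.h"

    // Hypothetical type with a Dump() member but no stream insertion operator.
    class Stats {
     public:
      void Dump(std::ostream& os) const { os << "allocations=" << allocations_; }
     private:
      int allocations_ = 0;
    };

    void PrintStats(const Stats& stats) {
      // Dumpable adapts Dump() to operator<<.
      std::cout << art::Dumpable<Stats>(stats) << std::endl;
    }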
diff --git a/runtime/base/hash_map.h b/runtime/base/hash_map.h
new file mode 100644
index 0000000..c0f903f
--- /dev/null
+++ b/runtime/base/hash_map.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_HASH_MAP_H_
+#define ART_RUNTIME_BASE_HASH_MAP_H_
+
+#include <utility>
+
+#include "hash_set.h"
+
+namespace art {
+
+template <typename Fn>
+class HashMapWrapper {
+ public:
+ // Hash function.
+ template <class Key, class Value>
+ size_t operator()(const std::pair<Key, Value>& pair) const {
+ return fn_(pair.first);
+ }
+ template <class Key>
+ size_t operator()(const Key& key) const {
+ return fn_(key);
+ }
+ template <class Key, class Value>
+ bool operator()(const std::pair<Key, Value>& a, const std::pair<Key, Value>& b) const {
+ return fn_(a.first, b.first);
+ }
+ template <class Key, class Value, class Element>
+ bool operator()(const std::pair<Key, Value>& a, const Element& element) const {
+ return fn_(a.first, element);
+ }
+
+ private:
+ Fn fn_;
+};
+
+template <class Key, class Value, class EmptyFn = DefaultEmptyFn<Key>,
+ class HashFn = std::hash<Key>, class Pred = std::equal_to<Key>,
+ class Alloc = std::allocator<std::pair<Key, Value>>>
+class HashMap : public HashSet<std::pair<Key, Value>, EmptyFn, HashMapWrapper<HashFn>,
+ HashMapWrapper<Pred>, Alloc> {
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_HASH_MAP_H_
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
new file mode 100644
index 0000000..992e5b1
--- /dev/null
+++ b/runtime/base/hash_set.h
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_HASH_SET_H_
+#define ART_RUNTIME_BASE_HASH_SET_H_
+
+#include <functional>
+#include <memory>
+#include <stdint.h>
+#include <utility>
+
+#include "logging.h"
+
+namespace art {
+
+// Default functor for marking an item as empty and testing whether an item is empty.
+template <class T>
+class DefaultEmptyFn {
+ public:
+ void MakeEmpty(T& item) const {
+ item = T();
+ }
+ bool IsEmpty(const T& item) const {
+ return item == T();
+ }
+};
+
+template <class T>
+class DefaultEmptyFn<T*> {
+ public:
+ void MakeEmpty(T*& item) const {
+ item = nullptr;
+ }
+ bool IsEmpty(const T*& item) const {
+ return item == nullptr;
+ }
+};
+
+// Low memory version of a hash set; uses less memory than std::unordered_set since elements
+// aren't boxed. Uses linear probing.
+// EmptyFn needs to implement two functions: MakeEmpty(T& item) and IsEmpty(const T& item).
+template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
+ class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
+class HashSet {
+ public:
+ static constexpr double kDefaultMinLoadFactor = 0.5;
+ static constexpr double kDefaultMaxLoadFactor = 0.9;
+ static constexpr size_t kMinBuckets = 1000;
+
+ class Iterator {
+ public:
+ Iterator(const Iterator&) = default;
+ Iterator(HashSet* hash_set, size_t index) : hash_set_(hash_set), index_(index) {
+ }
+ Iterator& operator=(const Iterator&) = default;
+ bool operator==(const Iterator& other) const {
+ return hash_set_ == other.hash_set_ && index_ == other.index_;
+ }
+ bool operator!=(const Iterator& other) const {
+ return !(*this == other);
+ }
+ Iterator operator++() { // Value after modification.
+ index_ = NextNonEmptySlot(index_);
+ return *this;
+ }
+ Iterator operator++(int) {
+ Iterator temp = *this;
+ index_ = NextNonEmptySlot(index_);
+ return temp;
+ }
+ T& operator*() {
+ DCHECK(!hash_set_->IsFreeSlot(GetIndex()));
+ return hash_set_->ElementForIndex(index_);
+ }
+ const T& operator*() const {
+ DCHECK(!hash_set_->IsFreeSlot(GetIndex()));
+ return hash_set_->ElementForIndex(index_);
+ }
+ T* operator->() {
+ return &**this;
+ }
+ const T* operator->() const {
+ return &**this;
+ }
+ // TODO: Operator -- --(int)
+
+ private:
+ HashSet* hash_set_;
+ size_t index_;
+
+ size_t GetIndex() const {
+ return index_;
+ }
+ size_t NextNonEmptySlot(size_t index) const {
+ const size_t num_buckets = hash_set_->NumBuckets();
+ DCHECK_LT(index, num_buckets);
+ do {
+ ++index;
+ } while (index < num_buckets && hash_set_->IsFreeSlot(index));
+ return index;
+ }
+
+ friend class HashSet;
+ };
+
+ void Clear() {
+ DeallocateStorage();
+ AllocateStorage(1);
+ num_elements_ = 0;
+ elements_until_expand_ = 0;
+ }
+ HashSet() : num_elements_(0), num_buckets_(0), data_(nullptr),
+ min_load_factor_(kDefaultMinLoadFactor), max_load_factor_(kDefaultMaxLoadFactor) {
+ Clear();
+ }
+ HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
+ *this = other;
+ }
+ HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
+ *this = std::move(other);
+ }
+ ~HashSet() {
+ DeallocateStorage();
+ }
+ HashSet& operator=(HashSet&& other) {
+ std::swap(data_, other.data_);
+ std::swap(num_buckets_, other.num_buckets_);
+ std::swap(num_elements_, other.num_elements_);
+ std::swap(elements_until_expand_, other.elements_until_expand_);
+ std::swap(min_load_factor_, other.min_load_factor_);
+ std::swap(max_load_factor_, other.max_load_factor_);
+ return *this;
+ }
+ HashSet& operator=(const HashSet& other) {
+ DeallocateStorage();
+ AllocateStorage(other.NumBuckets());
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ ElementForIndex(i) = other.data_[i];
+ }
+ num_elements_ = other.num_elements_;
+ elements_until_expand_ = other.elements_until_expand_;
+ min_load_factor_ = other.min_load_factor_;
+ max_load_factor_ = other.max_load_factor_;
+ return *this;
+ }
+ // Lower case for C++11 range-based for loops.
+ Iterator begin() {
+ Iterator ret(this, 0);
+ if (num_buckets_ != 0 && IsFreeSlot(ret.GetIndex())) {
+ ++ret; // Skip all the empty slots.
+ }
+ return ret;
+ }
+ // Lower case for C++11 range-based for loops.
+ Iterator end() {
+ return Iterator(this, NumBuckets());
+ }
+ bool Empty() {
+ return begin() == end();
+ }
+ // Erase algorithm:
+ // Make an empty slot where the iterator is pointing.
+ // Scan forwards until we hit another empty slot.
+ // If an element in between doesn't rehash to the range from the current empty slot to the
+ // iterator, it must have been probed from before the empty slot; in that case we can move it
+ // to the empty slot and set the empty slot to be the location we just moved from.
+ // Relies on maintaining the invariant that there are no empty slots from the 'ideal' index of
+ // an element to its actual location/index.
+ Iterator Erase(Iterator it) {
+ // empty_index is the index that will become empty.
+ size_t empty_index = it.GetIndex();
+ DCHECK(!IsFreeSlot(empty_index));
+ size_t next_index = empty_index;
+ bool filled = false; // True if we filled the empty index.
+ while (true) {
+ next_index = NextIndex(next_index);
+ T& next_element = ElementForIndex(next_index);
+ // If the next element is empty, we are done. Make sure to clear the current empty index.
+ if (emptyfn_.IsEmpty(next_element)) {
+ emptyfn_.MakeEmpty(ElementForIndex(empty_index));
+ break;
+ }
+ // Otherwise try to see if the next element can fill the current empty index.
+ const size_t next_hash = hashfn_(next_element);
+ // Calculate the ideal index; if it is within empty_index + 1 to next_index then there is
+ // nothing we can do.
+ size_t next_ideal_index = IndexForHash(next_hash);
+ // Loop around if needed for our check.
+ size_t unwrapped_next_index = next_index;
+ if (unwrapped_next_index < empty_index) {
+ unwrapped_next_index += NumBuckets();
+ }
+ // Loop around if needed for our check.
+ size_t unwrapped_next_ideal_index = next_ideal_index;
+ if (unwrapped_next_ideal_index < empty_index) {
+ unwrapped_next_ideal_index += NumBuckets();
+ }
+ if (unwrapped_next_ideal_index <= empty_index ||
+ unwrapped_next_ideal_index > unwrapped_next_index) {
+ // If the target index isn't within our current range it must have been probed from before
+ // the empty index.
+ ElementForIndex(empty_index) = std::move(next_element);
+ filled = true; // TODO: Optimize
+ empty_index = next_index;
+ }
+ }
+ --num_elements_;
+ // If we didn't fill the slot then we need to go to the next non-free slot.
+ if (!filled) {
+ ++it;
+ }
+ return it;
+ }
+ // Find an element, returns end() if not found.
+ // Allows custom K types; an example of when this is useful:
+ // a set of Class* keyed by name, where we want to find a class with a given name but can't
+ // afford to allocate a dummy object in the heap just for the lookup.
+ template <typename K>
+ Iterator Find(const K& element) {
+ return FindWithHash(element, hashfn_(element));
+ }
+ template <typename K>
+ Iterator FindWithHash(const K& element, size_t hash) {
+ DCHECK_EQ(hashfn_(element), hash);
+ size_t index = IndexForHash(hash);
+ while (true) {
+ T& slot = ElementForIndex(index);
+ if (emptyfn_.IsEmpty(slot)) {
+ return end();
+ }
+ if (pred_(slot, element)) {
+ return Iterator(this, index);
+ }
+ index = NextIndex(index);
+ }
+ }
+ // Insert an element, allows duplicates.
+ void Insert(const T& element) {
+ InsertWithHash(element, hashfn_(element));
+ }
+ void InsertWithHash(const T& element, size_t hash) {
+ DCHECK_EQ(hash, hashfn_(element));
+ if (num_elements_ >= elements_until_expand_) {
+ Expand();
+ DCHECK_LT(num_elements_, elements_until_expand_);
+ }
+ const size_t index = FirstAvailableSlot(IndexForHash(hash));
+ data_[index] = element;
+ ++num_elements_;
+ }
+ size_t Size() const {
+ return num_elements_;
+ }
+ void ShrinkToMaximumLoad() {
+ Resize(Size() / max_load_factor_);
+ }
+ // Total distance that inserted elements were probed. Used for measuring how good hash
+ // functions are.
+ size_t TotalProbeDistance() const {
+ size_t total = 0;
+ for (size_t i = 0; i < NumBuckets(); ++i) {
+ const T& element = ElementForIndex(i);
+ if (!emptyfn_.IsEmpty(element)) {
+ size_t ideal_location = IndexForHash(hashfn_(element));
+ if (ideal_location > i) {
+ total += i + NumBuckets() - ideal_location;
+ } else {
+ total += i - ideal_location;
+ }
+ }
+ }
+ return total;
+ }
+ // Calculate the current load factor and return it.
+ double CalculateLoadFactor() const {
+ return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
+ }
+ // Make sure that everything reinserts in the right spot. Returns the number of errors.
+ size_t Verify() {
+ size_t errors = 0;
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ T& element = data_[i];
+ if (!emptyfn_.IsEmpty(element)) {
+ T temp;
+ emptyfn_.MakeEmpty(temp);
+ std::swap(temp, element);
+ size_t first_slot = FirstAvailableSlot(IndexForHash(hashfn_(temp)));
+ if (i != first_slot) {
+ LOG(ERROR) << "Element " << i << " should be in slot " << first_slot;
+ ++errors;
+ }
+ std::swap(temp, element);
+ }
+ }
+ return errors;
+ }
+
+ private:
+ T& ElementForIndex(size_t index) {
+ DCHECK_LT(index, NumBuckets());
+ DCHECK(data_ != nullptr);
+ return data_[index];
+ }
+ const T& ElementForIndex(size_t index) const {
+ DCHECK_LT(index, NumBuckets());
+ DCHECK(data_ != nullptr);
+ return data_[index];
+ }
+ size_t IndexForHash(size_t hash) const {
+ return hash % num_buckets_;
+ }
+ size_t NextIndex(size_t index) const {
+ if (UNLIKELY(++index >= num_buckets_)) {
+ DCHECK_EQ(index, NumBuckets());
+ return 0;
+ }
+ return index;
+ }
+ bool IsFreeSlot(size_t index) const {
+ return emptyfn_.IsEmpty(ElementForIndex(index));
+ }
+ size_t NumBuckets() const {
+ return num_buckets_;
+ }
+ // Allocate a number of buckets.
+ void AllocateStorage(size_t num_buckets) {
+ num_buckets_ = num_buckets;
+ data_ = allocfn_.allocate(num_buckets_);
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ allocfn_.construct(allocfn_.address(data_[i]));
+ emptyfn_.MakeEmpty(data_[i]);
+ }
+ }
+ void DeallocateStorage() {
+ if (num_buckets_ != 0) {
+ for (size_t i = 0; i < NumBuckets(); ++i) {
+ allocfn_.destroy(allocfn_.address(data_[i]));
+ }
+ allocfn_.deallocate(data_, NumBuckets());
+ data_ = nullptr;
+ num_buckets_ = 0;
+ }
+ }
+ // Expand the set based on the load factors.
+ void Expand() {
+ size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
+ if (min_index < kMinBuckets) {
+ min_index = kMinBuckets;
+ }
+ // Resize based on the minimum load factor.
+ Resize(min_index);
+ // When we hit elements_until_expand_, we are at the max load factor and must expand again.
+ elements_until_expand_ = NumBuckets() * max_load_factor_;
+ }
+ // Expand / shrink the table to the new specified size.
+ void Resize(size_t new_size) {
+ DCHECK_GE(new_size, Size());
+ T* old_data = data_;
+ size_t old_num_buckets = num_buckets_;
+ // Reinsert all of the old elements.
+ AllocateStorage(new_size);
+ for (size_t i = 0; i < old_num_buckets; ++i) {
+ T& element = old_data[i];
+ if (!emptyfn_.IsEmpty(element)) {
+ data_[FirstAvailableSlot(IndexForHash(hashfn_(element)))] = std::move(element);
+ }
+ allocfn_.destroy(allocfn_.address(element));
+ }
+ allocfn_.deallocate(old_data, old_num_buckets);
+ }
+ ALWAYS_INLINE size_t FirstAvailableSlot(size_t index) const {
+ while (!emptyfn_.IsEmpty(data_[index])) {
+ index = NextIndex(index);
+ }
+ return index;
+ }
+
+ Alloc allocfn_; // Allocator function.
+ HashFn hashfn_; // Hashing function.
+ EmptyFn emptyfn_; // IsEmpty/SetEmpty function.
+ Pred pred_; // Equals function.
+ size_t num_elements_; // Number of inserted elements.
+ size_t num_buckets_; // Number of hash table buckets.
+ size_t elements_until_expand_; // Maximum number of elements until we expand the table.
+ T* data_; // Backing storage.
+ double min_load_factor_;
+ double max_load_factor_;
+
+ friend class Iterator;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_HASH_SET_H_
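
A minimal sketch of the EmptyFn contract for the new HashSet (EmptyIntFn is hypothetical, not part of this change): the element type must reserve one value that marks free slots, and that value must never be inserted. Here -1 is the reserved marker for a set of non-negative ints; string keys use an empty string (as hash_set_test.cc below does) and pointer keys use nullptr via DefaultEmptyFn<T*>:

    #include "base/hash_set.h"

    // Hypothetical EmptyFn: -1 marks a free slot, so -1 must never be inserted.
    struct EmptyIntFn {
      void MakeEmpty(int& item) const { item = -1; }
      bool IsEmpty(const int& item) const { return item == -1; }
    };

    void Example() {
      art::HashSet<int, EmptyIntFn> ints;
      ints.Insert(3);
      ints.Insert(7);
      auto it = ints.Find(3);
      if (it != ints.end()) {
        ints.Erase(it);  // Linear-probing erase; later elements may shift back.
      }
    }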
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
new file mode 100644
index 0000000..5f498d9
--- /dev/null
+++ b/runtime/base/hash_set_test.cc
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hash_set.h"
+
+#include <map>
+#include <sstream>
+#include <string>
+#include <unordered_set>
+
+#include "common_runtime_test.h"
+#include "hash_map.h"
+
+namespace art {
+
+struct IsEmptyFnString {
+ void MakeEmpty(std::string& item) const {
+ item.clear();
+ }
+ bool IsEmpty(const std::string& item) const {
+ return item.empty();
+ }
+};
+
+class HashSetTest : public CommonRuntimeTest {
+ public:
+ HashSetTest() : seed_(97421), unique_number_(0) {
+ }
+ std::string RandomString(size_t len) {
+ std::ostringstream oss;
+ for (size_t i = 0; i < len; ++i) {
+ oss << static_cast<char>('A' + PRand() % 64);
+ }
+ static_assert(' ' < 'A', "space must be less than 'A'");
+ oss << " " << unique_number_++; // Relies on ' ' < 'A'
+ return oss.str();
+ }
+ void SetSeed(size_t seed) {
+ seed_ = seed;
+ }
+ size_t PRand() { // Pseudo random.
+ seed_ = seed_ * 1103515245 + 12345;
+ return seed_;
+ }
+
+ private:
+ size_t seed_;
+ size_t unique_number_;
+};
+
+TEST_F(HashSetTest, TestSmoke) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ const std::string test_string = "hello world 1234";
+ ASSERT_TRUE(hash_set.Empty());
+ ASSERT_EQ(hash_set.Size(), 0U);
+ hash_set.Insert(test_string);
+ auto it = hash_set.Find(test_string);
+ ASSERT_EQ(*it, test_string);
+ auto after_it = hash_set.Erase(it);
+ ASSERT_TRUE(after_it == hash_set.end());
+ ASSERT_TRUE(hash_set.Empty());
+ ASSERT_EQ(hash_set.Size(), 0U);
+ it = hash_set.Find(test_string);
+ ASSERT_TRUE(it == hash_set.end());
+}
+
+TEST_F(HashSetTest, TestInsertAndErase) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ static constexpr size_t count = 1000;
+ std::vector<std::string> strings;
+ for (size_t i = 0; i < count; ++i) {
+ // Insert a bunch of elements and make sure we can find them.
+ strings.push_back(RandomString(10));
+ hash_set.Insert(strings[i]);
+ auto it = hash_set.Find(strings[i]);
+ ASSERT_TRUE(it != hash_set.end());
+ ASSERT_EQ(*it, strings[i]);
+ }
+ ASSERT_EQ(strings.size(), hash_set.Size());
+ // Try to erase the odd strings.
+ for (size_t i = 1; i < count; i += 2) {
+ auto it = hash_set.Find(strings[i]);
+ ASSERT_TRUE(it != hash_set.end());
+ ASSERT_EQ(*it, strings[i]);
+ hash_set.Erase(it);
+ }
+ // Test removed.
+ for (size_t i = 1; i < count; i += 2) {
+ auto it = hash_set.Find(strings[i]);
+ ASSERT_TRUE(it == hash_set.end());
+ }
+ for (size_t i = 0; i < count; i += 2) {
+ auto it = hash_set.Find(strings[i]);
+ ASSERT_TRUE(it != hash_set.end());
+ ASSERT_EQ(*it, strings[i]);
+ }
+}
+
+TEST_F(HashSetTest, TestIterator) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ ASSERT_TRUE(hash_set.begin() == hash_set.end());
+ static constexpr size_t count = 1000;
+ std::vector<std::string> strings;
+ for (size_t i = 0; i < count; ++i) {
+ // Insert a bunch of elements and make sure we can find them.
+ strings.push_back(RandomString(10));
+ hash_set.Insert(strings[i]);
+ }
+ // Make sure we visit each string exactly once.
+ std::map<std::string, size_t> found_count;
+ for (const std::string& s : hash_set) {
+ ++found_count[s];
+ }
+ for (size_t i = 0; i < count; ++i) {
+ ASSERT_EQ(found_count[strings[i]], 1U);
+ }
+ found_count.clear();
+ // Remove all the elements with iterator erase.
+ for (auto it = hash_set.begin(); it != hash_set.end();) {
+ ++found_count[*it];
+ it = hash_set.Erase(it);
+ ASSERT_EQ(hash_set.Verify(), 0U);
+ }
+ for (size_t i = 0; i < count; ++i) {
+ ASSERT_EQ(found_count[strings[i]], 1U);
+ }
+}
+
+TEST_F(HashSetTest, TestSwap) {
+ HashSet<std::string, IsEmptyFnString> hash_seta, hash_setb;
+ std::vector<std::string> strings;
+ static constexpr size_t count = 1000;
+ for (size_t i = 0; i < count; ++i) {
+ strings.push_back(RandomString(10));
+ hash_seta.Insert(strings[i]);
+ }
+ std::swap(hash_seta, hash_setb);
+ hash_seta.Insert("TEST");
+ hash_setb.Insert("TEST2");
+ for (size_t i = 0; i < count; ++i) {
+ strings.push_back(RandomString(10));
+ hash_seta.Insert(strings[i]);
+ }
+}
+
+TEST_F(HashSetTest, TestStress) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ std::unordered_multiset<std::string> std_set;
+ std::vector<std::string> strings;
+ static constexpr size_t string_count = 2000;
+ static constexpr size_t operations = 100000;
+ static constexpr size_t target_size = 5000;
+ for (size_t i = 0; i < string_count; ++i) {
+ strings.push_back(RandomString(i % 10 + 1));
+ }
+ const size_t seed = time(nullptr);
+ SetSeed(seed);
+ LOG(INFO) << "Starting stress test with seed " << seed;
+ for (size_t i = 0; i < operations; ++i) {
+ ASSERT_EQ(hash_set.Size(), std_set.size());
+ size_t delta = std::abs(static_cast<ssize_t>(target_size) -
+ static_cast<ssize_t>(hash_set.Size()));
+ size_t n = PRand();
+ if (n % target_size == 0) {
+ hash_set.Clear();
+ std_set.clear();
+ ASSERT_TRUE(hash_set.Empty());
+ ASSERT_TRUE(std_set.empty());
+ } else if (n % target_size < delta) {
+ // Skew towards adding elements until we are at the desired size.
+ const std::string& s = strings[PRand() % string_count];
+ hash_set.Insert(s);
+ std_set.insert(s);
+ ASSERT_EQ(*hash_set.Find(s), *std_set.find(s));
+ } else {
+ const std::string& s = strings[PRand() % string_count];
+ auto it1 = hash_set.Find(s);
+ auto it2 = std_set.find(s);
+ ASSERT_EQ(it1 == hash_set.end(), it2 == std_set.end());
+ if (it1 != hash_set.end()) {
+ ASSERT_EQ(*it1, *it2);
+ hash_set.Erase(it1);
+ std_set.erase(it2);
+ }
+ }
+ }
+}
+
+struct IsEmptyStringPair {
+ void MakeEmpty(std::pair<std::string, int>& pair) const {
+ pair.first.clear();
+ }
+ bool IsEmpty(const std::pair<std::string, int>& pair) const {
+ return pair.first.empty();
+ }
+};
+
+TEST_F(HashSetTest, TestHashMap) {
+ HashMap<std::string, int, IsEmptyStringPair> hash_map;
+ hash_map.Insert(std::make_pair(std::string("abcd"), 123));
+ hash_map.Insert(std::make_pair(std::string("abcd"), 124));
+ hash_map.Insert(std::make_pair(std::string("bags"), 444));
+ auto it = hash_map.Find(std::string("abcd"));
+ ASSERT_EQ(it->second, 123);
+ hash_map.Erase(it);
+ it = hash_map.Find(std::string("abcd"));
+ ASSERT_EQ(it->second, 124);
+}
+
+} // namespace art
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 5af597b..b781d60 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -16,17 +16,25 @@
#include "logging.h"
+#include <sstream>
+
#include "base/mutex.h"
#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
+// Headers for LogMessage::LogLine.
+#ifdef HAVE_ANDROID_OS
+#include "cutils/log.h"
+#else
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
namespace art {
LogVerbosity gLogVerbosity;
-std::vector<std::string> gVerboseMethods;
-
unsigned int gAborting = 0;
static LogSeverity gMinimumLogSeverity = INFO;
@@ -47,14 +55,6 @@
: "art";
}
-// Configure logging based on ANDROID_LOG_TAGS environment variable.
-// We need to parse a string that looks like
-//
-// *:v jdwp:d dalvikvm:d dalvikvm-gc:i dalvikvmi:i
-//
-// The tag (or '*' for the global level) comes first, followed by a colon
-// and a letter indicating the minimum priority level we're expected to log.
-// This can be used to reveal or conceal logs with specific tags.
void InitLogging(char* argv[]) {
if (gCmdLine.get() != nullptr) {
return;
@@ -65,22 +65,22 @@
// Stash the command line for later use. We can use /proc/self/cmdline on Linux to recover this,
// but we don't have that luxury on the Mac, and there are a couple of argv[0] variants that are
// commonly used.
- if (argv != NULL) {
+ if (argv != nullptr) {
gCmdLine.reset(new std::string(argv[0]));
- for (size_t i = 1; argv[i] != NULL; ++i) {
+ for (size_t i = 1; argv[i] != nullptr; ++i) {
gCmdLine->append(" ");
gCmdLine->append(argv[i]);
}
gProgramInvocationName.reset(new std::string(argv[0]));
const char* last_slash = strrchr(argv[0], '/');
- gProgramInvocationShortName.reset(new std::string((last_slash != NULL) ? last_slash + 1
+ gProgramInvocationShortName.reset(new std::string((last_slash != nullptr) ? last_slash + 1
: argv[0]));
} else {
- // TODO: fall back to /proc/self/cmdline when argv is NULL on Linux
+ // TODO: fall back to /proc/self/cmdline when argv is NULL on Linux.
gCmdLine.reset(new std::string("<unset>"));
}
const char* tags = getenv("ANDROID_LOG_TAGS");
- if (tags == NULL) {
+ if (tags == nullptr) {
return;
}
@@ -119,47 +119,145 @@
}
}
-LogMessageData::LogMessageData(const char* file, int line, LogSeverity severity, int error)
- : file(file),
- line_number(line),
- severity(severity),
- error(error) {
- const char* last_slash = strrchr(file, '/');
- file = (last_slash == NULL) ? file : last_slash + 1;
-}
+// This indirection greatly reduces the stack impact of having
+// lots of checks/logging in a function.
+class LogMessageData {
+ public:
+ LogMessageData(const char* file, unsigned int line, LogSeverity severity, int error)
+ : file_(file),
+ line_number_(line),
+ severity_(severity),
+ error_(error) {
+ const char* last_slash = strrchr(file, '/');
+ file = (last_slash == nullptr) ? file : last_slash + 1;
+ }
+ const char* GetFile() const {
+ return file_;
+ }
+
+ unsigned int GetLineNumber() const {
+ return line_number_;
+ }
+
+ LogSeverity GetSeverity() const {
+ return severity_;
+ }
+
+ int GetError() const {
+ return error_;
+ }
+
+ std::ostream& GetBuffer() {
+ return buffer_;
+ }
+
+ std::string ToString() const {
+ return buffer_.str();
+ }
+
+ private:
+ std::ostringstream buffer_;
+ const char* const file_;
+ const unsigned int line_number_;
+ const LogSeverity severity_;
+ const int error_;
+
+ DISALLOW_COPY_AND_ASSIGN(LogMessageData);
+};
+
+
+LogMessage::LogMessage(const char* file, unsigned int line, LogSeverity severity, int error)
+ : data_(new LogMessageData(file, line, severity, error)) {
+}
LogMessage::~LogMessage() {
- if (data_->severity < gMinimumLogSeverity) {
+ if (data_->GetSeverity() < gMinimumLogSeverity) {
return; // No need to format something we're not going to output.
}
// Finish constructing the message.
- if (data_->error != -1) {
- data_->buffer << ": " << strerror(data_->error);
+ if (data_->GetError() != -1) {
+ data_->GetBuffer() << ": " << strerror(data_->GetError());
}
- std::string msg(data_->buffer.str());
+ std::string msg(data_->ToString());
// Do the actual logging with the lock held.
{
MutexLock mu(Thread::Current(), *Locks::logging_lock_);
if (msg.find('\n') == std::string::npos) {
- LogLine(*data_, msg.c_str());
+ LogLine(data_->GetFile(), data_->GetLineNumber(), data_->GetSeverity(), msg.c_str());
} else {
msg += '\n';
size_t i = 0;
while (i < msg.size()) {
size_t nl = msg.find('\n', i);
msg[nl] = '\0';
- LogLine(*data_, &msg[i]);
+ LogLine(data_->GetFile(), data_->GetLineNumber(), data_->GetSeverity(), &msg[i]);
i = nl + 1;
}
}
}
// Abort if necessary.
- if (data_->severity == FATAL) {
+ if (data_->GetSeverity() == FATAL) {
Runtime::Abort();
}
}
+std::ostream& LogMessage::stream() {
+ return data_->GetBuffer();
+}
+
+#ifdef HAVE_ANDROID_OS
+static const android_LogPriority kLogSeverityToAndroidLogPriority[] = {
+ ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN,
+ ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL
+};
+static_assert(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1,
+ "Mismatch in size of kLogSeverityToAndroidLogPriority and values in LogSeverity");
+#endif
+
+void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_severity,
+ const char* message) {
+#ifdef HAVE_ANDROID_OS
+ const char* tag = ProgramInvocationShortName();
+ int priority = kLogSeverityToAndroidLogPriority[log_severity];
+ if (priority == ANDROID_LOG_FATAL) {
+ LOG_PRI(priority, tag, "%s:%u] %s", file, line, message);
+ } else {
+ LOG_PRI(priority, tag, "%s", message);
+ }
+#else
+ static const char* log_characters = "VDIWEFF";
+ CHECK_EQ(strlen(log_characters), INTERNAL_FATAL + 1U);
+ char severity = log_characters[log_severity];
+ fprintf(stderr, "%s %c %5d %5d %s:%u] %s\n",
+ ProgramInvocationShortName(), severity, getpid(), ::art::GetTid(), file, line, message);
+#endif
+}
+
+void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity,
+ const char* message) {
+#ifdef HAVE_ANDROID_OS
+ // TODO: be more conservative on stack usage here.
+ LogLine(file, line, log_severity, message);
+#else
+ static const char* log_characters = "VDIWEFF";
+ CHECK_EQ(strlen(log_characters), INTERNAL_FATAL + 1U);
+
+ const char* program_name = ProgramInvocationShortName();
+ write(STDERR_FILENO, program_name, strlen(program_name));
+ write(STDERR_FILENO, " ", 1);
+ write(STDERR_FILENO, &log_characters[log_severity], 1);
+ write(STDERR_FILENO, " ", 1);
+ // TODO: pid and tid.
+ write(STDERR_FILENO, file, strlen(file));
+ // TODO: line.
+ UNUSED(line);
+ write(STDERR_FILENO, "] ", 2);
+ write(STDERR_FILENO, message, strlen(message));
+ write(STDERR_FILENO, "\n", 1);
+#endif
+}
+
} // namespace art
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index cf3e763..ae83e33 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -17,29 +17,113 @@
#ifndef ART_RUNTIME_BASE_LOGGING_H_
#define ART_RUNTIME_BASE_LOGGING_H_
-#include <cerrno>
-#include <cstring>
-#include <iostream> // NOLINT
#include <memory>
-#include <sstream>
-#include <signal.h>
-#include <vector>
+#include <ostream>
#include "base/macros.h"
-#include "log_severity.h"
+namespace art {
+
+enum LogSeverity {
+ VERBOSE,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+ FATAL,
+ INTERNAL_FATAL, // For Runtime::Abort.
+};
+
+// The members of this struct are the valid arguments to VLOG and VLOG_IS_ON in code,
+// and the "-verbose:" command line argument.
+struct LogVerbosity {
+ bool class_linker; // Enabled with "-verbose:class".
+ bool compiler;
+ bool gc;
+ bool heap;
+ bool jdwp;
+ bool jni;
+ bool monitor;
+ bool profiler;
+ bool signals;
+ bool startup;
+ bool third_party_jni; // Enabled with "-verbose:third-party-jni".
+ bool threads;
+ bool verifier;
+};
+
+// Global log verbosity setting, initialized by InitLogging.
+extern LogVerbosity gLogVerbosity;
+
+// 0 if not abort, non-zero if an abort is in progress. Used on fatal exit to prevent recursive
+// aborts. Global declaration allows us to disable some error checking to ensure fatal shutdown
+// makes forward progress.
+extern unsigned int gAborting;
+
+// Configure logging based on ANDROID_LOG_TAGS environment variable.
+// We need to parse a string that looks like
+//
+// *:v jdwp:d dalvikvm:d dalvikvm-gc:i dalvikvmi:i
+//
+// The tag (or '*' for the global level) comes first, followed by a colon
+// and a letter indicating the minimum priority level we're expected to log.
+// This can be used to reveal or conceal logs with specific tags.
+extern void InitLogging(char* argv[]);
+
+// Returns the command line used to invoke the current tool or nullptr if InitLogging hasn't been
+// performed.
+extern const char* GetCmdLine();
+
+// The command used to start the ART runtime, such as "/system/bin/dalvikvm". If InitLogging hasn't
+// been performed then just returns "art".
+extern const char* ProgramInvocationName();
+
+// A short version of the command used to start the ART runtime, such as "dalvikvm". If InitLogging
+// hasn't been performed then just returns "art".
+extern const char* ProgramInvocationShortName();
+
+// Logs a message to logcat on Android otherwise to stderr. If the severity is FATAL it also causes
+// an abort. For example: LOG(FATAL) << "We didn't expect to reach here";
+#define LOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, -1).stream()
+
+// A variant of LOG that also logs the current errno value. To be used when library calls fail.
+#define PLOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, errno).stream()
+
+// Marker that code is yet to be implemented.
+#define UNIMPLEMENTED(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
+
+// Is verbose logging enabled for the given module? The module is a field of LogVerbosity.
+#define VLOG_IS_ON(module) UNLIKELY(::art::gLogVerbosity.module)
+
+// Variant of LOG that logs when verbose logging is enabled for a module. For example,
+// VLOG(jni) << "A JNI operation was performed";
+#define VLOG(module) \
+ if (VLOG_IS_ON(module)) \
+ ::art::LogMessage(__FILE__, __LINE__, INFO, -1).stream()
+
+// Return the stream associated with logging for the given module.
+#define VLOG_STREAM(module) ::art::LogMessage(__FILE__, __LINE__, INFO, -1).stream()
+
+// Check whether condition x holds and LOG(FATAL) if not. The value of the expression x is only
+// evaluated once. Extra logging can be appended using << after. For example,
+// CHECK(false == true) results in a log message of "Check failed: false == true".
#define CHECK(x) \
if (UNLIKELY(!(x))) \
- ::art::LogMessage(__FILE__, __LINE__, FATAL, -1).stream() \
+ ::art::LogMessage(__FILE__, __LINE__, ::art::FATAL, -1).stream() \
<< "Check failed: " #x << " "
+// Helper for CHECK_xx(x,y) macros.
#define CHECK_OP(LHS, RHS, OP) \
for (auto _values = ::art::MakeEagerEvaluator(LHS, RHS); \
UNLIKELY(!(_values.lhs OP _values.rhs)); /* empty */) \
- ::art::LogMessage(__FILE__, __LINE__, FATAL, -1).stream() \
+ ::art::LogMessage(__FILE__, __LINE__, ::art::FATAL, -1).stream() \
<< "Check failed: " << #LHS << " " << #OP << " " << #RHS \
<< " (" #LHS "=" << _values.lhs << ", " #RHS "=" << _values.rhs << ") "
+
+// Check whether a condition holds between x and y, LOG(FATAL) if not. The expressions x and y
+// are evaluated only once. Extra logging can be appended using << after. For example,
+// CHECK_NE(0 == 1, false) results in "Check failed: false != false (0==1=false, false=false) ".
#define CHECK_EQ(x, y) CHECK_OP(x, y, ==)
#define CHECK_NE(x, y) CHECK_OP(x, y, !=)
#define CHECK_LE(x, y) CHECK_OP(x, y, <=)
@@ -47,22 +131,25 @@
#define CHECK_GE(x, y) CHECK_OP(x, y, >=)
#define CHECK_GT(x, y) CHECK_OP(x, y, >)
+// Helper for CHECK_STRxx(s1,s2) macros.
#define CHECK_STROP(s1, s2, sense) \
if (UNLIKELY((strcmp(s1, s2) == 0) != sense)) \
- LOG(FATAL) << "Check failed: " \
- << "\"" << s1 << "\"" \
- << (sense ? " == " : " != ") \
- << "\"" << s2 << "\""
+ LOG(::art::FATAL) << "Check failed: " \
+ << "\"" << s1 << "\"" \
+ << (sense ? " == " : " != ") \
+ << "\"" << s2 << "\""
+// Check for string (const char*) equality between s1 and s2, LOG(FATAL) if not.
#define CHECK_STREQ(s1, s2) CHECK_STROP(s1, s2, true)
#define CHECK_STRNE(s1, s2) CHECK_STROP(s1, s2, false)
+// Perform the pthread function call(args), LOG(FATAL) on error.
#define CHECK_PTHREAD_CALL(call, args, what) \
do { \
int rc = call args; \
if (rc != 0) { \
errno = rc; \
- PLOG(FATAL) << # call << " failed for " << what; \
+ PLOG(::art::FATAL) << # call << " failed for " << what; \
} \
} while (false)
@@ -74,81 +161,34 @@
// n / 2;
// }
#define CHECK_CONSTEXPR(x, out, dummy) \
- (UNLIKELY(!(x))) ? (LOG(FATAL) << "Check failed: " << #x out, dummy) :
+ (UNLIKELY(!(x))) ? (LOG(::art::FATAL) << "Check failed: " << #x out, dummy) :
-#ifndef NDEBUG
-#define DCHECK(x) CHECK(x)
-#define DCHECK_EQ(x, y) CHECK_EQ(x, y)
-#define DCHECK_NE(x, y) CHECK_NE(x, y)
-#define DCHECK_LE(x, y) CHECK_LE(x, y)
-#define DCHECK_LT(x, y) CHECK_LT(x, y)
-#define DCHECK_GE(x, y) CHECK_GE(x, y)
-#define DCHECK_GT(x, y) CHECK_GT(x, y)
-#define DCHECK_STREQ(s1, s2) CHECK_STREQ(s1, s2)
-#define DCHECK_STRNE(s1, s2) CHECK_STRNE(s1, s2)
-#define DCHECK_CONSTEXPR(x, out, dummy) CHECK_CONSTEXPR(x, out, dummy)
-
-#else // NDEBUG
-
-#define DCHECK(condition) \
- while (false) \
- CHECK(condition)
-
-#define DCHECK_EQ(val1, val2) \
- while (false) \
- CHECK_EQ(val1, val2)
-
-#define DCHECK_NE(val1, val2) \
- while (false) \
- CHECK_NE(val1, val2)
-
-#define DCHECK_LE(val1, val2) \
- while (false) \
- CHECK_LE(val1, val2)
-
-#define DCHECK_LT(val1, val2) \
- while (false) \
- CHECK_LT(val1, val2)
-
-#define DCHECK_GE(val1, val2) \
- while (false) \
- CHECK_GE(val1, val2)
-
-#define DCHECK_GT(val1, val2) \
- while (false) \
- CHECK_GT(val1, val2)
-
-#define DCHECK_STREQ(str1, str2) \
- while (false) \
- CHECK_STREQ(str1, str2)
-
-#define DCHECK_STRNE(str1, str2) \
- while (false) \
- CHECK_STRNE(str1, str2)
-
-#define DCHECK_CONSTEXPR(x, out, dummy) \
- (false && (x)) ? (dummy) :
-
+// DCHECKs are debug variants of CHECKs only enabled in debug builds. Generally CHECK should be
+// used unless profiling identifies a CHECK as being in performance critical code.
+#if defined(NDEBUG)
+static constexpr bool kEnableDChecks = false;
+#else
+static constexpr bool kEnableDChecks = true;
#endif
-#define LOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, -1).stream()
-#define PLOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, errno).stream()
+#define DCHECK(x) if (::art::kEnableDChecks) CHECK(x)
+#define DCHECK_EQ(x, y) if (::art::kEnableDChecks) CHECK_EQ(x, y)
+#define DCHECK_NE(x, y) if (::art::kEnableDChecks) CHECK_NE(x, y)
+#define DCHECK_LE(x, y) if (::art::kEnableDChecks) CHECK_LE(x, y)
+#define DCHECK_LT(x, y) if (::art::kEnableDChecks) CHECK_LT(x, y)
+#define DCHECK_GE(x, y) if (::art::kEnableDChecks) CHECK_GE(x, y)
+#define DCHECK_GT(x, y) if (::art::kEnableDChecks) CHECK_GT(x, y)
+#define DCHECK_STREQ(s1, s2) if (::art::kEnableDChecks) CHECK_STREQ(s1, s2)
+#define DCHECK_STRNE(s1, s2) if (::art::kEnableDChecks) CHECK_STRNE(s1, s2)
+#if defined(NDEBUG)
+#define DCHECK_CONSTEXPR(x, out, dummy)
+#else
+#define DCHECK_CONSTEXPR(x, out, dummy) CHECK_CONSTEXPR(x, out, dummy)
+#endif
-#define LG LOG(INFO)
-
-#define UNIMPLEMENTED(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
-
-#define VLOG_IS_ON(module) UNLIKELY(::art::gLogVerbosity.module)
-#define VLOG(module) if (VLOG_IS_ON(module)) ::art::LogMessage(__FILE__, __LINE__, INFO, -1).stream()
-#define VLOG_STREAM(module) ::art::LogMessage(__FILE__, __LINE__, INFO, -1).stream()
-
-//
-// Implementation details beyond this point.
-//
-
-namespace art {
-
+// Temporary class created to evaluate the LHS and RHS, used with MakeEagerEvaluator to infer the
+// types of LHS and RHS.
template <typename LHS, typename RHS>
struct EagerEvaluator {
EagerEvaluator(LHS l, RHS r) : lhs(l), rhs(r) { }
@@ -156,10 +196,14 @@
RHS rhs;
};
-// We want char*s to be treated as pointers, not strings. If you want them treated like strings,
-// you'd need to use CHECK_STREQ and CHECK_STRNE anyway to compare the characters rather than their
-// addresses. We could express this more succinctly with std::remove_const, but this is quick and
-// easy to understand, and works before we have C++0x. We rely on signed/unsigned warnings to
+// Helper function for CHECK_xx.
+template <typename LHS, typename RHS>
+static inline EagerEvaluator<LHS, RHS> MakeEagerEvaluator(LHS lhs, RHS rhs) {
+ return EagerEvaluator<LHS, RHS>(lhs, rhs);
+}
+
+// Explicitly instantiate EagerEvaluator for pointers so that char*s aren't treated as strings. To
+// compare strings use CHECK_STREQ and CHECK_STRNE. We rely on signed/unsigned warnings to
// protect you against combinations not explicitly listed below.
#define EAGER_PTR_EVALUATOR(T1, T2) \
template <> struct EagerEvaluator<T1, T2> { \
@@ -182,153 +226,34 @@
EAGER_PTR_EVALUATOR(signed char*, const signed char*);
EAGER_PTR_EVALUATOR(signed char*, signed char*);
-template <typename LHS, typename RHS>
-EagerEvaluator<LHS, RHS> MakeEagerEvaluator(LHS lhs, RHS rhs) {
- return EagerEvaluator<LHS, RHS>(lhs, rhs);
-}
+// Data for the log message, not stored in LogMessage to avoid increasing the stack size.
+class LogMessageData;
-// This indirection greatly reduces the stack impact of having
-// lots of checks/logging in a function.
-struct LogMessageData {
- public:
- LogMessageData(const char* file, int line, LogSeverity severity, int error);
- std::ostringstream buffer;
- const char* const file;
- const int line_number;
- const LogSeverity severity;
- const int error;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LogMessageData);
-};
-
+// A LogMessage is a temporarily scoped object used by LOG and the unlikely part of a CHECK. The
+// destructor will abort if the severity is FATAL.
class LogMessage {
public:
- LogMessage(const char* file, int line, LogSeverity severity, int error)
- : data_(new LogMessageData(file, line, severity, error)) {
- }
+ LogMessage(const char* file, unsigned int line, LogSeverity severity, int error);
~LogMessage(); // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_).
- std::ostream& stream() {
- return data_->buffer;
- }
+ // Returns the stream associated with the message, the LogMessage performs output when it goes
+ // out of scope.
+ std::ostream& stream();
+
+ // The routine that performs the actual logging.
+ static void LogLine(const char* file, unsigned int line, LogSeverity severity, const char* msg);
+
+ // A variant of the above for use with little stack.
+ static void LogLineLowStack(const char* file, unsigned int line, LogSeverity severity,
+ const char* msg);
private:
- static void LogLine(const LogMessageData& data, const char*);
-
const std::unique_ptr<LogMessageData> data_;
- friend void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context);
- friend class Mutex;
DISALLOW_COPY_AND_ASSIGN(LogMessage);
};
-// A convenience to allow any class with a "Dump(std::ostream& os)" member function
-// but without an operator<< to be used as if it had an operator<<. Use like this:
-//
-// os << Dumpable<MyType>(my_type_instance);
-//
-template<typename T>
-class Dumpable {
- public:
- explicit Dumpable(T& value) : value_(value) {
- }
-
- void Dump(std::ostream& os) const {
- value_.Dump(os);
- }
-
- private:
- T& value_;
-
- DISALLOW_COPY_AND_ASSIGN(Dumpable);
-};
-
-template<typename T>
-std::ostream& operator<<(std::ostream& os, const Dumpable<T>& rhs) {
- rhs.Dump(os);
- return os;
-}
-
-template<typename T>
-class ConstDumpable {
- public:
- explicit ConstDumpable(const T& value) : value_(value) {
- }
-
- void Dump(std::ostream& os) const {
- value_.Dump(os);
- }
-
- private:
- const T& value_;
-
- DISALLOW_COPY_AND_ASSIGN(ConstDumpable);
-};
-
-template<typename T>
-std::ostream& operator<<(std::ostream& os, const ConstDumpable<T>& rhs) {
- rhs.Dump(os);
- return os;
-}
-
-// Helps you use operator<< in a const char*-like context such as our various 'F' methods with
-// format strings.
-template<typename T>
-class ToStr {
- public:
- explicit ToStr(const T& value) {
- std::ostringstream os;
- os << value;
- s_ = os.str();
- }
-
- const char* c_str() const {
- return s_.c_str();
- }
-
- const std::string& str() const {
- return s_;
- }
-
- private:
- std::string s_;
- DISALLOW_COPY_AND_ASSIGN(ToStr);
-};
-
-// The members of this struct are the valid arguments to VLOG and VLOG_IS_ON in code,
-// and the "-verbose:" command line argument.
-struct LogVerbosity {
- bool class_linker; // Enabled with "-verbose:class".
- bool compiler;
- bool gc;
- bool heap;
- bool jdwp;
- bool jni;
- bool monitor;
- bool profiler;
- bool signals;
- bool startup;
- bool third_party_jni; // Enabled with "-verbose:third-party-jni".
- bool threads;
- bool verifier;
-};
-
-extern LogVerbosity gLogVerbosity;
-
-extern std::vector<std::string> gVerboseMethods;
-
-// Used on fatal exit. Prevents recursive aborts. Allows us to disable
-// some error checking to ensure fatal shutdown makes forward progress.
-extern unsigned int gAborting;
-
-extern void InitLogging(char* argv[]);
-
-extern const char* GetCmdLine();
-extern const char* ProgramInvocationName();
-extern const char* ProgramInvocationShortName();
-
} // namespace art
#endif // ART_RUNTIME_BASE_LOGGING_H_
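
A brief sketch of the macros now declared and documented directly in logging.h, assuming code inside namespace art (as runtime code is); Divide and RemoveFile are hypothetical helpers used only for illustration:

    #include <errno.h>
    #include <unistd.h>
    #include "base/logging.h"

    namespace art {

    int Divide(int a, int b) {
      CHECK_NE(b, 0) << "division by zero requested";  // Always checked; FATAL aborts on failure.
      DCHECK_GE(a, 0);                                 // Compiled out when NDEBUG is defined.
      return a / b;
    }

    void RemoveFile(const char* path) {
      if (unlink(path) == -1) {
        PLOG(WARNING) << "Failed to remove " << path;  // Also appends strerror(errno).
      }
      VLOG(startup) << "removed " << path;             // Only emitted with -verbose:startup.
    }

    }  // namespace art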
diff --git a/runtime/base/logging_android.cc b/runtime/base/logging_android.cc
deleted file mode 100644
index 9b1ac58..0000000
--- a/runtime/base/logging_android.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "logging.h"
-
-#include <unistd.h>
-
-#include <iostream>
-
-#include "base/stringprintf.h"
-#include "cutils/log.h"
-
-namespace art {
-
-static const int kLogSeverityToAndroidLogPriority[] = {
- ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN,
- ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL
-};
-
-void LogMessage::LogLine(const LogMessageData& data, const char* message) {
- const char* tag = ProgramInvocationShortName();
- int priority = kLogSeverityToAndroidLogPriority[data.severity];
- if (priority == ANDROID_LOG_FATAL) {
- LOG_PRI(priority, tag, "%s:%d] %s", data.file, data.line_number, message);
- } else {
- LOG_PRI(priority, tag, "%s", message);
- }
-}
-
-} // namespace art
diff --git a/runtime/base/logging_linux.cc b/runtime/base/logging_linux.cc
deleted file mode 100644
index 0399128..0000000
--- a/runtime/base/logging_linux.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "logging.h"
-
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <cstdio>
-#include <cstring>
-#include <iostream>
-
-#include "base/stringprintf.h"
-#include "utils.h"
-
-namespace art {
-
-void LogMessage::LogLine(const LogMessageData& data, const char* message) {
- char severity = "VDIWEFF"[data.severity];
- fprintf(stderr, "%s %c %5d %5d %s:%d] %s\n",
- ProgramInvocationShortName(), severity, getpid(), ::art::GetTid(),
- data.file, data.line_number, message);
-}
-
-} // namespace art
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index c80d35e..66d6fab 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -41,49 +41,35 @@
#define FINAL
#endif
-// The COMPILE_ASSERT macro can be used to verify that a compile time
-// expression is true. For example, you could use it to verify the
-// size of a static array:
-//
-// COMPILE_ASSERT(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
-// content_type_names_incorrect_size);
-//
-// or to make sure a struct is smaller than a certain size:
-//
-// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
-//
-// The second argument to the macro is the name of the variable. If
-// the expression is false, most compilers will issue a warning/error
-// containing the name of the variable.
-
-template <bool>
-struct CompileAssert {
-};
-
-#define COMPILE_ASSERT(expr, msg) \
- typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] // NOLINT
-
// Declare a friend relationship in a class with a test. Used rather than FRIEND_TEST to avoid
// globally importing gtest/gtest.h into the main ART header files.
#define ART_FRIEND_TEST(test_set_name, individual_test)\
friend class test_set_name##_##individual_test##_Test
-// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions.
-// It goes in the private: declarations in a class.
+// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
+// declarations in a class.
+#if !defined(DISALLOW_COPY_AND_ASSIGN)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete
+#endif
-// A macro to disallow all the implicit constructors, namely the
-// default constructor, copy constructor and operator= functions.
+// A macro to disallow all the implicit constructors, namely the default constructor, copy
+// constructor and operator= functions.
//
-// This should be used in the private: declarations for a class
-// that wants to prevent anyone from instantiating it. This is
-// especially useful for classes containing only static methods.
+// This should be used in the private: declarations for a class that wants to prevent anyone from
+// instantiating it. This is especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
+ TypeName() = delete; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
+// A macro to disallow new and delete operators for a class. It goes in the private: declarations.
+#define DISALLOW_ALLOCATION() \
+ public: \
+ ALWAYS_INLINE void operator delete(void*, size_t) { UNREACHABLE(); } \
+ private: \
+ void* operator new(size_t) = delete
+
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
@@ -183,7 +169,19 @@
#define PURE __attribute__ ((__pure__))
#define WARN_UNUSED __attribute__((warn_unused_result))
-template<typename T> void UNUSED(const T&) {}
+// A deprecated function to call to create a false use of the parameter, for example:
+// int foo(int x) { UNUSED(x); return 10; }
+// to avoid compiler warnings. Going forward we prefer ATTRIBUTE_UNUSED.
+template<typename... T> void UNUSED(const T&...) {}
+
+// An attribute to place on a parameter to a function, for example:
+// int foo(int x ATTRIBUTE_UNUSED) { return 10; }
+// to avoid compiler warnings.
+#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+
+// Define that a position within code is unreachable, for example:
+// int foo () { LOG(FATAL) << "Don't call me"; UNREACHABLE(); }
+// without the UNREACHABLE a return statement would be necessary.
#define UNREACHABLE __builtin_unreachable
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
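
A small sketch of the idioms that replace the removed helpers in macros.h (HandleKind is a hypothetical function used only for illustration): static_assert instead of COMPILE_ASSERT, ATTRIBUTE_UNUSED instead of a call to UNUSED(), and UNREACHABLE() to close a branch that logs fatally:

    #include "base/logging.h"
    #include "base/macros.h"

    namespace art {

    // Direct replacement for COMPILE_ASSERT(expr, msg).
    static_assert(sizeof(void*) >= 4, "pointers must be at least 32-bit");

    int HandleKind(int kind, int context ATTRIBUTE_UNUSED) {
      switch (kind) {
        case 0: return 1;
        case 1: return 2;
        default:
          LOG(FATAL) << "unexpected kind " << kind;
          UNREACHABLE();  // No return statement is needed after the fatal log.
      }
    }

    }  // namespace art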
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index f70db35..c310191 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -21,58 +21,29 @@
#include "mutex.h"
-#define ATRACE_TAG ATRACE_TAG_DALVIK
-
-#include "cutils/trace.h"
-
#include "base/stringprintf.h"
+#include "base/value_object.h"
#include "runtime.h"
#include "thread.h"
-namespace art {
-
-#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
-
#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
+#endif // ART_USE_FUTEXES
+
+#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
+
+namespace art {
+
+#if ART_USE_FUTEXES
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif // ART_USE_FUTEXES
-class ScopedContentionRecorder {
- public:
- ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
- : mutex_(kLogLockContentions ? mutex : NULL),
- blocked_tid_(kLogLockContentions ? blocked_tid : 0),
- owner_tid_(kLogLockContentions ? owner_tid : 0),
- start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
- if (ATRACE_ENABLED()) {
- std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
- mutex->GetName(), owner_tid);
- ATRACE_BEGIN(msg.c_str());
- }
- }
-
- ~ScopedContentionRecorder() {
- ATRACE_END();
- if (kLogLockContentions) {
- uint64_t end_nano_time = NanoTime();
- mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
- }
- }
-
- private:
- BaseMutex* const mutex_;
- const uint64_t blocked_tid_;
- const uint64_t owner_tid_;
- const uint64_t start_nano_time_;
-};
-
static inline uint64_t SafeGetTid(const Thread* self) {
if (self != NULL) {
return static_cast<uint64_t>(self->GetTid());
@@ -158,15 +129,7 @@
// Add as an extra reader.
done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
} else {
- // Owner holds it exclusively, hang up.
- ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
- ++num_pending_readers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
- if (errno != EAGAIN) {
- PLOG(FATAL) << "futex wait failed for " << name_;
- }
- }
- --num_pending_readers_;
+ HandleSharedLockContention(self, cur_state);
}
} while (!done);
#else
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 70b6f7e..4957988 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -19,8 +19,12 @@
#include <errno.h>
#include <sys/time.h>
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "atomic.h"
#include "base/logging.h"
+#include "base/value_object.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -53,7 +57,6 @@
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
-Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
@@ -106,6 +109,36 @@
const BaseMutex* const mutex_;
};
+// Scoped class that generates events at the beginning and end of lock contention.
+class ScopedContentionRecorder FINAL : public ValueObject {
+ public:
+ ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
+ : mutex_(kLogLockContentions ? mutex : NULL),
+ blocked_tid_(kLogLockContentions ? blocked_tid : 0),
+ owner_tid_(kLogLockContentions ? owner_tid : 0),
+ start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
+ if (ATRACE_ENABLED()) {
+ std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
+ mutex->GetName(), owner_tid);
+ ATRACE_BEGIN(msg.c_str());
+ }
+ }
+
+ ~ScopedContentionRecorder() {
+ ATRACE_END();
+ if (kLogLockContentions) {
+ uint64_t end_nano_time = NanoTime();
+ mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
+ }
+ }
+
+ private:
+ BaseMutex* const mutex_;
+ const uint64_t blocked_tid_;
+ const uint64_t owner_tid_;
+ const uint64_t start_nano_time_;
+};
+
BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
@@ -168,7 +201,7 @@
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
// We expect waits to happen while holding the thread list suspend thread lock.
- if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
+ if (held_mutex != NULL) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -428,9 +461,9 @@
if (this != Locks::logging_lock_) {
LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
} else {
- LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
- LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
- cur_state, name_).c_str());
+ LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL,
+ StringPrintf("Unexpected state_ %d in unlock for %s",
+ cur_state, name_).c_str());
_exit(1);
}
}
@@ -612,6 +645,20 @@
}
#endif
+#if ART_USE_FUTEXES
+void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
+ // Owner holds it exclusively, hang up.
+ ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
+ ++num_pending_readers_;
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (errno != EAGAIN) {
+ PLOG(FATAL) << "futex wait failed for " << name_;
+ }
+ }
+ --num_pending_readers_;
+}
+#endif
+
bool ReaderWriterMutex::SharedTryLock(Thread* self) {
DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
@@ -680,7 +727,7 @@
CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
// Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
- CHECK_MUTEX_CALL(pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC));
+ CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
@@ -870,16 +917,14 @@
DCHECK(mutator_lock_ != nullptr);
DCHECK(profiler_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
- DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
- LockLevel current_lock_level = kThreadListSuspendThreadLock;
- DCHECK(thread_list_suspend_thread_lock_ == nullptr);
- thread_list_suspend_thread_lock_ =
- new Mutex("thread list suspend thread by .. lock", current_lock_level);
+ LockLevel current_lock_level = kInstrumentEntrypointsLock;
+ DCHECK(instrument_entrypoints_lock_ == nullptr);
+ instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
if (new_level >= current_lock_level) { \
@@ -890,10 +935,6 @@
} \
current_lock_level = new_level;
- UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock);
- DCHECK(instrument_entrypoints_lock_ == nullptr);
- instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
DCHECK(mutator_lock_ == nullptr);
mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
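For context, HandleSharedLockContention() added above is the out-of-line slow path for reader acquisition; a minimal sketch of how the inline fast path in mutex-inl.h (not shown in this diff) would call it — assumed shape only, not part of the patch:

// Assumed shape of the inline fast path that falls back to the new slow path.
inline void ReaderWriterMutex::SharedLock(Thread* self) {
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state >= 0)) {
      // Not held exclusively: try to bump the reader count.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Held exclusively: block in the out-of-line contention handler.
      HandleSharedLockContention(self, cur_state);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
}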
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 516fa07..9c93cc6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -101,7 +101,6 @@
kHeapBitmapLock,
kMutatorLock,
kInstrumentEntrypointsLock,
- kThreadListSuspendThreadLock,
kZygoteCreationLock,
kLockLevelCount // Must come last.
@@ -361,6 +360,9 @@
private:
#if ART_USE_FUTEXES
+ // Out-of-inline path for handling contention for a SharedLock.
+ void HandleSharedLockContention(Thread* self, int32_t cur_state);
+
// -1 implies held exclusive, +ve shared held by state_ many owners.
AtomicInteger state_;
// Exclusive owner. Modification guarded by this mutex.
@@ -432,7 +434,7 @@
DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
-#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)
+#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
@@ -454,7 +456,7 @@
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
-#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name)
+#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
@@ -476,24 +478,15 @@
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
-#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)
+#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
// Global mutexes corresponding to the levels above.
class Locks {
public:
static void Init();
- // There's a potential race for two threads to try to suspend each other and for both of them
- // to succeed and get blocked becoming runnable. This lock ensures that only one thread is
- // requesting suspension of another at any time. As the the thread list suspend thread logic
- // transitions to runnable, if the current thread were tried to be suspended then this thread
- // would block holding this lock until it could safely request thread suspension of the other
- // thread without that thread having a suspension request against this thread. This avoids a
- // potential deadlock cycle.
- static Mutex* thread_list_suspend_thread_lock_;
-
// Guards allocation entrypoint instrumenting.
- static Mutex* instrument_entrypoints_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);
+ static Mutex* instrument_entrypoints_lock_;
// The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
// mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
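The COMPILE_ASSERT-to-static_assert switch keeps the same guard against unnamed scoped locks; an illustrative (non-patch) snippet of the mistake it catches:

// Illustrative only: the macro turns a forgotten variable name into a hard
// compile error instead of an unnamed temporary that unlocks immediately.
MutexLock mu(Thread::Current(), *Locks::trace_lock_);   // correct: named scoped lock
// MutexLock (*Locks::trace_lock_);   // expands to static_assert(0, "...") -> compile error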
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index bf091d0..0e93eee 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -27,6 +27,9 @@
bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
while (true) {
+ if (file_.get() != nullptr) {
+ UNUSED(file_->FlushCloseOrErase()); // Ignore result.
+ }
file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
if (file_.get() == NULL) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
@@ -59,7 +62,7 @@
}
bool ScopedFlock::Init(File* file, std::string* error_msg) {
- file_.reset(new File(dup(file->Fd())));
+ file_.reset(new File(dup(file->Fd()), true));
if (file_->Fd() == -1) {
file_.reset();
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
@@ -89,6 +92,9 @@
if (file_.get() != NULL) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
CHECK_EQ(0, flock_result);
+ if (file_->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Could not close scoped file lock file.";
+ }
}
}
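For context, a typical (illustrative) use of ScopedFlock::Init; with this change the retry loop flush-closes any previously opened file before re-opening, and the destructor now flush-closes the locked file as well. The path below is hypothetical:

// Illustrative only; path and logging are hypothetical.
ScopedFlock flock;
std::string error_msg;
if (!flock.Init("/data/dalvik-cache/example.oat", &error_msg)) {
  LOG(WARNING) << "Could not take file lock: " << error_msg;
  return false;
}
// The locked file is usable here; lock and descriptor are released (and now
// properly flush-closed) when 'flock' goes out of scope.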
diff --git a/runtime/base/stringpiece.cc b/runtime/base/stringpiece.cc
index 824ee48..2570bad 100644
--- a/runtime/base/stringpiece.cc
+++ b/runtime/base/stringpiece.cc
@@ -16,7 +16,7 @@
#include "stringpiece.h"
-#include <iostream>
+#include <ostream>
#include <utility>
#include "logging.h"
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index b8de308..d793bb6 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -67,8 +67,8 @@
ptr_ = nullptr;
length_ = 0;
}
- void set(const char* data, size_type len) {
- ptr_ = data;
+ void set(const char* data_in, size_type len) {
+ ptr_ = data_in;
length_ = len;
}
void set(const char* str) {
@@ -79,8 +79,8 @@
length_ = 0;
}
}
- void set(const void* data, size_type len) {
- ptr_ = reinterpret_cast<const char*>(data);
+ void set(const void* data_in, size_type len) {
+ ptr_ = reinterpret_cast<const char*>(data_in);
length_ = len;
}
diff --git a/runtime/base/to_str.h b/runtime/base/to_str.h
new file mode 100644
index 0000000..6b1c84c
--- /dev/null
+++ b/runtime/base/to_str.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_TO_STR_H_
+#define ART_RUNTIME_BASE_TO_STR_H_
+
+#include <sstream>
+
+namespace art {
+
+// Helps you use operator<< in a const char*-like context such as our various 'F' methods with
+// format strings.
+template<typename T>
+class ToStr {
+ public:
+ explicit ToStr(const T& value) {
+ std::ostringstream os;
+ os << value;
+ s_ = os.str();
+ }
+
+ const char* c_str() const {
+ return s_.c_str();
+ }
+
+ const std::string& str() const {
+ return s_;
+ }
+
+ private:
+ std::string s_;
+ DISALLOW_COPY_AND_ASSIGN(ToStr);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_TO_STR_H_
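A short usage sketch for the new helper (illustrative only; assumes the usual art namespace, base/stringprintf.h, and the existing operator<< for LockLevel from base/mutex.h):

#include "base/mutex.h"
#include "base/stringprintf.h"
#include "base/to_str.h"

// Bridges an operator<<-printable type into a printf-style format string.
std::string DescribeLevel(LockLevel level) {
  return StringPrintf("unexpected lock level %s", ToStr<LockLevel>(level).c_str());
}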
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index f29a7ec..6e5e7a1 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -14,28 +14,68 @@
* limitations under the License.
*/
-#include "base/logging.h"
#include "base/unix_file/fd_file.h"
+
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include "base/logging.h"
+
namespace unix_file {
-FdFile::FdFile() : fd_(-1), auto_close_(true) {
+FdFile::FdFile() : guard_state_(GuardState::kClosed), fd_(-1), auto_close_(true) {
}
-FdFile::FdFile(int fd) : fd_(fd), auto_close_(true) {
+FdFile::FdFile(int fd, bool check_usage)
+ : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
+ fd_(fd), auto_close_(true) {
}
-FdFile::FdFile(int fd, const std::string& path) : fd_(fd), file_path_(path), auto_close_(true) {
+FdFile::FdFile(int fd, const std::string& path, bool check_usage)
+ : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
+ fd_(fd), file_path_(path), auto_close_(true) {
CHECK_NE(0U, path.size());
}
FdFile::~FdFile() {
+ if (kCheckSafeUsage && (guard_state_ < GuardState::kNoCheck)) {
+ if (guard_state_ < GuardState::kFlushed) {
+ LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly flushed before destruction.";
+ }
+ if (guard_state_ < GuardState::kClosed) {
+ LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly closed before destruction.";
+ }
+ CHECK_GE(guard_state_, GuardState::kClosed);
+ }
if (auto_close_ && fd_ != -1) {
- Close();
+ if (Close() != 0) {
+ PLOG(::art::WARNING) << "Failed to close file " << file_path_;
+ }
+ }
+}
+
+void FdFile::moveTo(GuardState target, GuardState warn_threshold, const char* warning) {
+ if (kCheckSafeUsage) {
+ if (guard_state_ < GuardState::kNoCheck) {
+ if (warn_threshold < GuardState::kNoCheck && guard_state_ >= warn_threshold) {
+ LOG(::art::ERROR) << warning;
+ }
+ guard_state_ = target;
+ }
+ }
+}
+
+void FdFile::moveUp(GuardState target, const char* warning) {
+ if (kCheckSafeUsage) {
+ if (guard_state_ < GuardState::kNoCheck) {
+ if (guard_state_ < target) {
+ guard_state_ = target;
+ } else if (target < guard_state_) {
+ LOG(::art::ERROR) << warning;
+ }
+ }
}
}
@@ -54,11 +94,28 @@
return false;
}
file_path_ = path;
+ static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
+ if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
+ // Start in the base state (not flushed, not closed).
+ guard_state_ = GuardState::kBase;
+ } else {
+ // We are not concerned with read-only files. In that case, proper flushing and closing is
+ // not important.
+ guard_state_ = GuardState::kNoCheck;
+ }
return true;
}
int FdFile::Close() {
int result = TEMP_FAILURE_RETRY(close(fd_));
+
+ // Test here, so the file is closed and not leaked.
+ if (kCheckSafeUsage) {
+ CHECK_GE(guard_state_, GuardState::kFlushed) << "File " << file_path_
+ << " has not been flushed before closing.";
+ moveUp(GuardState::kClosed, nullptr);
+ }
+
if (result == -1) {
return -errno;
} else {
@@ -74,6 +131,7 @@
#else
int rc = TEMP_FAILURE_RETRY(fsync(fd_));
#endif
+ moveUp(GuardState::kFlushed, "Flushing closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -92,6 +150,7 @@
#else
int rc = TEMP_FAILURE_RETRY(ftruncate(fd_, new_length));
#endif
+ moveTo(GuardState::kBase, GuardState::kClosed, "Truncating closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -107,6 +166,7 @@
#else
int rc = TEMP_FAILURE_RETRY(pwrite(fd_, buf, byte_count, offset));
#endif
+ moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
return (rc == -1) ? -errno : rc;
}
@@ -135,6 +195,7 @@
bool FdFile::WriteFully(const void* buffer, size_t byte_count) {
const char* ptr = static_cast<const char*>(buffer);
+ moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file.");
while (byte_count > 0) {
ssize_t bytes_written = TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count));
if (bytes_written == -1) {
@@ -146,4 +207,38 @@
return true;
}
+void FdFile::Erase() {
+ TEMP_FAILURE_RETRY(SetLength(0));
+ TEMP_FAILURE_RETRY(Flush());
+ TEMP_FAILURE_RETRY(Close());
+}
+
+int FdFile::FlushCloseOrErase() {
+ int flush_result = TEMP_FAILURE_RETRY(Flush());
+ if (flush_result != 0) {
+ LOG(::art::ERROR) << "CloseOrErase failed while flushing a file.";
+ Erase();
+ return flush_result;
+ }
+ int close_result = TEMP_FAILURE_RETRY(Close());
+ if (close_result != 0) {
+ LOG(::art::ERROR) << "CloseOrErase failed while closing a file.";
+ Erase();
+ return close_result;
+ }
+ return 0;
+}
+
+int FdFile::FlushClose() {
+ int flush_result = TEMP_FAILURE_RETRY(Flush());
+ if (flush_result != 0) {
+ LOG(::art::ERROR) << "FlushClose failed while flushing a file.";
+ }
+ int close_result = TEMP_FAILURE_RETRY(Close());
+ if (close_result != 0) {
+ LOG(::art::ERROR) << "FlushClose failed while closing a file.";
+ }
+ return (flush_result != 0) ? flush_result : close_result;
+}
+
} // namespace unix_file
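A minimal sketch (not part of the patch) of the lifecycle the new guard state enforces under kCheckSafeUsage; the file name is only illustrative:

#include <fcntl.h>
#include "base/unix_file/fd_file.h"

void WriteExample() {
  unix_file::FdFile file;
  if (!file.Open("/tmp/example.tmp", O_CREAT | O_RDWR, 0644)) {  // guard -> kBase
    return;
  }
  const char data[] = "hello";
  if (!file.WriteFully(data, sizeof(data))) {  // write keeps guard at kBase
    file.Erase();                              // truncate, flush, close
    return;
  }
  // Flush then close; on failure the contents are erased and the error is
  // logged. Destroying the object before reaching kClosed would LOG(ERROR)
  // and CHECK-fail under kCheckSafeUsage.
  if (file.FlushCloseOrErase() != 0) {
    // Error already handled by FlushCloseOrErase().
  }
}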
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index 01f4ca2..8db2ee4 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -24,6 +24,9 @@
namespace unix_file {
+// If true, check whether Flush and Close are called before destruction.
+static constexpr bool kCheckSafeUsage = true;
+
// A RandomAccessFile implementation backed by a file descriptor.
//
// Not thread safe.
@@ -32,8 +35,8 @@
FdFile();
// Creates an FdFile using the given file descriptor. Takes ownership of the
// file descriptor. (Use DisableAutoClose to retain ownership.)
- explicit FdFile(int fd);
- explicit FdFile(int fd, const std::string& path);
+ explicit FdFile(int fd, bool checkUsage);
+ explicit FdFile(int fd, const std::string& path, bool checkUsage);
// Destroys an FdFile, closing the file descriptor if Close hasn't already
// been called. (If you care about the return value of Close, call it
@@ -47,12 +50,21 @@
bool Open(const std::string& file_path, int flags, mode_t mode);
// RandomAccessFile API.
- virtual int Close();
- virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
- virtual int SetLength(int64_t new_length);
+ virtual int Close() WARN_UNUSED;
+ virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const WARN_UNUSED;
+ virtual int SetLength(int64_t new_length) WARN_UNUSED;
virtual int64_t GetLength() const;
- virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
- virtual int Flush();
+ virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset) WARN_UNUSED;
+ virtual int Flush() WARN_UNUSED;
+
+ // Short for SetLength(0); Flush(); Close();
+ void Erase();
+
+ // Try to Flush(), then try to Close(); if either fails, call Erase().
+ int FlushCloseOrErase() WARN_UNUSED;
+
+ // Try to Flush() and Close(). Attempts both, but returns the first error.
+ int FlushClose() WARN_UNUSED;
// Bonus API.
int Fd() const;
@@ -61,8 +73,35 @@
return file_path_;
}
void DisableAutoClose();
- bool ReadFully(void* buffer, size_t byte_count);
- bool WriteFully(const void* buffer, size_t byte_count);
+ bool ReadFully(void* buffer, size_t byte_count) WARN_UNUSED;
+ bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED;
+
+ // This enum is public so that we can define the << operator over it.
+ enum class GuardState {
+ kBase, // Base, file has not been flushed or closed.
+ kFlushed, // File has been flushed, but not closed.
+ kClosed, // File has been flushed and closed.
+ kNoCheck // Do not check for the current file instance.
+ };
+
+ protected:
+ // If the guard state indicates checking (!=kNoCheck), go to the target state "target". Print the
+ // given warning if the current state is or exceeds warn_threshold.
+ void moveTo(GuardState target, GuardState warn_threshold, const char* warning);
+
+ // If the guard state indicates checking (<kNoCheck) and is below the target state "target", go
+ // to "target". If the current state is higher (excluding kNoCheck) than the target state, print
+ // the warning.
+ void moveUp(GuardState target, const char* warning);
+
+ // Forcefully sets the state to the given one. This can overwrite kNoCheck.
+ void resetGuard(GuardState new_state) {
+ if (kCheckSafeUsage) {
+ guard_state_ = new_state;
+ }
+ }
+
+ GuardState guard_state_;
private:
int fd_;
@@ -72,6 +111,8 @@
DISALLOW_COPY_AND_ASSIGN(FdFile);
};
+std::ostream& operator<<(std::ostream& os, const FdFile::GuardState& kind);
+
} // namespace unix_file
#endif // ART_RUNTIME_BASE_UNIX_FILE_FD_FILE_H_
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 3481f2f..a7e5b96 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -24,7 +24,7 @@
class FdFileTest : public RandomAccessFileTest {
protected:
virtual RandomAccessFile* MakeTestFile() {
- return new FdFile(fileno(tmpfile()));
+ return new FdFile(fileno(tmpfile()), false);
}
};
@@ -53,6 +53,7 @@
ASSERT_TRUE(file.Open(good_path, O_CREAT | O_WRONLY));
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
+ EXPECT_EQ(0, file.Flush());
EXPECT_EQ(0, file.Close());
EXPECT_EQ(-1, file.Fd());
EXPECT_FALSE(file.IsOpened());
@@ -60,7 +61,7 @@
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
- file.Close();
+ ASSERT_EQ(file.Close(), 0);
ASSERT_EQ(unlink(good_path.c_str()), 0);
}
diff --git a/runtime/base/unix_file/mapped_file.cc b/runtime/base/unix_file/mapped_file.cc
deleted file mode 100644
index 63927b1..0000000
--- a/runtime/base/unix_file/mapped_file.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/unix_file/mapped_file.h"
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <algorithm>
-#include <string>
-
-namespace unix_file {
-
-MappedFile::~MappedFile() {
-}
-
-int MappedFile::Close() {
- if (IsMapped()) {
- Unmap();
- }
- return FdFile::Close();
-}
-
-bool MappedFile::MapReadOnly() {
- CHECK(IsOpened());
- CHECK(!IsMapped());
- struct stat st;
- int result = TEMP_FAILURE_RETRY(fstat(Fd(), &st));
- if (result == -1) {
- PLOG(WARNING) << "Failed to stat file '" << GetPath() << "'";
- return false;
- }
- file_size_ = st.st_size;
- do {
- mapped_file_ = mmap(NULL, file_size_, PROT_READ, MAP_PRIVATE, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadOnly;
- return true;
-}
-
-bool MappedFile::MapReadWrite(int64_t file_size) {
- CHECK(IsOpened());
- CHECK(!IsMapped());
-#ifdef __linux__
- int result = TEMP_FAILURE_RETRY(ftruncate64(Fd(), file_size));
-#else
- int result = TEMP_FAILURE_RETRY(ftruncate(Fd(), file_size));
-#endif
- if (result == -1) {
- PLOG(ERROR) << "Failed to truncate file '" << GetPath()
- << "' to size " << file_size;
- return false;
- }
- file_size_ = file_size;
- do {
- mapped_file_ =
- mmap(NULL, file_size_, PROT_READ | PROT_WRITE, MAP_SHARED, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadWrite;
- return true;
-}
-
-bool MappedFile::Unmap() {
- CHECK(IsMapped());
- int result = TEMP_FAILURE_RETRY(munmap(mapped_file_, file_size_));
- if (result == -1) {
- PLOG(WARNING) << "Failed unmap file '" << GetPath() << "' of size "
- << file_size_;
- return false;
- } else {
- mapped_file_ = NULL;
- file_size_ = -1;
- return true;
- }
-}
-
-int64_t MappedFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
- if (IsMapped()) {
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t read_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (read_size > 0) {
- memcpy(buf, data() + offset, read_size);
- }
- return read_size;
- } else {
- return FdFile::Read(buf, byte_count, offset);
- }
-}
-
-int MappedFile::SetLength(int64_t new_length) {
- CHECK(!IsMapped());
- return FdFile::SetLength(new_length);
-}
-
-int64_t MappedFile::GetLength() const {
- if (IsMapped()) {
- return file_size_;
- } else {
- return FdFile::GetLength();
- }
-}
-
-int MappedFile::Flush() {
- int rc = IsMapped() ? TEMP_FAILURE_RETRY(msync(mapped_file_, file_size_, 0)) : FdFile::Flush();
- return rc == -1 ? -errno : 0;
-}
-
-int64_t MappedFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
- if (IsMapped()) {
- CHECK_EQ(kMapReadWrite, map_mode_);
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t write_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (write_size > 0) {
- memcpy(data() + offset, buf, write_size);
- }
- return write_size;
- } else {
- return FdFile::Write(buf, byte_count, offset);
- }
-}
-
-int64_t MappedFile::size() const {
- return GetLength();
-}
-
-bool MappedFile::IsMapped() const {
- return mapped_file_ != NULL && mapped_file_ != MAP_FAILED;
-}
-
-char* MappedFile::data() const {
- CHECK(IsMapped());
- return static_cast<char*>(mapped_file_);
-}
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/mapped_file.h b/runtime/base/unix_file/mapped_file.h
deleted file mode 100644
index 73056e9..0000000
--- a/runtime/base/unix_file/mapped_file.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-#define ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-
-#include <fcntl.h>
-#include <string>
-#include "base/unix_file/fd_file.h"
-
-namespace unix_file {
-
-// Random access file which handles an mmap(2), munmap(2) pair in C++
-// RAII style. When a file is mmapped, the random access file
-// interface accesses the mmapped memory directly; otherwise, the
-// standard file I/O is used. Whenever a function fails, it returns
-// false and errno is set to the corresponding error code.
-class MappedFile : public FdFile {
- public:
- // File modes used in Open().
- enum FileMode {
-#ifdef __linux__
- kReadOnlyMode = O_RDONLY | O_LARGEFILE,
- kReadWriteMode = O_CREAT | O_RDWR | O_LARGEFILE,
-#else
- kReadOnlyMode = O_RDONLY,
- kReadWriteMode = O_CREAT | O_RDWR,
-#endif
- };
-
- MappedFile() : FdFile(), file_size_(-1), mapped_file_(NULL) {
- }
- // Creates a MappedFile using the given file descriptor. Takes ownership of
- // the file descriptor.
- explicit MappedFile(int fd) : FdFile(fd), file_size_(-1), mapped_file_(NULL) {
- }
-
- // Unmaps and closes the file if needed.
- virtual ~MappedFile();
-
- // Maps an opened file to memory in the read-only mode.
- bool MapReadOnly();
-
- // Maps an opened file to memory in the read-write mode. Before the
- // file is mapped, it is truncated to 'file_size' bytes.
- bool MapReadWrite(int64_t file_size);
-
- // Unmaps a mapped file so that, e.g., SetLength() may be invoked.
- bool Unmap();
-
- // RandomAccessFile API.
- // The functions below require that the file is open, but it doesn't
- // have to be mapped.
- virtual int Close();
- virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
- // SetLength() requires that the file is not mmapped.
- virtual int SetLength(int64_t new_length);
- virtual int64_t GetLength() const;
- virtual int Flush();
- // Write() requires that, if the file is mmapped, it is mmapped in
- // the read-write mode. Writes past the end of file are discarded.
- virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
-
- // A convenience method equivalent to GetLength().
- int64_t size() const;
-
- // Returns true if the file has been mmapped.
- bool IsMapped() const;
-
- // Returns a pointer to the start of the memory mapping once the
- // file is successfully mapped; crashes otherwise.
- char* data() const;
-
- private:
- enum MapMode {
- kMapReadOnly = 1,
- kMapReadWrite = 2,
- };
-
- mutable int64_t file_size_; // May be updated in GetLength().
- void* mapped_file_;
- MapMode map_mode_;
-
- DISALLOW_COPY_AND_ASSIGN(MappedFile);
-};
-
-} // namespace unix_file
-
-#endif // ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
deleted file mode 100644
index 59334d4..0000000
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/mapped_file.h"
-#include "base/logging.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/random_access_file_test.h"
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/unix_file/string_file.h"
-#include "gtest/gtest.h"
-
-namespace unix_file {
-
-class MappedFileTest : public RandomAccessFileTest {
- protected:
- MappedFileTest() : kContent("some content") {
- }
-
- void SetUp() {
- RandomAccessFileTest::SetUp();
-
- good_path_ = GetTmpPath("some-file.txt");
- int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
- FdFile dst(fd);
-
- StringFile src;
- src.Assign(kContent);
-
- ASSERT_TRUE(CopyFile(src, &dst));
- }
-
- void TearDown() {
- ASSERT_EQ(unlink(good_path_.c_str()), 0);
-
- RandomAccessFileTest::TearDown();
- }
-
- virtual RandomAccessFile* MakeTestFile() {
- TEMP_FAILURE_RETRY(truncate(good_path_.c_str(), 0));
- MappedFile* f = new MappedFile;
- CHECK(f->Open(good_path_, MappedFile::kReadWriteMode));
- return f;
- }
-
- const std::string kContent;
- std::string good_path_;
-};
-
-TEST_F(MappedFileTest, OkayToNotUse) {
- MappedFile file;
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
- EXPECT_FALSE(file.IsMapped());
-}
-
-TEST_F(MappedFileTest, OpenClose) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- EXPECT_EQ(0, file.Close());
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
-}
-
-TEST_F(MappedFileTest, OpenFdClose) {
- FILE* f = tmpfile();
- ASSERT_TRUE(f != NULL);
- MappedFile file(fileno(f));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(0, file.Close());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), file.size()));
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadWrite(1));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(1, file.size());
- ASSERT_TRUE(file.data());
- EXPECT_EQ(kContent[0], *file.data());
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanWriteNewData) {
- const std::string new_path(GetTmpPath("new-file.txt"));
- ASSERT_EQ(-1, unlink(new_path.c_str()));
- ASSERT_EQ(ENOENT, errno);
-
- MappedFile file;
- ASSERT_TRUE(file.Open(new_path, MappedFile::kReadWriteMode));
- EXPECT_TRUE(file.MapReadWrite(kContent.size()));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- memcpy(file.data(), kContent.c_str(), kContent.size());
- EXPECT_EQ(0, file.Close());
- EXPECT_FALSE(file.IsMapped());
-
- FdFile new_file(TEMP_FAILURE_RETRY(open(new_path.c_str(), O_RDONLY)));
- StringFile buffer;
- ASSERT_TRUE(CopyFile(new_file, &buffer));
- EXPECT_EQ(kContent, buffer.ToStringPiece());
- EXPECT_EQ(0, unlink(new_path.c_str()));
-}
-
-TEST_F(MappedFileTest, FileMustExist) {
- const std::string bad_path(GetTmpPath("does-not-exist.txt"));
- MappedFile file;
- EXPECT_FALSE(file.Open(bad_path, MappedFile::kReadOnlyMode));
- EXPECT_EQ(-1, file.Fd());
-}
-
-TEST_F(MappedFileTest, FileMustBeWritable) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, RemappingAllowedUntilSuccess) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, ResizeMappedFile) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_TRUE(file.Unmap());
- EXPECT_TRUE(file.MapReadWrite(20));
- EXPECT_EQ(20, file.GetLength());
- EXPECT_EQ(0, file.Flush());
- EXPECT_TRUE(file.Unmap());
- EXPECT_EQ(0, file.Flush());
- EXPECT_EQ(0, file.SetLength(5));
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_EQ(5, file.GetLength());
-}
-
-TEST_F(MappedFileTest, ReadNotMapped) {
- TestRead();
-}
-
-TEST_F(MappedFileTest, SetLengthNotMapped) {
- TestSetLength();
-}
-
-TEST_F(MappedFileTest, WriteNotMapped) {
- TestWrite();
-}
-
-TEST_F(MappedFileTest, ReadMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, ReadMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, WriteMappedReadWrite) {
- TEMP_FAILURE_RETRY(unlink(good_path_.c_str()));
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
-
- // Can't write to a negative offset.
- EXPECT_EQ(-EINVAL, file.Write(kContent.c_str(), 0, -123));
-
- // A zero-length write is a no-op.
- EXPECT_EQ(0, file.Write(kContent.c_str(), 0, 0));
- // But the file size is as given when mapped.
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.GetLength()));
-
- // Data written past the end are discarded.
- EXPECT_EQ(kContent.size() - 1,
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 1)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data() + 1, kContent.size() - 1));
-
- // Data can be overwritten.
- EXPECT_EQ(kContent.size(),
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 0)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), kContent.size()));
-}
-
-#if 0 // death tests don't work on android yet
-
-class MappedFileDeathTest : public MappedFileTest {};
-
-TEST_F(MappedFileDeathTest, MustMapBeforeUse) {
- MappedFile file;
- EXPECT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_DEATH(file.data(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_DEATH(file.MapReadOnly(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_DEATH(file.MapReadWrite(10), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_EQ(kContent.size(), file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, WriteMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- char buf[10];
- EXPECT_DEATH(file.Write(buf, 0, 0), ".*");
-}
-
-#endif
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/null_file.cc b/runtime/base/unix_file/null_file.cc
index 050decb..322c25a 100644
--- a/runtime/base/unix_file/null_file.cc
+++ b/runtime/base/unix_file/null_file.cc
@@ -33,7 +33,8 @@
return 0;
}
-int64_t NullFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
+int64_t NullFile::Read(char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) const {
if (offset < 0) {
return -EINVAL;
}
@@ -51,7 +52,8 @@
return 0;
}
-int64_t NullFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
+int64_t NullFile::Write(const char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) {
if (offset < 0) {
return -EINVAL;
}
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 0002433..e7ace4c 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -76,6 +76,8 @@
ASSERT_EQ(content.size(), static_cast<uint64_t>(file->Write(content.data(), content.size(), 0)));
TestReadContent(content, file.get());
+
+ CleanUp(file.get());
}
void TestReadContent(const std::string& content, RandomAccessFile* file) {
@@ -131,6 +133,8 @@
ASSERT_EQ(new_length, file->GetLength());
ASSERT_TRUE(ReadString(file.get(), &new_content));
ASSERT_EQ('\0', new_content[new_length - 1]);
+
+ CleanUp(file.get());
}
void TestWrite() {
@@ -163,6 +167,11 @@
ASSERT_EQ(file->GetLength(), new_length);
ASSERT_TRUE(ReadString(file.get(), &new_content));
ASSERT_EQ(std::string("hello\0hello", new_length), new_content);
+
+ CleanUp(file.get());
+ }
+
+ virtual void CleanUp(RandomAccessFile* file ATTRIBUTE_UNUSED) {
}
protected:
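The new CleanUp() hook lets fixtures release test files explicitly; a hypothetical fixture (not in this patch) that hands out checked FdFile instances could satisfy the flush-before-close guard like this:

// Hypothetical only: a fixture using check_usage=true FdFiles would need to
// flush and close them in CleanUp() to avoid the destructor CHECK.
class CheckedFdFileTest : public RandomAccessFileTest {
 protected:
  virtual RandomAccessFile* MakeTestFile() {
    return new FdFile(fileno(tmpfile()), true);  // check_usage enabled
  }
  virtual void CleanUp(RandomAccessFile* file) {
    ASSERT_EQ(0, file->Flush());
    ASSERT_EQ(0, file->Close());
  }
};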
diff --git a/runtime/base/unix_file/random_access_file_utils_test.cc b/runtime/base/unix_file/random_access_file_utils_test.cc
index 6317922..9457d22 100644
--- a/runtime/base/unix_file/random_access_file_utils_test.cc
+++ b/runtime/base/unix_file/random_access_file_utils_test.cc
@@ -37,14 +37,14 @@
}
TEST_F(RandomAccessFileUtilsTest, BadSrc) {
- FdFile src(-1);
+ FdFile src(-1, false);
StringFile dst;
ASSERT_FALSE(CopyFile(src, &dst));
}
TEST_F(RandomAccessFileUtilsTest, BadDst) {
StringFile src;
- FdFile dst(-1);
+ FdFile dst(-1, false);
// We need some source content to trigger a write.
// Copying an empty file is a no-op.
diff --git a/runtime/base/value_object.h b/runtime/base/value_object.h
index ee0e2a0..8c752a9 100644
--- a/runtime/base/value_object.h
+++ b/runtime/base/value_object.h
@@ -17,19 +17,13 @@
#ifndef ART_RUNTIME_BASE_VALUE_OBJECT_H_
#define ART_RUNTIME_BASE_VALUE_OBJECT_H_
-#include "base/logging.h"
+#include "base/macros.h"
namespace art {
class ValueObject {
- public:
- void* operator new(size_t size) {
- LOG(FATAL) << "UNREACHABLE";
- abort();
- }
- void operator delete(void*, size_t) {
- LOG(FATAL) << "UNREACHABLE";
- }
+ private:
+ DISALLOW_ALLOCATION();
};
} // namespace art
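DISALLOW_ALLOCATION() comes from base/macros.h (not shown in this diff) and replaces the hand-written fatal operator new/delete; its effect is roughly equivalent to deleting heap allocation for the class. Illustrative only, the real macro body may differ:

// Roughly what the macro achieves; see base/macros.h for the real definition.
class StackOnlyExample {
 private:
  void* operator new(size_t) = delete;     // no heap instances
  void* operator new[](size_t) = delete;   // no heap arrays either
};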
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index fec1824..fe5b765 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -14,18 +14,20 @@
* limitations under the License.
*/
-#include "jni_internal.h"
+#include "check_jni.h"
#include <sys/mman.h>
#include <zlib.h>
#include "base/logging.h"
+#include "base/to_str.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "field_helper.h"
#include "gc/space/space.h"
#include "java_vm_ext.h"
+#include "jni_internal.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -511,7 +513,7 @@
return true;
}
- bool CheckReferenceKind(IndirectRefKind expected_kind, JavaVMExt* vm, Thread* self, jobject obj) {
+ bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
found_kind = GetIndirectRefKind(obj);
@@ -2397,7 +2399,7 @@
}
if (sc.Check(soa, false, "L", &result)) {
DCHECK_EQ(IsSameObject(env, obj, result.L), JNI_TRUE);
- DCHECK(sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), result.L));
+ DCHECK(sc.CheckReferenceKind(kind, soa.Self(), result.L));
return result.L;
}
}
@@ -2409,7 +2411,7 @@
ScopedCheck sc(kFlag_ExcepOkay, function_name);
JniValueType args[2] = {{.E = env}, {.L = obj}};
sc.Check(soa, true, "EL", args);
- if (sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), obj)) {
+ if (sc.CheckReferenceKind(kind, soa.Self(), obj)) {
JniValueType result;
switch (kind) {
case kGlobal:
@@ -3115,7 +3117,7 @@
static jarray NewPrimitiveArray(const char* function_name, JNIEnv* env, jsize length,
Primitive::Type type) {
ScopedObjectAccess soa(env);
- ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ ScopedCheck sc(kFlag_Default, function_name);
JniValueType args[2] = {{.E = env}, {.z = length}};
if (sc.Check(soa, true, "Ez", args)) {
JniValueType result;
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 1a78d72..821d613 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -37,7 +37,7 @@
CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
}
- if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
+ if (m == nullptr || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
return true;
}
@@ -53,7 +53,7 @@
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (GetMethod()->IsOptimized()) {
+ if (GetMethod()->IsOptimized(sizeof(void*))) {
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
} else {
CheckQuickMethod(registers, number_of_references, native_pc_offset);
@@ -84,8 +84,12 @@
case DexRegisterMap::kInRegister:
CHECK_NE(register_mask & dex_register_map.GetValue(reg), 0u);
break;
+ case DexRegisterMap::kInFpuRegister:
+ // In Fpu register, should not be a reference.
+ CHECK(false);
+ break;
case DexRegisterMap::kConstant:
- CHECK_EQ(dex_register_map.GetValue(0), 0);
+ CHECK_EQ(dex_register_map.GetValue(reg), 0);
break;
}
}
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 875efbb..ead3fa5 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -105,8 +105,7 @@
}
inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
- mirror::ArtMethod* referrer,
- InvokeType type) {
+ mirror::ArtMethod* referrer) {
mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx);
if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
return nullptr;
@@ -117,7 +116,7 @@
inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
mirror::ArtMethod** referrer,
InvokeType type) {
- mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer);
if (LIKELY(resolved_method != nullptr)) {
return resolved_method;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index bbbb9e0..e1b79c9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -17,6 +17,7 @@
#include "class_linker.h"
#include <deque>
+#include <iostream>
#include <memory>
#include <queue>
#include <string>
@@ -146,15 +147,6 @@
VlogClassInitializationFailure(klass);
}
-static size_t Hash(const char* s) {
- // This is the java.lang.String hashcode for convenience, not interoperability.
- size_t hash = 0;
- for (; *s != '\0'; ++s) {
- hash = hash * 31 + *s;
- }
- return hash;
-}
-
// Gap between two fields in object layout.
struct FieldGap {
uint32_t start_offset; // The offset from the start of the object.
@@ -172,7 +164,7 @@
typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
// Adds largest aligned gaps to queue of gaps.
-void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
+static void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
DCHECK(gaps != nullptr);
uint32_t current_offset = gap_start;
@@ -193,15 +185,13 @@
}
// Shuffle fields forward, making use of gaps whenever possible.
template<int n>
-static void ShuffleForward(const size_t num_fields, size_t* current_field_idx,
+static void ShuffleForward(size_t* current_field_idx,
MemberOffset* field_offset,
- mirror::ObjectArray<mirror::ArtField>* fields,
std::deque<mirror::ArtField*>* grouped_and_sorted_fields,
FieldGaps* gaps)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(current_field_idx != nullptr);
DCHECK(grouped_and_sorted_fields != nullptr);
- DCHECK(fields != nullptr || (num_fields == 0 && grouped_and_sorted_fields->empty()));
DCHECK(gaps != nullptr);
DCHECK(field_offset != nullptr);
@@ -219,7 +209,6 @@
}
CHECK(type != Primitive::kPrimNot) << PrettyField(field); // should be primitive types
grouped_and_sorted_fields->pop_front();
- fields->Set<false>(*current_field_idx, field);
if (!gaps->empty() && gaps->top().size >= n) {
FieldGap gap = gaps->top();
gaps->pop();
@@ -254,7 +243,8 @@
portable_imt_conflict_trampoline_(nullptr),
quick_imt_conflict_trampoline_(nullptr),
quick_generic_jni_trampoline_(nullptr),
- quick_to_interpreter_bridge_trampoline_(nullptr) {
+ quick_to_interpreter_bridge_trampoline_(nullptr),
+ image_pointer_size_(sizeof(void*)) {
memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
}
@@ -389,10 +379,9 @@
Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize())));
CHECK(java_lang_reflect_ArtMethod.Get() != nullptr);
- java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize());
+ java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
-
mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
// Set up array classes for string, field, method
@@ -418,8 +407,7 @@
// DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
// these roots.
CHECK_NE(0U, boot_class_path.size());
- for (size_t i = 0; i != boot_class_path.size(); ++i) {
- const DexFile* dex_file = boot_class_path[i];
+ for (const DexFile* dex_file : boot_class_path) {
CHECK(dex_file != nullptr);
AppendToBootClassPath(self, *dex_file);
}
@@ -434,6 +422,7 @@
Runtime* runtime = Runtime::Current();
runtime->SetResolutionMethod(runtime->CreateResolutionMethod());
runtime->SetImtConflictMethod(runtime->CreateImtConflictMethod());
+ runtime->SetImtUnimplementedMethod(runtime->CreateImtConflictMethod());
runtime->SetDefaultImt(runtime->CreateDefaultImt(this));
// Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that
@@ -815,7 +804,6 @@
if (oat_dex_file == nullptr) {
if (i == 0 && generated) {
- std::string error_msg;
error_msg = StringPrintf("\nFailed to find dex file '%s' (checksum 0x%x) in generated out "
" file'%s'", dex_location, next_location_checksum,
oat_file->GetLocation().c_str());
@@ -1026,7 +1014,7 @@
uint32_t dex_location_checksum,
const char* oat_location,
std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
!Runtime::Current()->IsCompiler(),
error_msg));
if (oat_file.get() == nullptr) {
@@ -1098,7 +1086,7 @@
error_msgs->push_back(error_msg);
return nullptr;
}
- std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
!Runtime::Current()->IsCompiler(),
&error_msg));
if (oat_file.get() == nullptr) {
@@ -1159,9 +1147,13 @@
image_patch_delta = image_header->GetPatchDelta();
}
const OatHeader& oat_header = oat_file->GetOatHeader();
- bool ret = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
- && (oat_header.GetImagePatchDelta() == image_patch_delta)
- && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin));
+ bool ret = (oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum);
+
+ // If the oat file is PIC, it doesn't care if/how image was relocated. Ignore these checks.
+ if (!oat_file->IsPic()) {
+ ret = ret && (oat_header.GetImagePatchDelta() == image_patch_delta)
+ && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin);
+ }
if (!ret) {
*error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d, %d) with (0x%x, %" PRIdPTR ", %d)",
oat_file->GetLocation().c_str(),
@@ -1187,11 +1179,11 @@
if (oat_dex_file == nullptr) {
*error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x",
oat_file->GetLocation().c_str(), dex_location, dex_location_checksum);
- for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ for (const OatFile::OatDexFile* oat_dex_file_in : oat_file->GetOatDexFiles()) {
*error_msg += StringPrintf("\noat file '%s' contains contents for '%s' with checksum 0x%x",
oat_file->GetLocation().c_str(),
- oat_dex_file->GetDexFileLocation().c_str(),
- oat_dex_file->GetDexFileLocationChecksum());
+ oat_dex_file_in->GetDexFileLocation().c_str(),
+ oat_dex_file_in->GetDexFileLocationChecksum());
}
return false;
}
@@ -1343,11 +1335,12 @@
bool odex_checksum_verified = false;
bool have_system_odex = false;
{
- // There is a high probability that these both these oat files map similar/the same address
+ // There is a high probability that both these oat files map similar/the same address
// spaces so we must scope them like this so they each gets its turn.
std::unique_ptr<OatFile> odex_oat_file(OatFile::Open(odex_filename, odex_filename, nullptr,
+ nullptr,
executable, &odex_error_msg));
- if (odex_oat_file.get() != nullptr && CheckOatFile(odex_oat_file.get(), isa,
+ if (odex_oat_file.get() != nullptr && CheckOatFile(runtime, odex_oat_file.get(), isa,
&odex_checksum_verified,
&odex_error_msg)) {
return odex_oat_file.release();
@@ -1368,8 +1361,9 @@
bool cache_checksum_verified = false;
if (have_dalvik_cache) {
std::unique_ptr<OatFile> cache_oat_file(OatFile::Open(cache_filename, cache_filename, nullptr,
+ nullptr,
executable, &cache_error_msg));
- if (cache_oat_file.get() != nullptr && CheckOatFile(cache_oat_file.get(), isa,
+ if (cache_oat_file.get() != nullptr && CheckOatFile(runtime, cache_oat_file.get(), isa,
&cache_checksum_verified,
&cache_error_msg)) {
return cache_oat_file.release();
@@ -1444,7 +1438,7 @@
InstructionSet isa,
std::string* error_msg) {
// We open it non-executable
- std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, nullptr, false, error_msg));
+ std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, nullptr, nullptr, false, error_msg));
if (output.get() == nullptr) {
return nullptr;
}
@@ -1463,13 +1457,15 @@
const std::string& image_location,
InstructionSet isa,
std::string* error_msg) {
- if (!Runtime::Current()->GetHeap()->HasImageSpace()) {
+ Runtime* runtime = Runtime::Current();
+ DCHECK(runtime != nullptr);
+ if (!runtime->GetHeap()->HasImageSpace()) {
// We don't have an image space so there is no point in trying to patchoat.
LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted because we are "
<< "running without an image. Attempting to use oat file for interpretation.";
return GetInterpretedOnlyOat(input_oat, isa, error_msg);
}
- if (!Runtime::Current()->IsDex2OatEnabled()) {
+ if (!runtime->IsDex2OatEnabled()) {
// We don't have dex2oat so we can assume we don't have patchoat either. We should just use the
// input_oat but make sure we only do interpretation on it's dex files.
LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted due to dex2oat being "
@@ -1477,7 +1473,7 @@
return GetInterpretedOnlyOat(input_oat, isa, error_msg);
}
Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
- std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
+ std::string patchoat(runtime->GetPatchoatExecutable());
std::string isa_arg("--instruction-set=");
isa_arg += GetInstructionSetString(isa);
@@ -1499,10 +1495,11 @@
LOG(INFO) << "Relocate Oat File: " << command_line;
bool success = Exec(argv, error_msg);
if (success) {
- std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr,
- !Runtime::Current()->IsCompiler(), error_msg));
+ std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr, nullptr,
+ !runtime->IsCompiler(), error_msg));
bool checksum_verified = false;
- if (output.get() != nullptr && CheckOatFile(output.get(), isa, &checksum_verified, error_msg)) {
+ if (output.get() != nullptr && CheckOatFile(runtime, output.get(), isa, &checksum_verified,
+ error_msg)) {
return output.release();
} else if (output.get() != nullptr) {
*error_msg = StringPrintf("Patching of oat file '%s' succeeded "
@@ -1513,7 +1510,7 @@
"but was unable to open output file '%s': %s",
input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
}
- } else if (!Runtime::Current()->IsCompiler()) {
+ } else if (!runtime->IsCompiler()) {
// patchoat failed which means we probably don't have enough room to place the output oat file,
// instead of failing we should just run the interpreter from the dex files in the input oat.
LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
@@ -1527,27 +1524,9 @@
return nullptr;
}
-int32_t ClassLinker::GetRequiredDelta(const OatFile* oat_file, InstructionSet isa) {
- Runtime* runtime = Runtime::Current();
- int32_t real_patch_delta;
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- CHECK(image_space != nullptr);
- if (isa == Runtime::Current()->GetInstructionSet()) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- real_patch_delta = image_header.GetPatchDelta();
- } else {
- std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
- image_space->GetImageLocation().c_str(), isa));
- real_patch_delta = image_header->GetPatchDelta();
- }
- const OatHeader& oat_header = oat_file->GetOatHeader();
- return real_patch_delta - oat_header.GetImagePatchDelta();
-}
-
-bool ClassLinker::CheckOatFile(const OatFile* oat_file, InstructionSet isa,
+bool ClassLinker::CheckOatFile(const Runtime* runtime, const OatFile* oat_file, InstructionSet isa,
bool* checksum_verified,
std::string* error_msg) {
- Runtime* runtime = Runtime::Current();
const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
if (image_space == nullptr) {
*error_msg = "No image space present";
@@ -1579,19 +1558,30 @@
real_image_checksum, oat_image_checksum);
}
- void* oat_image_oat_offset =
- reinterpret_cast<void*>(oat_header.GetImageFileLocationOatDataBegin());
- bool offset_verified = oat_image_oat_offset == real_image_oat_offset;
- if (!offset_verified) {
- StringAppendF(&compound_msg, " Oat Image oat offset incorrect (expected 0x%p, received 0x%p)",
- real_image_oat_offset, oat_image_oat_offset);
- }
+ bool offset_verified;
+ bool patch_delta_verified;
- int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
- bool patch_delta_verified = oat_patch_delta == real_patch_delta;
- if (!patch_delta_verified) {
- StringAppendF(&compound_msg, " Oat image patch delta incorrect (expected 0x%x, received 0x%x)",
- real_patch_delta, oat_patch_delta);
+ if (!oat_file->IsPic()) {
+ // If an oat file is not PIC, we need to check that the image is at the expected location and
+ // patched in the same way.
+ void* oat_image_oat_offset =
+ reinterpret_cast<void*>(oat_header.GetImageFileLocationOatDataBegin());
+ offset_verified = oat_image_oat_offset == real_image_oat_offset;
+ if (!offset_verified) {
+ StringAppendF(&compound_msg, " Oat Image oat offset incorrect (expected 0x%p, received 0x%p)",
+ real_image_oat_offset, oat_image_oat_offset);
+ }
+
+ int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
+ patch_delta_verified = oat_patch_delta == real_patch_delta;
+ if (!patch_delta_verified) {
+ StringAppendF(&compound_msg, " Oat image patch delta incorrect (expected 0x%x, "
+ "received 0x%x)", real_patch_delta, oat_patch_delta);
+ }
+ } else {
+ // If an oat file is PIC, we ignore offset and patching delta.
+ offset_verified = true;
+ patch_delta_verified = true;
}
bool ret = (*checksum_verified && offset_verified && patch_delta_verified);
@@ -1608,7 +1598,7 @@
return oat_file;
}
- return OatFile::Open(oat_location, oat_location, nullptr, !Runtime::Current()->IsCompiler(),
+ return OatFile::Open(oat_location, oat_location, nullptr, nullptr, !Runtime::Current()->IsCompiler(),
error_msg);
}
@@ -1669,8 +1659,8 @@
CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_caches->Get(i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(dex_caches->Get(i)));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
nullptr);
@@ -1691,6 +1681,20 @@
// Set classes on AbstractMethod early so that IsMethod tests can be performed during the live
// bitmap walk.
mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
+ size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
+ if (!Runtime::Current()->IsCompiler()) {
+ // Compiler supports having an image with a different pointer size than the runtime. This
+ // happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may
+ // also use 32 bit dex2oat on a system with 64 bit apps.
+ CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
+ << sizeof(void*);
+ }
+ if (art_method_object_size == mirror::ArtMethod::InstanceSize(4)) {
+ image_pointer_size_ = 4;
+ } else {
+ CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(8));
+ image_pointer_size_ = 8;
+ }
// Set entry point to interpreter if in InterpretOnly mode.
if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
@@ -1704,7 +1708,7 @@
// reinit array_iftable_ from any array class instance, they should be ==
array_iftable_ = GcRoot<mirror::IfTable>(GetClassRoot(kObjectArrayClass)->GetIfTable());
- DCHECK(array_iftable_.Read() == GetClassRoot(kBooleanArrayClass)->GetIfTable());
+ DCHECK_EQ(array_iftable_.Read(), GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
@@ -1727,25 +1731,24 @@
void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
- it.second.VisitRoot(callback, arg, 0, kRootStickyClass);
+ for (GcRoot<mirror::Class>& root : class_table_) {
+ root.VisitRoot(callback, arg, 0, kRootStickyClass);
+ }
+ for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
+ root.VisitRoot(callback, arg, 0, kRootStickyClass);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
- for (auto& pair : new_class_roots_) {
- mirror::Class* old_ref = pair.second.Read<kWithoutReadBarrier>();
- pair.second.VisitRoot(callback, arg, 0, kRootStickyClass);
- mirror::Class* new_ref = pair.second.Read<kWithoutReadBarrier>();
+ for (auto& root : new_class_roots_) {
+ mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
+ root.VisitRoot(callback, arg, 0, kRootStickyClass);
+ mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
if (UNLIKELY(new_ref != old_ref)) {
// Uh ohes, GC moved a root in the log. Need to search the class_table and update the
// corresponding object. This is slow, but luckily for us, this may only happen with a
// concurrent moving GC.
- for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
- it != end && it->first == pair.first; ++it) {
- // If the class stored matches the old class, update it to the new value.
- if (old_ref == it->second.Read<kWithoutReadBarrier>()) {
- it->second = GcRoot<mirror::Class>(new_ref);
- }
- }
+ auto it = class_table_.Find(GcRoot<mirror::Class>(old_ref));
+ class_table_.Erase(it);
+ class_table_.Insert(GcRoot<mirror::Class>(new_ref));
}
}
}
@@ -1803,9 +1806,13 @@
}
// TODO: why isn't this a ReaderMutexLock?
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
- mirror::Class* c = it.second.Read();
- if (!visitor(c, arg)) {
+ for (GcRoot<mirror::Class>& root : class_table_) {
+ if (!visitor(root.Read(), arg)) {
+ return;
+ }
+ }
+ for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
+ if (!visitor(root.Read(), arg)) {
return;
}
}
@@ -1861,7 +1868,7 @@
size_t class_table_size;
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- class_table_size = class_table_.size();
+ class_table_size = class_table_.Size() + pre_zygote_class_table_.Size();
}
mirror::Class* class_type = mirror::Class::GetJavaLangClass();
mirror::Class* array_of_class = FindArrayClass(self, &class_type);
@@ -2004,7 +2011,8 @@
}
CHECK(h_class->IsRetired());
// Get the updated class from class table.
- klass = LookupClass(self, descriptor, h_class.Get()->GetClassLoader());
+ klass = LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor),
+ h_class.Get()->GetClassLoader());
}
// Wait for the class if it has not already been linked.
@@ -2038,44 +2046,47 @@
// Search a collection of DexFiles for a descriptor
ClassPathEntry FindInClassPath(const char* descriptor,
- const std::vector<const DexFile*>& class_path) {
- for (size_t i = 0; i != class_path.size(); ++i) {
- const DexFile* dex_file = class_path[i];
- const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor);
+ size_t hash, const std::vector<const DexFile*>& class_path) {
+ for (const DexFile* dex_file : class_path) {
+ const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor, hash);
if (dex_class_def != nullptr) {
return ClassPathEntry(dex_file, dex_class_def);
}
}
- // TODO: remove reinterpret_cast when issue with -std=gnu++0x host issue resolved
- return ClassPathEntry(static_cast<const DexFile*>(nullptr),
- static_cast<const DexFile::ClassDef*>(nullptr));
+ return ClassPathEntry(nullptr, nullptr);
}
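The hunk above threads a precomputed descriptor hash through FindInClassPath so the descriptor is hashed once and reused for each dex file lookup instead of being rehashed per file. A minimal standalone sketch of that pattern; the container type and hash function below are illustrative stand-ins, not the ART implementation:

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Illustrative stand-in for a per-dex-file index of class descriptors keyed by hash.
using ClassIndex = std::unordered_multimap<uint32_t, std::string>;

// Hash the descriptor once (any stable string hash works for the sketch).
uint32_t HashDescriptor(const char* descriptor) {
  uint32_t hash = 0;
  for (const char* p = descriptor; *p != '\0'; ++p) {
    hash = hash * 31u + static_cast<uint8_t>(*p);
  }
  return hash;
}

// Reuse the precomputed hash across every entry on the class path, mirroring how the
// change passes `hash` down instead of recomputing it per dex file.
const std::string* FindInClassPath(const char* descriptor, uint32_t hash,
                                   const std::vector<ClassIndex>& class_path) {
  for (const ClassIndex& index : class_path) {
    auto range = index.equal_range(hash);
    for (auto it = range.first; it != range.second; ++it) {
      if (it->second == descriptor) {
        return &it->second;
      }
    }
  }
  return nullptr;
}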
mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
Thread* self, const char* descriptor,
+ size_t hash,
Handle<mirror::ClassLoader> class_loader) {
+ // Can we special-case a well-understood PathClassLoader with the BootClassLoader as parent?
if (class_loader->GetClass() !=
soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader) ||
class_loader->GetParent()->GetClass() !=
soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)) {
return nullptr;
}
- ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
+ ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
// Check if this would be found in the parent boot class loader.
if (pair.second != nullptr) {
- mirror::Class* klass = LookupClass(self, descriptor, nullptr);
+ mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
if (klass != nullptr) {
- return EnsureResolved(self, descriptor, klass);
+ // May return null if resolution on another thread fails.
+ klass = EnsureResolved(self, descriptor, klass);
+ } else {
+ // May OOME.
+ klass = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
+ *pair.second);
}
- klass = DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
- *pair.second);
- if (klass != nullptr) {
- return klass;
+ if (klass == nullptr) {
+ CHECK(self->IsExceptionPending()) << descriptor;
+ self->ClearException();
}
- CHECK(self->IsExceptionPending()) << descriptor;
- self->ClearException();
+ return klass;
} else {
- // RegisterDexFile may allocate dex caches (and cause thread suspension).
+ // Handle as if this is the child PathClassLoader.
+ // Use handles since RegisterDexFile may allocate dex caches (and cause thread suspension).
StackHandleScope<3> hs(self);
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
@@ -2114,12 +2125,12 @@
LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
break;
}
- for (const DexFile* dex_file : *dex_files) {
- const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor);
+ for (const DexFile* cp_dex_file : *dex_files) {
+ const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
if (dex_class_def != nullptr) {
- RegisterDexFile(*dex_file);
- mirror::Class* klass =
- DefineClass(self, descriptor, class_loader, *dex_file, *dex_class_def);
+ RegisterDexFile(*cp_dex_file);
+ mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
+ *cp_dex_file, *dex_class_def);
if (klass == nullptr) {
CHECK(self->IsExceptionPending()) << descriptor;
self->ClearException();
@@ -2132,8 +2143,9 @@
}
}
}
+ self->AssertNoPendingException();
+ return nullptr;
}
- return nullptr;
}
mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
@@ -2146,19 +2158,20 @@
// for primitive classes that aren't backed by dex files.
return FindPrimitiveClass(descriptor[0]);
}
+ const size_t hash = ComputeModifiedUtf8Hash(descriptor);
// Find the class in the loaded classes table.
- mirror::Class* klass = LookupClass(self, descriptor, class_loader.Get());
+ mirror::Class* klass = LookupClass(self, descriptor, hash, class_loader.Get());
if (klass != nullptr) {
return EnsureResolved(self, descriptor, klass);
}
// Class is not yet loaded.
if (descriptor[0] == '[') {
- return CreateArrayClass(self, descriptor, class_loader);
+ return CreateArrayClass(self, descriptor, hash, class_loader);
} else if (class_loader.Get() == nullptr) {
// The boot class loader, search the boot class path.
- ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
+ ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
if (pair.second != nullptr) {
- return DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+ return DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
*pair.second);
} else {
// The boot class loader is searched ahead of the application class loader, failures are
@@ -2171,16 +2184,16 @@
} else if (Runtime::Current()->UseCompileTimeClassPath()) {
// First try with the bootstrap class loader.
if (class_loader.Get() != nullptr) {
- klass = LookupClass(self, descriptor, nullptr);
+ klass = LookupClass(self, descriptor, hash, nullptr);
if (klass != nullptr) {
return EnsureResolved(self, descriptor, klass);
}
}
// If the lookup failed search the boot class path. We don't perform a recursive call to avoid
// a NoClassDefFoundError being allocated.
- ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
+ ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
if (pair.second != nullptr) {
- return DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+ return DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
*pair.second);
}
// Next try the compile time class path.
@@ -2191,9 +2204,9 @@
soa.AddLocalReference<jobject>(class_loader.Get()));
class_path = &Runtime::Current()->GetCompileTimeClassPath(jclass_loader.get());
}
- pair = FindInClassPath(descriptor, *class_path);
+ pair = FindInClassPath(descriptor, hash, *class_path);
if (pair.second != nullptr) {
- return DefineClass(self, descriptor, class_loader, *pair.first, *pair.second);
+ return DefineClass(self, descriptor, hash, class_loader, *pair.first, *pair.second);
} else {
// Use the pre-allocated NCDFE at compile time to avoid wasting time constructing exceptions.
mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
@@ -2202,9 +2215,10 @@
}
} else {
ScopedObjectAccessUnchecked soa(self);
- mirror::Class* klass = FindClassInPathClassLoader(soa, self, descriptor, class_loader);
- if (klass != nullptr) {
- return klass;
+ mirror::Class* cp_klass = FindClassInPathClassLoader(soa, self, descriptor, hash,
+ class_loader);
+ if (cp_klass != nullptr) {
+ return cp_klass;
}
ScopedLocalRef<jobject> class_loader_object(soa.Env(),
soa.AddLocalReference<jobject>(class_loader.Get()));
@@ -2239,13 +2253,12 @@
UNREACHABLE();
}
-mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor,
+mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def) {
StackHandleScope<3> hs(self);
auto klass = hs.NewHandle<mirror::Class>(nullptr);
- bool should_allocate = false;
// Load the class from the dex file.
if (UNLIKELY(!init_done_)) {
@@ -2264,14 +2277,10 @@
klass.Assign(GetClassRoot(kJavaLangReflectArtField));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtMethod;") == 0) {
klass.Assign(GetClassRoot(kJavaLangReflectArtMethod));
- } else {
- should_allocate = true;
}
- } else {
- should_allocate = true;
}
- if (should_allocate) {
+ if (klass.Get() == nullptr) {
// Allocate a class with the status of not ready.
// Interface object should get the right size here. Regular class will
// figure out the right size later and be replaced with one of the right
@@ -2296,7 +2305,7 @@
klass->SetClinitThreadId(self->GetTid());
// Add the newly loaded class to the loaded classes table.
- mirror::Class* existing = InsertClass(descriptor, klass.Get(), Hash(descriptor));
+ mirror::Class* existing = InsertClass(descriptor, klass.Get(), hash);
if (existing != nullptr) {
// We failed to insert because we raced with another thread. Calling EnsureResolved may cause
// this thread to block.
@@ -2449,17 +2458,18 @@
// by search for its position in the declared virtual methods.
oat_method_index = declaring_class->NumDirectMethods();
size_t end = declaring_class->NumVirtualMethods();
- bool found = false;
+ bool found_virtual = false;
for (size_t i = 0; i < end; i++) {
// Check method index instead of identity in case of duplicate method definitions.
if (method->GetDexMethodIndex() ==
declaring_class->GetVirtualMethod(i)->GetDexMethodIndex()) {
- found = true;
+ found_virtual = true;
break;
}
oat_method_index++;
}
- CHECK(found) << "Didn't find oat method index for virtual method: " << PrettyMethod(method);
+ CHECK(found_virtual) << "Didn't find oat method index for virtual method: "
+ << PrettyMethod(method);
}
DCHECK_EQ(oat_method_index,
GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
@@ -2468,10 +2478,9 @@
OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
declaring_class->GetDexClassDefIndex(),
found);
- if (!found) {
+ if (!(*found)) {
return OatFile::OatMethod::Invalid();
}
- *found = true;
return oat_class.GetOatMethod(oat_method_index);
}
@@ -2669,8 +2678,7 @@
void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index,
- uint32_t method_index) {
+ uint32_t class_def_method_index) {
Runtime* runtime = Runtime::Current();
if (runtime->IsCompiler()) {
// The following code only applies to a non-compiler runtime.
@@ -2682,7 +2690,7 @@
if (oat_class != nullptr) {
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
- const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
+ const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
oat_method.LinkMethod(method.Get());
}
@@ -2784,18 +2792,17 @@
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
&has_oat_class);
if (has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, &oat_class);
+ LoadClassMembers(self, dex_file, class_data, klass, &oat_class);
}
}
if (!has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, nullptr);
+ LoadClassMembers(self, dex_file, class_data, klass, nullptr);
}
}
void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
- mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class) {
// Load fields.
ClassDataItemIterator it(dex_file, class_data);
@@ -2872,7 +2879,7 @@
return;
}
klass->SetDirectMethod(i, method.Get());
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
uint32_t it_method_index = it.GetMemberIndex();
if (last_dex_method_index == it_method_index) {
// duplicate case
@@ -2894,7 +2901,7 @@
}
klass->SetVirtualMethod(i, method.Get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
class_def_method_index++;
}
DCHECK(!it.HasNext());
@@ -3102,7 +3109,8 @@
primitive_class->SetPrimitiveType(type);
primitive_class->SetStatus(mirror::Class::kStatusInitialized, self);
const char* descriptor = Primitive::Descriptor(type);
- mirror::Class* existing = InsertClass(descriptor, primitive_class, Hash(descriptor));
+ mirror::Class* existing = InsertClass(descriptor, primitive_class,
+ ComputeModifiedUtf8Hash(descriptor));
CHECK(existing == nullptr) << "InitPrimitiveClass(" << type << ") failed";
return primitive_class;
}
@@ -3120,7 +3128,7 @@
// array class; that always comes from the base element class.
//
// Returns nullptr with an exception raised on failure.
-mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor,
+mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader) {
// Identify the underlying component type
CHECK_EQ('[', descriptor[0]);
@@ -3130,7 +3138,8 @@
if (component_type.Get() == nullptr) {
DCHECK(self->IsExceptionPending());
// We need to accept erroneous classes as component types.
- component_type.Assign(LookupClass(self, descriptor + 1, class_loader.Get()));
+ const size_t component_hash = ComputeModifiedUtf8Hash(descriptor + 1);
+ component_type.Assign(LookupClass(self, descriptor + 1, component_hash, class_loader.Get()));
if (component_type.Get() == nullptr) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -3160,7 +3169,7 @@
// class to the hash table --- necessary because of possible races with
// other threads.)
if (class_loader.Get() != component_type->GetClassLoader()) {
- mirror::Class* new_class = LookupClass(self, descriptor, component_type->GetClassLoader());
+ mirror::Class* new_class = LookupClass(self, descriptor, hash, component_type->GetClassLoader());
if (new_class != nullptr) {
return new_class;
}
@@ -3210,7 +3219,11 @@
new_class->SetPrimitiveType(Primitive::kPrimNot);
new_class->SetClassLoader(component_type->GetClassLoader());
new_class->SetStatus(mirror::Class::kStatusLoaded, self);
- new_class->PopulateEmbeddedImtAndVTable();
+ {
+ StackHandleScope<mirror::Class::kImtSize> hs2(self,
+ Runtime::Current()->GetImtUnimplementedMethod());
+ new_class->PopulateEmbeddedImtAndVTable(&hs2);
+ }
new_class->SetStatus(mirror::Class::kStatusInitialized, self);
// don't need to set new_class->SetObjectSize(..)
// because Object::SizeOf delegates to Array::SizeOf
@@ -3245,7 +3258,7 @@
new_class->SetAccessFlags(access_flags);
- mirror::Class* existing = InsertClass(descriptor, new_class.Get(), Hash(descriptor));
+ mirror::Class* existing = InsertClass(descriptor, new_class.Get(), hash);
if (existing == nullptr) {
return new_class.Get();
}
@@ -3298,8 +3311,7 @@
LOG(INFO) << "Loaded class " << descriptor << source;
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- mirror::Class* existing =
- LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
+ mirror::Class* existing = LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
if (existing != nullptr) {
return existing;
}
@@ -3309,13 +3321,13 @@
// is in the image.
existing = LookupClassFromImage(descriptor);
if (existing != nullptr) {
- CHECK(klass == existing);
+ CHECK_EQ(klass, existing);
}
}
VerifyObject(klass);
- class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+ class_table_.InsertWithHash(GcRoot<mirror::Class>(klass), hash);
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+ new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
return nullptr;
}
@@ -3323,27 +3335,18 @@
mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass,
size_t hash) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- mirror::Class* existing =
- LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
-
- if (existing == nullptr) {
+ auto existing_it = class_table_.FindWithHash(std::make_pair(descriptor, klass->GetClassLoader()),
+ hash);
+ if (existing_it == class_table_.end()) {
CHECK(klass->IsProxyClass());
return nullptr;
}
+ mirror::Class* existing = existing_it->Read();
CHECK_NE(existing, klass) << descriptor;
CHECK(!existing->IsResolved()) << descriptor;
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor;
- for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
- it != end && it->first == hash; ++it) {
- mirror::Class* klass = it->second.Read();
- if (klass == existing) {
- class_table_.erase(it);
- break;
- }
- }
-
CHECK(!klass->IsTemp()) << descriptor;
if (kIsDebugBuild && klass->GetClassLoader() == nullptr &&
dex_cache_image_class_lookup_required_) {
@@ -3351,37 +3354,38 @@
// is in the image.
existing = LookupClassFromImage(descriptor);
if (existing != nullptr) {
- CHECK(klass == existing) << descriptor;
+ CHECK_EQ(klass, existing) << descriptor;
}
}
VerifyObject(klass);
- class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+ // Update the element in the hash set.
+ *existing_it = GcRoot<mirror::Class>(klass);
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+ new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
return existing;
}
-bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) {
- size_t hash = Hash(descriptor);
+bool ClassLinker::RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
- it != end && it->first == hash;
- ++it) {
- mirror::Class* klass = it->second.Read();
- if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
- class_table_.erase(it);
- return true;
- }
+ auto pair = std::make_pair(descriptor, class_loader);
+ auto it = class_table_.Find(pair);
+ if (it != class_table_.end()) {
+ class_table_.Erase(it);
+ return true;
+ }
+ it = pre_zygote_class_table_.Find(pair);
+ if (it != pre_zygote_class_table_.end()) {
+ pre_zygote_class_table_.Erase(it);
+ return true;
}
return false;
}
-mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor,
- const mirror::ClassLoader* class_loader) {
- size_t hash = Hash(descriptor);
+mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor, size_t hash,
+ mirror::ClassLoader* class_loader) {
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
mirror::Class* result = LookupClassFromTableLocked(descriptor, class_loader, hash);
@@ -3410,26 +3414,17 @@
}
mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor,
- const mirror::ClassLoader* class_loader,
+ mirror::ClassLoader* class_loader,
size_t hash) {
- auto end = class_table_.end();
- for (auto it = class_table_.lower_bound(hash); it != end && it->first == hash; ++it) {
- mirror::Class* klass = it->second.Read();
- if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
- if (kIsDebugBuild) {
- // Check for duplicates in the table.
- for (++it; it != end && it->first == hash; ++it) {
- mirror::Class* klass2 = it->second.Read();
- CHECK(!(klass2->GetClassLoader() == class_loader &&
- klass2->DescriptorEquals(descriptor)))
- << PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
- << PrettyClass(klass2) << " " << klass2 << " " << klass2->GetClassLoader();
- }
- }
- return klass;
+ auto descriptor_pair = std::make_pair(descriptor, class_loader);
+ auto it = pre_zygote_class_table_.FindWithHash(descriptor_pair, hash);
+ if (it == pre_zygote_class_table_.end()) {
+ it = class_table_.FindWithHash(descriptor_pair, hash);
+ if (it == class_table_.end()) {
+ return nullptr;
}
}
- return nullptr;
+ return it->Read();
}
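The lookup above consults two flat hash tables keyed by (descriptor, defining class loader): the pre-zygote table first, then the current table. A rough self-contained sketch of that lookup order, using std::unordered_set in place of ART's internal hash set; every name below is illustrative:

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_set>

// Illustrative key: a class is identified by its descriptor plus its defining loader.
struct ClassKey {
  std::string descriptor;
  const void* class_loader;
  bool operator==(const ClassKey& other) const {
    return class_loader == other.class_loader && descriptor == other.descriptor;
  }
};

struct ClassKeyHash {
  size_t operator()(const ClassKey& key) const {
    // Hash only the descriptor; the loader is checked during equality, as in the real table.
    return std::hash<std::string>()(key.descriptor);
  }
};

using ClassTable = std::unordered_set<ClassKey, ClassKeyHash>;

// Check the read-mostly pre-zygote table first, then the current table.
const ClassKey* LookupClass(const ClassTable& pre_zygote_table, const ClassTable& table,
                            const char* descriptor, const void* class_loader) {
  ClassKey key{descriptor, class_loader};
  auto it = pre_zygote_table.find(key);
  if (it == pre_zygote_table.end()) {
    it = table.find(key);
    if (it == table.end()) {
      return nullptr;
    }
  }
  return &*it;
}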
static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
@@ -3457,15 +3452,15 @@
if (klass != nullptr) {
DCHECK(klass->GetClassLoader() == nullptr);
const char* descriptor = klass->GetDescriptor(&temp);
- size_t hash = Hash(descriptor);
+ size_t hash = ComputeModifiedUtf8Hash(descriptor);
mirror::Class* existing = LookupClassFromTableLocked(descriptor, nullptr, hash);
if (existing != nullptr) {
- CHECK(existing == klass) << PrettyClassAndClassLoader(existing) << " != "
+ CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
<< PrettyClassAndClassLoader(klass);
} else {
- class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+ class_table_.Insert(GcRoot<mirror::Class>(klass));
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+ new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
}
}
@@ -3474,6 +3469,13 @@
dex_cache_image_class_lookup_required_ = false;
}
+void ClassLinker::MoveClassTableToPreZygote() {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ DCHECK(pre_zygote_class_table_.Empty());
+ pre_zygote_class_table_ = std::move(class_table_);
+ class_table_.Clear();
+}
+
mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup");
mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
@@ -3502,14 +3504,32 @@
if (dex_cache_image_class_lookup_required_) {
MoveImageClassesToClassTable();
}
- size_t hash = Hash(descriptor);
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
- it != end && it->first == hash; ++it) {
- mirror::Class* klass = it->second.Read();
- if (klass->DescriptorEquals(descriptor)) {
- result.push_back(klass);
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ while (true) {
+ auto it = class_table_.Find(descriptor);
+ if (it == class_table_.end()) {
+ break;
}
+ result.push_back(it->Read());
+ class_table_.Erase(it);
+ }
+ for (mirror::Class* k : result) {
+ class_table_.Insert(GcRoot<mirror::Class>(k));
+ }
+ size_t pre_zygote_start = result.size();
+ // Now handle the pre-zygote table.
+ // Note: This dirties the pre-zygote table but shouldn't be an issue since LookupClasses is only
+ // called from the debugger.
+ while (true) {
+ auto it = pre_zygote_class_table_.Find(descriptor);
+ if (it == pre_zygote_class_table_.end()) {
+ break;
+ }
+ result.push_back(it->Read());
+ pre_zygote_class_table_.Erase(it);
+ }
+ for (size_t i = pre_zygote_start; i < result.size(); ++i) {
+ pre_zygote_class_table_.Insert(GcRoot<mirror::Class>(result[i]));
}
}
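Since the table's Find() only surfaces one matching entry at a time, the loop above drains every class with the given descriptor and then re-inserts them. The same find/erase/collect/re-insert pattern over a standard multiset, purely as an illustration:

#include <string>
#include <unordered_set>
#include <vector>

// Collect every element equal to `descriptor` from a table whose lookup only surfaces one
// match at a time, then restore the table to its original contents.
std::vector<std::string> CollectAll(std::unordered_multiset<std::string>* table,
                                    const std::string& descriptor) {
  std::vector<std::string> result;
  while (true) {
    auto it = table->find(descriptor);
    if (it == table->end()) {
      break;
    }
    result.push_back(*it);
    table->erase(it);  // Erase so that the next find() surfaces the next duplicate.
  }
  for (const std::string& entry : result) {
    table->insert(entry);  // Put every match back once they have all been gathered.
  }
  return result;
}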
@@ -3554,7 +3574,7 @@
Handle<mirror::Class> super(hs.NewHandle(klass->GetSuperClass()));
if (super.Get() != nullptr) {
// Acquire lock to prevent races on verifying the super class.
- ObjectLock<mirror::Class> lock(self, super);
+ ObjectLock<mirror::Class> super_lock(self, super);
if (!super->IsVerified() && !super->IsErroneous()) {
VerifyClass(self, super);
@@ -3859,10 +3879,10 @@
klass->SetVirtualMethods(virtuals);
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
- StackHandleScope<1> hs(self);
+ StackHandleScope<1> hs2(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype);
if (UNLIKELY(clone == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -3911,11 +3931,11 @@
CHECK(klass->GetIFields() == nullptr);
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
- StackHandleScope<2> hs(self);
+ StackHandleScope<2> hs2(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
- Handle<mirror::ArtMethod> virtual_method(hs.NewHandle(klass->GetVirtualMethod(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> virtual_method(hs2.NewHandle(klass->GetVirtualMethod(i)));
CheckProxyMethod(virtual_method, prototype);
}
@@ -3933,7 +3953,8 @@
CHECK_EQ(klass.Get()->GetThrows(),
soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>*>(throws));
}
- mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), Hash(descriptor.c_str()));
+ mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(),
+ ComputeModifiedUtf8Hash(descriptor.c_str()));
CHECK(existing == nullptr);
return klass.Get();
}
@@ -4047,13 +4068,10 @@
CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
- StackHandleScope<2> hs(Thread::Current());
- MethodHelper mh(hs.NewHandle(method.Get()));
- MethodHelper mh2(hs.NewHandle(prototype.Get()));
CHECK_STREQ(method->GetName(), prototype->GetName());
CHECK_STREQ(method->GetShorty(), prototype->GetShorty());
// More complex sanity - via dex cache
- CHECK_EQ(mh.GetReturnType(), mh2.GetReturnType());
+ CHECK_EQ(method->GetInterfaceMethodIfProxy()->GetReturnType(), prototype->GetReturnType());
}
static bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
@@ -4205,13 +4223,28 @@
}
}
- if (klass->NumStaticFields() > 0) {
+ const size_t num_static_fields = klass->NumStaticFields();
+ if (num_static_fields > 0) {
const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
CHECK(dex_class_def != nullptr);
const DexFile& dex_file = klass->GetDexFile();
StackHandleScope<3> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
+
+ // Eagerly fill in static fields so that we don't have to do as many expensive
+ // Class::FindStaticField calls in ResolveField.
+ for (size_t i = 0; i < num_static_fields; ++i) {
+ mirror::ArtField* field = klass->GetStaticField(i);
+ const uint32_t field_idx = field->GetDexFieldIndex();
+ mirror::ArtField* resolved_field = dex_cache->GetResolvedField(field_idx);
+ if (resolved_field == nullptr) {
+ dex_cache->SetResolvedField(field_idx, field);
+ } else {
+ DCHECK_EQ(field, resolved_field);
+ }
+ }
+
EncodedStaticFieldValueIterator value_it(dex_file, &dex_cache, &class_loader,
this, *dex_class_def);
const uint8_t* class_data = dex_file.GetClassData(*dex_class_def);
@@ -4220,8 +4253,8 @@
DCHECK(field_it.HasNextStaticField());
CHECK(can_init_statics);
for ( ; value_it.HasNext(); value_it.Next(), field_it.Next()) {
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> field(hs.NewHandle(
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::ArtField> field(hs2.NewHandle(
ResolveField(dex_file, field_it.GetMemberIndex(), dex_cache, class_loader, true)));
if (Runtime::Current()->IsActiveTransaction()) {
value_it.ReadValueToField<true>(field);
@@ -4318,7 +4351,8 @@
return true;
}
// Begin with the methods local to the superclass.
- StackHandleScope<2> hs(Thread::Current());
+ Thread* self = Thread::Current();
+ StackHandleScope<2> hs(self);
MutableMethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
MutableMethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
if (klass->HasSuperClass() &&
@@ -4327,7 +4361,7 @@
mh.ChangeMethod(klass->GetVTableEntry(i));
super_mh.ChangeMethod(klass->GetSuperClass()->GetVTableEntry(i));
if (mh.GetMethod() != super_mh.GetMethod() &&
- !mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
+ !mh.HasSameSignatureWithDifferentClassLoaders(self, &super_mh)) {
ThrowLinkageError(klass.Get(),
"Class %s method %s resolves differently in superclass %s",
PrettyDescriptor(klass.Get()).c_str(),
@@ -4344,7 +4378,7 @@
mh.ChangeMethod(klass->GetIfTable()->GetMethodArray(i)->GetWithoutChecks(j));
super_mh.ChangeMethod(klass->GetIfTable()->GetInterface(i)->GetVirtualMethod(j));
if (mh.GetMethod() != super_mh.GetMethod() &&
- !mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
+ !mh.HasSameSignatureWithDifferentClassLoaders(self, &super_mh)) {
ThrowLinkageError(klass.Get(),
"Class %s method %s resolves differently in interface %s",
PrettyDescriptor(klass.Get()).c_str(),
@@ -4422,7 +4456,9 @@
if (!LinkSuperClass(klass)) {
return false;
}
- if (!LinkMethods(self, klass, interfaces)) {
+ StackHandleScope<mirror::Class::kImtSize> imt_handle_scope(
+ self, Runtime::Current()->GetImtUnimplementedMethod());
+ if (!LinkMethods(self, klass, interfaces, &imt_handle_scope)) {
return false;
}
if (!LinkInstanceFields(self, klass)) {
@@ -4441,7 +4477,7 @@
CHECK_EQ(klass->GetClassSize(), class_size) << PrettyDescriptor(klass.Get());
if (klass->ShouldHaveEmbeddedImtAndVTable()) {
- klass->PopulateEmbeddedImtAndVTable();
+ klass->PopulateEmbeddedImtAndVTable(&imt_handle_scope);
}
// This will notify waiters on klass that saw the not yet resolved
@@ -4451,7 +4487,7 @@
} else {
CHECK(!klass->IsResolved());
// Retire the temporary class and create the correctly sized resolved class.
- *new_class = klass->CopyOf(self, class_size);
+ *new_class = klass->CopyOf(self, class_size, &imt_handle_scope);
if (UNLIKELY(*new_class == nullptr)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -4465,7 +4501,8 @@
FixupTemporaryDeclaringClass(klass.Get(), new_class_h.Get());
- mirror::Class* existing = UpdateClass(descriptor, new_class_h.Get(), Hash(descriptor));
+ mirror::Class* existing = UpdateClass(descriptor, new_class_h.Get(),
+ ComputeModifiedUtf8Hash(descriptor));
CHECK(existing == nullptr || existing == klass.Get());
// This will notify waiters on temp class that saw the not yet resolved class in the
@@ -4585,7 +4622,8 @@
// Populate the class vtable and itable. Compute return type indices.
bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ StackHandleScope<mirror::Class::kImtSize>* out_imt) {
self->AllowThreadSuspension();
if (klass->IsInterface()) {
// No vtable.
@@ -4597,22 +4635,127 @@
for (size_t i = 0; i < count; ++i) {
klass->GetVirtualMethodDuringLinking(i)->SetMethodIndex(i);
}
- // Link interface method tables
- return LinkInterfaceMethods(klass, interfaces);
- } else {
- // Link virtual and interface method tables
- return LinkVirtualMethods(self, klass) && LinkInterfaceMethods(klass, interfaces);
+ } else if (!LinkVirtualMethods(self, klass)) { // Link virtual methods first.
+ return false;
}
- return true;
+ return LinkInterfaceMethods(self, klass, interfaces, out_imt); // Link interface method last.
}
+// Comparator for the name and signature of a method, used when finding overriding methods. The
+// implementation avoids the use of handles; if it did not, we could compare dex caches rather
+// than dex files in the implementation below.
+class MethodNameAndSignatureComparator FINAL : public ValueObject {
+ public:
+ explicit MethodNameAndSignatureComparator(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())),
+ name_(nullptr), name_len_(0) {
+ DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
+ }
+
+ const char* GetName() {
+ if (name_ == nullptr) {
+ name_ = dex_file_->StringDataAndUtf16LengthByIdx(mid_->name_idx_, &name_len_);
+ }
+ return name_;
+ }
+
+ bool HasSameNameAndSignature(mirror::ArtMethod* other)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(!other->IsProxyMethod()) << PrettyMethod(other);
+ const DexFile* other_dex_file = other->GetDexFile();
+ const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
+ if (dex_file_ == other_dex_file) {
+ return mid_->name_idx_ == other_mid.name_idx_ && mid_->proto_idx_ == other_mid.proto_idx_;
+ }
+ GetName(); // Only used to make sure it is calculated.
+ uint32_t other_name_len;
+ const char* other_name = other_dex_file->StringDataAndUtf16LengthByIdx(other_mid.name_idx_,
+ &other_name_len);
+ if (name_len_ != other_name_len || strcmp(name_, other_name) != 0) {
+ return false;
+ }
+ return dex_file_->GetMethodSignature(*mid_) == other_dex_file->GetMethodSignature(other_mid);
+ }
+
+ private:
+ // Dex file for the method to compare against.
+ const DexFile* const dex_file_;
+ // MethodId for the method to compare against.
+ const DexFile::MethodId* const mid_;
+ // Lazily computed name from the dex file's strings.
+ const char* name_;
+ // Lazily computed name length.
+ uint32_t name_len_;
+};
+
+class LinkVirtualHashTable {
+ public:
+ LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table)
+ : klass_(klass), hash_size_(hash_size), hash_table_(hash_table) {
+ std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_);
+ }
+ void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(virtual_method_index);
+ const char* name = local_method->GetName();
+ uint32_t hash = ComputeModifiedUtf8Hash(name);
+ uint32_t index = hash % hash_size_;
+ // Linear probe until we have an empty slot.
+ while (hash_table_[index] != invalid_index_) {
+ if (++index == hash_size_) {
+ index = 0;
+ }
+ }
+ hash_table_[index] = virtual_method_index;
+ }
+ uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const char* name = comparator->GetName();
+ uint32_t hash = ComputeModifiedUtf8Hash(name);
+ size_t index = hash % hash_size_;
+ while (true) {
+ const uint32_t value = hash_table_[index];
+ // Since linear probing creates contiguous blocks, hitting an invalid index means we have
+ // reached the end of the block and can safely assume the method was not found.
+ if (value == invalid_index_) {
+ break;
+ }
+ if (value != removed_index_) { // This signifies not already overridden.
+ mirror::ArtMethod* virtual_method =
+ klass_->GetVirtualMethodDuringLinking(value);
+ if (comparator->HasSameNameAndSignature(virtual_method->GetInterfaceMethodIfProxy())) {
+ hash_table_[index] = removed_index_;
+ return value;
+ }
+ }
+ if (++index == hash_size_) {
+ index = 0;
+ }
+ }
+ return GetNotFoundIndex();
+ }
+ static uint32_t GetNotFoundIndex() {
+ return invalid_index_;
+ }
+
+ private:
+ static const uint32_t invalid_index_;
+ static const uint32_t removed_index_;
+
+ Handle<mirror::Class> klass_;
+ const size_t hash_size_;
+ uint32_t* const hash_table_;
+};
+
+const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
+const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
+
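LinkVirtualHashTable above is a fixed-size open-addressing table with two sentinels: invalid_index_ marks an empty slot and removed_index_ marks a virtual method that has already been matched. A small self-contained sketch of the same probing scheme over plain strings (illustrative only, not the ART code):

#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <vector>

class LinearProbeIndex {
 public:
  static constexpr uint32_t kInvalid = std::numeric_limits<uint32_t>::max();
  static constexpr uint32_t kRemoved = std::numeric_limits<uint32_t>::max() - 1;

  // Size the table larger than the input (and never zero) to keep probe chains short.
  explicit LinearProbeIndex(const std::vector<std::string>& names)
      : names_(names), slots_(names.size() * 3 + 1, kInvalid) {
    for (uint32_t i = 0; i < names_.size(); ++i) {
      Add(i);
    }
  }

  // Linear probe until an empty slot is found.
  void Add(uint32_t index) {
    size_t slot = Hash(names_[index]) % slots_.size();
    while (slots_[slot] != kInvalid) {
      slot = (slot + 1) % slots_.size();
    }
    slots_[slot] = index;
  }

  // Probing stops at the first empty slot: linear probing keeps colliding entries in one
  // contiguous block, so an empty slot means the name cannot be present.
  uint32_t FindAndRemove(const std::string& name) {
    size_t slot = Hash(name) % slots_.size();
    while (slots_[slot] != kInvalid) {
      uint32_t value = slots_[slot];
      if (value != kRemoved && names_[value] == name) {
        slots_[slot] = kRemoved;  // Mark as matched but keep the probe chain intact.
        return value;
      }
      slot = (slot + 1) % slots_.size();
    }
    return kInvalid;
  }

 private:
  static size_t Hash(const std::string& s) { return std::hash<std::string>()(s); }

  const std::vector<std::string>& names_;
  std::vector<uint32_t> slots_;
};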
bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
+ const size_t num_virtual_methods = klass->NumVirtualMethods();
if (klass->HasSuperClass()) {
- uint32_t max_count = klass->NumVirtualMethods() +
- klass->GetSuperClass()->GetVTableLength();
- size_t actual_count = klass->GetSuperClass()->GetVTableLength();
- CHECK_LE(actual_count, max_count);
- StackHandleScope<4> hs(self);
+ const size_t super_vtable_length = klass->GetSuperClass()->GetVTableLength();
+ const size_t max_count = num_virtual_methods + super_vtable_length;
+ StackHandleScope<2> hs(self);
Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
@@ -4621,54 +4764,90 @@
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- int len = super_class->GetVTableLength();
- for (int i = 0; i < len; i++) {
- vtable->Set<false>(i, super_class->GetVTableEntry(i));
+ for (size_t i = 0; i < super_vtable_length; i++) {
+ vtable->SetWithoutChecks<false>(i, super_class->GetEmbeddedVTableEntry(i));
+ }
+ if (num_virtual_methods == 0) {
+ klass->SetVTable(vtable.Get());
+ return true;
}
} else {
- CHECK(super_class->GetVTable() != nullptr) << PrettyClass(super_class.Get());
- vtable = hs.NewHandle(super_class->GetVTable()->CopyOf(self, max_count));
+ mirror::ObjectArray<mirror::ArtMethod>* super_vtable = super_class->GetVTable();
+ CHECK(super_vtable != nullptr) << PrettyClass(super_class.Get());
+ if (num_virtual_methods == 0) {
+ klass->SetVTable(super_vtable);
+ return true;
+ }
+ vtable = hs.NewHandle(super_vtable->CopyOf(self, max_count));
if (UNLIKELY(vtable.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
}
-
- // See if any of our virtual methods override the superclass.
- MutableMethodHelper local_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
- MutableMethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
- for (size_t i = 0; i < klass->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
- local_mh.ChangeMethod(local_method);
- size_t j = 0;
- for (; j < actual_count; ++j) {
- mirror::ArtMethod* super_method = vtable->Get(j);
- super_mh.ChangeMethod(super_method);
- if (local_mh.HasSameNameAndSignature(&super_mh)) {
- if (klass->CanAccessMember(super_method->GetDeclaringClass(),
- super_method->GetAccessFlags())) {
- if (super_method->IsFinal()) {
- ThrowLinkageError(klass.Get(), "Method %s overrides final method in class %s",
- PrettyMethod(local_method).c_str(),
- super_method->GetDeclaringClassDescriptor());
- return false;
- }
- vtable->Set<false>(j, local_method);
- local_method->SetMethodIndex(j);
- break;
- } else {
- LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(local_method)
- << " would have incorrectly overridden the package-private method in "
- << PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
+ // How the algorithm works:
+ // 1. Populate hash table by adding num_virtual_methods from klass. The values in the hash
+ // table are: invalid_index for unused slots, index super_vtable_length + i for a virtual
+ // method which has not been matched to a vtable method, and j if the virtual method at the
+ // index overrode the super virtual method at index j.
+ // 2. Loop through the super vtable methods; if one is overridden, update its hash table entry
+ // to j (j < super_vtable_length) to avoid redundant checks. (TODO: maybe use this info to
+ // reduce the need for the initial vtable, which we later shrink back down.)
+ // 3. Add the non-overridden methods to the end of the vtable.
+ static constexpr size_t kMaxStackHash = 250;
+ const size_t hash_table_size = num_virtual_methods * 3;
+ uint32_t* hash_table_ptr;
+ std::unique_ptr<uint32_t[]> hash_heap_storage;
+ if (hash_table_size <= kMaxStackHash) {
+ hash_table_ptr = reinterpret_cast<uint32_t*>(
+ alloca(hash_table_size * sizeof(*hash_table_ptr)));
+ } else {
+ hash_heap_storage.reset(new uint32_t[hash_table_size]);
+ hash_table_ptr = hash_heap_storage.get();
+ }
+ LinkVirtualHashTable hash_table(klass, hash_table_size, hash_table_ptr);
+ // Add virtual methods to the hash table.
+ for (size_t i = 0; i < num_virtual_methods; ++i) {
+ hash_table.Add(i);
+ }
+ // Loop through each super vtable method and see if it is overridden by a method we added to
+ // the hash table.
+ for (size_t j = 0; j < super_vtable_length; ++j) {
+ // Search the hash table to see if we are overridden by any method.
+ mirror::ArtMethod* super_method = vtable->GetWithoutChecks(j);
+ MethodNameAndSignatureComparator super_method_name_comparator(
+ super_method->GetInterfaceMethodIfProxy());
+ uint32_t hash_index = hash_table.FindAndRemove(&super_method_name_comparator);
+ if (hash_index != hash_table.GetNotFoundIndex()) {
+ mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(hash_index);
+ if (klass->CanAccessMember(super_method->GetDeclaringClass(),
+ super_method->GetAccessFlags())) {
+ if (super_method->IsFinal()) {
+ ThrowLinkageError(klass.Get(), "Method %s overrides final method in class %s",
+ PrettyMethod(virtual_method).c_str(),
+ super_method->GetDeclaringClassDescriptor());
+ return false;
}
+ vtable->SetWithoutChecks<false>(j, virtual_method);
+ virtual_method->SetMethodIndex(j);
+ } else {
+ LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(virtual_method)
+ << " would have incorrectly overridden the package-private method in "
+ << PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
}
}
- if (j == actual_count) {
- // Not overriding, append.
- vtable->Set<false>(actual_count, local_method);
- local_method->SetMethodIndex(actual_count);
- actual_count += 1;
+ }
+ // Add the non overridden methods at the end.
+ size_t actual_count = super_vtable_length;
+ for (size_t i = 0; i < num_virtual_methods; ++i) {
+ mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
+ size_t method_idx = local_method->GetMethodIndexDuringLinking();
+ if (method_idx < super_vtable_length &&
+ local_method == vtable->GetWithoutChecks(method_idx)) {
+ continue;
}
+ vtable->SetWithoutChecks<false>(actual_count, local_method);
+ local_method->SetMethodIndex(actual_count);
+ ++actual_count;
}
if (!IsUint(16, actual_count)) {
ThrowClassFormatError(klass.Get(), "Too many methods defined on class: %zd", actual_count);
@@ -4686,72 +4865,72 @@
klass->SetVTable(vtable.Get());
} else {
CHECK_EQ(klass.Get(), GetClassRoot(kJavaLangObject));
- uint32_t num_virtual_methods = klass->NumVirtualMethods();
if (!IsUint(16, num_virtual_methods)) {
- ThrowClassFormatError(klass.Get(), "Too many methods: %d", num_virtual_methods);
+ ThrowClassFormatError(klass.Get(), "Too many methods: %d",
+ static_cast<int>(num_virtual_methods));
return false;
}
- StackHandleScope<1> hs(self);
- Handle<mirror::ObjectArray<mirror::ArtMethod>>
- vtable(hs.NewHandle(AllocArtMethodArray(self, num_virtual_methods)));
- if (UNLIKELY(vtable.Get() == nullptr)) {
+ mirror::ObjectArray<mirror::ArtMethod>* vtable = AllocArtMethodArray(self, num_virtual_methods);
+ if (UNLIKELY(vtable == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i);
- vtable->Set<false>(i, virtual_method);
+ vtable->SetWithoutChecks<false>(i, virtual_method);
virtual_method->SetMethodIndex(i & 0xFFFF);
}
- klass->SetVTable(vtable.Get());
+ klass->SetVTable(vtable);
}
return true;
}
-bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
- Thread* const self = Thread::Current();
+bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass,
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ StackHandleScope<mirror::Class::kImtSize>* out_imt) {
+ StackHandleScope<3> hs(self);
Runtime* const runtime = Runtime::Current();
- // Set the imt table to be all conflicts by default.
- klass->SetImTable(runtime->GetDefaultImt());
- size_t super_ifcount;
- if (klass->HasSuperClass()) {
- super_ifcount = klass->GetSuperClass()->GetIfTableCount();
- } else {
- super_ifcount = 0;
- }
- uint32_t num_interfaces =
- interfaces.Get() == nullptr ? klass->NumDirectInterfaces() : interfaces->GetLength();
- size_t ifcount = super_ifcount + num_interfaces;
- for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface =
- interfaces.Get() == nullptr ? mirror::Class::GetDirectInterface(self, klass, i) :
- interfaces->Get(i);
- ifcount += interface->GetIfTableCount();
- }
- if (ifcount == 0) {
- // Class implements no interfaces.
- DCHECK_EQ(klass->GetIfTableCount(), 0);
- DCHECK(klass->GetIfTable() == nullptr);
- return true;
- }
- if (ifcount == super_ifcount) {
+ const bool has_superclass = klass->HasSuperClass();
+ const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+ const bool have_interfaces = interfaces.Get() != nullptr;
+ const size_t num_interfaces =
+ have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
+ if (num_interfaces == 0) {
+ if (super_ifcount == 0) {
+ // Class implements no interfaces.
+ DCHECK_EQ(klass->GetIfTableCount(), 0);
+ DCHECK(klass->GetIfTable() == nullptr);
+ return true;
+ }
// Class implements same interfaces as parent, are any of these not marker interfaces?
bool has_non_marker_interface = false;
mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
- for (size_t i = 0; i < ifcount; ++i) {
+ for (size_t i = 0; i < super_ifcount; ++i) {
if (super_iftable->GetMethodArrayCount(i) > 0) {
has_non_marker_interface = true;
break;
}
}
+ // Class just inherits marker interfaces from parent so recycle parent's iftable.
if (!has_non_marker_interface) {
- // Class just inherits marker interfaces from parent so recycle parent's iftable.
klass->SetIfTable(super_iftable);
return true;
}
}
- StackHandleScope<5> hs(self);
+ size_t ifcount = super_ifcount + num_interfaces;
+ for (size_t i = 0; i < num_interfaces; i++) {
+ mirror::Class* interface = have_interfaces ?
+ interfaces->GetWithoutChecks(i) : mirror::Class::GetDirectInterface(self, klass, i);
+ DCHECK(interface != nullptr);
+ if (UNLIKELY(!interface->IsInterface())) {
+ std::string temp;
+ ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
+ PrettyDescriptor(klass.Get()).c_str(),
+ PrettyDescriptor(interface->GetDescriptor(&temp)).c_str());
+ return false;
+ }
+ ifcount += interface->GetIfTableCount();
+ }
MutableHandle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
if (UNLIKELY(iftable.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -4768,17 +4947,8 @@
// Flatten the interface inheritance hierarchy.
size_t idx = super_ifcount;
for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface =
- interfaces.Get() == nullptr ? mirror::Class::GetDirectInterface(self, klass, i) :
- interfaces->Get(i);
- DCHECK(interface != nullptr);
- if (!interface->IsInterface()) {
- std::string temp;
- ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
- PrettyDescriptor(klass.Get()).c_str(),
- PrettyDescriptor(interface->GetDescriptor(&temp)).c_str());
- return false;
- }
+ mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
+ mirror::Class::GetDirectInterface(self, klass, i);
// Check if interface is already in iftable
bool duplicate = false;
for (size_t j = 0; j < idx; j++) {
@@ -4811,6 +4981,7 @@
self->AllowThreadSuspension();
// Shrink iftable in case duplicates were found
if (idx < ifcount) {
+ DCHECK_NE(num_interfaces, 0U);
iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
if (UNLIKELY(iftable.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -4818,48 +4989,101 @@
}
ifcount = idx;
} else {
- CHECK_EQ(idx, ifcount);
+ DCHECK_EQ(idx, ifcount);
}
klass->SetIfTable(iftable.Get());
-
// If we're an interface, we don't need the vtable pointers, so we're done.
if (klass->IsInterface()) {
return true;
}
- self->AllowThreadSuspension();
- // Allocate imtable
- bool imtable_changed = false;
- Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable(
- hs.NewHandle(AllocArtMethodArray(self, mirror::Class::kImtSize)));
- if (UNLIKELY(imtable.Get() == nullptr)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return false;
- }
- MutableMethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
- MutableMethodHelper vtable_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+ size_t miranda_list_size = 0;
size_t max_miranda_methods = 0; // The max size of miranda_list.
for (size_t i = 0; i < ifcount; ++i) {
max_miranda_methods += iftable->GetInterface(i)->NumVirtualMethods();
}
- Handle<mirror::ObjectArray<mirror::ArtMethod>>
+ MutableHandle<mirror::ObjectArray<mirror::ArtMethod>>
miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods)));
- size_t miranda_list_size = 0; // The current size of miranda_list.
+ MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
+ hs.NewHandle(klass->GetVTableDuringLinking()));
+ // Copy the IMT from the super class if possible.
+ bool extend_super_iftable = false;
+ if (has_superclass) {
+ mirror::Class* super_class = klass->GetSuperClass();
+ extend_super_iftable = true;
+ if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ out_imt->SetReference(i, super_class->GetEmbeddedImTableEntry(i));
+ }
+ } else {
+ // No IMT in the super class; we need to reconstruct it from the iftable.
+ mirror::IfTable* if_table = super_class->GetIfTable();
+ mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod();
+ const size_t length = super_class->GetIfTableCount();
+ for (size_t i = 0; i < length; ++i) {
+ mirror::Class* interface = iftable->GetInterface(i);
+ const size_t num_virtuals = interface->NumVirtualMethods();
+ const size_t method_array_count = if_table->GetMethodArrayCount(i);
+ DCHECK_EQ(num_virtuals, method_array_count);
+ if (method_array_count == 0) {
+ continue;
+ }
+ mirror::ObjectArray<mirror::ArtMethod>* method_array = if_table->GetMethodArray(i);
+ for (size_t j = 0; j < num_virtuals; ++j) {
+ mirror::ArtMethod* method = method_array->GetWithoutChecks(j);
+ if (method->IsMiranda()) {
+ continue;
+ }
+ mirror::ArtMethod* interface_method = interface->GetVirtualMethod(j);
+ uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod();
+ if (imt_ref == runtime->GetImtUnimplementedMethod()) {
+ out_imt->SetReference(imt_index, method);
+ } else if (imt_ref != conflict_method) {
+ out_imt->SetReference(imt_index, conflict_method);
+ }
+ }
+ }
+ }
+ }
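In the block above, and again in the interface loop that follows, an interface method maps to an IMT slot by dex method index modulo kImtSize, and a slot that would hold two different targets is downgraded to the conflict method (the later loop additionally compares name and signature before conceding a conflict). A simplified sketch of that slot update rule; kImtSize and the placeholder methods here are illustrative assumptions:

#include <array>
#include <cstddef>
#include <cstdint>

struct Method {};  // Stand-in for mirror::ArtMethod.

constexpr size_t kImtSize = 64;  // Illustrative table size.
Method g_unimplemented;          // Placeholder for the "unimplemented" sentinel method.
Method g_conflict;               // Placeholder for the conflict trampoline method.

using Imt = std::array<Method*, kImtSize>;

// Record `implementation` as the target of the IMT slot chosen by the interface method's
// dex method index. A slot that already holds another target falls back to the conflict
// method, which resolves the call at runtime instead.
void UpdateImtSlot(Imt* imt, uint32_t interface_method_dex_index, Method* implementation) {
  size_t slot = interface_method_dex_index % kImtSize;
  Method* current = (*imt)[slot];
  if (current == &g_unimplemented) {
    (*imt)[slot] = implementation;  // First implementation seen for this slot.
  } else if (current != &g_conflict) {
    (*imt)[slot] = &g_conflict;     // Slot already taken by something else: mark conflict.
  }
}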
for (size_t i = 0; i < ifcount; ++i) {
self->AllowThreadSuspension();
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
- StackHandleScope<2> hs(self);
- Handle<mirror::ObjectArray<mirror::ArtMethod>>
- method_array(hs.NewHandle(AllocArtMethodArray(self, num_methods)));
+ StackHandleScope<2> hs2(self);
+ const bool is_super = i < super_ifcount;
+ const bool super_interface = is_super && extend_super_iftable;
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> method_array;
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> input_array;
+ if (super_interface) {
+ mirror::IfTable* if_table = klass->GetSuperClass()->GetIfTable();
+ DCHECK(if_table != nullptr);
+ DCHECK(if_table->GetMethodArray(i) != nullptr);
+ // If we are working on a super interface, try extending the existing method array.
+ method_array = hs2.NewHandle(if_table->GetMethodArray(i)->Clone(self)->
+ AsObjectArray<mirror::ArtMethod>());
+ // We are overwriting a super class interface, so try to match against only the class's own
+ // virtual methods instead of the whole vtable.
+ input_array = hs2.NewHandle(klass->GetVirtualMethods());
+ } else {
+ method_array = hs2.NewHandle(AllocArtMethodArray(self, num_methods));
+ // A new interface, so we need the whole vtable in case a new interface method is implemented
+ // anywhere in the superclass hierarchy.
+ input_array = vtable;
+ }
if (UNLIKELY(method_array.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
iftable->SetMethodArray(i, method_array.Get());
- Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
- hs.NewHandle(klass->GetVTableDuringLinking()));
+ if (input_array.Get() == nullptr) {
+ // If the class's own virtual method array is empty, do nothing.
+ DCHECK(super_interface);
+ continue;
+ }
for (size_t j = 0; j < num_methods; ++j) {
- interface_mh.ChangeMethod(iftable->GetInterface(i)->GetVirtualMethod(j));
+ mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
+ MethodNameAndSignatureComparator interface_name_comparator(
+ interface_method->GetInterfaceMethodIfProxy());
int32_t k;
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -4869,66 +5093,66 @@
// it -- otherwise it would use the same vtable slot. In .dex files
// those don't end up in the virtual method table, so it shouldn't
// matter which direction we go. We walk it backward anyway.)
- for (k = vtable->GetLength() - 1; k >= 0; --k) {
- vtable_mh.ChangeMethod(vtable->Get(k));
- if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
- if (!vtable_mh.Get()->IsAbstract() && !vtable_mh.Get()->IsPublic()) {
+ for (k = input_array->GetLength() - 1; k >= 0; --k) {
+ mirror::ArtMethod* vtable_method = input_array->GetWithoutChecks(k);
+ mirror::ArtMethod* vtable_method_for_name_comparison =
+ vtable_method->GetInterfaceMethodIfProxy();
+ if (interface_name_comparator.HasSameNameAndSignature(
+ vtable_method_for_name_comparison)) {
+ if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
ThrowIllegalAccessError(
klass.Get(),
"Method '%s' implementing interface method '%s' is not public",
- PrettyMethod(vtable_mh.Get()).c_str(),
- PrettyMethod(interface_mh.Get()).c_str());
+ PrettyMethod(vtable_method).c_str(),
+ PrettyMethod(interface_method).c_str());
return false;
}
- method_array->Set<false>(j, vtable_mh.Get());
+ method_array->SetWithoutChecks<false>(j, vtable_method);
// Place method in imt if entry is empty, place conflict otherwise.
- uint32_t imt_index = interface_mh.Get()->GetDexMethodIndex() % mirror::Class::kImtSize;
- if (imtable->Get(imt_index) == nullptr) {
- imtable->Set<false>(imt_index, vtable_mh.Get());
- imtable_changed = true;
- } else {
- imtable->Set<false>(imt_index, runtime->GetImtConflictMethod());
+ uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ mirror::ArtMethod* imt_ref = out_imt->GetReference(imt_index)->AsArtMethod();
+ mirror::ArtMethod* conflict_method = runtime->GetImtConflictMethod();
+ if (imt_ref == runtime->GetImtUnimplementedMethod()) {
+ out_imt->SetReference(imt_index, vtable_method);
+ } else if (imt_ref != conflict_method) {
+ // If we are not a conflict and we have the same signature and name as the imt entry,
+ // it must be that we overwrote a superclass vtable entry.
+ MethodNameAndSignatureComparator imt_ref_name_comparator(
+ imt_ref->GetInterfaceMethodIfProxy());
+ if (imt_ref_name_comparator.HasSameNameAndSignature(
+ vtable_method_for_name_comparison)) {
+ out_imt->SetReference(imt_index, vtable_method);
+ } else {
+ out_imt->SetReference(imt_index, conflict_method);
+ }
}
break;
}
}
- if (k < 0) {
- StackHandleScope<1> hs(self);
- auto miranda_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
+ if (k < 0 && !super_interface) {
+ mirror::ArtMethod* miranda_method = nullptr;
for (size_t l = 0; l < miranda_list_size; ++l) {
mirror::ArtMethod* mir_method = miranda_list->Get(l);
- DCHECK(mir_method != nullptr);
- vtable_mh.ChangeMethod(mir_method);
- if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
- miranda_method.Assign(mir_method);
+ if (interface_name_comparator.HasSameNameAndSignature(mir_method)) {
+ miranda_method = mir_method;
break;
}
}
- if (miranda_method.Get() == nullptr) {
+ if (miranda_method == nullptr) {
// Point the interface table at a phantom slot.
- miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_mh.Get()->Clone(self)));
- if (UNLIKELY(miranda_method.Get() == nullptr)) {
+ miranda_method = interface_method->Clone(self)->AsArtMethod();
+ if (UNLIKELY(miranda_method == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
DCHECK_LT(miranda_list_size, max_miranda_methods);
- miranda_list->Set<false>(miranda_list_size++, miranda_method.Get());
+ miranda_list->Set<false>(miranda_list_size++, miranda_method);
}
- method_array->Set<false>(j, miranda_method.Get());
+ method_array->SetWithoutChecks<false>(j, miranda_method);
}
}
}
}
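The IMT slot update above reduces to a small decision rule. A minimal standalone sketch, assuming hypothetical names (Method, UpdateImtSlot) and leaving out ART's GcRoot/handle plumbing:

struct Method {};

// Decide what an IMT slot should hold once an interface method has been
// resolved to 'implementation'. 'unimplemented' and 'conflict' stand for the
// runtime's sentinel methods.
Method* UpdateImtSlot(Method* current, Method* implementation,
                      Method* unimplemented, Method* conflict,
                      bool same_name_and_signature) {
  if (current == unimplemented) {
    return implementation;  // First method to claim this slot.
  }
  if (current == conflict) {
    return conflict;  // Slot already marked as conflicting; leave it.
  }
  // Slot holds a real method. Same name and signature means the new method
  // overrode a superclass vtable entry, so keep the newer implementation;
  // otherwise two unrelated interface methods share the slot: conflict.
  return same_name_and_signature ? implementation : conflict;
}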
- if (imtable_changed) {
- // Fill in empty entries in interface method table with conflict.
- mirror::ArtMethod* imt_conflict_method = runtime->GetImtConflictMethod();
- for (size_t i = 0; i < mirror::Class::kImtSize; i++) {
- if (imtable->Get(i) == nullptr) {
- imtable->Set<false>(i, imt_conflict_method);
- }
- }
- klass->SetImTable(imtable.Get());
- }
if (miranda_list_size > 0) {
int old_method_count = klass->NumVirtualMethods();
int new_method_count = old_method_count + miranda_list_size;
@@ -4944,10 +5168,6 @@
}
klass->SetVirtualMethods(virtuals);
- StackHandleScope<1> hs(self);
- MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
- hs.NewHandle(klass->GetVTableDuringLinking()));
- CHECK(vtable.Get() != nullptr);
int old_vtable_count = vtable->GetLength();
int new_vtable_count = old_vtable_count + miranda_list_size;
vtable.Assign(vtable->CopyOf(self, new_vtable_count));
@@ -4961,19 +5181,20 @@
method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda);
method->SetMethodIndex(0xFFFF & (old_vtable_count + i));
klass->SetVirtualMethod(old_method_count + i, method);
- vtable->Set<false>(old_vtable_count + i, method);
+ vtable->SetWithoutChecks<false>(old_vtable_count + i, method);
}
// TODO: do not assign to the vtable field until it is fully constructed.
klass->SetVTable(vtable.Get());
}
- mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
- for (int i = 0; i < vtable->GetLength(); ++i) {
- CHECK(vtable->Get(i) != nullptr);
+ if (kIsDebugBuild) {
+ mirror::ObjectArray<mirror::ArtMethod>* check_vtable = klass->GetVTableDuringLinking();
+ for (int i = 0; i < check_vtable->GetLength(); ++i) {
+ CHECK(check_vtable->GetWithoutChecks(i) != nullptr);
+ }
}
self->AllowThreadSuspension();
-
return true;
}
@@ -4997,20 +5218,27 @@
Primitive::Type type1 = field1->GetTypeAsPrimitiveType();
Primitive::Type type2 = field2->GetTypeAsPrimitiveType();
if (type1 != type2) {
- bool is_primitive1 = type1 != Primitive::kPrimNot;
- bool is_primitive2 = type2 != Primitive::kPrimNot;
- if (type1 != type2) {
- if (is_primitive1 && is_primitive2) {
- // Larger primitive types go first.
- return Primitive::ComponentSize(type1) > Primitive::ComponentSize(type2);
- } else {
- // Reference always goes first.
- return !is_primitive1;
- }
+ if (type1 == Primitive::kPrimNot) {
+ // Reference always goes first.
+ return true;
}
+ if (type2 == Primitive::kPrimNot) {
+ // Reference always goes first.
+ return false;
+ }
+ size_t size1 = Primitive::ComponentSize(type1);
+ size_t size2 = Primitive::ComponentSize(type2);
+ if (size1 != size2) {
+ // Larger primitive types go first.
+ return size1 > size2;
+ }
+ // Primitive types differ but sizes match. Arbitrarily order by primitive type.
+ return type1 < type2;
}
- // same basic group? then sort by string.
- return strcmp(field1->GetName(), field2->GetName()) < 0;
+ // Same basic group? Then sort by dex field index. This is guaranteed to be sorted
+ // by name and, for equal names, by type id index.
+ // NOTE: This also works for proxies. Their static fields are assigned appropriate indexes.
+ return field1->GetDexFieldIndex() < field2->GetDexFieldIndex();
}
};
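The comparator above determines the field layout order. A minimal standalone sketch of the same ordering as an std::sort predicate, over a simplified field record (FieldInfo and its members are illustrative, not ART types):

#include <cstddef>
#include <cstdint>

struct FieldInfo {
  bool is_reference;         // Corresponds to Primitive::kPrimNot.
  size_t component_size;     // Primitive::ComponentSize(type) for primitives.
  int primitive_type;        // Tie-breaker between equal-size primitive kinds.
  uint32_t dex_field_index;  // Final tie-breaker; sorted by name in the dex file.
};

bool FieldGoesFirst(const FieldInfo& a, const FieldInfo& b) {
  if (a.is_reference != b.is_reference) {
    return a.is_reference;  // References always go first.
  }
  if (!a.is_reference && a.component_size != b.component_size) {
    return a.component_size > b.component_size;  // Larger primitives first.
  }
  if (!a.is_reference && a.primitive_type != b.primitive_type) {
    return a.primitive_type < b.primitive_type;  // Arbitrary but stable order.
  }
  return a.dex_field_index < b.dex_field_index;  // Same group: dex index.
}

Used with std::sort, this yields references first, then primitives from widest to narrowest, with the dex field index breaking the remaining ties.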
@@ -5026,13 +5254,7 @@
// Initialize field_offset
MemberOffset field_offset(0);
if (is_static) {
- uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
- if (klass->ShouldHaveEmbeddedImtAndVTable()) {
- // Static fields come after the embedded tables.
- base = mirror::Class::ComputeClassSize(true, klass->GetVTableDuringLinking()->GetLength(),
- 0, 0, 0, 0, 0);
- }
- field_offset = MemberOffset(base);
+ field_offset = klass->GetFirstReferenceStaticFieldOffsetDuringLinking();
} else {
mirror::Class* super_class = klass->GetSuperClass();
if (super_class != nullptr) {
@@ -5069,28 +5291,25 @@
if (isPrimitive) {
break; // past last reference, move on to the next phase
}
- if (UNLIKELY(!IsAligned<4>(field_offset.Uint32Value()))) {
+ if (UNLIKELY(!IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(
+ field_offset.Uint32Value()))) {
MemberOffset old_offset = field_offset;
field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4));
AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps);
}
- DCHECK(IsAligned<4>(field_offset.Uint32Value()));
+ DCHECK(IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(field_offset.Uint32Value()));
grouped_and_sorted_fields.pop_front();
num_reference_fields++;
- fields->Set<false>(current_field, field);
field->SetOffset(field_offset);
- field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(uint32_t));
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
// Gaps are stored as a max heap which means that we must shuffle from largest to smallest
// otherwise we could end up with suboptimal gap fills.
- ShuffleForward<8>(num_fields, &current_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
- ShuffleForward<4>(num_fields, &current_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
- ShuffleForward<2>(num_fields, &current_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
- ShuffleForward<1>(num_fields, &current_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<8>(&current_field, &field_offset, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<4>(&current_field, &field_offset, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<2>(&current_field, &field_offset, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<1>(&current_field, &field_offset, &grouped_and_sorted_fields, &gaps);
CHECK(grouped_and_sorted_fields.empty()) << "Missed " << grouped_and_sorted_fields.size() <<
" fields.";
self->EndAssertNoThreadSuspension(old_no_suspend_cause);
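The ShuffleForward calls rely on reusing alignment gaps from largest to smallest. A minimal sketch of that gap-reuse idea, assuming a hypothetical PlaceField helper and ignoring the per-size alignment handling the real code performs:

#include <cstdint>
#include <queue>
#include <vector>

struct Gap {
  uint32_t start;
  uint32_t size;
  // Max-heap by size so the largest gap is always tried first.
  bool operator<(const Gap& other) const { return size < other.size; }
};

// Returns an offset for a field of 'field_size' bytes, preferring an existing
// gap over growing the object. Alignment handling is omitted for brevity.
uint32_t PlaceField(std::priority_queue<Gap>* gaps, uint32_t* end_offset,
                    uint32_t field_size) {
  if (!gaps->empty() && gaps->top().size >= field_size) {
    Gap gap = gaps->top();
    gaps->pop();
    if (gap.size > field_size) {
      // Push back the unused tail so smaller fields can still use it.
      gaps->push(Gap{gap.start + field_size, gap.size - field_size});
    }
    return gap.start;
  }
  uint32_t offset = *end_offset;
  *end_offset += field_size;
  return offset;
}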
@@ -5104,39 +5323,6 @@
--num_reference_fields;
}
- if (kIsDebugBuild) {
- // Make sure that all reference fields appear before
- // non-reference fields, and all double-wide fields are aligned.
- bool seen_non_ref = false;
- for (size_t i = 0; i < num_fields; i++) {
- mirror::ArtField* field = fields->Get(i);
- if (false) { // enable to debug field layout
- LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
- << " class=" << PrettyClass(klass.Get())
- << " field=" << PrettyField(field)
- << " offset="
- << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()));
- }
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- bool is_primitive = type != Primitive::kPrimNot;
- if (klass->DescriptorEquals("Ljava/lang/ref/Reference;") &&
- strcmp("referent", field->GetName()) == 0) {
- is_primitive = true; // We lied above, so we have to expect a lie here.
- }
- if (is_primitive) {
- if (!seen_non_ref) {
- seen_non_ref = true;
- DCHECK_EQ(num_reference_fields, i) << PrettyField(field);
- }
- } else {
- DCHECK(!seen_non_ref) << PrettyField(field);
- }
- }
- if (!seen_non_ref) {
- DCHECK_EQ(num_fields, num_reference_fields) << PrettyClass(klass.Get());
- }
- }
-
size_t size = field_offset.Uint32Value();
// Update klass
if (is_static) {
@@ -5145,21 +5331,74 @@
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- std::string temp;
- DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
- size_t previous_size = klass->GetObjectSize();
- if (previous_size != 0) {
- // Make sure that we didn't originally have an incorrect size.
- CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+ if (klass->DescriptorEquals("Ljava/lang/reflect/ArtMethod;")) {
+ klass->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
+ } else {
+ std::string temp;
+ DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
+ size_t previous_size = klass->GetObjectSize();
+ if (previous_size != 0) {
+ // Make sure that we didn't originally have an incorrect size.
+ CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+ }
+ klass->SetObjectSize(size);
}
- klass->SetObjectSize(size);
}
}
+
+ if (kIsDebugBuild) {
+ // Make sure that the fields array is ordered by name but all reference
+ // offsets are at the beginning as far as alignment allows.
+ MemberOffset start_ref_offset = is_static
+ ? klass->GetFirstReferenceStaticFieldOffsetDuringLinking()
+ : klass->GetFirstReferenceInstanceFieldOffset();
+ MemberOffset end_ref_offset(start_ref_offset.Uint32Value() +
+ num_reference_fields *
+ sizeof(mirror::HeapReference<mirror::Object>));
+ MemberOffset current_ref_offset = start_ref_offset;
+ for (size_t i = 0; i < num_fields; i++) {
+ mirror::ArtField* field = fields->Get(i);
+ if ((false)) { // enable to debug field layout
+ LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
+ << " class=" << PrettyClass(klass.Get())
+ << " field=" << PrettyField(field)
+ << " offset="
+ << field->GetField32(mirror::ArtField::OffsetOffset());
+ }
+ if (i != 0) {
+ mirror::ArtField* prev_field = fields->Get(i - 1u);
+ // NOTE: The field names can be the same. This is not possible in the Java language
+ // but it's valid Java/dex bytecode and for example proguard can generate such bytecode.
+ CHECK_LE(strcmp(prev_field->GetName(), field->GetName()), 0);
+ }
+ Primitive::Type type = field->GetTypeAsPrimitiveType();
+ bool is_primitive = type != Primitive::kPrimNot;
+ if (klass->DescriptorEquals("Ljava/lang/ref/Reference;") &&
+ strcmp("referent", field->GetName()) == 0) {
+ is_primitive = true; // We lied above, so we have to expect a lie here.
+ }
+ MemberOffset offset = field->GetOffsetDuringLinking();
+ if (is_primitive) {
+ if (offset.Uint32Value() < end_ref_offset.Uint32Value()) {
+ // Shuffled before references.
+ size_t type_size = Primitive::ComponentSize(type);
+ CHECK_LT(type_size, sizeof(mirror::HeapReference<mirror::Object>));
+ CHECK_LT(offset.Uint32Value(), start_ref_offset.Uint32Value());
+ CHECK_LE(offset.Uint32Value() + type_size, start_ref_offset.Uint32Value());
+ CHECK(!IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(offset.Uint32Value()));
+ }
+ } else {
+ CHECK_EQ(current_ref_offset.Uint32Value(), offset.Uint32Value());
+ current_ref_offset = MemberOffset(current_ref_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
+ }
+ }
+ CHECK_EQ(current_ref_offset.Uint32Value(), end_ref_offset.Uint32Value());
+ }
return true;
}
-// Set the bitmap of reference offsets, refOffsets, from the ifields
-// list.
+// Set the bitmap of reference instance field offsets.
void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) {
uint32_t reference_offsets = 0;
mirror::Class* super_class = klass->GetSuperClass();
@@ -5169,23 +5408,18 @@
// Compute reference offsets unless our superclass overflowed.
if (reference_offsets != mirror::Class::kClassWalkSuper) {
size_t num_reference_fields = klass->NumReferenceInstanceFieldsDuringLinking();
- mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
- // All of the fields that contain object references are guaranteed
- // to be at the beginning of the fields list.
- for (size_t i = 0; i < num_reference_fields; ++i) {
- // Note that byte_offset is the offset from the beginning of
- // object, not the offset into instance data
- mirror::ArtField* field = fields->Get(i);
- MemberOffset byte_offset = field->GetOffsetDuringLinking();
- uint32_t displaced_bitmap_position =
- (byte_offset.Uint32Value() - mirror::kObjectHeaderSize) /
+ if (num_reference_fields != 0u) {
+ // All of the fields that contain object references are guaranteed to be grouped in memory
+ // starting at an appropriately aligned address after super class object data.
+ uint32_t start_offset = RoundUp(super_class->GetObjectSize(),
+ sizeof(mirror::HeapReference<mirror::Object>));
+ uint32_t start_bit = (start_offset - mirror::kObjectHeaderSize) /
sizeof(mirror::HeapReference<mirror::Object>);
- if (displaced_bitmap_position >= 32) {
- // Can't encode offset so fall back on slow-path.
+ if (start_bit + num_reference_fields > 32) {
reference_offsets = mirror::Class::kClassWalkSuper;
- break;
} else {
- reference_offsets |= (1 << displaced_bitmap_position);
+ reference_offsets |= (0xffffffffu << start_bit) &
+ (0xffffffffu >> (32 - (start_bit + num_reference_fields)));
}
}
}
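The new bitmap computation above packs all reference fields into one run of bits. A standalone worked example, with 8 standing in for mirror::kObjectHeaderSize and 4 for sizeof(mirror::HeapReference<mirror::Object>) (the helper name is illustrative):

#include <cstdint>
#include <cstdio>

uint32_t ComputeReferenceBitmap(uint32_t start_offset, uint32_t num_reference_fields,
                                uint32_t overflow_value) {
  const uint32_t header = 8;    // Stands in for mirror::kObjectHeaderSize.
  const uint32_t ref_size = 4;  // Stands in for sizeof(HeapReference<Object>).
  if (num_reference_fields == 0) {
    return 0;  // The real code only reaches the mask math with at least one field.
  }
  uint32_t start_bit = (start_offset - header) / ref_size;
  if (start_bit + num_reference_fields > 32) {
    return overflow_value;  // Fall back to walking the superclass chain.
  }
  // Set num_reference_fields consecutive bits starting at start_bit.
  return (0xffffffffu << start_bit) &
         (0xffffffffu >> (32 - (start_bit + num_reference_fields)));
}

int main() {
  // Three reference fields right after the header occupy bits 0..2 -> 0x7.
  std::printf("0x%x\n", ComputeReferenceBitmap(8, 3, 0xffffffffu));
  return 0;
}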
@@ -5283,6 +5517,7 @@
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
+ UNREACHABLE();
}
if (resolved == nullptr) {
// Search by name, which works across dex files.
@@ -5492,9 +5727,8 @@
std::vector<mirror::Class*> all_classes;
{
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
- mirror::Class* klass = it.second.Read();
- all_classes.push_back(klass);
+ for (GcRoot<mirror::Class>& it : class_table_) {
+ all_classes.push_back(it.Read());
}
}
@@ -5588,7 +5822,8 @@
MoveImageClassesToClassTable();
}
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- os << "Loaded classes: " << class_table_.size() << " allocated classes\n";
+ os << "Zygote loaded classes=" << pre_zygote_class_table_.Size() << " post zygote classes="
+ << class_table_.Size() << "\n";
}
size_t ClassLinker::NumLoadedClasses() {
@@ -5596,7 +5831,8 @@
MoveImageClassesToClassTable();
}
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- return class_table_.size();
+ // Only return non-zygote classes since these are the ones the apps care about.
+ return class_table_.Size();
}
pid_t ClassLinker::GetClassesLockOwner() {
@@ -5657,12 +5893,49 @@
"[S",
"[Ljava/lang/StackTraceElement;",
};
- COMPILE_ASSERT(arraysize(class_roots_descriptors) == size_t(kClassRootsMax),
- mismatch_between_class_descriptors_and_class_root_enum);
+ static_assert(arraysize(class_roots_descriptors) == size_t(kClassRootsMax),
+ "Mismatch between class descriptors and class-root enum");
const char* descriptor = class_roots_descriptors[class_root];
CHECK(descriptor != nullptr);
return descriptor;
}
+std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root)
+ const {
+ std::string temp;
+ return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp));
+}
+
+bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+ const GcRoot<mirror::Class>& b) {
+ if (a.Read()->GetClassLoader() != b.Read()->GetClassLoader()) {
+ return false;
+ }
+ std::string temp;
+ return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
+}
+
+std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(
+ const std::pair<const char*, mirror::ClassLoader*>& element) const {
+ return ComputeModifiedUtf8Hash(element.first);
+}
+
+bool ClassLinker::ClassDescriptorHashEquals::operator()(
+ const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) {
+ if (a.Read()->GetClassLoader() != b.second) {
+ return false;
+ }
+ return a.Read()->DescriptorEquals(b.first);
+}
+
+bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+ const char* descriptor) {
+ return a.Read()->DescriptorEquals(descriptor);
+}
+
+std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descriptor) const {
+ return ComputeModifiedUtf8Hash(descriptor);
+}
+
} // namespace art
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 1847926..b78d0b5 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -23,6 +23,7 @@
#include <vector>
#include "base/allocator.h"
+#include "base/hash_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
@@ -50,8 +51,9 @@
template<class T> class Handle;
class InternTable;
template<class T> class ObjectLock;
+class Runtime;
class ScopedObjectAccessAlreadyRunnable;
-template<class T> class Handle;
+template<size_t kNumReferences> class PACKED(4) StackHandleScope;
typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
@@ -115,9 +117,10 @@
Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Find a class in the path class loader, loading it if necessary.
+ // Find a class in the path class loader, loading it if necessary without using JNI. Hash
+ // function is supposed to be ComputeModifiedUtf8Hash(descriptor).
mirror::Class* FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self, const char* descriptor,
+ Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -136,15 +139,15 @@
}
// Define a new a class based on a ClassDef from a DexFile
- mirror::Class* DefineClass(Thread* self, const char* descriptor,
+ mirror::Class* DefineClass(Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds a class by its descriptor, returning NULL if it wasn't loaded
// by the given 'class_loader'.
- mirror::Class* LookupClass(Thread* self, const char* descriptor,
- const mirror::ClassLoader* class_loader)
+ mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash,
+ mirror::ClassLoader* class_loader)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -157,7 +160,7 @@
// General class unloading is not supported, this is used to prune
// unwanted classes during image writing.
- bool RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader)
+ bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -221,8 +224,7 @@
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer,
- InvokeType type)
+ mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, mirror::ArtMethod** referrer,
InvokeType type)
@@ -458,6 +460,17 @@
return class_roots;
}
+ // Move all of the image classes into the class table for faster lookups.
+ void MoveImageClassesToClassTable()
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
+ // that no more classes are ever added to the pre-zygote table, so its pages
+ // always remain shared dirty instead of private dirty.
+ void MoveClassTableToPreZygote()
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
const OatFile::OatMethod FindOatMethodFor(mirror::ArtMethod* method, bool* found)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -487,7 +500,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* CreateArrayClass(Thread* self, const char* descriptor,
+ mirror::Class* CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -505,8 +518,7 @@
Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
- Handle<mirror::Class> klass, mirror::ClassLoader* class_loader,
- const OatFile::OatClass* oat_class)
+ Handle<mirror::Class> klass, const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
@@ -561,14 +573,16 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ StackHandleScope<mirror::Class::kImtSize>* out_imt)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInterfaceMethods(Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+ bool LinkInterfaceMethods(Thread* const self, Handle<mirror::Class> klass,
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ StackHandleScope<mirror::Class::kImtSize>* out_imt)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
@@ -578,7 +592,7 @@
bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
+ uint32_t class_def_method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -628,9 +642,8 @@
std::string* error_msg)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- bool CheckOatFile(const OatFile* oat_file, InstructionSet isa,
+ bool CheckOatFile(const Runtime* runtime, const OatFile* oat_file, InstructionSet isa,
bool* checksum_verified, std::string* error_msg);
- int32_t GetRequiredDelta(const OatFile* oat_file, InstructionSet isa);
// Note: will not register the oat file.
const OatFile* FindOatFileInOatLocationForDexFile(const char* dex_location,
@@ -685,7 +698,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* LookupClassFromTableLocked(const char* descriptor,
- const mirror::ClassLoader* class_loader,
+ mirror::ClassLoader* class_loader,
size_t hash)
SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
@@ -693,9 +706,6 @@
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void MoveImageClassesToClassTable() LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* LookupClassFromImage(const char* descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -724,15 +734,43 @@
std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
+ class ClassDescriptorHashEquals {
+ public:
+ // Same class loader and descriptor.
+ std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
+ bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b)
+ NO_THREAD_SAFETY_ANALYSIS;
+ // Same class loader and descriptor.
+ std::size_t operator()(const std::pair<const char*, mirror::ClassLoader*>& element) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ bool operator()(const GcRoot<mirror::Class>& a,
+ const std::pair<const char*, mirror::ClassLoader*>& b)
+ NO_THREAD_SAFETY_ANALYSIS;
+ // Same descriptor.
+ bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor)
+ NO_THREAD_SAFETY_ANALYSIS;
+ std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS;
+ };
+ class GcRootEmptyFn {
+ public:
+ void MakeEmpty(GcRoot<mirror::Class>& item) const {
+ item = GcRoot<mirror::Class>();
+ }
+ bool IsEmpty(const GcRoot<mirror::Class>& item) const {
+ return item.IsNull();
+ }
+ };
- // multimap from a string hash code of a class descriptor to
- // mirror::Class* instances. Results should be compared for a matching
- // Class::descriptor_ and Class::class_loader_.
- typedef AllocationTrackingMultiMap<size_t, GcRoot<mirror::Class>, kAllocatorTagClassTable> Table;
+ // Hash set which hashes class descriptors, and compares descriptors and class loaders. Results
+ // should be compared for a matching Class descriptor and class loader.
+ typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
+ ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
+ Table;
// This contains strong roots. To enable concurrent root scanning of
// the class table, be careful to use a read barrier when accessing this.
Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
- std::vector<std::pair<size_t, GcRoot<mirror::Class>>> new_class_roots_;
+ Table pre_zygote_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ std::vector<GcRoot<mirror::Class>> new_class_roots_;
// Do we need to search dex caches to find image classes?
bool dex_cache_image_class_lookup_required_;
@@ -767,6 +805,9 @@
const void* quick_generic_jni_trampoline_;
const void* quick_to_interpreter_bridge_trampoline_;
+ // Image pointer size.
+ size_t image_pointer_size_;
+
friend class ImageWriter; // for GetClassRoots
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
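The new Table typedef replaces the hash multimap with a hash set keyed by descriptor and class loader. A minimal sketch of that keying, built on std::unordered_set instead of ART's HashSet (record and functor names are illustrative; the real functors also support descriptor-only and pair lookups without building a record):

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_set>

struct ClassRecord {
  std::string descriptor;    // e.g. "Ljava/lang/String;"
  const void* class_loader;  // Identity of the defining class loader.
};

struct ClassRecordHash {
  std::size_t operator()(const ClassRecord& r) const {
    return std::hash<std::string>()(r.descriptor);  // Only the descriptor is hashed.
  }
};

struct ClassRecordEq {
  bool operator()(const ClassRecord& a, const ClassRecord& b) const {
    return a.class_loader == b.class_loader && a.descriptor == b.descriptor;
  }
};

using ClassTable = std::unordered_set<ClassRecord, ClassRecordHash, ClassRecordEq>;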
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 88e6265..0c86761 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -275,31 +275,42 @@
EXPECT_TRUE(field->IsStatic());
}
- // Confirm that all instances fields are packed together at the start
+ // Confirm that all instance field offsets are packed together at the start.
EXPECT_GE(klass->NumInstanceFields(), klass->NumReferenceInstanceFields());
StackHandleScope<1> hs(Thread::Current());
MutableHandle<mirror::ArtField> fhandle = hs.NewHandle<mirror::ArtField>(nullptr);
- for (size_t i = 0; i < klass->NumReferenceInstanceFields(); i++) {
- mirror::ArtField* field = klass->GetInstanceField(i);
- fhandle.Assign(field);
- FieldHelper fh(fhandle);
- ASSERT_TRUE(!field->IsPrimitiveType());
- mirror::Class* field_type = fh.GetType();
- ASSERT_TRUE(field_type != nullptr);
- ASSERT_TRUE(!field_type->IsPrimitive());
- }
- for (size_t i = klass->NumReferenceInstanceFields(); i < klass->NumInstanceFields(); i++) {
+ MemberOffset start_ref_offset = klass->GetFirstReferenceInstanceFieldOffset();
+ MemberOffset end_ref_offset(start_ref_offset.Uint32Value() +
+ klass->NumReferenceInstanceFields() *
+ sizeof(mirror::HeapReference<mirror::Object>));
+ MemberOffset current_ref_offset = start_ref_offset;
+ for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
mirror::ArtField* field = klass->GetInstanceField(i);
fhandle.Assign(field);
FieldHelper fh(fhandle);
mirror::Class* field_type = fh.GetType();
ASSERT_TRUE(field_type != nullptr);
- if (!fh.GetField()->IsPrimitiveType() || !field_type->IsPrimitive()) {
- // While Reference.referent is not primitive, the ClassLinker
- // treats it as such so that the garbage collector won't scan it.
- EXPECT_EQ(PrettyField(fh.GetField()), "java.lang.Object java.lang.ref.Reference.referent");
+ if (!field->IsPrimitiveType()) {
+ ASSERT_TRUE(!field_type->IsPrimitive());
+ ASSERT_EQ(current_ref_offset.Uint32Value(), field->GetOffset().Uint32Value());
+ if (current_ref_offset.Uint32Value() == end_ref_offset.Uint32Value()) {
+ // While Reference.referent is not primitive, the ClassLinker
+ // treats it as such so that the garbage collector won't scan it.
+ EXPECT_EQ(PrettyField(fh.GetField()),
+ "java.lang.Object java.lang.ref.Reference.referent");
+ } else {
+ current_ref_offset = MemberOffset(current_ref_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
+ }
+ } else {
+ if (field->GetOffset().Uint32Value() < end_ref_offset.Uint32Value()) {
+ // Shuffled before references.
+ ASSERT_LT(field->GetOffset().Uint32Value(), start_ref_offset.Uint32Value());
+ CHECK(!IsAligned<4>(field->GetOffset().Uint32Value()));
+ }
}
}
+ ASSERT_EQ(end_ref_offset.Uint32Value(), current_ref_offset.Uint32Value());
uint32_t total_num_reference_instance_fields = 0;
mirror::Class* k = klass.Get();
@@ -374,8 +385,8 @@
template <typename T>
struct CheckOffsets {
- CheckOffsets(bool is_static, const char* class_descriptor)
- : is_static(is_static), class_descriptor(class_descriptor) {}
+ CheckOffsets(bool is_static_in, const char* class_descriptor_in)
+ : is_static(is_static_in), class_descriptor(class_descriptor_in) {}
bool is_static;
std::string class_descriptor;
std::vector<CheckOffset> offsets;
@@ -461,10 +472,7 @@
struct ObjectOffsets : public CheckOffsets<mirror::Object> {
ObjectOffsets() : CheckOffsets<mirror::Object>(false, "Ljava/lang/Object;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, klass_), "shadow$_klass_"));
-
- // alphabetical 32-bit
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_"));
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_rb_ptr_), "shadow$_x_rb_ptr_"));
@@ -475,11 +483,8 @@
struct ArtFieldOffsets : public CheckOffsets<mirror::ArtField> {
ArtFieldOffsets() : CheckOffsets<mirror::ArtField>(false, "Ljava/lang/reflect/ArtField;") {
- // alphabetical references
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, declaring_class_), "declaringClass"));
-
- // alphabetical 32-bit
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, access_flags_), "accessFlags"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, declaring_class_), "declaringClass"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, field_dex_idx_), "fieldDexIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, offset_), "offset"));
};
@@ -487,21 +492,11 @@
struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") {
- // alphabetical references
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_), "dexCacheResolvedMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_), "dexCacheResolvedTypes"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_), "dexCacheStrings"));
-
- // alphabetical 64-bit
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_interpreter_), "entryPointFromInterpreter"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_jni_), "entryPointFromJni"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_portable_compiled_code_), "entryPointFromPortableCompiledCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_quick_compiled_code_), "entryPointFromQuickCompiledCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, gc_map_), "gcMap"));
-
- // alphabetical 32-bit
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"));
@@ -510,51 +505,43 @@
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
- // alphabetical references
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, component_type_), "componentType"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_), "dexCache"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, imtable_), "imTable"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable"));
-
- // alphabetical 32-bit
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_), "numReferenceInstanceFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_), "numReferenceStaticFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), "referenceInstanceOffsets"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable"));
};
};
struct StringOffsets : public CheckOffsets<mirror::String> {
StringOffsets() : CheckOffsets<mirror::String>(false, "Ljava/lang/String;") {
- // alphabetical references
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, array_), "value"));
-
- // alphabetical 32-bit
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, count_), "count"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hashCode"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, offset_), "offset"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, array_), "value"));
};
};
struct ThrowableOffsets : public CheckOffsets<mirror::Throwable> {
ThrowableOffsets() : CheckOffsets<mirror::Throwable>(false, "Ljava/lang/Throwable;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, cause_), "cause"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, detail_message_), "detailMessage"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_state_), "stackState"));
@@ -565,17 +552,15 @@
struct StackTraceElementOffsets : public CheckOffsets<mirror::StackTraceElement> {
StackTraceElementOffsets() : CheckOffsets<mirror::StackTraceElement>(false, "Ljava/lang/StackTraceElement;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, declaring_class_), "declaringClass"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, file_name_), "fileName"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, line_number_), "lineNumber"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName"));
};
};
struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> {
ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache"));
@@ -584,27 +569,24 @@
struct ProxyOffsets : public CheckOffsets<mirror::Proxy> {
ProxyOffsets() : CheckOffsets<mirror::Proxy>(false, "Ljava/lang/reflect/Proxy;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Proxy, h_), "h"));
};
};
struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_types_), "resolvedTypes"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, strings_), "strings"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile"));
};
};
struct ReferenceOffsets : public CheckOffsets<mirror::Reference> {
ReferenceOffsets() : CheckOffsets<mirror::Reference>(false, "Ljava/lang/ref/Reference;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_), "pendingNext"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_), "queue"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_), "queueNext"));
@@ -614,7 +596,6 @@
struct FinalizerReferenceOffsets : public CheckOffsets<mirror::FinalizerReference> {
FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(false, "Ljava/lang/ref/FinalizerReference;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_), "next"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_), "prev"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie"));
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index ea3da64..03b33e9 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -44,7 +44,7 @@
int main(int argc, char **argv) {
art::InitLogging(argv);
- LOG(INFO) << "Running main() from common_runtime_test.cc...";
+ LOG(::art::INFO) << "Running main() from common_runtime_test.cc...";
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
@@ -59,7 +59,7 @@
filename_ += "/TmpFile-XXXXXX";
int fd = mkstemp(&filename_[0]);
CHECK_NE(-1, fd);
- file_.reset(new File(fd, GetFilename()));
+ file_.reset(new File(fd, GetFilename(), true));
}
ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
@@ -67,7 +67,7 @@
filename_ += suffix;
int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
CHECK_NE(-1, fd);
- file_.reset(new File(fd, GetFilename()));
+ file_.reset(new File(fd, GetFilename(), true));
}
ScratchFile::ScratchFile(File* file) {
@@ -88,6 +88,11 @@
if (!OS::FileExists(filename_.c_str())) {
return;
}
+ if (file_.get() != nullptr) {
+ if (file_->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Error closing scratch file.";
+ }
+ }
int unlink_result = unlink(filename_.c_str());
CHECK_EQ(0, unlink_result);
}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1ca6eb3..bd0dbaa 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -85,7 +85,7 @@
virtual void SetUp();
// Allow subclasses such as CommonCompilerTest to add extra options.
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) {}
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
void ClearDirectory(const char* dirpath);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b676c62..ef5db2d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -183,16 +183,20 @@
class Breakpoint {
public:
- Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
+ Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc,
+ DeoptimizationRequest::Kind deoptimization_kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
+ : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
+ CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
+ deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
+ deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(method);
}
Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: method_(nullptr), dex_pc_(other.dex_pc_),
- need_full_deoptimization_(other.need_full_deoptimization_) {
+ deoptimization_kind_(other.deoptimization_kind_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
method_ = soa.EncodeMethod(other.Method());
}
@@ -206,8 +210,8 @@
return dex_pc_;
}
- bool NeedFullDeoptimization() const {
- return need_full_deoptimization_;
+ DeoptimizationRequest::Kind GetDeoptimizationKind() const {
+ return deoptimization_kind_;
}
private:
@@ -216,7 +220,7 @@
uint32_t dex_pc_;
// Indicates whether breakpoint needs full deoptimization or selective deoptimization.
- bool need_full_deoptimization_;
+ DeoptimizationRequest::Kind deoptimization_kind_;
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
@@ -231,7 +235,7 @@
virtual ~DebugInstrumentationListener() {}
void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc)
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
@@ -254,6 +258,7 @@
uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We're not registered to listen to this kind of event, so complain.
+ UNUSED(thread, this_object, method, dex_pc);
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
@@ -267,16 +272,18 @@
void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
uint32_t dex_pc, mirror::ArtField* field)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread);
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
- void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
+ void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
+ mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
+ const JValue& field_value)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
- void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -733,6 +740,12 @@
return gDisposed;
}
+bool Dbg::RequiresDeoptimization() {
+ // We don't need deoptimization if everything runs with interpreter after
+ // enabling -Xint mode.
+ return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
+}
+
void Dbg::GoActive() {
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
@@ -765,7 +778,9 @@
Thread* self = Thread::Current();
ThreadState old_state = self->SetStateUnsafe(kRunnable);
CHECK_NE(old_state, kRunnable);
- runtime->GetInstrumentation()->EnableDeoptimization();
+ if (RequiresDeoptimization()) {
+ runtime->GetInstrumentation()->EnableDeoptimization();
+ }
instrumentation_events_ = 0;
gDebuggerActive = true;
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
@@ -803,7 +818,9 @@
instrumentation_events_);
instrumentation_events_ = 0;
}
- runtime->GetInstrumentation()->DisableDeoptimization();
+ if (RequiresDeoptimization()) {
+ runtime->GetInstrumentation()->DisableDeoptimization();
+ }
gDebuggerActive = false;
}
gRegistry->Clear();
@@ -1143,7 +1160,7 @@
// the primitive types).
// Returns a newly-allocated buffer full of RefTypeId values.
struct ClassListCreator {
- explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes(classes) {
+ explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) {
}
static bool Visit(mirror::Class* c, void* arg) {
@@ -1383,7 +1400,6 @@
mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
for (int i = 0; i < count; ++i) {
JDWP::ObjectId id = request->ReadObjectId();
- JDWP::JdwpError error;
mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
if (error != JDWP::ERR_NONE) {
return error;
@@ -2017,7 +2033,7 @@
} else if (error == JDWP::ERR_NONE) {
mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
CHECK(c != nullptr);
- mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
+ mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
CHECK(f != nullptr);
mirror::Object* group = f->GetObject(thread_object);
CHECK(group != nullptr);
@@ -2058,8 +2074,7 @@
return error;
}
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
- mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
- mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
+ mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
CHECK(f != nullptr);
mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
@@ -2078,9 +2093,7 @@
mirror::Object* parent;
{
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
- mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
- CHECK(c != nullptr);
- mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
+ mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
CHECK(f != nullptr);
parent = f->GetObject(thread_group);
}
@@ -2095,12 +2108,20 @@
CHECK(thread_group != nullptr);
// Get the ArrayList<ThreadGroup> "groups" out of this thread group...
- mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
+ mirror::ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
+ {
+ // The "groups" field is declared as a java.util.List: check it really is
+ // an instance of java.util.ArrayList.
+ CHECK(groups_array_list != nullptr);
+ mirror::Class* java_util_ArrayList_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_util_ArrayList);
+ CHECK(groups_array_list->InstanceOf(java_util_ArrayList_class));
+ }
// Get the array and size out of the ArrayList<ThreadGroup>...
- mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
- mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
+ mirror::ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
+ mirror::ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
mirror::ObjectArray<mirror::Object>* groups_array =
array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
const int32_t size = size_field->GetInt(groups_array_list);
@@ -2283,8 +2304,8 @@
static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
- explicit CountStackDepthVisitor(Thread* thread)
- : StackVisitor(thread, nullptr), depth(0) {}
+ explicit CountStackDepthVisitor(Thread* thread_in)
+ : StackVisitor(thread_in, nullptr), depth(0) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -2322,10 +2343,11 @@
size_t frame_count, JDWP::ExpandBuf* buf) {
class GetFrameVisitor : public StackVisitor {
public:
- GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
+ GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
+ JDWP::ExpandBuf* buf_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr), depth_(0),
- start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
+ start_frame_(start_frame_in), frame_count_(frame_count_in), buf_(buf_in) {
expandBufAdd4BE(buf_, frame_count_);
}
@@ -2386,7 +2408,7 @@
}
void Dbg::ResumeVM() {
- Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
+ Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
}
JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
@@ -2400,9 +2422,7 @@
if (peer.get() == nullptr) {
return JDWP::ERR_THREAD_NOT_ALIVE;
}
- // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
- // trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ // Suspend thread to build stack trace.
bool timed_out;
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
@@ -2445,9 +2465,9 @@
}
struct GetThisVisitor : public StackVisitor {
- GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
+ GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id) {}
+ : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id_in) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -3029,9 +3049,11 @@
}
void Dbg::DelayFullUndeoptimization() {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- ++delayed_full_undeoptimization_count_;
- DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
+ if (RequiresDeoptimization()) {
+ MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+ ++delayed_full_undeoptimization_count_;
+ DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
+ }
}
void Dbg::ProcessDelayedFullUndeoptimizations() {
@@ -3190,20 +3212,78 @@
}
// Sanity checks all existing breakpoints on the same method.
-static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
+static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
+ DeoptimizationRequest::Kind deoptimization_kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (const Breakpoint& breakpoint : gBreakpoints) {
- CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
+ if (breakpoint.Method() == m) {
+ CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
+ }
}
- if (need_full_deoptimization) {
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
// We should have deoptimized everything but not "selectively" deoptimized this method.
- CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
- CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
- } else {
+ CHECK(instrumentation->AreAllMethodsDeoptimized());
+ CHECK(!instrumentation->IsDeoptimized(m));
+ } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
// We should have "selectively" deoptimized this method.
// Note: while we have not deoptimized everything for this method, we may have done it for
// another event.
- CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+ CHECK(instrumentation->IsDeoptimized(m));
+ } else {
+ // This method does not require deoptimization.
+ CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
+ CHECK(!instrumentation->IsDeoptimized(m));
+ }
+}
+
+static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
+ mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!Dbg::RequiresDeoptimization()) {
+ // We already run in interpreter-only mode so we don't need to deoptimize anything.
+ VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
+ << PrettyMethod(m);
+ return DeoptimizationRequest::kNothing;
+ }
+ const Breakpoint* existing_breakpoint;
+ {
+ ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
+ existing_breakpoint = FindFirstBreakpointForMethod(m);
+ }
+ if (existing_breakpoint == nullptr) {
+ // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
+ // inlined, we deoptimize everything; otherwise we deoptimize only this method.
+ // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
+ // Therefore we must not hold any lock when we call it.
+ bool need_full_deoptimization = IsMethodPossiblyInlined(self, m);
+ if (need_full_deoptimization) {
+ VLOG(jdwp) << "Need full deoptimization because of possible inlining of method "
+ << PrettyMethod(m);
+ return DeoptimizationRequest::kFullDeoptimization;
+ } else {
+ // We don't need to deoptimize if the method has not been compiled.
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
+ if (is_compiled) {
+ VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
+ return DeoptimizationRequest::kSelectiveDeoptimization;
+ } else {
+ // Method is not compiled: we don't need to deoptimize.
+ VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
+ return DeoptimizationRequest::kNothing;
+ }
+ }
+ } else {
+ // There is at least one breakpoint for this method: we don't need to deoptimize.
+ // Let's check that all breakpoints are configured the same way for deoptimization.
+ VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
+ DeoptimizationRequest::Kind deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
+ if (kIsDebugBuild) {
+ ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
+ SanityCheckExistingBreakpoints(m, deoptimization_kind);
+ }
+ return DeoptimizationRequest::kNothing;
}
}
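
    // Condensed, stand-alone sketch of the decision ladder the new
    // GetRequiredDeoptimizationKind helper follows. Plain booleans stand in for
    // Dbg::RequiresDeoptimization, IsMethodPossiblyInlined and the oat-code
    // lookup, and the "a breakpoint already exists" case (which also returns
    // kNothing) is omitted, so this is illustrative only, not ART's code.
    enum class DeoptKind { kNothing, kSelectiveDeoptimization, kFullDeoptimization };

    DeoptKind RequiredKind(bool interpreter_only, bool possibly_inlined, bool has_compiled_code) {
      if (interpreter_only) return DeoptKind::kNothing;          // already interpreting everything
      if (possibly_inlined) return DeoptKind::kFullDeoptimization;  // callers may have inlined copies
      return has_compiled_code ? DeoptKind::kSelectiveDeoptimization
                               : DeoptKind::kNothing;            // no compiled code to undo
    }
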
@@ -3214,40 +3294,19 @@
mirror::ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
- const Breakpoint* existing_breakpoint;
- {
- ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
- existing_breakpoint = FindFirstBreakpointForMethod(m);
- }
- bool need_full_deoptimization;
- if (existing_breakpoint == nullptr) {
- // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
- // inlined, we deoptimize everything; otherwise we deoptimize only this method.
- // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
- // Therefore we must not hold any lock when we call it.
- need_full_deoptimization = IsMethodPossiblyInlined(self, m);
- if (need_full_deoptimization) {
- req->SetKind(DeoptimizationRequest::kFullDeoptimization);
- req->SetMethod(nullptr);
- } else {
- req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
- req->SetMethod(m);
- }
+ const DeoptimizationRequest::Kind deoptimization_kind = GetRequiredDeoptimizationKind(self, m);
+ req->SetKind(deoptimization_kind);
+ if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
+ req->SetMethod(m);
} else {
- // There is at least one breakpoint for this method: we don't need to deoptimize.
- req->SetKind(DeoptimizationRequest::kNothing);
+ CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
+ deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
req->SetMethod(nullptr);
-
- need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
- if (kIsDebugBuild) {
- ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
- SanityCheckExistingBreakpoints(m, need_full_deoptimization);
- }
}
{
WriterMutexLock mu(self, *Locks::breakpoint_lock_);
- gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
+ gBreakpoints.push_back(Breakpoint(m, location->dex_pc, deoptimization_kind));
VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
<< gBreakpoints[gBreakpoints.size() - 1];
}
@@ -3259,12 +3318,13 @@
WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
mirror::ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
- bool need_full_deoptimization = false;
+ DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
- need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
- DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+ deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
+ DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
+ Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
gBreakpoints.erase(gBreakpoints.begin() + i);
break;
}
@@ -3272,21 +3332,26 @@
const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
if (existing_breakpoint == nullptr) {
// There is no more breakpoint on this method: we need to undeoptimize.
- if (need_full_deoptimization) {
+ if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
// This method required full deoptimization: we need to undeoptimize everything.
req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
req->SetMethod(nullptr);
- } else {
+ } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
// This method required selective deoptimization: we need to undeoptimize only that method.
req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
req->SetMethod(m);
+ } else {
+ // This method had no need for deoptimization: do nothing.
+ CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
+ req->SetKind(DeoptimizationRequest::kNothing);
+ req->SetMethod(nullptr);
}
} else {
// There is at least one breakpoint for this method: we don't need to undeoptimize.
req->SetKind(DeoptimizationRequest::kNothing);
req->SetMethod(nullptr);
if (kIsDebugBuild) {
- SanityCheckExistingBreakpoints(m, need_full_deoptimization);
+ SanityCheckExistingBreakpoints(m, deoptimization_kind);
}
}
}
@@ -3314,13 +3379,9 @@
soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
bool timed_out;
- Thread* suspended_thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
- suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
- }
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
+ &timed_out);
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
if (suspended_thread == nullptr) {
// Thread terminated from under us while suspending.
@@ -3413,15 +3474,15 @@
//
struct DebugCallbackContext {
- explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
+ explicit DebugCallbackContext(SingleStepControl* single_step_control_cb, int32_t line_number_cb,
const DexFile::CodeItem* code_item)
- : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
- last_pc_valid(false), last_pc(0) {
+ : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
+ code_item_(code_item), last_pc_valid(false), last_pc(0) {
}
- static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
+ static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) {
DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
- if (static_cast<int32_t>(line_number) == context->line_number_) {
+ if (static_cast<int32_t>(line_number_cb) == context->line_number_) {
if (!context->last_pc_valid) {
// Everything from this address until the next line change is ours.
context->last_pc = address;
@@ -4116,7 +4177,6 @@
HeapChunkContext(bool merge, bool native)
: buf_(16384 - 16),
type_(0),
- merge_(merge),
chunk_overhead_(0) {
Reset();
if (native) {
@@ -4327,7 +4387,6 @@
void* startOfNextMemoryChunk_;
size_t totalAllocationUnits_;
uint32_t type_;
- bool merge_;
bool needHeader_;
size_t chunk_overhead_;
@@ -4370,7 +4429,7 @@
// Send a series of heap segment chunks.
HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
if (native) {
-#ifdef USE_DLMALLOC
+#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC)
dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
#else
UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
@@ -4478,9 +4537,9 @@
}
struct AllocRecordStackVisitor : public StackVisitor {
- AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
+ AllocRecordStackVisitor(Thread* thread, AllocRecord* record_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr), record(record), depth(0) {}
+ : StackVisitor(thread, nullptr), record(record_in), depth(0) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -4678,7 +4737,7 @@
* between the contents of these tables.
*/
jbyteArray Dbg::GetRecentAllocations() {
- if (false) {
+ if ((false)) {
DumpRecentAllocations();
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index cb7adae..9203163 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -189,6 +189,7 @@
// Method for selective deoptimization.
jmethodID method_;
};
+std::ostream& operator<<(std::ostream& os, const DeoptimizationRequest::Kind& rhs);
class Dbg {
public:
@@ -246,7 +247,9 @@
*/
static int64_t LastDebuggerActivity();
- static void UndoDebuggerSuspensions();
+ static void UndoDebuggerSuspensions()
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
/*
* Class, Object, Array
@@ -459,7 +462,9 @@
static void SuspendVM()
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
- static void ResumeVM();
+ static void ResumeVM()
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
@@ -489,7 +494,7 @@
/*
* Debugger notification
*/
- enum {
+ enum EventFlag {
kBreakpoint = 0x01,
kSingleStep = 0x02,
kMethodEntry = 0x04,
@@ -518,6 +523,9 @@
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Indicates whether we need deoptimization for debugging.
+ static bool RequiresDeoptimization();
+
// Records deoptimization request in the queue.
static void RequestDeoptimization(const DeoptimizationRequest& req)
LOCKS_EXCLUDED(Locks::deoptimization_lock_)
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index f408386..3d4184b 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -23,7 +23,9 @@
#include <string.h>
#include <sys/file.h>
#include <sys/stat.h>
+
#include <memory>
+#include <sstream>
#include "base/logging.h"
#include "base/stringprintf.h"
@@ -37,7 +39,6 @@
#include "mirror/string.h"
#include "os.h"
#include "safe_map.h"
-#include "ScopedFd.h"
#include "handle_scope-inl.h"
#include "thread.h"
#include "utf-inl.h"
@@ -45,6 +46,11 @@
#include "well_known_classes.h"
#include "zip_archive.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "ScopedFd.h"
+#pragma GCC diagnostic pop
+
namespace art {
const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
@@ -413,11 +419,12 @@
return atoi(version);
}
-const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor) const {
+const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor, size_t hash) const {
+ DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash);
// If we have an index, look up the descriptor via it, since that lookup is constant time.
Index* index = class_def_index_.LoadSequentiallyConsistent();
if (index != nullptr) {
- auto it = index->find(descriptor);
+ auto it = index->FindWithHash(descriptor, hash);
return (it == index->end()) ? nullptr : it->second;
}
// Fast path for the rare case of no class defs.
@@ -449,11 +456,11 @@
// Are we the ones moving the miss count past the max? Sanity check the index doesn't exist.
CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
// Build the index.
- index = new Index(num_class_defs);
+ index = new Index();
for (uint32_t i = 0; i < num_class_defs; ++i) {
const ClassDef& class_def = GetClassDef(i);
- const char* descriptor = GetClassDescriptor(class_def);
- index->insert(std::make_pair(descriptor, &class_def));
+ const char* class_descriptor = GetClassDescriptor(class_def);
+ index->Insert(std::make_pair(class_descriptor, &class_def));
}
// Sanity check the index still doesn't exist, only 1 thread should build it.
CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
@@ -1180,13 +1187,14 @@
case kArray:
case kAnnotation:
UNIMPLEMENTED(FATAL) << ": type " << type_;
- break;
+ UNREACHABLE();
case kNull:
jval_.l = NULL;
width = 0;
break;
default:
LOG(FATAL) << "Unreached";
+ UNREACHABLE();
}
ptr_ += width;
}
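
    // The reworked FindClassDef earlier in this file threads a caller-computed
    // hash down so a descriptor is hashed once per lookup. A minimal sketch of
    // that contract; std::hash and std::unordered_map stand in for
    // ComputeModifiedUtf8Hash and ART's HashMap, so only the DCHECK-style
    // agreement between caller and callee is shown, not the rehash avoidance.
    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <string>
    #include <unordered_map>

    using Index = std::unordered_map<std::string, int>;

    const int* FindWithHash(const Index& index, const std::string& descriptor, size_t hash) {
      assert(hash == std::hash<std::string>{}(descriptor));  // mirrors the new DCHECK_EQ
      auto it = index.find(descriptor);
      return it == index.end() ? nullptr : &it->second;
    }
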
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 620bd6e..a71ca42 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -22,8 +22,10 @@
#include <unordered_map>
#include <vector>
+#include "base/hash_map.h"
#include "base/logging.h"
#include "base/mutex.h" // For Locks::mutator_lock_.
+#include "base/value_object.h"
#include "globals.h"
#include "invoke_type.h"
#include "jni.h"
@@ -205,10 +207,10 @@
// (class or interface). These are all in the lower 16b and do not contain runtime flags.
uint32_t GetJavaAccessFlags() const {
// Make sure that none of our runtime-only flags are set.
- COMPILE_ASSERT((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
- valid_class_flags_not_subset_of_java_flags);
- COMPILE_ASSERT((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
- valid_interface_flags_not_subset_of_java_flags);
+ static_assert((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
+ "Valid class flags not a subset of Java flags");
+ static_assert((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
+ "Valid interface flags not a subset of Java flags");
if ((access_flags_ & kAccInterface) != 0) {
// Interface.
@@ -648,8 +650,9 @@
return StringByTypeIdx(class_def.class_idx_);
}
- // Looks up a class definition by its class descriptor.
- const ClassDef* FindClassDef(const char* descriptor) const;
+ // Looks up a class definition by its class descriptor. Hash must be
+ // ComputeModifiedUtf8Hash(descriptor).
+ const ClassDef* FindClassDef(const char* descriptor, size_t hash) const;
// Looks up a class definition by its type index.
const ClassDef* FindClassDef(uint16_t type_idx) const;
@@ -985,17 +988,30 @@
// Number of misses finding a class def from a descriptor.
mutable Atomic<uint32_t> find_class_def_misses_;
+ struct UTF16EmptyFn {
+ void MakeEmpty(std::pair<const char*, const ClassDef*>& pair) const {
+ pair.first = nullptr;
+ pair.second = nullptr;
+ }
+ bool IsEmpty(const std::pair<const char*, const ClassDef*>& pair) const {
+ if (pair.first == nullptr) {
+ DCHECK(pair.second == nullptr);
+ return true;
+ }
+ return false;
+ }
+ };
struct UTF16HashCmp {
// Hash function.
size_t operator()(const char* key) const {
- return ComputeUtf8Hash(key);
+ return ComputeModifiedUtf8Hash(key);
}
// std::equal function.
bool operator()(const char* a, const char* b) const {
return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(a, b) == 0;
}
};
- typedef std::unordered_map<const char*, const ClassDef*, UTF16HashCmp, UTF16HashCmp> Index;
+ typedef HashMap<const char*, const ClassDef*, UTF16EmptyFn, UTF16HashCmp, UTF16HashCmp> Index;
mutable Atomic<Index*> class_def_index_;
};
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file);
@@ -1027,7 +1043,7 @@
};
// Abstract the signature of a method.
-class Signature {
+class Signature : public ValueObject {
public:
std::string ToString() const;
@@ -1251,7 +1267,7 @@
template<bool kTransactionActive>
void ReadValueToField(Handle<mirror::ArtField> field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasNext() { return pos_ < array_size_; }
+ bool HasNext() const { return pos_ < array_size_; }
void Next();
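
    // ART's HashMap, unlike std::unordered_map, is open-addressed: entries live
    // in one flat slot array, so the Index above must supply a UTF16EmptyFn that
    // marks and detects empty slots. A toy open-addressed map sketches the idea;
    // a null key is the empty sentinel, there is no resizing, and key comparison
    // uses pointer identity purely for brevity.
    #include <cstddef>
    #include <utility>
    #include <vector>

    struct ToyMap {
      std::vector<std::pair<const char*, int>> slots_ =
          std::vector<std::pair<const char*, int>>(16, {nullptr, 0});

      void Insert(const char* key, int value, size_t hash) {
        for (size_t i = hash % slots_.size();; i = (i + 1) % slots_.size()) {
          if (slots_[i].first == nullptr) {  // empty slot: claim it
            slots_[i] = {key, value};
            return;
          }
        }
      }

      const int* FindWithHash(const char* key, size_t hash) const {
        for (size_t i = hash % slots_.size();; i = (i + 1) % slots_.size()) {
          if (slots_[i].first == nullptr) {
            return nullptr;  // reached an empty slot: key is absent
          }
          if (slots_[i].first == key) {
            return &slots_[i].second;
          }
        }
      }
    };
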
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 134e284..b304779 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -146,6 +146,9 @@
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index addd948..ec1e5f0 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -115,6 +115,9 @@
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
@@ -177,6 +180,9 @@
if (!file->WriteFully(bytes, length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(FATAL) << "Could not flush and close test file.";
+ }
file.reset();
// read dex file
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 7e775f4..a802759 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -19,6 +19,7 @@
#include <inttypes.h>
#include <iomanip>
+#include <sstream>
#include "base/stringprintf.h"
#include "dex_file-inl.h"
@@ -112,7 +113,7 @@
return 1; // NOP.
} else {
LOG(FATAL) << "Unreachable: " << DumpString(nullptr);
- return 0;
+ UNREACHABLE();
}
}
}
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index b913220..af5d9d0 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -35,7 +35,7 @@
class Instruction {
public:
// NOP-encoded switch-statement signatures.
- enum {
+ enum Signatures {
kPackedSwitchSignature = 0x0100,
kSparseSwitchSignature = 0x0200,
kArrayDataSignature = 0x0300,
@@ -79,15 +79,13 @@
DISALLOW_COPY_AND_ASSIGN(ArrayDataPayload);
};
- // TODO: the code layout below is deliberate to avoid this enum being picked up by
- // generate-operator-out.py.
- enum Code
- { // NOLINT(whitespace/braces)
+ enum Code { // private marker so generate-operator-out.py skips this enum.
#define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = opcode,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM)
#undef DEX_INSTRUCTION_LIST
#undef INSTRUCTION_ENUM
+ RSUB_INT_LIT16 = RSUB_INT,
};
enum Format {
diff --git a/runtime/dex_instruction_visitor_test.cc b/runtime/dex_instruction_visitor_test.cc
index c5e63eb..5273084 100644
--- a/runtime/dex_instruction_visitor_test.cc
+++ b/runtime/dex_instruction_visitor_test.cc
@@ -16,7 +16,6 @@
#include "dex_instruction_visitor.h"
-#include <iostream>
#include <memory>
#include "gtest/gtest.h"
diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc
index b8f180b..2681ad0 100644
--- a/runtime/dex_method_iterator_test.cc
+++ b/runtime/dex_method_iterator_test.cc
@@ -18,6 +18,7 @@
#include "base/stl_util.h"
#include "common_runtime_test.h"
+#include "oat_file.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -29,21 +30,20 @@
TEST_F(DexMethodIteratorTest, Basic) {
ScopedObjectAccess soa(Thread::Current());
std::vector<const DexFile*> dex_files;
- const char* jars[] = { "core-libart", "conscrypt", "okhttp", "core-junit", "bouncycastle" };
- for (size_t i = 0; i < 5; ++i) {
- dex_files.push_back(LoadExpectSingleDexFile(GetDexFileName(jars[i]).c_str()));
+ CHECK_NE(boot_class_path_.size(), 0U);
+ for (size_t i = 0; i < boot_class_path_.size(); ++i) {
+ dex_files.push_back(boot_class_path_[i]);
}
DexMethodIterator it(dex_files);
while (it.HasNext()) {
const DexFile& dex_file = it.GetDexFile();
InvokeType invoke_type = it.GetInvokeType();
uint32_t method_idx = it.GetMemberIndex();
- if (false) {
- LG << invoke_type << " " << PrettyMethod(method_idx, dex_file);
+ if ((false)) {
+ LOG(INFO) << invoke_type << " " << PrettyMethod(method_idx, dex_file);
}
it.Next();
}
- STLDeleteElements(&dex_files);
}
} // namespace art
diff --git a/runtime/dwarf.h b/runtime/dwarf.h
index 370ad95..7daa5f1 100644
--- a/runtime/dwarf.h
+++ b/runtime/dwarf.h
@@ -364,38 +364,38 @@
DW_OP_reg29 = 0x6d,
DW_OP_reg30 = 0x6e,
DW_OP_reg31 = 0x6f,
- DW_OP_breg0 = 0x50,
- DW_OP_breg1 = 0x51,
- DW_OP_breg2 = 0x52,
- DW_OP_breg3 = 0x53,
- DW_OP_breg4 = 0x54,
- DW_OP_breg5 = 0x55,
- DW_OP_breg6 = 0x56,
- DW_OP_breg7 = 0x57,
- DW_OP_breg8 = 0x58,
- DW_OP_breg9 = 0x59,
- DW_OP_breg10 = 0x5a,
- DW_OP_breg11 = 0x5b,
- DW_OP_breg12 = 0x5c,
- DW_OP_breg13 = 0x5d,
- DW_OP_breg14 = 0x5e,
- DW_OP_breg15 = 0x5f,
- DW_OP_breg16 = 0x60,
- DW_OP_breg17 = 0x61,
- DW_OP_breg18 = 0x62,
- DW_OP_breg19 = 0x63,
- DW_OP_breg20 = 0x64,
- DW_OP_breg21 = 0x65,
- DW_OP_breg22 = 0x66,
- DW_OP_breg23 = 0x67,
- DW_OP_breg24 = 0x68,
- DW_OP_breg25 = 0x69,
- DW_OP_breg26 = 0x6a,
- DW_OP_breg27 = 0x6b,
- DW_OP_breg28 = 0x6c,
- DW_OP_breg29 = 0x6d,
- DW_OP_breg30 = 0x6e,
- DW_OP_breg31 = 0x6f,
+ DW_OP_breg0 = 0x70,
+ DW_OP_breg1 = 0x71,
+ DW_OP_breg2 = 0x72,
+ DW_OP_breg3 = 0x73,
+ DW_OP_breg4 = 0x74,
+ DW_OP_breg5 = 0x75,
+ DW_OP_breg6 = 0x76,
+ DW_OP_breg7 = 0x77,
+ DW_OP_breg8 = 0x78,
+ DW_OP_breg9 = 0x79,
+ DW_OP_breg10 = 0x7a,
+ DW_OP_breg11 = 0x7b,
+ DW_OP_breg12 = 0x7c,
+ DW_OP_breg13 = 0x7d,
+ DW_OP_breg14 = 0x7e,
+ DW_OP_breg15 = 0x7f,
+ DW_OP_breg16 = 0x80,
+ DW_OP_breg17 = 0x81,
+ DW_OP_breg18 = 0x82,
+ DW_OP_breg19 = 0x83,
+ DW_OP_breg20 = 0x84,
+ DW_OP_breg21 = 0x85,
+ DW_OP_breg22 = 0x86,
+ DW_OP_breg23 = 0x87,
+ DW_OP_breg24 = 0x88,
+ DW_OP_breg25 = 0x89,
+ DW_OP_breg26 = 0x8a,
+ DW_OP_breg27 = 0x8b,
+ DW_OP_breg28 = 0x8c,
+ DW_OP_breg29 = 0x8d,
+ DW_OP_breg30 = 0x8e,
+ DW_OP_breg31 = 0x8f,
DW_OP_regx = 0x90,
DW_OP_fbreg = 0x91,
DW_OP_bregx = 0x92,
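
    // The corrected values follow the DWARF encoding: DW_OP_reg0..reg31 occupy
    // 0x50..0x6f and DW_OP_breg0..breg31 occupy 0x70..0x8f, each opcode being
    // its range base plus the register number. A small self-checking sketch:
    #include <cstdint>

    constexpr uint8_t kDwOpReg0 = 0x50;
    constexpr uint8_t kDwOpBreg0 = 0x70;

    constexpr uint8_t DwOpReg(unsigned n) { return static_cast<uint8_t>(kDwOpReg0 + n); }
    constexpr uint8_t DwOpBreg(unsigned n) { return static_cast<uint8_t>(kDwOpBreg0 + n); }

    static_assert(DwOpReg(31) == 0x6f, "register range ends at 0x6f");
    static_assert(DwOpBreg(0) == 0x70 && DwOpBreg(31) == 0x8f, "breg range is 0x70..0x8f");
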
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 18053c3..37c5f9c 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include <unistd.h>
+#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "base/stl_util.h"
@@ -29,7 +30,6 @@
#include "elf_utils.h"
#include "leb128.h"
#include "utils.h"
-#include "instruction_set.h"
namespace art {
@@ -60,6 +60,7 @@
// GDB will place breakpoint into this function.
// To prevent GCC from inlining or removing it we place noinline attribute
// and inline assembler statement inside.
+ void __attribute__((noinline)) __jit_debug_register_code();
void __attribute__((noinline)) __jit_debug_register_code() {
__asm__("");
}
@@ -115,7 +116,7 @@
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ElfFileImpl(File* file, bool writable, bool program_header_only)
+ ::ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base)
: file_(file),
writable_(writable),
program_header_only_(program_header_only),
@@ -133,7 +134,8 @@
symtab_symbol_table_(nullptr),
dynsym_symbol_table_(nullptr),
jit_elf_image_(nullptr),
- jit_gdb_entry_(nullptr) {
+ jit_gdb_entry_(nullptr),
+ requested_base_(requested_base) {
CHECK(file != nullptr);
}
@@ -145,12 +147,12 @@
ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
::Open(File* file, bool writable, bool program_header_only,
- std::string* error_msg) {
+ std::string* error_msg, uint8_t* requested_base) {
std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
elf_file(new ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- (file, writable, program_header_only));
+ (file, writable, program_header_only, requested_base));
int prot;
int flags;
if (writable) {
@@ -178,7 +180,8 @@
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
elf_file(new ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- (file, (prot & PROT_WRITE) == PROT_WRITE, false));
+ (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
+ /*requested_base*/nullptr));
if (!elf_file->Setup(prot, flags, error_msg)) {
return nullptr;
}
@@ -919,6 +922,8 @@
}
const Elf_Sym* sym = FindDynamicSymbol(symbol_name);
if (sym != nullptr) {
+ // TODO: we need to change this to calculate base_address_ in ::Open,
+ // otherwise it will be wrongly 0 if ::Load has not yet been called.
return base_address_ + sym->st_value;
} else {
return nullptr;
@@ -1364,12 +1369,16 @@
}
size_t file_length = static_cast<size_t>(temp_file_length);
if (!reserved) {
- uint8_t* reserve_base = ((program_header->p_vaddr != 0) ?
- reinterpret_cast<uint8_t*>(program_header->p_vaddr) : nullptr);
+ uint8_t* reserve_base = reinterpret_cast<uint8_t*>(program_header->p_vaddr);
+ uint8_t* reserve_base_override = reserve_base;
+ // Override the base (e.g. when compiling with --compile-pic)
+ if (requested_base_ != nullptr) {
+ reserve_base_override = requested_base_;
+ }
std::string reservation_name("ElfFile reservation for ");
reservation_name += file_->GetPath();
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
- reserve_base,
+ reserve_base_override,
GetLoadedSize(), PROT_NONE, false,
error_msg));
if (reserve.get() == nullptr) {
@@ -1378,9 +1387,15 @@
return false;
}
reserved = true;
- if (reserve_base == nullptr) {
- base_address_ = reserve->Begin();
- }
+
+ // The base address is the difference between the actual mapped location and the requested p_vaddr.
+ base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve->Begin())
+ - reinterpret_cast<uintptr_t>(reserve_base));
+ // Adding a section's or symbol's p_vaddr to base_address_ then yields the
+ // dynamic memory address where that object is actually mapped.
+ //
+ // TODO: base_address_ needs to be calculated in ::Open, otherwise
+ // FindDynamicSymbolAddress returns the wrong values until Load is called.
segments_.push_back(reserve.release());
}
// empty segment, nothing to map
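
    // With this change base_address_ is the load bias: the difference between
    // where the first PT_LOAD segment actually landed and the p_vaddr it asked
    // for, so any p_vaddr plus the bias is a usable runtime address. A
    // stand-alone sketch of that arithmetic (names are illustrative, not ART's):
    #include <cstdint>

    uint8_t* RuntimeAddress(uint8_t* mapped_begin, uintptr_t first_load_vaddr,
                            uintptr_t symbol_vaddr) {
      uintptr_t bias = reinterpret_cast<uintptr_t>(mapped_begin) - first_load_vaddr;
      return reinterpret_cast<uint8_t*>(symbol_vaddr + bias);
    }
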
@@ -2382,22 +2397,22 @@
Elf_Shdr* sh = GetSectionHeader(i);
CHECK(sh != nullptr);
if (sh->sh_type == SHT_REL) {
- for (uint32_t i = 0; i < GetRelNum(*sh); i++) {
- Elf_Rel& rel = GetRel(*sh, i);
+ for (uint32_t j = 0; j < GetRelNum(*sh); j++) {
+ Elf_Rel& rel = GetRel(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rel[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ GetFile().GetPath().c_str(), j,
static_cast<uint64_t>(rel.r_offset),
static_cast<uint64_t>(rel.r_offset + base_address));
}
rel.r_offset += base_address;
}
} else if (sh->sh_type == SHT_RELA) {
- for (uint32_t i = 0; i < GetRelaNum(*sh); i++) {
- Elf_Rela& rela = GetRela(*sh, i);
+ for (uint32_t j = 0; j < GetRelaNum(*sh); j++) {
+ Elf_Rela& rela = GetRela(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rela[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ GetFile().GetPath().c_str(), j,
static_cast<uint64_t>(rela.r_offset),
static_cast<uint64_t>(rela.r_offset + base_address));
}
@@ -2425,7 +2440,8 @@
CHECK_NE(elf32_.get() == nullptr, elf64_.get() == nullptr);
}
-ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only, std::string* error_msg) {
+ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only, std::string* error_msg,
+ uint8_t* requested_base) {
if (file->GetLength() < EI_NIDENT) {
*error_msg = StringPrintf("File %s is too short to be a valid ELF file",
file->GetPath().c_str());
@@ -2438,12 +2454,14 @@
}
uint8_t* header = map->Begin();
if (header[EI_CLASS] == ELFCLASS64) {
- ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, writable, program_header_only, error_msg);
+ ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, writable, program_header_only,
+ error_msg, requested_base);
if (elf_file_impl == nullptr)
return nullptr;
return new ElfFile(elf_file_impl);
} else if (header[EI_CLASS] == ELFCLASS32) {
- ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file, writable, program_header_only, error_msg);
+ ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file, writable, program_header_only,
+ error_msg, requested_base);
if (elf_file_impl == nullptr) {
return nullptr;
}
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 10d6360..41c54bc 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -42,7 +42,8 @@
// ELFObjectFile.
class ElfFile {
public:
- static ElfFile* Open(File* file, bool writable, bool program_header_only, std::string* error_msg);
+ static ElfFile* Open(File* file, bool writable, bool program_header_only, std::string* error_msg,
+ uint8_t* requested_base = nullptr); // TODO: move arg to before error_msg.
// Open with specific mmap flags, Always maps in the whole file, not just the
// program header sections.
static ElfFile* Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg);
@@ -55,8 +56,10 @@
size_t Size() const;
+ // The start of the memory map address range for this ELF file.
uint8_t* Begin() const;
+ // The end of the memory map address range for this ELF file.
uint8_t* End() const;
const File& GetFile() const;
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index a8bb465..a70fa17 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -36,7 +36,8 @@
typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
class ElfFileImpl {
public:
- static ElfFileImpl* Open(File* file, bool writable, bool program_header_only, std::string* error_msg);
+ static ElfFileImpl* Open(File* file, bool writable, bool program_header_only,
+ std::string* error_msg, uint8_t* requested_base = nullptr);
static ElfFileImpl* Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg);
~ElfFileImpl();
@@ -112,7 +113,7 @@
bool Strip(std::string* error_msg);
private:
- ElfFileImpl(File* file, bool writable, bool program_header_only);
+ ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base);
bool Setup(int prot, int flags, std::string* error_msg);
@@ -206,6 +207,9 @@
Elf_Rela, Elf_Dyn, Elf_Off>> gdb_file_mapping_;
void GdbJITSupport();
+ // Override the 'base' p_vaddr in the first LOAD segment with this value (if non-null).
+ uint8_t* requested_base_;
+
DISALLOW_COPY_AND_ASSIGN(ElfFileImpl);
};
diff --git a/runtime/elf_utils.h b/runtime/elf_utils.h
index 676cd52..7b00bad 100644
--- a/runtime/elf_utils.h
+++ b/runtime/elf_utils.h
@@ -24,6 +24,8 @@
#include "base/logging.h"
+namespace art {
+
// Architecture dependent flags for the ELF header.
#define EF_ARM_EABI_VER5 0x05000000
#define EF_MIPS_ABI_O32 0x00001000
@@ -163,4 +165,6 @@
}
}
+} // namespace art
+
#endif // ART_RUNTIME_ELF_UTILS_H_
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 9fb9a3b..1a8ca02 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -35,7 +35,6 @@
namespace art {
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
@@ -90,7 +89,6 @@
return klass;
}
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE
static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
@@ -120,7 +118,6 @@
// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
@@ -140,11 +137,9 @@
}
// Given the context of a calling Method and a resolved class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -163,11 +158,9 @@
}
// Given the context of a calling Method and an initialized class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -176,7 +169,6 @@
}
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
@@ -213,7 +205,6 @@
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
@@ -362,7 +353,7 @@
mirror::Object** this_object,
mirror::ArtMethod** referrer, Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
if (resolved_method == nullptr) {
StackHandleScope<1> hs(self);
mirror::Object* null_this = nullptr;
@@ -437,7 +428,14 @@
case kInterface: {
uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
mirror::ArtMethod* imt_method = (*this_object)->GetClass()->GetEmbeddedImTableEntry(imt_index);
- if (!imt_method->IsImtConflictMethod()) {
+ if (!imt_method->IsImtConflictMethod() && !imt_method->IsImtUnimplementedMethod()) {
+ if (kIsDebugBuild) {
+ mirror::Class* klass = (*this_object)->GetClass();
+ mirror::ArtMethod* method = klass->FindVirtualMethodForInterface(resolved_method);
+ CHECK_EQ(imt_method, method) << PrettyMethod(resolved_method) << " / " <<
+ PrettyMethod(imt_method) << " / " << PrettyMethod(method) << " / " <<
+ PrettyClass(klass);
+ }
return imt_method;
} else {
mirror::ArtMethod* interface_method =
@@ -499,11 +497,8 @@
case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break;
case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break;
default:
- LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings.
- is_primitive = true;
- is_set = true;
- is_static = true;
- break;
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
// Incompatible class change.
@@ -537,8 +532,7 @@
mirror::Object* this_object,
mirror::ArtMethod* referrer,
bool access_check, InvokeType type) {
- bool is_direct = type == kStatic || type == kDirect;
- if (UNLIKELY(this_object == NULL && !is_direct)) {
+ if (UNLIKELY(this_object == NULL && type != kStatic)) {
return NULL;
}
mirror::ArtMethod* resolved_method =
@@ -563,7 +557,7 @@
}
if (type == kInterface) { // Most common form of slow path dispatch.
return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
- } else if (is_direct) {
+ } else if (type == kStatic || type == kDirect) {
return resolved_method;
} else if (type == kSuper) {
return referrer->GetDeclaringClass()->GetSuperClass()
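
    // The interface dispatch change in this file indexes the embedded IMT by the
    // resolved method's dex method index modulo the table size; colliding methods
    // share a slot, which then holds the conflict stub instead of a concrete
    // method. A sketch of the index computation, assuming a 64-entry table
    // (the value of mirror::Class::kImtSize is an assumption here):
    #include <cstdint>

    constexpr uint32_t kImtSize = 64;

    constexpr uint32_t ImtIndex(uint32_t dex_method_index) {
      return dex_method_index % kImtSize;
    }

    static_assert(ImtIndex(3) == ImtIndex(3 + kImtSize), "methods 64 indices apart collide");
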
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 7b90339..da2dfe1 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -218,17 +218,14 @@
if (o == nullptr) {
return;
}
- mirror::ArtMethod* m = self->GetCurrentMethod(nullptr);
// Make sure that the result is an instance of the type this method was expected to return.
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> h_m(hs.NewHandle(m));
- mirror::Class* return_type = MethodHelper(h_m).GetReturnType();
+ mirror::Class* return_type = self->GetCurrentMethod(nullptr)->GetReturnType();
if (!o->InstanceOf(return_type)) {
Runtime::Current()->GetJavaVM()->JniAbortF(nullptr,
"attempt to return an instance of %s from %s",
PrettyTypeOf(o).c_str(),
- PrettyMethod(h_m.Get()).c_str());
+ PrettyMethod(self->GetCurrentMethod(nullptr)).c_str());
}
}
@@ -283,20 +280,19 @@
return zero;
} else {
StackHandleScope<1> hs(soa.Self());
- MethodHelper mh_interface_method(
+ Handle<mirror::ArtMethod> h_interface_method(
hs.NewHandle(soa.Decode<mirror::ArtMethod*>(interface_method_jobj)));
// This can cause thread suspension.
- mirror::Class* result_type = mh_interface_method.GetReturnType();
+ mirror::Class* result_type = h_interface_method->GetReturnType();
mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
mirror::ArtMethod* proxy_method;
- if (mh_interface_method.GetMethod()->GetDeclaringClass()->IsInterface()) {
- proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(
- mh_interface_method.GetMethod());
+ if (h_interface_method->GetDeclaringClass()->IsInterface()) {
+ proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(h_interface_method.Get());
} else {
// Proxy dispatch to a method defined in Object.
- DCHECK(mh_interface_method.GetMethod()->GetDeclaringClass()->IsObjectClass());
- proxy_method = mh_interface_method.GetMethod();
+ DCHECK(h_interface_method->GetDeclaringClass()->IsObjectClass());
+ proxy_method = h_interface_method.Get();
}
ThrowLocation throw_location(rcvr, proxy_method, -1);
JValue result_unboxed;
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c46d887..311cafa 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -47,7 +47,6 @@
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -66,7 +65,6 @@
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -74,7 +72,6 @@
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
index f0ad6de..afe769e 100644
--- a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
@@ -25,6 +25,7 @@
mirror::Array* array,
uint32_t payload_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(dex_pc);
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index c3664bf..e7975f8 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -121,6 +121,7 @@
private:
static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__i386__))
+ UNUSED(mh);
return 0;
#else
size_t args_in_regs = 0;
@@ -172,7 +173,7 @@
break;
case Primitive::kPrimVoid:
LOG(FATAL) << "UNREACHABLE";
- break;
+ UNREACHABLE();
}
++cur_reg_;
}
@@ -261,8 +262,7 @@
break;
case Primitive::kPrimVoid:
LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
+ UNREACHABLE();
}
args_.push_back(val);
}
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index e728f7d..9ffd199 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -17,10 +17,9 @@
#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
#define ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
+#include "arch/instruction_set.h"
#include "base/mutex.h"
-#include "gc_root-inl.h"
-#include "instruction_set.h"
-#include "runtime-inl.h"
+#include "runtime.h"
#include "thread-inl.h"
// Specific frame size code is in architecture-specific files. We include this to compile-time
@@ -36,16 +35,41 @@
class ArtMethod;
} // namespace mirror
-// Place a special frame at the TOS that will save the callee saves for the given type.
-static inline void FinishCalleeSaveFrameSetup(Thread* self, StackReference<mirror::ArtMethod>* sp,
- Runtime::CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Be aware the store below may well stomp on an incoming argument.
- Locks::mutator_lock_->AssertSharedHeld(self);
- sp->Assign(Runtime::Current()->GetCalleeSaveMethod(type));
- self->SetTopOfStack(sp, 0);
- self->VerifyStack();
-}
+class ScopedQuickEntrypointChecks {
+ public:
+ explicit ScopedQuickEntrypointChecks(Thread *self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : self_(self) {
+ if (kIsDebugBuild) {
+ TestsOnEntry();
+ }
+ }
+
+ explicit ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : self_(kIsDebugBuild ? Thread::Current() : nullptr) {
+ if (kIsDebugBuild) {
+ TestsOnEntry();
+ }
+ }
+
+ ~ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ TestsOnExit();
+ }
+ }
+
+ private:
+ void TestsOnEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Locks::mutator_lock_->AssertSharedHeld(self_);
+ self_->VerifyStack();
+ }
+
+ void TestsOnExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Locks::mutator_lock_->AssertSharedHeld(self_);
+ self_->VerifyStack();
+ }
+
+ Thread* const self_;
+};
static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, Runtime::CalleeSaveType type) {
// constexpr must be a return statement.
@@ -71,7 +95,8 @@
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr size_t GetCalleeSavePCOffset(InstructionSet isa, Runtime::CalleeSaveType type) {
+static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa,
+ Runtime::CalleeSaveType type) {
return GetCalleeSaveFrameSize(isa, type) - GetConstExprPointerSize(isa);
}
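
    // ScopedQuickEntrypointChecks replaces the explicit FinishCalleeSaveFrameSetup
    // call with an RAII guard whose checks run on entry and exit and compile away
    // in release builds. A minimal stand-alone sketch of the pattern; the real
    // class additionally asserts the mutator lock and verifies the stack.
    #include <cassert>

    constexpr bool kDebugChecksEnabled =
    #ifdef NDEBUG
        false;
    #else
        true;
    #endif

    class ScopedChecksSketch {
     public:
      explicit ScopedChecksSketch(const void* self) : self_(self) {
        if (kDebugChecksEnabled) {
          assert(self_ != nullptr);  // stands in for the lock/stack assertions
        }
      }
      ~ScopedChecksSketch() {
        if (kDebugChecksEnabled) {
          assert(self_ != nullptr);
        }
      }
     private:
      const void* const self_;
    };

    int Entrypoint(const void* self) {
      ScopedChecksSketch checks(self);  // replaces an explicit setup/teardown pair
      return 0;                         // ... entrypoint body ...
    }
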
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index d8da463..c0b79b2 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -29,9 +29,9 @@
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \
if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
@@ -53,13 +53,13 @@
} \
} \
} \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
+ ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -80,13 +80,13 @@
} \
} \
} \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
- return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
+ ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
@@ -105,45 +105,39 @@
return obj; \
} \
} \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
- return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ ScopedQuickEntrypointChecks sqec(self); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, method, component_count, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ mirror::Class* klass, mirror::ArtMethod* method, int32_t component_count, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, method, component_count, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, method, component_count, self, \
allocator_type); \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ ScopedQuickEntrypointChecks sqec(self); \
if (!instrumented_bool) { \
return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false, allocator_type); \
} else { \
@@ -151,10 +145,9 @@
} \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
+ ScopedQuickEntrypointChecks sqec(self); \
if (!instrumented_bool) { \
return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true, allocator_type); \
} else { \
@@ -234,31 +227,34 @@
}
void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
- switch (entry_points_allocator) {
#if !defined(__APPLE__) || !defined(__LP64__)
+ switch (entry_points_allocator) {
case gc::kAllocatorTypeDlMalloc: {
SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
- break;
+ return;
}
case gc::kAllocatorTypeRosAlloc: {
SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
- break;
+ return;
}
case gc::kAllocatorTypeBumpPointer: {
CHECK(kMovingCollector);
SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
- break;
+ return;
}
case gc::kAllocatorTypeTLAB: {
CHECK(kMovingCollector);
SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
+ return;
+ }
+ default:
break;
- }
-#endif
- default: {
- LOG(FATAL) << "Unimplemented";
- }
}
+#else
+ UNUSED(qpoints);
+#endif
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
} // namespace art
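
    // The restructured switch makes every handled allocator return directly, so
    // the code after it is reached only when no case applied (or when no
    // allocator entrypoints are compiled in, as on 64-bit Mac builds) and can
    // fail unconditionally. A small sketch of that control-flow shape, with
    // std::abort standing in for UNIMPLEMENTED(FATAL) / UNREACHABLE():
    #include <cstdlib>

    enum class Allocator { kDlMalloc, kRosAlloc, kUnsupported };

    int SelectEntrypoints(Allocator a) {
      switch (a) {
        case Allocator::kDlMalloc: return 1;
        case Allocator::kRosAlloc: return 2;
        default: break;
      }
      std::abort();  // every supported case has already returned
    }
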
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
new file mode 100644
index 0000000..7d77721
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
+
+#include <cstdint>
+
+// These are extern declarations of assembly stubs with common names.
+
+// Cast entrypoints.
+extern "C" void art_quick_check_cast(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
+extern "C" int art_quick_set8_static(uint32_t, int8_t);
+extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
+extern "C" int art_quick_set16_static(uint32_t, int16_t);
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_static(uint32_t);
+extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
+extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" int16_t art_quick_get_short_static(uint32_t);
+extern "C" uint16_t art_quick_get_char_static(uint32_t);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
+
+// Array entrypoints.
+extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
+extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
+extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
+extern "C" void art_quick_handle_fill_data(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
+
+// Math entrypoints.
+extern "C" int64_t art_quick_d2l(double);
+extern "C" int64_t art_quick_f2l(float);
+extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
+extern "C" int64_t art_quick_lmod(int64_t, int64_t);
+extern "C" int64_t art_quick_lmul(int64_t, int64_t);
+extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
+extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
+extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+
+// Intrinsic entrypoints.
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+extern "C" void* art_quick_memcpy(void*, const void*, size_t);
+
+// Invoke entrypoints.
+extern "C" void art_quick_imt_conflict_trampoline(art::mirror::ArtMethod*);
+extern "C" void art_quick_resolution_trampoline(art::mirror::ArtMethod*);
+extern "C" void art_quick_to_interpreter_bridge(art::mirror::ArtMethod*);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
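
    // These declarations exist because the art_quick_* stubs are defined in
    // per-architecture assembly: C++ can only take their addresses through
    // extern "C" declarations with unmangled names. A stand-alone sketch of the
    // pattern; a C++ definition stands in for the assembly so it links, and the
    // names here are hypothetical, not ART's.
    extern "C" void sketch_quick_test_suspend() {}

    struct QuickEntryPointsSketch {
      void (*pTestSuspend)();
    };

    void InitEntryPoints(QuickEntryPointsSketch* qpoints) {
      qpoints->pTestSuspend = sketch_quick_test_suspend;  // take the stub's address
    }
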
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index f9f62c2..14ab320 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -27,9 +27,8 @@
namespace art {
-extern "C" void artDeoptimize(Thread* self, StackReference<mirror::ArtMethod>* sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+extern "C" void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 704db05..2e7c8ba 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -27,42 +27,39 @@
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
mirror::ArtMethod* referrer,
- Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ScopedQuickEntrypointChecks sqec(self);
return ResolveVerifyAndClinit(type_idx, referrer, self, true, false);
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
mirror::ArtMethod* referrer,
- Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ScopedQuickEntrypointChecks sqec(self);
return ResolveVerifyAndClinit(type_idx, referrer, self, false, false);
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self,
- StackReference<mirror::ArtMethod>* sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* referrer,
+ Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ScopedQuickEntrypointChecks sqec(self);
return ResolveVerifyAndClinit(type_idx, referrer, self, false, true);
}
extern "C" mirror::String* artResolveStringFromCode(mirror::ArtMethod* referrer,
int32_t string_idx,
- Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ScopedQuickEntrypointChecks sqec(self);
return ResolveStringFromCode(referrer, string_idx);
}
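
These resolution entrypoints report failure by returning nullptr and leaving an exception pending on the thread, so the calling stub checks the thread rather than the return value. A small self-contained sketch of that contract, using stand-in types (FakeThread and ResolveName are invented for the example):

    #include <iostream>
    #include <string>

    // Illustrative stand-in for a thread with a pending-exception slot.
    struct FakeThread {
      std::string pending_exception;  // empty means "no exception pending"
      bool IsExceptionPending() const { return !pending_exception.empty(); }
    };

    // Resolution-style helper: return the resolved value, or nullptr after
    // recording the failure on the thread, mirroring the entrypoint contract
    // where callers consult the thread rather than the return value alone.
    const std::string* ResolveName(FakeThread* self, int idx) {
      static const std::string table[] = {"java/lang/Object", "java/lang/String"};
      if (idx < 0 || idx >= 2) {
        self->pending_exception = "NoClassDefFoundError";
        return nullptr;
      }
      return &table[idx];
    }

    int main() {
      FakeThread self;
      if (const std::string* s = ResolveName(&self, 1)) {
        std::cout << "resolved: " << *s << "\n";
      }
      if (ResolveName(&self, 7) == nullptr && self.IsExceptionPending()) {
        std::cout << "failed with pending " << self.pending_exception << "\n";
      }
      return 0;
    }
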
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index b89c015..7326fcf 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -25,295 +25,284 @@
namespace art {
-extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int8_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetByte(field->GetDeclaringClass());
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetByte(field->GetDeclaringClass());
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int8_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetBoolean(field->GetDeclaringClass());
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetBoolean(field->GetDeclaringClass());
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int16_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetShort(field->GetDeclaringClass());
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetShort(field->GetDeclaringClass());
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int16_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetChar(field->GetDeclaringClass());
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetChar(field->GetDeclaringClass());
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int32_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->Get32(field->GetDeclaringClass());
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int32_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->Get32(field->GetDeclaringClass());
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
sizeof(int64_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->Get64(field->GetDeclaringClass());
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->Get64(field->GetDeclaringClass());
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
mirror::ArtMethod* referrer,
- Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetObj(field->GetDeclaringClass());
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticObjectRead, true>(field_idx, referrer, self,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
return field->GetObj(field->GetDeclaringClass());
}
- return NULL; // Will throw exception by checking with Thread::Current
+ return nullptr; // Will throw exception by checking with Thread::Current.
}
extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int8_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetByte(obj);
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
sizeof(int8_t));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
} else {
return field->GetByte(obj);
}
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int8_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetBoolean(obj);
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
sizeof(int8_t));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
} else {
return field->GetBoolean(obj);
}
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int16_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetShort(obj);
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
sizeof(int16_t));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
} else {
return field->GetShort(obj);
}
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int16_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetChar(obj);
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
sizeof(int16_t));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
} else {
return field->GetChar(obj);
}
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int32_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
return field->Get32(obj);
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
sizeof(int32_t));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
} else {
return field->Get32(obj);
}
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
sizeof(int64_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
return field->Get64(obj);
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
sizeof(int64_t));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
} else {
return field->Get64(obj);
}
}
- return 0; // Will throw exception by checking with Thread::Current
+ return 0; // Will throw exception by checking with Thread::Current.
}
extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::ArtMethod* referrer,
- Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
return field->GetObj(obj);
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstanceObjectRead, true>(field_idx, referrer, self,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
} else {
return field->GetObj(obj);
}
}
- return NULL; // Will throw exception by checking with Thread::Current
+ return nullptr; // Will throw exception by checking with Thread::Current.
}
extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int8_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
if (type == Primitive::kPrimBoolean) {
@@ -324,9 +313,8 @@
}
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int8_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
if (type == Primitive::kPrimBoolean) {
@@ -341,12 +329,12 @@
}
extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int16_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
if (type == Primitive::kPrimChar) {
@@ -357,9 +345,8 @@
}
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int16_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
if (type == Primitive::kPrimChar) {
@@ -374,19 +361,18 @@
}
extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int32_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
// Compiled code can't use transactional mode.
field->Set32<false>(field->GetDeclaringClass(), new_value);
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int32_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
// Compiled code can't use transactional mode.
field->Set32<false>(field->GetDeclaringClass(), new_value);
return 0; // success
@@ -395,19 +381,18 @@
}
extern "C" int artSet64StaticFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
- uint64_t new_value, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ uint64_t new_value, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
sizeof(int64_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
// Compiled code can't use transactional mode.
field->Set64<false>(field->GetDeclaringClass(), new_value);
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
// Compiled code can't use transactional mode.
field->Set64<false>(field->GetDeclaringClass(), new_value);
return 0; // success
@@ -416,22 +401,21 @@
}
extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
if (LIKELY(!field->IsPrimitiveType())) {
// Compiled code can't use transactional mode.
field->SetObj<false>(field->GetDeclaringClass(), new_value);
return 0; // success
}
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<StaticObjectWrite, true>(field_idx, referrer, self,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
+ if (LIKELY(field != nullptr)) {
// Compiled code can't use transactional mode.
field->SetObj<false>(field->GetDeclaringClass(), new_value);
return 0; // success
@@ -440,12 +424,12 @@
}
extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int8_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
if (type == Primitive::kPrimBoolean) {
@@ -456,15 +440,14 @@
}
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
{
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
sizeof(int8_t));
}
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
} else {
@@ -482,12 +465,12 @@
}
extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int16_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
if (type == Primitive::kPrimChar) {
@@ -498,15 +481,14 @@
}
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
{
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
sizeof(int16_t));
}
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
} else {
@@ -525,25 +507,24 @@
}
extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int32_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
// Compiled code can't use transactional mode.
field->Set32<false>(obj, new_value);
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
{
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
sizeof(int32_t));
}
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
} else {
@@ -556,25 +537,20 @@
}
extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- constexpr size_t frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsOnly);
- mirror::ArtMethod* referrer =
- reinterpret_cast<StackReference<mirror::ArtMethod>*>(
- reinterpret_cast<uint8_t*>(sp) + frame_size)->AsMirrorPtr();
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int64_t));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
// Compiled code can't use transactional mode.
field->Set64<false>(obj, new_value);
return 0; // success
}
- sp->Assign(Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly));
- self->SetTopOfStack(sp, 0);
field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
sizeof(int64_t));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
} else {
@@ -588,21 +564,20 @@
extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
mirror::Object* new_value,
- mirror::ArtMethod* referrer, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL && obj != NULL)) {
+ if (LIKELY(field != nullptr && obj != nullptr)) {
// Compiled code can't use transactional mode.
field->SetObj<false>(obj, new_value);
return 0; // success
}
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode<InstanceObjectWrite, true>(field_idx, referrer, self,
sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- if (UNLIKELY(obj == NULL)) {
+ if (LIKELY(field != nullptr)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
} else {
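
Every getter and setter above follows the same two-tier lookup: try FindFieldFast, fall back to the slow FindFieldFromCode path, and otherwise return 0/nullptr with the exception left for the caller to discover. A compact sketch of that pattern with hypothetical caches (none of these names are ART APIs):

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    // Hypothetical field record and caches; stand-ins, not ART types.
    struct Field { int32_t value; };

    std::unordered_map<uint32_t, Field> dex_cache;   // fast path: already resolved
    std::unordered_map<uint32_t, Field> dex_file;    // slow path: full resolution

    Field* FindFieldFast(uint32_t idx) {
      auto it = dex_cache.find(idx);
      return it == dex_cache.end() ? nullptr : &it->second;
    }

    Field* FindFieldSlow(uint32_t idx, bool* exception_pending) {
      auto it = dex_file.find(idx);
      if (it == dex_file.end()) {
        *exception_pending = true;   // stand-in for throwing NoSuchFieldError
        return nullptr;
      }
      dex_cache[idx] = it->second;   // populate the cache for next time
      return &dex_cache[idx];
    }

    // Same shape as the getters in the hunks: fast path, then slow path,
    // then "return 0 and let the caller check for a pending exception".
    int32_t GetStatic32(uint32_t idx, bool* exception_pending) {
      if (Field* f = FindFieldFast(idx)) return f->value;
      if (Field* f = FindFieldSlow(idx, exception_pending)) return f->value;
      return 0;
    }

    int main() {
      dex_file[5] = Field{123};
      bool pending = false;
      int32_t a = GetStatic32(5, &pending);  // fast miss, slow hit -> 123
      int32_t b = GetStatic32(9, &pending);  // both miss -> 0, exception pending
      std::cout << a << " " << b << " pending=" << pending << "\n";
      return 0;
    }
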
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index 06bbabc..e336543 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -25,10 +25,9 @@
* Handle fill array data by copying appropriate part of dex file into array.
*/
extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
- mirror::ArtMethod* method, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+ mirror::ArtMethod* method, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ScopedQuickEntrypointChecks sqec(self);
const uint16_t* const insns = method->GetCodeItem()->insns_;
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(insns + payload_offset);
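
artHandleFillArrayDataFromCode reads an Instruction::ArrayDataPayload at insns_ + payload_offset and copies element_width * element_count bytes into the array. A standalone sketch of parsing such a payload; the struct below is a local stand-in whose field order follows the published dex fill-array-data-payload format, and the helper names are invented:

    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    // Local stand-in for the payload header: identifier, element width in
    // bytes, element count, then the packed element data.
    struct ArrayDataPayload {
      uint16_t ident;          // 0x0300
      uint16_t element_width;
      uint32_t element_count;
      // element_width * element_count bytes of data follow the header.
    };

    bool FillArrayData(int32_t* array, uint32_t array_len, const uint16_t* insns,
                       uint32_t payload_offset) {
      // Same addressing as the entrypoint: the payload lives inside the
      // instruction stream at a 16-bit-unit offset from the code item.
      const ArrayDataPayload* payload =
          reinterpret_cast<const ArrayDataPayload*>(insns + payload_offset);
      if (payload->ident != 0x0300 || payload->element_count > array_len) {
        return false;  // stand-in for throwing ArrayIndexOutOfBoundsException
      }
      std::memcpy(array, payload + 1,
                  size_t{payload->element_width} * payload->element_count);
      return true;
    }

    int main() {
      // Build a fake instruction stream: 2 code units, then a payload for {1,2,3}.
      std::vector<uint16_t> insns(12, 0);
      ArrayDataPayload header{0x0300, 4, 3};
      int32_t values[3] = {1, 2, 3};
      std::memcpy(insns.data() + 2, &header, sizeof(header));
      std::memcpy(insns.data() + 2 + sizeof(header) / 2, values, sizeof(values));

      int32_t array[3] = {0, 0, 0};
      bool ok = FillArrayData(array, 3, insns.data(), /*payload_offset=*/2);
      std::cout << ok << " " << array[0] << array[1] << array[2] << "\n";  // 1 123
      return 0;
    }
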
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index bb0e5e3..54dbd8c 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -16,7 +16,6 @@
#include "callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
-#include "instruction_set.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
@@ -28,18 +27,17 @@
extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod* method,
mirror::Object* this_object,
Thread* self,
- StackReference<mirror::ArtMethod>* sp,
uintptr_t lr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ ScopedQuickEntrypointChecks sqec(self);
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
const void* result;
if (instrumentation->IsDeoptimized(method)) {
result = GetQuickToInterpreterBridge();
} else {
- result = instrumentation->GetQuickCodeFor(method);
+ result = instrumentation->GetQuickCodeFor(method, sizeof(void*));
+ DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
}
- DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? nullptr : this_object,
method, lr, interpreter_entry);
@@ -52,23 +50,19 @@
uint64_t gpr_result,
uint64_t fpr_result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) not the hand inlined below.
- // We use the hand inline version to ensure the return_pc is assigned before verifying the
- // stack.
- // Be aware the store below may well stomp on an incoming argument.
- Locks::mutator_lock_->AssertSharedHeld(self);
- Runtime* runtime = Runtime::Current();
- sp->Assign(runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
- uint32_t return_pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsOnly);
+ // Compute address of return PC and sanity check that it currently holds 0.
+ size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly);
uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
return_pc_offset);
CHECK_EQ(*return_pc, 0U);
- self->SetTopOfStack(sp, 0);
- self->VerifyStack();
+
+  // Pop the frame filling in the return pc. The low half of the return value is 0 when
+  // deoptimization shouldn't be performed, with the high half holding the return address. When
+  // deoptimization should be performed, the low half is zero and the high half is the address of
+  // the deoptimization entry point.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
self, return_pc, gpr_result, fpr_result);
- self->VerifyStack();
return return_or_deoptimize_pc;
}
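
The exit hook hands two machine words back to the assembly stub (a return pc plus an optional deoptimization target) in a single TwoWordReturn. A toy sketch of packing such a pair into one 64-bit value, the way a 32-bit target could use a register pair; the particular low/high convention below is only illustrative and is not the stub contract described in the comment above:

    #include <cstdint>
    #include <iostream>

    // Illustrative packing of a two-word result into one 64-bit value.
    using TwoWord = uint64_t;

    TwoWord MakeTwoWord(uint32_t lo, uint32_t hi) {
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }
    uint32_t Lo(TwoWord v) { return static_cast<uint32_t>(v); }
    uint32_t Hi(TwoWord v) { return static_cast<uint32_t>(v >> 32); }

    // Exit-hook-like helper: hand back both the pc to resume at and whether a
    // deoptimization entry should be taken instead (0 meaning "no deopt").
    TwoWord PopFrame(uint32_t return_pc, bool deoptimize, uint32_t deopt_entry) {
      return deoptimize ? MakeTwoWord(return_pc, deopt_entry)
                        : MakeTwoWord(return_pc, 0);
    }

    int main() {
      TwoWord r = PopFrame(0x1000, /*deoptimize=*/true, 0x2000);
      std::cout << std::hex << Lo(r) << " " << Hi(r) << "\n";  // 1000 2000
      return 0;
    }
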
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index c239535..c1276b5 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -15,6 +15,7 @@
*/
#include "entrypoints/entrypoint_utils-inl.h"
+#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 92c0841..8ceac97 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -20,12 +20,11 @@
namespace art {
-extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
- if (UNLIKELY(obj == NULL)) {
+ ScopedQuickEntrypointChecks sqec(self);
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location(self->GetCurrentLocationForThrow());
ThrowNullPointerException(&throw_location,
"Null reference used for synchronization (monitor-enter)");
@@ -43,12 +42,11 @@
}
}
-extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
- if (UNLIKELY(obj == NULL)) {
+ ScopedQuickEntrypointChecks sqec(self);
+ if (UNLIKELY(obj == nullptr)) {
ThrowLocation throw_location(self->GetCurrentLocationForThrow());
ThrowNullPointerException(&throw_location,
"Null reference used for synchronization (monitor-exit)");
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index ea75fb6..87e0c6e 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -19,10 +19,9 @@
namespace art {
-extern "C" void artTestSuspendFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" void artTestSuspendFromCode(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when suspend count check value is 0 and thread->suspend_count_ != 0
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
}
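
artTestSuspendFromCode is the slow path taken only when the inline suspend check in compiled code sees a non-zero request; it simply forwards to Thread::CheckSuspend(). A toy sketch of that cheap-flag-plus-slow-path split (ToyThread and its flag are stand-ins, not ART's suspend machinery):

    #include <atomic>
    #include <iostream>

    // Illustrative thread state: compiled code polls a single flag cheaply and
    // only calls the slow path when the flag is set.
    struct ToyThread {
      std::atomic<int> suspend_request{0};

      void CheckSuspend() {
        while (suspend_request.load(std::memory_order_acquire) != 0) {
          // A real runtime would block here until resumed; the sketch just clears.
          suspend_request.store(0, std::memory_order_release);
        }
      }
    };

    void TestSuspendSlowPath(ToyThread* self) {  // what the entrypoint boils down to
      self->CheckSuspend();
    }

    void CompiledCodeLoop(ToyThread* self) {
      for (int i = 0; i < 3; ++i) {
        if (self->suspend_request.load(std::memory_order_relaxed) != 0) {
          TestSuspendSlowPath(self);             // rare: taken only when flagged
        }
      }
    }

    int main() {
      ToyThread t;
      t.suspend_request = 1;
      CompiledCodeLoop(&t);
      std::cout << "suspend flag now " << t.suspend_request.load() << "\n";
      return 0;
    }
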
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 13decc8..25df40b 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -24,16 +24,14 @@
namespace art {
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
-extern "C" void artDeliverPendingExceptionFromCode(Thread* thread,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" void artDeliverPendingExceptionFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
- thread->QuickDeliverException();
+ ScopedQuickEntrypointChecks sqec(self);
+ self->QuickDeliverException();
}
// Called by generated call to throw an exception.
-extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
* exception may be NULL, in which case this routine should
@@ -42,9 +40,9 @@
* and threw a NPE if NULL. This routine responsible for setting
* exception_ in thread and delivering the exception.
*/
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ScopedQuickEntrypointChecks sqec(self);
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- if (exception == NULL) {
+ if (exception == nullptr) {
self->ThrowNewException(throw_location, "Ljava/lang/NullPointerException;",
"throw with null exception");
} else {
@@ -54,10 +52,9 @@
}
// Called by generated call to throw a NPE exception.
-extern "C" void artThrowNullPointerExceptionFromCode(Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" void artThrowNullPointerExceptionFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
ThrowNullPointerExceptionFromDexPC(throw_location);
@@ -66,52 +63,50 @@
}
// Called by generated call to throw an arithmetic divide by zero exception.
-extern "C" void artThrowDivZeroFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
+extern "C" void artThrowDivZeroFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
self->QuickDeliverException();
}
// Called by generated call to throw an array index out of bounds exception.
-extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self,
- StackReference<mirror::ArtMethod>*sp)
+extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
-extern "C" void artThrowStackOverflowFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
+extern "C" void artThrowStackOverflowFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowStackOverflowError(self);
self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
-extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self,
- StackReference<mirror::ArtMethod>* sp)
+extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ScopedQuickEntrypointChecks sqec(self);
ThrowNoSuchMethodError(method_idx);
self->QuickDeliverException();
}
extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
- CHECK(!dest_type->IsAssignableFrom(src_type));
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK(!dest_type->IsAssignableFrom(src_type));
ThrowClassCastException(dest_type, src_type);
self->QuickDeliverException();
}
extern "C" void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
- Thread* self, StackReference<mirror::ArtMethod>* sp)
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 224756b..93dc62a 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -21,7 +21,6 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
-#include "instruction_set.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -50,15 +49,19 @@
// | arg1 spill | |
// | Method* | ---
// | LR |
- // | ... | callee saves
- // | R3 | arg3
- // | R2 | arg2
- // | R1 | arg1
- // | R0 | padding
+ // | ... | 4x6 bytes callee saves
+ // | R3 |
+ // | R2 |
+ // | R1 |
+ // | S15 |
+ // | : |
+ // | S0 |
+ // | | 4x2 bytes padding
// | Method* | <- sp
- static constexpr bool kQuickSoftFloatAbi = true; // This is a soft float ABI.
- static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
- static constexpr size_t kNumQuickFprArgs = 0; // 0 arguments passed in FPRs.
+ static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
+ static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
+ static constexpr size_t kNumQuickGprArgs = 3;
+ static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
@@ -90,6 +93,7 @@
// | | padding
// | Method* | <- sp
static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
+ static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
@@ -117,6 +121,7 @@
// | A1 | arg1
// | A0/Method* | <- sp
static constexpr bool kQuickSoftFloatAbi = true; // This is a soft float ABI.
+ static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 0; // 0 arguments passed in FPRs.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0; // Offset of first FPR arg.
@@ -141,6 +146,7 @@
// | ECX | arg1
// | EAX/Method* | <- sp
static constexpr bool kQuickSoftFloatAbi = true; // This is a soft float ABI.
+ static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 0; // 0 arguments passed in FPRs.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0; // Offset of first FPR arg.
@@ -178,6 +184,7 @@
// | Padding |
// | RDI/Method* | <- sp
static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
+ static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg.
@@ -221,9 +228,18 @@
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
- + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
- gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
- is_split_long_or_double_(false) {}
+ + sizeof(StackReference<mirror::ArtMethod>)), // Skip StackReference<ArtMethod>.
+ gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
+ cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
+ static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
+ "Number of Quick FPR arguments unexpected");
+ static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
+ "Double alignment unexpected");
+    // For register alignment, we want to assume that counters (fpr_double_index_) are even iff the
+ // next register is even.
+ static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
+ "Number of Quick FPR arguments not even");
+ }
virtual ~QuickArgumentVisitor() {}
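
The rewritten constructor replaces runtime checks with static_asserts that tie kQuickSoftFloatAbi, kNumQuickFprArgs and kQuickDoubleRegAlignedFloatBackFilled together at compile time. A small standalone illustration of expressing ABI invariants that way; the ABI descriptions below are invented for the example:

    #include <cstddef>

    // Made-up ABI descriptions for illustration only.
    struct SoftFloatAbi {
      static constexpr bool kSoftFloat = true;
      static constexpr bool kDoubleAlignedBackFill = false;
      static constexpr size_t kNumFprArgs = 0;
    };

    struct HardFloatBackFillAbi {
      static constexpr bool kSoftFloat = false;
      static constexpr bool kDoubleAlignedBackFill = true;
      static constexpr size_t kNumFprArgs = 16;
    };

    template <typename Abi>
    struct ArgumentVisitor {
      // Same invariants as the hunk: soft-float iff no FPR args, back-filling
      // never combined with soft-float, and an even FPR count when back-filling.
      static_assert(Abi::kSoftFloat == (Abi::kNumFprArgs == 0),
                    "Number of FPR arguments unexpected");
      static_assert(!(Abi::kSoftFloat && Abi::kDoubleAlignedBackFill),
                    "Double alignment unexpected");
      static_assert(!Abi::kDoubleAlignedBackFill || Abi::kNumFprArgs % 2 == 0,
                    "Number of FPR arguments not even");
    };

    int main() {
      ArgumentVisitor<SoftFloatAbi> a;          // compiles: invariants hold
      ArgumentVisitor<HardFloatBackFillAbi> b;  // compiles: invariants hold
      (void)a; (void)b;
      return 0;
    }
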
@@ -237,7 +253,11 @@
if (!kQuickSoftFloatAbi) {
Primitive::Type type = GetParamPrimitiveType();
if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
- if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
+ if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
+ if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
+ return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
+ }
+ } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
}
return stack_args_ + (stack_index_ * kBytesStackArgLocation);
@@ -268,28 +288,30 @@
uint64_t ReadSplitLongParam() const {
DCHECK(IsSplitLongOrDouble());
+ // Read low half from register.
uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
- uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
+ // Read high half from the stack. As current stack_index_ indexes the argument, the high part
+ // index should be (stack_index_ + 1).
+ uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_
+ + (stack_index_ + 1) * kBytesStackArgLocation);
return (low_half & 0xffffffffULL) | (high_half << 32);
}
void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // This implementation doesn't support reg-spill area for hard float
- // ABI targets such as x86_64 and aarch64. So, for those targets whose
- // 'kQuickSoftFloatAbi' is 'false':
- // (a) 'stack_args_' should point to the first method's argument
- // (b) whatever the argument type it is, the 'stack_index_' should
- // be moved forward along with every visiting.
+    //    (a) 'stack_args_' should point to the method's first argument
+    //    (b) whatever the argument type is, 'stack_index_' should
+    //        be moved forward with every visit.
gpr_index_ = 0;
fpr_index_ = 0;
+ if (kQuickDoubleRegAlignedFloatBackFilled) {
+ fpr_double_index_ = 0;
+ }
stack_index_ = 0;
if (!is_static_) { // Handle this.
cur_type_ = Primitive::kPrimNot;
is_split_long_or_double_ = false;
Visit();
- if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
- stack_index_++;
- }
+ stack_index_++;
if (kNumQuickGprArgs > 0) {
gpr_index_++;
}
@@ -305,9 +327,7 @@
case Primitive::kPrimInt:
is_split_long_or_double_ = false;
Visit();
- if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
- stack_index_++;
- }
+ stack_index_++;
if (gpr_index_ < kNumQuickGprArgs) {
gpr_index_++;
}
@@ -315,17 +335,24 @@
case Primitive::kPrimFloat:
is_split_long_or_double_ = false;
Visit();
+ stack_index_++;
if (kQuickSoftFloatAbi) {
if (gpr_index_ < kNumQuickGprArgs) {
gpr_index_++;
- } else {
- stack_index_++;
}
} else {
- if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
+ if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
fpr_index_++;
+ if (kQuickDoubleRegAlignedFloatBackFilled) {
+ // Double should not overlap with float.
+ // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
+ fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
+ // Float should not overlap with double.
+ if (fpr_index_ % 2 == 0) {
+ fpr_index_ = std::max(fpr_double_index_, fpr_index_);
+ }
+ }
}
- stack_index_++;
}
break;
case Primitive::kPrimDouble:
@@ -334,42 +361,46 @@
is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
((gpr_index_ + 1) == kNumQuickGprArgs);
Visit();
- if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
- if (kBytesStackArgLocation == 4) {
- stack_index_+= 2;
- } else {
- CHECK_EQ(kBytesStackArgLocation, 8U);
- stack_index_++;
- }
- }
- if (gpr_index_ < kNumQuickGprArgs) {
- gpr_index_++;
- if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
- if (gpr_index_ < kNumQuickGprArgs) {
- gpr_index_++;
- } else if (kQuickSoftFloatAbi) {
- stack_index_++;
- }
- }
- }
- } else {
- is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
- ((fpr_index_ + 1) == kNumQuickFprArgs);
- Visit();
- if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
- fpr_index_++;
- if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
- if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
- fpr_index_++;
- }
- }
- }
if (kBytesStackArgLocation == 4) {
stack_index_+= 2;
} else {
CHECK_EQ(kBytesStackArgLocation, 8U);
stack_index_++;
}
+ if (gpr_index_ < kNumQuickGprArgs) {
+ gpr_index_++;
+ if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
+ if (gpr_index_ < kNumQuickGprArgs) {
+ gpr_index_++;
+ }
+ }
+ }
+ } else {
+ is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
+ ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
+ Visit();
+ if (kBytesStackArgLocation == 4) {
+ stack_index_+= 2;
+ } else {
+ CHECK_EQ(kBytesStackArgLocation, 8U);
+ stack_index_++;
+ }
+ if (kQuickDoubleRegAlignedFloatBackFilled) {
+ if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
+ fpr_double_index_ += 2;
+ // Float should not overlap with double.
+ if (fpr_index_ % 2 == 0) {
+ fpr_index_ = std::max(fpr_double_index_, fpr_index_);
+ }
+ }
+ } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
+ fpr_index_++;
+ if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
+ if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
+ fpr_index_++;
+ }
+ }
+ }
}
break;
default:
@@ -378,21 +409,6 @@
}
}
- private:
- static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
- uint32_t shorty_len) {
- if (kQuickSoftFloatAbi) {
- CHECK_EQ(kNumQuickFprArgs, 0U);
- return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
- + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
- } else {
- // For now, there is no reg-spill area for the targets with
- // hard float ABI. So, the offset pointing to the first method's
- // parameter ('this' for non-static methods) should be returned.
- return sizeof(StackReference<mirror::ArtMethod>); // Skip StackReference<ArtMethod>.
- }
- }
-
protected:
const bool is_static_;
const char* const shorty_;
@@ -403,7 +419,14 @@
uint8_t* const fpr_args_; // Address of FPR arguments in callee save frame.
uint8_t* const stack_args_; // Address of stack arguments in caller's frame.
uint32_t gpr_index_; // Index into spilled GPRs.
- uint32_t fpr_index_; // Index into spilled FPRs.
+ // Index into spilled FPRs.
+ // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
+ // holds a higher register number.
+ uint32_t fpr_index_;
+ // Index into spilled FPRs for aligned double.
+ // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
+  // terms of singles; it may be behind fpr_index_.
+ uint32_t fpr_double_index_;
uint32_t stack_index_; // Index into arguments on the stack.
// The current type of argument during VisitArguments.
Primitive::Type cur_type_;
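
fpr_index_ walks single-precision slots while fpr_double_index_ walks even-aligned pairs, and the std::max/RoundUp steps in the hunks above let floats back-fill holes without ever overlapping a double. A toy allocator showing the same bookkeeping over 16 single-precision registers (register names s0/d1 and the helper names are only illustrative):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <string>

    constexpr uint32_t kNumFprArgs = 16;  // counted in single-precision slots

    uint32_t RoundUp2(uint32_t x) { return (x + 1) & ~1u; }

    // Toy version of the back-filling scheme: doubles take the next even pair,
    // floats take the next single slot, and the two counters never overlap.
    struct FprAllocator {
      uint32_t fpr_index = 0;         // next single slot (may point into a hole)
      uint32_t fpr_double_index = 0;  // next even-aligned pair, in single units

      std::string AllocFloat() {
        if (fpr_index + 1 >= kNumFprArgs + 1) return "stack";
        uint32_t reg = fpr_index++;
        fpr_double_index = std::max(fpr_double_index, RoundUp2(fpr_index));
        if (fpr_index % 2 == 0) fpr_index = std::max(fpr_double_index, fpr_index);
        return "s" + std::to_string(reg);
      }

      std::string AllocDouble() {
        if (fpr_double_index + 2 >= kNumFprArgs + 1) return "stack";
        uint32_t reg = fpr_double_index;
        fpr_double_index += 2;
        if (fpr_index % 2 == 0) fpr_index = std::max(fpr_double_index, fpr_index);
        return "d" + std::to_string(reg / 2);
      }
    };

    int main() {
      FprAllocator a;
      // float, double, float: the second float back-fills the s1 hole left
      // behind when the double was aligned up to the even pair d1.
      std::cout << a.AllocFloat() << " " << a.AllocDouble() << " "
                << a.AllocFloat() << "\n";  // s0 d1 s1
      return 0;
    }
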
@@ -456,7 +479,7 @@
break;
case Primitive::kPrimVoid:
LOG(FATAL) << "UNREACHABLE";
- break;
+ UNREACHABLE();
}
++cur_reg_;
}
@@ -466,7 +489,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ ScopedQuickEntrypointChecks sqec(self);
if (method->IsAbstract()) {
ThrowAbstractMethodError(method);
@@ -497,8 +520,8 @@
MethodHelper mh(hs.NewHandle(method));
if (mh.Get()->IsStatic() && !mh.Get()->GetDeclaringClass()->IsInitialized()) {
// Ensure static method's class is initialized.
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(mh.Get()->GetDeclaringClass()));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> h_class(hs2.NewHandle(mh.Get()->GetDeclaringClass()));
if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(mh.Get());
self->PopManagedStackFragment(fragment);
@@ -564,8 +587,7 @@
break;
case Primitive::kPrimVoid:
LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
+ UNREACHABLE();
}
args_->push_back(val);
}
@@ -593,7 +615,6 @@
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
- self->SetTopOfStack(sp, 0);
DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
<< PrettyMethod(proxy_method);
@@ -678,7 +699,7 @@
Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ ScopedQuickEntrypointChecks sqec(self);
// Start new JNI local reference state
JNIEnvExt* env = self->GetJniEnv();
ScopedObjectAccessUnchecked soa(env);
@@ -898,17 +919,16 @@
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__mips__)
- // TODO: These are all dummy values!
static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
- static constexpr size_t kNumNativeGprArgs = 0; // 6 arguments passed in GPRs.
- static constexpr size_t kNumNativeFprArgs = 0; // 8 arguments passed in FPRs.
+ static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs.
+ static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
static constexpr size_t kRegistersNeededForLong = 2;
static constexpr size_t kRegistersNeededForDouble = 2;
static constexpr bool kMultiRegistersAligned = true;
static constexpr bool kMultiRegistersWidened = true;
- static constexpr bool kAlignLongOnStack = false;
- static constexpr bool kAlignDoubleOnStack = false;
+ static constexpr bool kAlignLongOnStack = true;
+ static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__i386__)
// TODO: Check these!
static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
@@ -944,13 +964,13 @@
delegate_(delegate) {
// For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
// the next register is even; counting down is just to make the compiler happy...
- CHECK_EQ(kNumNativeGprArgs % 2, 0U);
- CHECK_EQ(kNumNativeFprArgs % 2, 0U);
+ static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
+ static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
}
virtual ~BuildNativeCallFrameStateMachine() {}
- bool HavePointerGpr() {
+ bool HavePointerGpr() const {
return gpr_index_ > 0;
}
@@ -965,7 +985,7 @@
}
}
- bool HaveHandleScopeGpr() {
+ bool HaveHandleScopeGpr() const {
return gpr_index_ > 0;
}
@@ -981,7 +1001,7 @@
}
}
- bool HaveIntGpr() {
+ bool HaveIntGpr() const {
return gpr_index_ > 0;
}
@@ -996,17 +1016,17 @@
}
}
- bool HaveLongGpr() {
+ bool HaveLongGpr() const {
return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
}
- bool LongGprNeedsPadding() {
+ bool LongGprNeedsPadding() const {
return kRegistersNeededForLong > 1 && // only pad when using multiple registers
kAlignLongOnStack && // and when it needs alignment
(gpr_index_ & 1) == 1; // counter is odd, see constructor
}
- bool LongStackNeedsPadding() {
+ bool LongStackNeedsPadding() const {
return kRegistersNeededForLong > 1 && // only pad when using multiple registers
kAlignLongOnStack && // and when it needs 8B alignment
(stack_entries_ & 1) == 1; // counter is odd
@@ -1042,7 +1062,7 @@
}
}
- bool HaveFloatFpr() {
+ bool HaveFloatFpr() const {
return fpr_index_ > 0;
}
@@ -1077,17 +1097,17 @@
}
}
- bool HaveDoubleFpr() {
+ bool HaveDoubleFpr() const {
return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
}
- bool DoubleFprNeedsPadding() {
+ bool DoubleFprNeedsPadding() const {
return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
kAlignDoubleOnStack && // and when it needs alignment
(fpr_index_ & 1) == 1; // counter is odd, see constructor
}
- bool DoubleStackNeedsPadding() {
+ bool DoubleStackNeedsPadding() const {
return kRegistersNeededForDouble > 1 && // only pad when using multiple registers
kAlignDoubleOnStack && // and when it needs 8B alignment
(stack_entries_ & 1) == 1; // counter is odd
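
The *NeedsPadding helpers pad a long or double only when the two-register value would start on an odd counter; since the state machine counts free registers downwards, an odd counter means the next register is odd. A toy predicate showing just that parity rule, with made-up ABI flags:

    #include <cstdint>
    #include <iostream>

    // Made-up ABI flags; the point is only the parity rule used for padding.
    constexpr uint32_t kRegistersNeededForLong = 2;
    constexpr bool kAlignLongOnStack = true;

    // Pad exactly when a multi-register value would start on an odd counter,
    // i.e. the next free register is odd and the pair must be even-aligned.
    bool LongNeedsPadding(uint32_t free_regs) {
      return kRegistersNeededForLong > 1 && kAlignLongOnStack && (free_regs & 1) == 1;
    }

    int main() {
      // With 4 registers total: 3 free (odd) -> pad one register; 2 free -> no pad.
      std::cout << LongNeedsPadding(3) << " " << LongNeedsPadding(2) << "\n";  // 1 0
      return 0;
    }
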
@@ -1122,15 +1142,15 @@
}
}
- uint32_t getStackEntries() {
+ uint32_t GetStackEntries() const {
return stack_entries_;
}
- uint32_t getNumberOfUsedGprs() {
+ uint32_t GetNumberOfUsedGprs() const {
return kNumNativeGprArgs - gpr_index_;
}
- uint32_t getNumberOfUsedFprs() {
+ uint32_t GetNumberOfUsedFprs() const {
return kNumNativeFprArgs - fpr_index_;
}
@@ -1155,7 +1175,7 @@
uint32_t fpr_index_; // Number of free FPRs
uint32_t stack_entries_; // Stack entries are in multiples of 32b, as floats are usually not
// extended
- T* delegate_; // What Push implementation gets called
+ T* const delegate_; // What Push implementation gets called
};
// Computes the sizes of register stacks and call stack area. Handling of references can be extended
@@ -1169,18 +1189,19 @@
virtual ~ComputeNativeCallFrameSize() {}
- uint32_t GetStackSize() {
+ uint32_t GetStackSize() const {
return num_stack_entries_ * sizeof(uintptr_t);
}
- uint8_t* LayoutCallStack(uint8_t* sp8) {
+ uint8_t* LayoutCallStack(uint8_t* sp8) const {
sp8 -= GetStackSize();
// Align by kStackAlignment.
sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
return sp8;
}
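
LayoutCallStack reserves the outgoing-argument area and then rounds the stack pointer down to kStackAlignment. A small illustration of that round-down step, with the 16-byte alignment taken from the assertion in exception_test.cc later in this patch:

    #include <cstdint>
    #include <cstdio>

    // Round a pointer down to the given power-of-two alignment, as LayoutCallStack does.
    static uint8_t* RoundDownPtr(uint8_t* p, uintptr_t alignment) {
      return reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(p) & ~(alignment - 1));
    }

    int main() {
      alignas(16) uint8_t frame[128];
      uint8_t* sp8 = frame + 100;   // pretend this is the SP after reserving stack args
      sp8 = RoundDownPtr(sp8, 16);  // align by "kStackAlignment"
      std::printf("aligned offset: %zu\n", static_cast<size_t>(sp8 - frame));  // 96
      return 0;
    }
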
- uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) {
+ uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
+ const {
// Assumption is OK right now, as we have soft-float arm
size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
sp8 -= fregs * sizeof(uintptr_t);
@@ -1192,7 +1213,7 @@
}
uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
- uint32_t** start_fpr) {
+ uint32_t** start_fpr) const {
// Native call stack.
sp8 = LayoutCallStack(sp8);
*start_stack = reinterpret_cast<uintptr_t*>(sp8);
@@ -1205,7 +1226,9 @@
}
virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(sm);
+ }
void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
@@ -1216,6 +1239,7 @@
Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
switch (cur_type_) {
case Primitive::kPrimNot:
+ // TODO: fix abuse of mirror types.
sm.AdvanceHandleScope(
reinterpret_cast<mirror::Object*>(0x12345678));
break;
@@ -1241,7 +1265,7 @@
}
}
- num_stack_entries_ = sm.getStackEntries();
+ num_stack_entries_ = sm.GetStackEntries();
}
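
Walk drives the state machine over the method shorty (return type first, then one character per argument) and records how many stack entries the native frame needs. A reduced, stand-alone version of that counting is sketched below; it deliberately ignores the register allocation that the real code performs first, and the slot sizes (one 32-bit entry for ints, floats and references, two for J/D) are the assumption being illustrated.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Count 32-bit stack slots for a shorty, ignoring registers entirely.
    static uint32_t CountStackSlots(const char* shorty, uint32_t shorty_len) {
      uint32_t slots = 0;
      for (uint32_t i = 1; i < shorty_len; ++i) {  // skip shorty[0], the return type
        switch (shorty[i]) {
          case 'J': case 'D': slots += 2; break;   // long/double take two entries
          default:            slots += 1; break;   // ints, floats, and 'L' references
        }
      }
      return slots;
    }

    int main() {
      const char* shorty = "VLJI";  // void f(Object, long, int)
      std::printf("stack slots: %u\n",
                  CountStackSlots(shorty, static_cast<uint32_t>(std::strlen(shorty))));  // 4
      return 0;
    }
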
void PushGpr(uintptr_t /* val */) {
@@ -1311,7 +1335,7 @@
}
// Adds space for the cookie. Note: may leave stack unaligned.
- void LayoutCookie(uint8_t** sp) {
+ void LayoutCookie(uint8_t** sp) const {
// Reference cookie and padding
*sp -= 8;
}
@@ -1336,8 +1360,7 @@
// WARNING: After this, *sp won't be pointing to the method anymore!
uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
- bool is_static, const char* shorty, uint32_t shorty_len,
- HandleScope** handle_scope,
+ const char* shorty, uint32_t shorty_len, HandleScope** handle_scope,
uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
@@ -1411,9 +1434,9 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
- return 0U;
+ UNREACHABLE();
}
private:
@@ -1434,7 +1457,7 @@
uintptr_t* start_gpr_reg;
uint32_t* start_fpr_reg;
uintptr_t* start_stack_arg;
- bottom_of_used_area_ = fsc.ComputeLayout(self, sp, is_static, shorty, shorty_len,
+ bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
&handle_scope_,
&start_stack_arg,
&start_gpr_reg, &start_fpr_reg);
@@ -1458,11 +1481,11 @@
return handle_scope_->GetHandle(0).GetReference();
}
- jobject GetFirstHandleScopeJObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return handle_scope_->GetHandle(0).ToJObject();
}
- void* GetBottomOfUsedArea() {
+ void* GetBottomOfUsedArea() const {
return bottom_of_used_area_;
}
@@ -1556,7 +1579,7 @@
break;
case Primitive::kPrimVoid:
LOG(FATAL) << "UNREACHABLE";
- break;
+ UNREACHABLE();
}
}
@@ -1609,13 +1632,13 @@
uint32_t shorty_len = 0;
const char* shorty = called->GetShorty(&shorty_len);
- // Run the visitor.
+ // Run the visitor and update sp.
BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
visitor.VisitArguments();
visitor.FinalizeHandleScope(self);
// Fix up managed-stack things in Thread.
- self->SetTopOfStack(sp, 0);
+ self->SetTopOfStack(sp);
self->VerifyStack();
@@ -1635,7 +1658,7 @@
*(sp32 - 1) = cookie;
// Retrieve the stored native code.
- const void* nativeCode = called->GetNativeMethod();
+ void* nativeCode = called->GetEntryPointFromJni();
// There are two cases for the content of nativeCode:
// 1) Pointer to the native function.
@@ -1744,10 +1767,11 @@
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
mirror::ArtMethod* caller_method,
Thread* self, StackReference<mirror::ArtMethod>* sp) {
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK_EQ(sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
type);
if (UNLIKELY(method == nullptr)) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
uint32_t shorty_len;
const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
@@ -1852,21 +1876,20 @@
Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
mirror::ArtMethod* method;
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
if (UNLIKELY(method == NULL)) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
caller_method);
return GetTwoWordFailureValue(); // Failure.
}
} else {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
// Find the caller PC.
- constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs);
+ constexpr size_t pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs);
uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + pc_offset);
// Map the caller PC to a dex PC.
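
The renamed GetCalleeSaveReturnPcOffset is used above to locate the caller's return PC inside the RefsAndArgs callee-save frame. Under the usual layout the return address occupies the last pointer-sized slot of that frame; that offset, and the frame size used below, are assumptions for the sketch rather than values spelled out in this hunk.

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Read the caller's return PC from the last pointer-sized slot of a frame at sp.
    static uintptr_t ReadCallerPc(const uint8_t* sp, size_t frame_size_in_bytes) {
      size_t return_pc_offset = frame_size_in_bytes - sizeof(void*);
      uintptr_t caller_pc;
      std::memcpy(&caller_pc, sp + return_pc_offset, sizeof(caller_pc));
      return caller_pc;
    }

    int main() {
      // Build a fake 64-byte frame and plant a recognizable "return pc" at its top.
      constexpr size_t kFrameSize = 64;
      uint8_t frame[kFrameSize] = {};
      uintptr_t fake_pc = 0xEBAD6070;
      std::memcpy(frame + kFrameSize - sizeof(void*), &fake_pc, sizeof(fake_pc));
      std::printf("caller pc: 0x%zx\n",
                  static_cast<size_t>(ReadCallerPc(frame, kFrameSize)));
      return 0;
    }
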
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 41af88e..85a0b99 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -34,7 +34,7 @@
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
@@ -98,11 +98,11 @@
// Note: we can only check against the kRuntimeISA, because the ArtMethod computation uses
// sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
CheckPCOffset(kRuntimeISA, Runtime::kRefsAndArgs,
- GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs));
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs));
CheckPCOffset(kRuntimeISA, Runtime::kRefsOnly,
- GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsOnly));
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly));
CheckPCOffset(kRuntimeISA, Runtime::kSaveAll,
- GetCalleeSavePCOffset(kRuntimeISA, Runtime::kSaveAll));
+ GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kSaveAll));
}
} // namespace art
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1714134..ee9b221 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -43,7 +43,7 @@
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle"))));
my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader);
- ASSERT_TRUE(my_klass_ != NULL);
+ ASSERT_TRUE(my_klass_ != nullptr);
Handle<mirror::Class> klass(hs.NewHandle(my_klass_));
class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
my_klass_ = klass.Get();
@@ -93,12 +93,12 @@
const uint8_t* code_ptr = &fake_header_code_and_maps_[mapping_table_offset];
method_f_ = my_klass_->FindVirtualMethod("f", "()I");
- ASSERT_TRUE(method_f_ != NULL);
+ ASSERT_TRUE(method_f_ != nullptr);
method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
method_f_->SetNativeGcMap(&fake_gc_map_[0]);
method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
- ASSERT_TRUE(method_g_ != NULL);
+ ASSERT_TRUE(method_g_ != nullptr);
method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
method_g_->SetNativeGcMap(&fake_gc_map_[0]);
}
@@ -122,7 +122,7 @@
ScopedObjectAccess soa(Thread::Current());
const DexFile::CodeItem* code_item = dex_->GetCodeItem(method_f_->GetCodeItemOffset());
- ASSERT_TRUE(code_item != NULL);
+ ASSERT_TRUE(code_item != nullptr);
ASSERT_EQ(2u, code_item->tries_size_);
ASSERT_NE(0u, code_item->insns_size_in_code_units_);
@@ -163,14 +163,30 @@
ScopedObjectAccess soa(env);
std::vector<uintptr_t> fake_stack;
+ Runtime* r = Runtime::Current();
+ r->SetInstructionSet(kRuntimeISA);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
+ r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
+ QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+
ASSERT_EQ(kStackAlignment, 16U);
// ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
+
if (!kUsePortableCompiler) {
- // Create two fake stack frames with mapping data created in SetUp. We map offset 3 in the code
- // to dex pc 3.
+ // Create three fake stack frames with mapping data created in SetUp. We map offset 3 in the
+ // code to dex pc 3.
const uint32_t dex_pc = 3;
+ // Create the stack frame for the callee save method, expected by the runtime.
+ fake_stack.push_back(reinterpret_cast<uintptr_t>(save_method));
+ for (size_t i = 0; i < frame_info.FrameSizeInBytes() - 2 * sizeof(uintptr_t);
+ i += sizeof(uintptr_t)) {
+ fake_stack.push_back(0);
+ }
+
+ fake_stack.push_back(method_g_->ToNativeQuickPc(dex_pc)); // return pc
+
// Create/push fake 16-byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
@@ -183,7 +199,7 @@
fake_stack.push_back(0);
fake_stack.push_back(0xEBAD6070); // return pc
- // Pull Method* of NULL to terminate the trace
+ // Push Method* of NULL to terminate the trace
fake_stack.push_back(0);
// Push null values which will become null incoming arguments.
@@ -192,9 +208,7 @@
fake_stack.push_back(0);
// Set up thread to appear as if we called out of method_g_ at pc dex 3
- thread->SetTopOfStack(
- reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]),
- method_g_->ToNativeQuickPc(dex_pc)); // return pc
+ thread->SetTopOfStack(reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]));
} else {
// Create/push fake 20-byte shadow frame for method g
fake_stack.push_back(0);
@@ -215,33 +229,35 @@
}
jobject internal = thread->CreateInternalStackTrace<false>(soa);
- ASSERT_TRUE(internal != NULL);
+ ASSERT_TRUE(internal != nullptr);
jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
- ASSERT_TRUE(ste_array != NULL);
+ ASSERT_TRUE(ste_array != nullptr);
mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
- ASSERT_TRUE(trace_array != NULL);
- ASSERT_TRUE(trace_array->Get(0) != NULL);
+ ASSERT_TRUE(trace_array != nullptr);
+ ASSERT_TRUE(trace_array->Get(0) != nullptr);
EXPECT_STREQ("ExceptionHandle",
trace_array->Get(0)->GetDeclaringClass()->ToModifiedUtf8().c_str());
- EXPECT_STREQ("ExceptionHandle.java", trace_array->Get(0)->GetFileName()->ToModifiedUtf8().c_str());
+ EXPECT_STREQ("ExceptionHandle.java",
+ trace_array->Get(0)->GetFileName()->ToModifiedUtf8().c_str());
EXPECT_STREQ("g", trace_array->Get(0)->GetMethodName()->ToModifiedUtf8().c_str());
EXPECT_EQ(37, trace_array->Get(0)->GetLineNumber());
- ASSERT_TRUE(trace_array->Get(1) != NULL);
+ ASSERT_TRUE(trace_array->Get(1) != nullptr);
EXPECT_STREQ("ExceptionHandle",
trace_array->Get(1)->GetDeclaringClass()->ToModifiedUtf8().c_str());
- EXPECT_STREQ("ExceptionHandle.java", trace_array->Get(1)->GetFileName()->ToModifiedUtf8().c_str());
+ EXPECT_STREQ("ExceptionHandle.java",
+ trace_array->Get(1)->GetFileName()->ToModifiedUtf8().c_str());
EXPECT_STREQ("f", trace_array->Get(1)->GetMethodName()->ToModifiedUtf8().c_str());
EXPECT_EQ(22, trace_array->Get(1)->GetLineNumber());
-#if !defined(ART_USE_PORTABLE_COMPILER)
- thread->SetTopOfStack(NULL, 0); // Disarm the assertion that no code is running when we detach.
-#else
- thread->PopShadowFrame();
- thread->PopShadowFrame();
-#endif
+ if (!kUsePortableCompiler) {
+ thread->SetTopOfStack(nullptr); // Disarm the assertion that no code is running when we detach.
+ } else {
+ thread->PopShadowFrame();
+ thread->PopShadowFrame();
+ }
}
} // namespace art
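
The quick-frame part of the test above now builds its fake stack bottom-up as a vector of machine words: the callee-save method pointer, filler up to the frame size minus the method slot and the return-PC slot (which are pushed explicitly), the return PC, and then the frames for g and f. A compressed sketch of that layout logic, with an invented frame size standing in for frame_info.FrameSizeInBytes():

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Push one fake quick frame: a "method pointer", filler words, then a return pc.
    // frame_size_in_bytes is assumed to cover both the method slot and the return-pc slot.
    static void PushFakeFrame(std::vector<uintptr_t>* stack, uintptr_t method,
                              size_t frame_size_in_bytes, uintptr_t return_pc) {
      stack->push_back(method);
      for (size_t i = 0; i < frame_size_in_bytes - 2 * sizeof(uintptr_t);
           i += sizeof(uintptr_t)) {
        stack->push_back(0);  // callee-save register area, contents irrelevant for the test
      }
      stack->push_back(return_pc);
    }

    int main() {
      std::vector<uintptr_t> fake_stack;
      PushFakeFrame(&fake_stack, /*method=*/0x1000, /*frame_size_in_bytes=*/48,
                    /*return_pc=*/0x2000);
      fake_stack.push_back(0);  // null method terminates the walk, as in the test
      std::printf("words pushed: %zu\n", fake_stack.size());  // 7 on a 64-bit host
      return 0;
    }
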
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 62e0609..835485c 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -179,6 +179,10 @@
// Now set up the nested signal handler.
+ // TODO: add SIGSEGV back to the nested signals when we can handle running out of stack gracefully.
+ static const int handled_nested_signals[] = {SIGABRT};
+ constexpr size_t num_handled_nested_signals = arraysize(handled_nested_signals);
+
// Release the fault manager so that it will remove the signal chain for
// SIGSEGV and we call the real sigaction.
fault_manager.Release();
@@ -188,33 +192,40 @@
// Unblock the signals we allow so that they can be delivered in the signal handler.
sigset_t sigset;
sigemptyset(&sigset);
- sigaddset(&sigset, SIGSEGV);
- sigaddset(&sigset, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&sigset, signal);
+ }
pthread_sigmask(SIG_UNBLOCK, &sigset, nullptr);
// If we get a signal in this code we want to invoke our nested signal
// handler.
- struct sigaction action, oldsegvaction, oldabortaction;
+ struct sigaction action;
+ struct sigaction oldactions[num_handled_nested_signals];
action.sa_sigaction = art_nested_signal_handler;
// Explicitly mask out SIGSEGV and SIGABRT from the nested signal handler. This
// should be the default but we definitely don't want these happening in our
// nested signal handler.
sigemptyset(&action.sa_mask);
- sigaddset(&action.sa_mask, SIGSEGV);
- sigaddset(&action.sa_mask, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&action.sa_mask, signal);
+ }
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
#if !defined(__APPLE__) && !defined(__mips__)
action.sa_restorer = nullptr;
#endif
- // Catch SIGSEGV and SIGABRT to invoke our nested handler
- int e1 = sigaction(SIGSEGV, &action, &oldsegvaction);
- int e2 = sigaction(SIGABRT, &action, &oldabortaction);
- if (e1 != 0 || e2 != 0) {
- LOG(ERROR) << "Unable to set up nested signal handler";
- } else {
+ // Catch handled signals to invoke our nested handler.
+ bool success = true;
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &action, &oldactions[i]) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to set up nested signal handler";
+ break;
+ }
+ }
+ if (success) {
// Save the current state and call the handlers. If anything causes a signal
// our nested signal handler will be invoked and this will longjmp to the saved
// state.
@@ -223,8 +234,12 @@
if (handler->Action(sig, info, context)) {
// Restore the signal handlers, reinit the fault manager and return. Signal was
// handled.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
fault_manager.Init();
return;
}
@@ -234,8 +249,12 @@
}
// Restore the signal handlers.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
}
// Now put the fault manager back in place.
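
The rewritten block above replaces the hard-coded SIGSEGV/SIGABRT pairs with a table-driven loop: install art_nested_signal_handler for each entry, remember the old disposition, and restore it on every exit path. A stand-alone sketch of that install/restore pattern follows; it uses SIGUSR1 so the example is harmless to run, and is not the ART handler itself.

    #include <csignal>
    #include <cstdio>
    #include <cstring>

    static void NestedHandler(int sig, siginfo_t*, void*) {
      // printf is not async-signal-safe; acceptable for a demo only.
      std::printf("nested handler got signal %d\n", sig);
    }

    int main() {
      static const int handled[] = {SIGUSR1};  // stand-in for the nested-signal table
      constexpr size_t n = sizeof(handled) / sizeof(handled[0]);
      struct sigaction action, old[n];
      std::memset(&action, 0, sizeof(action));
      action.sa_sigaction = NestedHandler;
      action.sa_flags = SA_SIGINFO | SA_ONSTACK;
      sigemptyset(&action.sa_mask);
      for (size_t i = 0; i < n; ++i) {  // install, keeping the old handlers
        if (sigaction(handled[i], &action, &old[i]) != 0) {
          std::perror("sigaction");
          return 1;
        }
      }
      raise(SIGUSR1);  // this would normally be the guarded region of work
      for (size_t i = 0; i < n; ++i) {  // restore on the way out
        if (sigaction(handled[i], &old[i], nullptr) != 0) {
          std::perror("sigaction restore");
        }
      }
      return 0;
    }
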
@@ -341,7 +360,8 @@
// at the return PC address.
if (true || kIsDebugBuild) {
VLOG(signals) << "looking for dex pc for return pc " << std::hex << return_pc;
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_obj);
+ const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_obj,
+ sizeof(void*));
uint32_t sought_offset = return_pc - reinterpret_cast<uintptr_t>(code);
VLOG(signals) << "pc offset: " << std::hex << sought_offset;
}
@@ -383,7 +403,7 @@
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
-
+ UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
@@ -400,7 +420,7 @@
// Inside of generated code, sp[0] is the method, so sp is the frame.
StackReference<mirror::ArtMethod>* frame =
reinterpret_cast<StackReference<mirror::ArtMethod>*>(sp);
- self->SetTopOfStack(frame, 0); // Since we don't necessarily have a dex pc, pass in 0.
+ self->SetTopOfStack(frame);
#ifdef TEST_NESTED_SIGNAL
// To test the nested signal handler we raise a signal here. This will cause the
// nested signal handler to be called and perform a longjmp back to the setjmp
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 9a6f2b2..b7b6099 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -66,7 +66,7 @@
CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
- COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
+ static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* cardtable_begin = mem_map->Begin();
CHECK(cardtable_begin != NULL);
@@ -98,7 +98,7 @@
}
void CardTable::ClearCardTable() {
- COMPILE_ASSERT(kCardClean == 0, clean_card_must_be_0);
+ static_assert(kCardClean == 0, "kCardClean must be 0");
mem_map_->MadviseDontNeedAndZero();
}
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index e1343c8..9bd3fba 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -54,9 +54,8 @@
static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
// Set the card associated with the given address to GC_CARD_DIRTY.
- void MarkCard(const void *addr) {
- uint8_t* card_addr = CardFromAddr(addr);
- *card_addr = kCardDirty;
+ ALWAYS_INLINE void MarkCard(const void *addr) {
+ *CardFromAddr(addr) = kCardDirty;
}
// Is the object on a dirty card?
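
MarkCard is now forced inline because it sits on the write-barrier fast path: both dirtying a card and testing one reduce to a single byte access at an address derived from the object address. A sketch of that mapping under the usual card-table scheme, where one card byte covers 2^kCardShift bytes of heap; the shift value and the dirty marker below are assumptions for the example, not taken from this diff.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr size_t kCardShift = 7;     // assumed: one card per 128 heap bytes
    constexpr uint8_t kCardClean = 0;
    constexpr uint8_t kCardDirty = 0x70; // arbitrary non-zero marker for the sketch

    struct CardTable {
      uintptr_t heap_begin;
      std::vector<uint8_t> cards;

      uint8_t* CardFromAddr(const void* addr) {
        size_t offset = reinterpret_cast<uintptr_t>(addr) - heap_begin;
        return &cards[offset >> kCardShift];
      }
      void MarkCard(const void* addr) { *CardFromAddr(addr) = kCardDirty; }
    };

    int main() {
      std::vector<uint8_t> heap(4096, 0);
      CardTable table{reinterpret_cast<uintptr_t>(heap.data()),
                      std::vector<uint8_t>(heap.size() >> kCardShift, kCardClean)};
      table.MarkCard(heap.data() + 300);  // dirties card index 300 >> 7 == 2
      std::printf("card 2 dirty: %d\n", table.cards[2] == kCardDirty);  // 1
      return 0;
    }
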
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index c67542f..34c15c7 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -40,9 +40,9 @@
if (LIKELY(bitmap != nullptr)) {
return bitmap->Test(obj);
}
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- return bitmap->Test(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ return lo_bitmap->Test(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
@@ -55,9 +55,9 @@
bitmap->Clear(obj);
return;
}
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- bitmap->Clear(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ lo_bitmap->Clear(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
@@ -70,9 +70,9 @@
return bitmap->Set(obj);
}
visitor(obj);
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- return bitmap->Set(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ return lo_bitmap->Set(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
@@ -87,9 +87,9 @@
return bitmap->AtomicTestAndSet(obj);
}
visitor(obj);
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- return bitmap->AtomicTestAndSet(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ return lo_bitmap->AtomicTestAndSet(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 753b42d..0a15e9e 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -46,6 +46,7 @@
}
inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -62,6 +63,7 @@
}
void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
+ UNUSED(new_card);
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index d43dc0a..b16a146 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -43,6 +43,7 @@
: dirty_cards_(dirty_cards) {}
void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 40856fc..850325a 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -91,7 +91,7 @@
public:
explicit SimpleCounter(size_t* counter) : count_(counter) {}
- void operator()(mirror::Object* obj) const {
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
(*count_)++;
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index a6a3ee7..8558f96 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -19,8 +19,8 @@
#include "base/logging.h"
// ART specific morecore implementation defined in space.cc.
+static void* art_heap_morecore(void* m, intptr_t increment);
#define MORECORE(x) art_heap_morecore(m, x)
-extern "C" void* art_heap_morecore(void* m, intptr_t increment);
// Custom heap error handling.
#define PROCEED_ON_ERROR 0
@@ -31,19 +31,24 @@
// Ugly inclusion of C file so that ART specific #defines configure dlmalloc for our use for
// mspaces (regular dlmalloc is still declared in bionic).
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "../../../bionic/libc/upstream-dlmalloc/malloc.c"
-#pragma GCC diagnostic warning "-Wstrict-aliasing"
-#pragma GCC diagnostic warning "-Wempty-body"
+#pragma GCC diagnostic pop
+static void* art_heap_morecore(void* m, intptr_t increment) {
+ return ::art::gc::allocator::ArtDlMallocMoreCore(m, increment);
+}
static void art_heap_corruption(const char* function) {
- LOG(FATAL) << "Corrupt heap detected in: " << function;
+ LOG(::art::FATAL) << "Corrupt heap detected in: " << function;
}
static void art_heap_usage_error(const char* function, void* p) {
- LOG(FATAL) << "Incorrect use of function '" << function << "' argument " << p << " not expected";
+ LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p
+ << " not expected";
}
#include "globals.h"
@@ -63,14 +68,16 @@
int rc = madvise(start, length, MADV_DONTNEED);
if (UNLIKELY(rc != 0)) {
errno = rc;
- PLOG(FATAL) << "madvise failed during heap trimming";
+ PLOG(::art::FATAL) << "madvise failed during heap trimming";
}
size_t* reclaimed = reinterpret_cast<size_t*>(arg);
*reclaimed += length;
}
}
-extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* arg) {
if (used_bytes == 0) {
return;
}
@@ -78,7 +85,10 @@
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+ void* arg) {
+ UNUSED(start);
+ UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index c820b19..0e91a43 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
+#include <cstdint>
+
// Configure dlmalloc for mspaces.
// Avoid a collision with one used in llvm.
#undef HAVE_MMAP
@@ -28,12 +30,17 @@
#define ONLY_MSPACES 1
#define MALLOC_INSPECT_ALL 1
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
#include "../../bionic/libc/upstream-dlmalloc/malloc.h"
+#pragma GCC diagnostic pop
+#ifdef HAVE_ANDROID_OS
// Define dlmalloc routines from bionic that cannot be included directly because of redefining
// symbols from the include above.
extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg);
extern "C" int dlmalloc_trim(size_t);
+#endif
// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
// pages back to the kernel.
@@ -45,4 +52,16 @@
extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
+namespace art {
+namespace gc {
+namespace allocator {
+
+// Callback from dlmalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently dlmalloc_space.cc).
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment);
+
+} // namespace allocator
+} // namespace gc
+} // namespace art
+
#endif // ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
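
ArtDlMallocMoreCore is only declared here; the change routes dlmalloc's MORECORE callback through it so the growth policy can live with the space code (currently dlmalloc_space.cc, per the comment). A rough sketch of what such a morecore-style hook looks like follows; the ExtendableRegion type is invented for illustration and the failure convention is simplified.

    #include <cstdint>
    #include <cstdio>

    // Invented stand-in for the memory region a malloc space manages.
    struct ExtendableRegion {
      uint8_t* begin;
      size_t size;
      size_t capacity;
    };

    // Morecore-style hook: grow (or shrink) the region and return the old end,
    // or report failure when the request cannot be satisfied.
    void* MoreCore(ExtendableRegion* region, intptr_t increment) {
      uint8_t* old_end = region->begin + region->size;
      if (increment > 0 && region->size + static_cast<size_t>(increment) > region->capacity) {
        return nullptr;  // simplified failure; dlmalloc's real convention differs
      }
      region->size += increment;  // increment may be negative to release footprint
      return old_end;
    }

    int main() {
      static uint8_t backing[1024];
      ExtendableRegion region{backing, 0, sizeof(backing)};
      void* first = MoreCore(&region, 256);
      void* second = MoreCore(&region, 256);
      std::printf("grew by %td bytes\n",
                  static_cast<uint8_t*>(second) - static_cast<uint8_t*>(first));  // 256
      return 0;
    }
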
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index dd419a4..f6c9d3c 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -23,6 +23,10 @@
namespace gc {
namespace allocator {
+inline ALWAYS_INLINE bool RosAlloc::ShouldCheckZeroMemory() {
+ return kCheckZeroMemory && !running_on_valgrind_;
+}
+
template<bool kThreadSafe>
inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated) {
if (UNLIKELY(size > kLargeSizeThreshold)) {
@@ -35,7 +39,7 @@
m = AllocFromRunThreadUnsafe(self, size, bytes_allocated);
}
// Check if the returned memory is really all zero.
- if (kCheckZeroMemory && m != nullptr) {
+ if (ShouldCheckZeroMemory() && m != nullptr) {
uint8_t* bytes = reinterpret_cast<uint8_t*>(m);
for (size_t i = 0; i < size; ++i) {
DCHECK_EQ(bytes[i], 0);
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 0cea89d..7c2474f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -14,24 +14,25 @@
* limitations under the License.
*/
+#include "rosalloc.h"
+
#include "base/mutex-inl.h"
+#include "gc/space/valgrind_settings.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
-#include "rosalloc.h"
#include <map>
#include <list>
+#include <sstream>
#include <vector>
namespace art {
namespace gc {
namespace allocator {
-extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);
-
static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;
@@ -48,13 +49,15 @@
reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);
RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
- PageReleaseMode page_release_mode, size_t page_release_size_threshold)
+ PageReleaseMode page_release_mode, bool running_on_valgrind,
+ size_t page_release_size_threshold)
: base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
capacity_(capacity), max_capacity_(max_capacity),
lock_("rosalloc global lock", kRosAllocGlobalLock),
bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
page_release_mode_(page_release_mode),
- page_release_size_threshold_(page_release_size_threshold) {
+ page_release_size_threshold_(page_release_size_threshold),
+ running_on_valgrind_(running_on_valgrind) {
DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
CHECK_LE(capacity, max_capacity);
@@ -178,7 +181,7 @@
page_map_size_ = new_num_of_pages;
DCHECK_LE(page_map_size_, max_page_map_size_);
free_page_run_size_map_.resize(new_num_of_pages);
- art_heap_rosalloc_morecore(this, increment);
+ ArtRosAllocMoreCore(this, increment);
if (last_free_page_run_size > 0) {
// There was a free page run at the end. Expand its size.
DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
@@ -264,7 +267,7 @@
}
break;
default:
- LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
break;
}
if (kIsDebugBuild) {
@@ -318,7 +321,7 @@
}
const size_t byte_size = num_pages * kPageSize;
if (already_zero) {
- if (kCheckZeroMemory) {
+ if (ShouldCheckZeroMemory()) {
const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
@@ -472,7 +475,7 @@
<< "(" << std::dec << (num_pages * kPageSize) << ")";
}
// Check if the returned memory is really all zero.
- if (kCheckZeroMemory) {
+ if (ShouldCheckZeroMemory()) {
CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
@@ -499,7 +502,7 @@
case kPageMapLargeObject:
return FreePages(self, ptr, false);
case kPageMapLargeObjectPart:
- LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
return 0;
case kPageMapRunPart: {
// Find the beginning of the run.
@@ -514,11 +517,11 @@
break;
case kPageMapReleased:
case kPageMapEmpty:
- LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
return 0;
}
default:
- LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
return 0;
}
}
@@ -744,7 +747,7 @@
const size_t idx = run->size_bracket_idx_;
const size_t bracket_size = bracketSizes[idx];
bool run_was_full = false;
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
if (kIsDebugBuild) {
run_was_full = run->IsFull();
}
@@ -784,7 +787,7 @@
DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
run->ZeroHeader();
{
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
FreePages(self, run, true);
}
} else {
@@ -1143,7 +1146,7 @@
size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
size_t freed_bytes = 0;
- if (false) {
+ if ((false)) {
// Used only to test Free() as GC uses only BulkFree().
for (size_t i = 0; i < num_ptrs; ++i) {
freed_bytes += FreeInternal(self, ptrs[i]);
@@ -1189,7 +1192,7 @@
freed_bytes += FreePages(self, ptr, false);
continue;
} else {
- LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
}
} else {
// Read the page map entries with a lock.
@@ -1215,7 +1218,7 @@
freed_bytes += FreePages(self, ptr, false);
continue;
} else {
- LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
}
}
DCHECK(run != nullptr);
@@ -1242,7 +1245,7 @@
run->to_be_bulk_freed_ = false;
#endif
size_t idx = run->size_bracket_idx_;
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
if (run->IsThreadLocal()) {
DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
@@ -1302,7 +1305,7 @@
}
if (!run_was_current) {
run->ZeroHeader();
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
FreePages(self, run, true);
}
} else {
@@ -1434,7 +1437,7 @@
return stream.str();
}
-size_t RosAlloc::UsableSize(void* ptr) {
+size_t RosAlloc::UsableSize(const void* ptr) {
DCHECK_LE(base_, ptr);
DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
@@ -1471,13 +1474,13 @@
Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
- size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+ size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
- (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
return IndexToBracketSize(idx);
}
default: {
- LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
break;
}
}
@@ -1520,7 +1523,7 @@
page_map_size_ = new_num_of_pages;
free_page_run_size_map_.resize(new_num_of_pages);
DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
- art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
+ ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
<< footprint_ << " to " << new_footprint;
@@ -1593,7 +1596,7 @@
break;
}
case kPageMapLargeObjectPart:
- LOG(FATAL) << "Unreachable - page map type: " << pm;
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
break;
case kPageMapRun: {
// The start of a run.
@@ -1613,10 +1616,10 @@
break;
}
case kPageMapRunPart:
- LOG(FATAL) << "Unreachable - page map type: " << pm;
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
break;
default:
- LOG(FATAL) << "Unreachable - page map type: " << pm;
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
break;
}
}
@@ -1736,14 +1739,14 @@
void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
if (kIsDebugBuild) {
Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- MutexLock mu2(self, *Locks::thread_list_lock_);
+ MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
for (Thread* t : thread_list) {
AssertThreadLocalRunsAreRevoked(t);
}
for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
CHECK_EQ(current_runs_[idx], dedicated_full_run_);
}
}
@@ -1769,7 +1772,7 @@
if (i < 4) {
numOfPages[i] = 1;
} else if (i < 8) {
- numOfPages[i] = 2;
+ numOfPages[i] = 1;
} else if (i < 16) {
numOfPages[i] = 4;
} else if (i < 32) {
@@ -1850,7 +1853,8 @@
dedicated_full_run_->SetIsThreadLocal(true);
}
-void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
@@ -1858,7 +1862,8 @@
*bytes_allocated += used_bytes;
}
-void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
@@ -1870,11 +1875,11 @@
Thread* self = Thread::Current();
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
<< "The mutator lock isn't exclusively locked at " << __PRETTY_FUNCTION__;
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
ReaderMutexLock wmu(self, bulk_free_lock_);
std::vector<Run*> runs;
{
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
size_t pm_end = page_map_size_;
size_t i = 0;
while (i < pm_end) {
@@ -1914,10 +1919,15 @@
num_pages++;
idx++;
}
- void* start = base_ + i * kPageSize;
+ uint8_t* start = base_ + i * kPageSize;
+ if (running_on_valgrind_) {
+ start += ::art::gc::space::kDefaultValgrindRedZoneBytes;
+ }
mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
size_t obj_size = obj->SizeOf();
- CHECK_GT(obj_size, kLargeSizeThreshold)
+ CHECK_GT(obj_size +
+ (running_on_valgrind_ ? 2 * ::art::gc::space::kDefaultValgrindRedZoneBytes : 0),
+ kLargeSizeThreshold)
<< "A rosalloc large object size must be > " << kLargeSizeThreshold;
CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
<< "A rosalloc large object size " << obj_size
@@ -1929,7 +1939,7 @@
break;
}
case kPageMapLargeObjectPart:
- LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
break;
case kPageMapRun: {
// The start of a run.
@@ -1957,7 +1967,7 @@
case kPageMapRunPart:
// Fall-through.
default:
- LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
break;
}
}
@@ -1965,7 +1975,7 @@
std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
for (Thread* thread : threads) {
for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
- MutexLock mu(self, *size_bracket_locks_[i]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
CHECK(thread_local_run != nullptr);
CHECK(thread_local_run->IsThreadLocal());
@@ -1974,7 +1984,7 @@
}
}
for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
- MutexLock mu(self, *size_bracket_locks_[i]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Run* current_run = current_runs_[i];
CHECK(current_run != nullptr);
if (current_run != dedicated_full_run_) {
@@ -1985,11 +1995,11 @@
}
// Call Verify() here for the lock order.
for (auto& run : runs) {
- run->Verify(self, this);
+ run->Verify(self, this, running_on_valgrind_);
}
}
-void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
+void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind) {
DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
const size_t idx = size_bracket_idx_;
CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
@@ -2072,6 +2082,9 @@
}
// Check each slot.
size_t slots = 0;
+ size_t valgrind_modifier = running_on_valgrind ?
+ 2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :
+ 0U;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
DCHECK_GE(num_slots, slots) << "Out of bounds";
uint32_t vec = alloc_bit_map_[v];
@@ -2084,14 +2097,17 @@
bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
if (is_allocated && !is_thread_local_freed) {
uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
+ if (running_on_valgrind) {
+ slot_addr += ::art::gc::space::kDefaultValgrindRedZoneBytes;
+ }
mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
size_t obj_size = obj->SizeOf();
- CHECK_LE(obj_size, kLargeSizeThreshold)
+ CHECK_LE(obj_size + valgrind_modifier, kLargeSizeThreshold)
<< "A run slot contains a large object " << Dump();
- CHECK_EQ(SizeToIndex(obj_size), idx)
+ CHECK_EQ(SizeToIndex(obj_size + valgrind_modifier), idx)
<< PrettyTypeOf(obj) << " "
- << "obj_size=" << obj_size << ", idx=" << idx << " "
- << "A run slot contains an object with wrong size " << Dump();
+ << "obj_size=" << obj_size << "(" << obj_size + valgrind_modifier << "), idx=" << idx
+ << " A run slot contains an object with wrong size " << Dump();
}
}
}
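
The valgrind_modifier used above accounts for the two red zones that the Valgrind-instrumented space places around every allocation, so a slot's size bracket must be checked against the user size plus both red zones. A worked sketch of that adjustment, with the red-zone width assumed to be 8 bytes for the example:

    #include <cstdio>
    #include <cstddef>

    constexpr size_t kRedZoneBytes = 8;  // assumed width of each Valgrind red zone

    // The size the allocator actually places in a run slot when running under
    // Valgrind: user payload plus a red zone on each side.
    constexpr size_t SlotSizeUnderValgrind(size_t user_size) {
      return user_size + 2 * kRedZoneBytes;
    }

    int main() {
      size_t user_size = 24;
      std::printf("size checked against brackets: %zu\n",
                  SlotSizeUnderValgrind(user_size));  // 40, not 24
      return 0;
    }
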
@@ -2146,7 +2162,7 @@
++i;
break; // Skip.
default:
- LOG(FATAL) << "Unreachable - page map type: " << pm;
+ LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
break;
}
}
@@ -2161,6 +2177,11 @@
// In the debug build, the first page of a free page run
// contains a magic number for debugging. Exclude it.
start += kPageSize;
+
+ // Single pages won't be released.
+ if (start == end) {
+ return 0;
+ }
}
if (!kMadviseZeroes) {
// TODO: Do this when we resurrect the page instead.
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index ad7f901..3269e10 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -105,6 +105,9 @@
rosalloc->ReleasePageRange(start, start + byte_size);
}
}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FreePageRun);
};
// Represents a run of memory slots of the same size.
@@ -246,7 +249,7 @@
// Dump the run metadata for debugging.
std::string Dump();
// Verify for debugging.
- void Verify(Thread* self, RosAlloc* rosalloc)
+ void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
@@ -256,6 +259,8 @@
size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
// Turns the bit map into a string for debugging.
static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
+
+ // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
};
// The magic number for a run.
@@ -355,13 +360,14 @@
// Returns the page map index from an address. Requires that the
// address is page size aligned.
size_t ToPageMapIndex(const void* addr) const {
- DCHECK(base_ <= addr && addr < base_ + capacity_);
+ DCHECK_LE(base_, addr);
+ DCHECK_LT(addr, base_ + capacity_);
size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
return byte_offset / kPageSize;
}
// Returns the page map index from an address with rounding.
- size_t RoundDownToPageMapIndex(void* addr) const {
+ size_t RoundDownToPageMapIndex(const void* addr) const {
DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
}
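
ToPageMapIndex and RoundDownToPageMapIndex both reduce an address to a page index relative to the allocator's base; the first insists the address is page aligned while the second simply rounds down. A compact illustration, with a 4 KiB page size assumed for the example:

    #include <cstdint>
    #include <cstdio>

    constexpr size_t kPageSize = 4096;  // assumed for the example

    static size_t RoundDownToPageMapIndex(uintptr_t base, uintptr_t addr) {
      return (addr - base) / kPageSize;  // any address inside a page maps to its index
    }

    static size_t ToPageMapIndex(uintptr_t base, uintptr_t addr) {
      size_t byte_offset = addr - base;
      // The strict variant requires page alignment; here we just report it.
      std::printf("aligned: %d\n", byte_offset % kPageSize == 0);
      return byte_offset / kPageSize;
    }

    int main() {
      uintptr_t base = 0x10000000;
      std::printf("index: %zu\n", RoundDownToPageMapIndex(base, base + 3 * kPageSize + 123));  // 3
      std::printf("index: %zu\n", ToPageMapIndex(base, base + 3 * kPageSize));                 // 3
      return 0;
    }
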
@@ -372,6 +378,10 @@
// If true, check that the returned memory is actually zero.
static constexpr bool kCheckZeroMemory = kIsDebugBuild;
+ // Valgrind protects memory, so do not check memory when running under valgrind. In a normal
+ // build with kCheckZeroMemory the whole test should be optimized away.
+ // TODO: Unprotect before checks.
+ ALWAYS_INLINE bool ShouldCheckZeroMemory();
// If true, log verbose details of operations.
static constexpr bool kTraceRosAlloc = false;
@@ -404,8 +414,7 @@
// We use thread-local runs for the size Brackets whose indexes
// are less than this index. We use shared (current) runs for the rest.
-
- static const size_t kNumThreadLocalSizeBrackets = 11;
+ static const size_t kNumThreadLocalSizeBrackets = 8;
private:
// The base address of the memory region that's managed by this allocator.
@@ -446,7 +455,7 @@
// Bracket lock names (since locks only have char* names).
std::string size_bracket_lock_names_[kNumOfSizeBrackets];
// The types of page map entries.
- enum {
+ enum PageMapKind {
kPageMapReleased = 0, // Zero and released back to the OS.
kPageMapEmpty, // Zero but probably dirty.
kPageMapRun, // The beginning of a run.
@@ -480,6 +489,9 @@
// greater than or equal to this value, release pages.
const size_t page_release_size_threshold_;
+ // Whether this allocator is running under Valgrind.
+ bool running_on_valgrind_;
+
// The base address of the memory region that's managed by this allocator.
uint8_t* Begin() { return base_; }
// The end address of the memory region that's managed by this allocator.
@@ -526,11 +538,16 @@
// Release a range of pages.
size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Dumps the page map for debugging.
+ std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
public:
RosAlloc(void* base, size_t capacity, size_t max_capacity,
PageReleaseMode page_release_mode,
+ bool running_on_valgrind,
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
+
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
@@ -540,8 +557,9 @@
LOCKS_EXCLUDED(bulk_free_lock_);
size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
+
// Returns the size of the allocated slot for a given allocated memory chunk.
- size_t UsableSize(void* ptr);
+ size_t UsableSize(const void* ptr);
// Returns the size of the allocated slot for a given size.
size_t UsableSize(size_t bytes) {
if (UNLIKELY(bytes > kLargeSizeThreshold)) {
@@ -557,6 +575,7 @@
void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg)
LOCKS_EXCLUDED(lock_);
+
// Release empty pages.
size_t ReleasePages() LOCKS_EXCLUDED(lock_);
// Returns the current footprint.
@@ -565,6 +584,7 @@
size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
// Update the current capacity.
void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
+
// Releases the thread-local runs assigned to the given thread back to the common set of runs.
void RevokeThreadLocalRuns(Thread* thread);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
@@ -573,8 +593,7 @@
void AssertThreadLocalRunsAreRevoked(Thread* thread);
// Assert all the thread local runs are revoked.
void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
- // Dumps the page map for debugging.
- std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
static Run* GetDedicatedFullRun() {
return dedicated_full_run_;
}
@@ -597,7 +616,17 @@
void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
+
+ private:
+ friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
+std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+
+// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently rosalloc_space.cc).
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);
} // namespace allocator
} // namespace gc
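
The page map enum now has a name (PageMapKind) and a friend operator<< declaration, while the LOG(FATAL) sites above switch to static_cast<int> in the meantime. Below is a plausible sketch of the stream operator that declaration anticipates; the kind names come from the enumerators appearing in this diff, but the ordering beyond the first three and the exact output format are assumptions.

    #include <iostream>

    enum PageMapKind {
      kPageMapReleased = 0,
      kPageMapEmpty,
      kPageMapRun,            // enumerator order beyond this point is assumed
      kPageMapRunPart,
      kPageMapLargeObject,
      kPageMapLargeObjectPart,
    };

    std::ostream& operator<<(std::ostream& os, const PageMapKind& rhs) {
      switch (rhs) {
        case kPageMapReleased:        return os << "kPageMapReleased";
        case kPageMapEmpty:           return os << "kPageMapEmpty";
        case kPageMapRun:             return os << "kPageMapRun";
        case kPageMapRunPart:         return os << "kPageMapRunPart";
        case kPageMapLargeObject:     return os << "kPageMapLargeObject";
        case kPageMapLargeObjectPart: return os << "kPageMapLargeObjectPart";
      }
      return os << "PageMapKind[" << static_cast<int>(rhs) << "]";
    }

    int main() {
      std::cout << kPageMapRun << std::endl;  // prints kPageMapRun
      return 0;
    }
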
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 938b0f1..c6ebc73 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
#define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
+#include <ostream>
+
namespace art {
namespace gc {
@@ -29,6 +31,7 @@
kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
};
+std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs);
} // namespace gc
} // namespace art
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index ce7c75a..ee5a785 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -29,7 +29,9 @@
const std::string& name_prefix = "")
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") +
- "concurrent copying + mark sweep") {}
+ "concurrent copying + mark sweep") {
+ UNUSED(generational);
+ }
~ConcurrentCopying() {}
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 07b61e6..9e6a800 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -18,6 +18,10 @@
#include "garbage_collector.h"
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
+#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
@@ -188,7 +192,7 @@
if (iterations == 0) {
return;
}
- os << ConstDumpable<CumulativeLogger>(logger);
+ os << Dumpable<CumulativeLogger>(logger);
const uint64_t total_ns = logger.GetTotalNs();
double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
const uint64_t freed_bytes = GetTotalFreedBytes();
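
The switch from ConstDumpable to Dumpable above (and the new base/dumpable.h include) keeps the same streaming idiom: wrap an object that exposes Dump(std::ostream&) so it can be fed to operator<<. A generic sketch of that wrapper pattern follows; it is not necessarily the exact ART definition, and CumulativeLoggerLike is an invented example type.

    #include <iostream>

    // Minimal adapter: anything with Dump(std::ostream&) becomes streamable.
    template <typename T>
    class Dumpable {
     public:
      explicit Dumpable(const T& value) : value_(value) {}
      void Dump(std::ostream& os) const { value_.Dump(os); }
     private:
      const T& value_;
    };

    template <typename T>
    std::ostream& operator<<(std::ostream& os, const Dumpable<T>& rhs) {
      rhs.Dump(os);
      return os;
    }

    struct CumulativeLoggerLike {  // invented example type
      void Dump(std::ostream& os) const { os << "total: 42ms\n"; }
    };

    int main() {
      CumulativeLoggerLike logger;
      std::cout << Dumpable<CumulativeLoggerLike>(logger);
      return 0;
    }
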
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6691b0f..b2482ac 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -239,7 +239,7 @@
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
- TimingLogger::ScopedTiming t(
+ TimingLogger::ScopedTiming t2(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable", GetTimings());
table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
@@ -348,7 +348,7 @@
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
- TimingLogger::ScopedTiming t(
+ TimingLogger::ScopedTiming t2(
space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
"UpdateImageModUnionTableReferences",
GetTimings());
@@ -538,7 +538,7 @@
if (!ShouldSweepSpace(alloc_space)) {
continue;
}
- TimingLogger::ScopedTiming t(
+ TimingLogger::ScopedTiming t2(
alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 942b556..6ad44e6 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -21,6 +21,9 @@
#include <climits>
#include <vector>
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -655,6 +658,7 @@
// Scans all of the objects
virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ UNUSED(self);
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
@@ -663,10 +667,10 @@
Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
- Object* obj = mark_stack_[--mark_stack_pos_];
- DCHECK(obj != nullptr);
- __builtin_prefetch(obj);
- prefetch_fifo.push_back(obj);
+ Object* mark_stack_obj = mark_stack_[--mark_stack_pos_];
+ DCHECK(mark_stack_obj != nullptr);
+ __builtin_prefetch(mark_stack_obj);
+ prefetch_fifo.push_back(mark_stack_obj);
}
if (UNLIKELY(prefetch_fifo.empty())) {
break;
@@ -810,6 +814,7 @@
break;
default:
LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
TimingLogger::ScopedTiming t(name, GetTimings());
ScanObjectVisitor visitor(this);
@@ -923,7 +928,7 @@
kVisitRootFlagStopLoggingNewRoots |
kVisitRootFlagClearRootLog));
if (kVerifyRootsMarked) {
- TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings());
+ TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
Runtime::Current()->VisitRoots(VerifyRootMarked, this);
}
}
@@ -1052,7 +1057,7 @@
// if needed.
if (!mark_bitmap->Test(obj)) {
if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
- TimingLogger::ScopedTiming t("FreeList", GetTimings());
+ TimingLogger::ScopedTiming t2("FreeList", GetTimings());
freed.objects += chunk_free_pos;
freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
chunk_free_pos = 0;
@@ -1064,7 +1069,7 @@
}
}
if (chunk_free_pos > 0) {
- TimingLogger::ScopedTiming t("FreeList", GetTimings());
+ TimingLogger::ScopedTiming t2("FreeList", GetTimings());
freed.objects += chunk_free_pos;
freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
chunk_free_pos = 0;
@@ -1094,10 +1099,10 @@
}
}
{
- TimingLogger::ScopedTiming t("RecordFree", GetTimings());
+ TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
RecordFree(freed);
RecordFreeLOS(freed_los);
- t.NewTiming("ResetStack");
+ t2.NewTiming("ResetStack");
allocations->Reset();
}
sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
@@ -1213,10 +1218,10 @@
Object* obj = NULL;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
- Object* obj = mark_stack_->PopBack();
- DCHECK(obj != NULL);
- __builtin_prefetch(obj);
- prefetch_fifo.push_back(obj);
+ Object* mark_stack_obj = mark_stack_->PopBack();
+ DCHECK(mark_stack_obj != NULL);
+ __builtin_prefetch(mark_stack_obj);
+ prefetch_fifo.push_back(mark_stack_obj);
}
if (prefetch_fifo.empty()) {
break;
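
Both mark-stack loops above keep a small bounded FIFO ahead of the worker: objects are popped from the mark stack, prefetched, and queued, so the cache line has a chance to be resident by the time the object is actually scanned; the renames merely stop the inner variable from shadowing the outer obj. A stand-alone sketch of the prefetch-FIFO pattern, with invented Object and Scan stand-ins:

    #include <cstdio>
    #include <deque>
    #include <vector>

    struct Object { int payload; };

    static void Scan(const Object* obj) { std::printf("scan %d\n", obj->payload); }

    int main() {
      constexpr size_t kFifoSize = 4;
      std::vector<Object> storage = {{1}, {2}, {3}, {4}, {5}, {6}};
      std::vector<Object*> mark_stack;
      for (Object& o : storage) mark_stack.push_back(&o);

      std::deque<Object*> prefetch_fifo;
      for (;;) {
        // Refill the FIFO, issuing a prefetch for each object as it is queued.
        while (!mark_stack.empty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack.back();
          mark_stack.pop_back();
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        Object* obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
        Scan(obj);  // by now the prefetch issued above has had time to complete
      }
      return 0;
    }
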
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 9459a3b..cb9f111 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -16,9 +16,10 @@
#include "semi_space-inl.h"
+#include <climits>
#include <functional>
#include <numeric>
-#include <climits>
+#include <sstream>
#include <vector>
#include "base/logging.h"
@@ -223,7 +224,7 @@
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
if (kUseThreadLocalAllocationStack) {
- TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
+ TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
heap_->RevokeAllThreadLocalAllocationStacks(self_);
}
heap_->SwapStacks(self_);
@@ -367,7 +368,7 @@
CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
if (is_large_object_space_immune_ && los != nullptr) {
- TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
+ TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
DCHECK(collect_from_space_only_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 4ed6abc..5be3db7 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -58,6 +58,7 @@
}
void StickyMarkSweep::Sweep(bool swap_bitmaps) {
+ UNUSED(swap_bitmaps);
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index f0e1512..6be683d 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -35,8 +35,8 @@
case kGcCauseTrim: return "HeapTrim";
default:
LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
}
- return "";
}
std::ostream& operator<<(std::ostream& os, const GcCause& gc_cause) {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b9d69d5..0cceaa4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -24,6 +24,7 @@
#include <vector>
#include "base/allocator.h"
+#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
@@ -53,6 +54,7 @@
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
+#include "intern_table.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
@@ -94,6 +96,8 @@
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
+static const char* kNonMovingSpaceName = "non moving space";
+static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
@@ -230,9 +234,13 @@
+-main alloc space2 / bump space 2 (capacity_)+-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
*/
+ // We don't have hspace compaction enabled with GSS.
+ if (foreground_collector_type_ == kCollectorTypeGSS) {
+ use_homogeneous_space_compaction_for_oom_ = false;
+ }
bool support_homogeneous_space_compaction =
background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
- use_homogeneous_space_compaction_for_oom;
+ use_homogeneous_space_compaction_for_oom_;
// We may use the same space as the main space for the non moving space if we don't need to compact
// from the main space.
// This is not the case if we support homogeneous compaction or have a moving background
@@ -252,10 +260,14 @@
std::string error_str;
std::unique_ptr<MemMap> non_moving_space_mem_map;
if (separate_non_moving_space) {
+ // If we are the zygote, the non moving space becomes the zygote space when we run
+ // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
+ // rename the mem map later.
+ const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
// Reserve the non moving mem map before the other two since it needs to be at a specific
// address.
non_moving_space_mem_map.reset(
- MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
+ MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
CHECK(non_moving_space_mem_map != nullptr) << error_str;
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
@@ -263,14 +275,13 @@
}
// Attempt to create 2 mem maps at or after the requested begin.
main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, &error_str));
+ &error_str));
CHECK(main_mem_map_1.get() != nullptr) << error_str;
if (support_homogeneous_space_compaction ||
background_collector_type_ == kCollectorTypeSS ||
foreground_collector_type_ == kCollectorTypeSS) {
main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
- capacity_, PROT_READ | PROT_WRITE,
- &error_str));
+ capacity_, &error_str));
CHECK(main_mem_map_2.get() != nullptr) << error_str;
}
// Create the non moving space first so that bitmaps don't take up the address range.
@@ -433,10 +444,10 @@
}
}
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
- int prot_flags, std::string* out_error_str) {
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
+ size_t capacity, std::string* out_error_str) {
while (true) {
- MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
+ MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
PROT_READ | PROT_WRITE, true, out_error_str);
if (map != nullptr || request_begin == nullptr) {
return map;
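MapAnonymousPreferredAddress now forwards the caller's name (instead of always reusing kMemMapSpaceName[0]) and hard-codes PROT_READ | PROT_WRITE, since every caller passed the same protection. The surrounding loop keeps the "try the preferred address, then fall back" behaviour; a hedged sketch of that strategy, compressed into a single helper (illustrative only -- the real helper loops and adjusts the requested address instead):

MemMap* MapNearPreferredAddress(const char* name, uint8_t* preferred, size_t capacity,
                                std::string* error_str) {
  MemMap* map = MemMap::MapAnonymous(name, preferred, capacity,
                                     PROT_READ | PROT_WRITE, true, error_str);
  if (map != nullptr || preferred == nullptr) {
    return map;  // success, or we already asked for "anywhere"
  }
  // Fall back to letting the kernel choose the address.
  return MemMap::MapAnonymous(name, nullptr, capacity, PROT_READ | PROT_WRITE, true, error_str);
}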
@@ -598,8 +609,8 @@
}
}
// Unprotect all the spaces.
- for (const auto& space : continuous_spaces_) {
- mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
+ for (const auto& con_space : continuous_spaces_) {
+ mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
}
stream << "Object " << obj;
if (space != nullptr) {
@@ -885,7 +896,7 @@
if (result != NULL) {
return result;
}
- return FindDiscontinuousSpaceFromObject(obj, true);
+ return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
space::ImageSpace* Heap::GetImageSpace() const {
@@ -1016,6 +1027,8 @@
// We never move things in the native heap, so we can finish the GC at this point.
FinishGC(self, collector::kGcTypeNone);
size_t native_reclaimed = 0;
+
+#ifdef HAVE_ANDROID_OS
// Only trim the native heap if we don't care about pauses.
if (!CareAboutPauseTimes()) {
#if defined(USE_DLMALLOC)
@@ -1028,6 +1041,7 @@
UNIMPLEMENTED(WARNING) << "Add trimming support";
#endif
}
+#endif // HAVE_ANDROID_OS
uint64_t end_ns = NanoTime();
VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
<< ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
@@ -1262,12 +1276,12 @@
continue;
}
// Attempt to run the collector, if we succeed, re-try the allocation.
- const bool gc_ran =
+ const bool plan_gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
- if (gc_ran) {
+ if (plan_gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size);
@@ -1324,8 +1338,9 @@
// Throw OOM by default.
break;
default: {
- LOG(FATAL) << "Unimplemented homogeneous space compaction result "
- << static_cast<size_t>(result);
+ UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
+ << static_cast<size_t>(result);
+ UNREACHABLE();
}
}
// Always print that we ran homogeneous space compaction since this can cause jank.
@@ -1527,7 +1542,7 @@
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
{
- ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
@@ -1599,7 +1614,7 @@
// compacting_gc_disable_count_, this should rarely occur).
for (;;) {
{
- ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
@@ -1760,7 +1775,8 @@
break;
}
default: {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
}
if (IsGcConcurrent()) {
@@ -1825,6 +1841,7 @@
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
// Don't sweep any spaces since we probably blasted the internal accounting of the free list
// allocator.
+ UNUSED(space);
return false;
}
@@ -1891,6 +1908,8 @@
LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
return;
}
+ Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
+ Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
VLOG(heap) << "Starting PreZygoteFork";
// Trim the pages at the end of the non moving space.
non_moving_space_->Trim();
@@ -1963,7 +1982,8 @@
// from this point on.
RemoveRememberedSet(old_alloc_space);
}
- zygote_space_ = old_alloc_space->CreateZygoteSpace("alloc space", low_memory_mode_,
+ // Remaining space becomes the new non moving space.
+ zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_,
&non_moving_space_);
CHECK(!non_moving_space_->CanMoveObjects());
if (same_space) {
@@ -2071,7 +2091,7 @@
bool compacting_gc;
{
gc_complete_lock_->AssertNotHeld(self);
- ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(gc_cause, self);
@@ -2137,6 +2157,13 @@
} else {
LOG(FATAL) << "Invalid current allocator " << current_allocator_;
}
+ if (IsGcConcurrent()) {
+ // Disable concurrent GC check so that we don't have spammy JNI requests.
+ // This gets recalculated in GrowForUtilization. It is important that it is disabled /
+ // calculated in the same thread so that there aren't any races that can cause it to become
+ // permanently disabled. b/17942071
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+ }
CHECK(collector != nullptr)
<< "Could not find garbage collector with collector_type="
<< static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
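The new block disables the concurrent-GC trigger for the duration of the collection by saturating concurrent_start_bytes_; GrowForUtilization later recomputes it on the same (GC) thread, which is what rules out the race in b/17942071 where the trigger could end up permanently disabled. A hedged sketch of the two ends of that hand-off (the recomputation and its names are illustrative, not the exact formula):

// In CollectGarbageInternal, before running the collector:
if (IsGcConcurrent()) {
  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();  // no new concurrent-GC requests
}
// ... collection runs ...
// Later, in GrowForUtilization, still on the GC thread (hypothetical names):
concurrent_start_bytes_ = new_footprint_target - headroom_bytes;  // re-enable the trigger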
@@ -2178,7 +2205,7 @@
<< percent_free << "% free, " << PrettySize(current_heap_size) << "/"
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
- VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
+ VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
}
FinishGC(self, gc_type);
// Inform DDMS that a GC completed.
@@ -2224,6 +2251,7 @@
void operator()(mirror::Class* klass, mirror::Reference* ref) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(klass);
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
@@ -2568,6 +2596,7 @@
}
void Heap::SwapStacks(Thread* self) {
+ UNUSED(self);
if (kUseThreadLocalAllocationStack) {
live_stack_->AssertAllZero();
}
@@ -2629,15 +2658,15 @@
if (table != nullptr) {
const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
"ImageModUnionClearCards";
- TimingLogger::ScopedTiming t(name, timings);
+ TimingLogger::ScopedTiming t2(name, timings);
table->ClearCards();
} else if (use_rem_sets && rem_set != nullptr) {
DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
<< static_cast<int>(collector_type_);
- TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
+ TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
rem_set->ClearCards();
} else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
- TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
+ TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
// were dirty before the GC started.
// TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
@@ -2659,7 +2688,7 @@
TimingLogger* const timings = current_gc_iteration_.GetTimings();
TimingLogger::ScopedTiming t(__FUNCTION__, timings);
if (verify_pre_gc_heap_) {
- TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
size_t failures = VerifyHeapReferences();
if (failures > 0) {
@@ -2669,7 +2698,7 @@
}
// Check that all objects which reference things in the live stack are on dirty cards.
if (verify_missing_card_marks_) {
- TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SwapStacks(self);
// Sort the live stack so that we can quickly binary search it later.
@@ -2678,7 +2707,7 @@
SwapStacks(self);
}
if (verify_mod_union_table_) {
- TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
for (const auto& table_pair : mod_union_tables_) {
accounting::ModUnionTable* mod_union_table = table_pair.second;
@@ -2696,6 +2725,7 @@
}
void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
+ UNUSED(gc);
// TODO: Add a new runtime option for this?
if (verify_pre_gc_rosalloc_) {
RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
@@ -2709,7 +2739,7 @@
// Called before sweeping occurs since we want to make sure we are not going to reclaim any
// reachable objects.
if (verify_pre_sweeping_heap_) {
- TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
CHECK_NE(self->GetState(), kRunnable);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Swapping bound bitmaps does nothing.
@@ -2742,7 +2772,7 @@
RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
}
if (verify_post_gc_heap_) {
- TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
size_t failures = VerifyHeapReferences();
if (failures > 0) {
@@ -2955,9 +2985,6 @@
self->IsHandlingStackOverflow()) {
return;
}
- // We already have a request pending, no reason to start more until we update
- // concurrent_start_bytes_.
- concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
JNIEnv* env = self->GetJniEnv();
DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index ba85c55..69a573e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -22,6 +22,7 @@
#include <vector>
#include "allocator_type.h"
+#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
@@ -32,7 +33,6 @@
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
-#include "instruction_set.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -382,18 +382,18 @@
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
- void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
- const mirror::Object* /*new_value*/) {
+ ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
+ const mirror::Object* /*new_value*/) {
card_table_->MarkCard(dst);
}
// Write barrier for array operations that update many field positions
- void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
- size_t /*length TODO: element_count or byte_count?*/) {
+ ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
+ size_t /*length TODO: element_count or byte_count?*/) {
card_table_->MarkCard(dst);
}
- void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
+ ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
card_table_->MarkCard(obj);
}
@@ -601,9 +601,6 @@
void RemoveRememberedSet(space::Space* space);
bool IsCompilingBoot() const;
- bool RunningOnValgrind() const {
- return running_on_valgrind_;
- }
bool HasImageSpace() const;
ReferenceProcessor* GetReferenceProcessor() {
@@ -625,8 +622,7 @@
// Create a mem map with a preferred base address.
static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
- size_t capacity, int prot_flags,
- std::string* out_error_str);
+ size_t capacity, std::string* out_error_str);
bool SupportHSpaceCompaction() const {
// Returns true if we can do hspace compaction
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 3106b4c..73196b2 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -48,8 +48,8 @@
Handle<mirror::Class> c(
hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
for (size_t i = 0; i < 1024; ++i) {
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ObjectArray<mirror::Object>> array(hs.NewHandle(
+ StackHandleScope<1> hs2(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object>> array(hs2.NewHandle(
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
for (size_t j = 0; j < 2048; ++j) {
mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index bfaa2bb..012f9f9 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -143,7 +143,7 @@
soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
{
- TimingLogger::ScopedTiming t(concurrent ? "EnqueueFinalizerReferences" :
+ TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
"(Paused)EnqueueFinalizerReferences", timings);
if (concurrent) {
StartPreservingReferences(self);
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 8f42642..04b09e9 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -188,11 +188,11 @@
size_t block_size = header->size_;
pos += sizeof(BlockHeader); // Skip the header so that we know where the objects
mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
- CHECK_LE(reinterpret_cast<const uint8_t*>(end), End());
+ const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+ CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
// We don't know how many objects are allocated in the current block. When we hit a null class,
// assume it's the end. TODO: Have a thread update the header when it flushes the block?
- while (obj < end && obj->GetClass() != nullptr) {
+ while (obj < end_obj && obj->GetClass() != nullptr) {
callback(obj, arg);
obj = GetNextObject(obj);
}
@@ -201,8 +201,8 @@
}
accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
- LOG(FATAL) << "Unimplemented";
- return nullptr;
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
uint64_t BumpPointerSpace::GetBytesAllocated() {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 98a3189..089ede4 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -186,8 +186,8 @@
size_t unused_; // Ensures alignment of kAlignment.
};
- COMPILE_ASSERT(sizeof(BlockHeader) % kAlignment == 0,
- continuous_block_must_be_kAlignment_aligned);
+ static_assert(sizeof(BlockHeader) % kAlignment == 0,
+ "continuous block must be kAlignment aligned");
friend class collector::MarkSweep;
DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index d2d95b4..b8a9dd6 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -33,12 +33,9 @@
static constexpr bool kPrefetchDuringDlMallocFreeList = true;
-template class ValgrindMallocSpace<DlMallocSpace, void*>;
-
-DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin,
- uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool can_move_objects, size_t starting_size,
- size_t initial_size)
+DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
+ void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
+ size_t growth_limit, bool can_move_objects, size_t starting_size)
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
@@ -65,12 +62,12 @@
// Everything is set so record in immutable structure and leave
uint8_t* begin = mem_map->Begin();
if (Runtime::Current()->RunningOnValgrind()) {
- return new ValgrindMallocSpace<DlMallocSpace, void*>(
- name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size,
+ return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+ mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
can_move_objects, starting_size);
} else {
- return new DlMallocSpace(name, mem_map, mspace, begin, end, begin + capacity, growth_limit,
- can_move_objects, starting_size, initial_size);
+ return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
+ growth_limit, can_move_objects, starting_size);
}
}
@@ -148,12 +145,18 @@
return result;
}
-MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
+MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
void* allocator, uint8_t* begin, uint8_t* end,
uint8_t* limit, size_t growth_limit,
bool can_move_objects) {
- return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit,
- can_move_objects, starting_size_, initial_size_);
+ if (Runtime::Current()->RunningOnValgrind()) {
+ return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+ mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
+ can_move_objects, starting_size_);
+ } else {
+ return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
+ growth_limit, can_move_objects, starting_size_);
+ }
}
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
@@ -213,27 +216,6 @@
}
}
-// Callback from dlmalloc when it needs to increase the footprint
-extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
- Heap* heap = Runtime::Current()->GetHeap();
- DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
- // Support for multiple DlMalloc provided by a slow path.
- if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
- dlmalloc_space = nullptr;
- for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
- if (space->IsDlMallocSpace()) {
- DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
- if (cur_dlmalloc_space->GetMspace() == mspace) {
- dlmalloc_space = cur_dlmalloc_space;
- break;
- }
- }
- }
- CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace;
- }
- return dlmalloc_space->MoreCore(increment);
-}
-
size_t DlMallocSpace::Trim() {
MutexLock mu(Thread::Current(), lock_);
// Trim to release memory at the end of the space.
@@ -314,6 +296,7 @@
}
void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
+ UNUSED(failed_alloc_bytes);
Thread* self = Thread::Current();
size_t max_contiguous_allocation = 0;
// To allow the Walk/InspectAll() to exclusively-lock the mutator
@@ -329,5 +312,31 @@
}
} // namespace space
+
+namespace allocator {
+
+// Implement the dlmalloc morecore callback.
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
+ // Support for multiple DlMalloc provided by a slow path.
+ if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
+ dlmalloc_space = nullptr;
+ for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
+ if (space->IsDlMallocSpace()) {
+ ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
+ if (cur_dlmalloc_space->GetMspace() == mspace) {
+ dlmalloc_space = cur_dlmalloc_space;
+ break;
+ }
+ }
+ }
+ CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace;
+ }
+ return dlmalloc_space->MoreCore(increment);
+}
+
+} // namespace allocator
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 3b8065e..6ce138c 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -107,7 +107,7 @@
// allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
void SetFootprintLimit(size_t limit) OVERRIDE;
- MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
+ MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
bool can_move_objects);
@@ -128,9 +128,9 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, uint8_t* begin, uint8_t* end,
- uint8_t* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
- size_t initial_size);
+ DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
+ uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
+ bool can_move_objects, size_t starting_size);
private:
mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 39d82cc..071997f 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -17,10 +17,12 @@
#include "image_space.h"
#include <dirent.h>
+#include <sys/statvfs.h>
#include <sys/types.h>
#include <random>
+#include "base/macros.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "base/scoped_flock.h"
@@ -69,18 +71,20 @@
}
// We are relocating or generating the core image. We should get rid of everything. It is all
-// out-of-date. We also don't really care if this fails since it is just a convienence.
+// out-of-date. We also don't really care if this fails since it is just a convenience.
// Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
// Note this should only be used during first boot.
-static void RealPruneDexCache(const std::string& cache_dir_path);
-static void PruneDexCache(InstructionSet isa) {
+static void RealPruneDalvikCache(const std::string& cache_dir_path);
+
+static void PruneDalvikCache(InstructionSet isa) {
CHECK_NE(isa, kNone);
- // Prune the base /data/dalvik-cache
- RealPruneDexCache(GetDalvikCacheOrDie(".", false));
- // prune /data/dalvik-cache/<isa>
- RealPruneDexCache(GetDalvikCacheOrDie(GetInstructionSetString(isa), false));
+ // Prune the base /data/dalvik-cache.
+ RealPruneDalvikCache(GetDalvikCacheOrDie(".", false));
+ // Prune /data/dalvik-cache/<isa>.
+ RealPruneDalvikCache(GetDalvikCacheOrDie(GetInstructionSetString(isa), false));
}
-static void RealPruneDexCache(const std::string& cache_dir_path) {
+
+static void RealPruneDalvikCache(const std::string& cache_dir_path) {
if (!OS::DirectoryExists(cache_dir_path.c_str())) {
return;
}
@@ -95,8 +99,8 @@
if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
continue;
}
- // We only want to delete regular files.
- if (de->d_type != DT_REG) {
+ // We only want to delete regular files and symbolic links.
+ if (de->d_type != DT_REG && de->d_type != DT_LNK) {
if (de->d_type != DT_DIR) {
// We do expect some directories (namely the <isa> for pruning the base dalvik-cache).
LOG(WARNING) << "Unexpected file type of " << std::hex << de->d_type << " encountered.";
@@ -114,6 +118,28 @@
CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(cache_dir))) << "Unable to close directory.";
}
+// We write out an empty file to the zygote's ISA specific cache dir at the start of
+// every zygote boot and delete it when the boot completes. If we find a file already
+// present, it usually means the boot didn't complete. We wipe the entire dalvik
+// cache if that's the case.
+static void MarkZygoteStart(const InstructionSet isa) {
+ const std::string isa_subdir = GetDalvikCacheOrDie(GetInstructionSetString(isa), false);
+ const std::string boot_marker = isa_subdir + "/.booting";
+
+ if (OS::FileExists(boot_marker.c_str())) {
+ LOG(WARNING) << "Incomplete boot detected. Pruning dalvik cache";
+ RealPruneDalvikCache(isa_subdir);
+ }
+
+ VLOG(startup) << "Creating boot start marker: " << boot_marker;
+ std::unique_ptr<File> f(OS::CreateEmptyFile(boot_marker.c_str()));
+ if (f.get() != nullptr) {
+ if (f->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Failed to write boot marker.";
+ }
+ }
+}
+
static bool GenerateImage(const std::string& image_filename, InstructionSet image_isa,
std::string* error_msg) {
const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
@@ -126,7 +152,7 @@
// We should clean up so we are more likely to have room for the image.
if (Runtime::Current()->IsZygote()) {
LOG(INFO) << "Pruning dalvik-cache since we are generating an image and will need to recompile";
- PruneDexCache(image_isa);
+ PruneDalvikCache(image_isa);
}
std::vector<std::string> arg_vector;
@@ -143,9 +169,7 @@
}
std::string oat_file_option_string("--oat-file=");
- oat_file_option_string += image_filename;
- oat_file_option_string.erase(oat_file_option_string.size() - 3);
- oat_file_option_string += "oat";
+ oat_file_option_string += ImageHeader::GetOatLocationFromImageLocation(image_filename);
arg_vector.push_back(oat_file_option_string);
Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
@@ -230,7 +254,7 @@
// We should clean up so we are more likely to have room for the image.
if (Runtime::Current()->IsZygote()) {
LOG(INFO) << "Pruning dalvik-cache since we are relocating an image and will need to recompile";
- PruneDexCache(isa);
+ PruneDalvikCache(isa);
}
std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
@@ -377,6 +401,41 @@
return false;
}
+static constexpr uint64_t kLowSpaceValue = 50 * MB;
+static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
+
+// Read the free space of the cache partition and make a decision whether to keep the generated
+// image. This is to try to mitigate situations where the system might run out of space later.
+static bool CheckSpace(const std::string& cache_filename, std::string* error_msg) {
+ // Using statvfs vs statvfs64 because of b/18207376, and it is enough for all practical purposes.
+ struct statvfs buf;
+
+ int res = TEMP_FAILURE_RETRY(statvfs(cache_filename.c_str(), &buf));
+ if (res != 0) {
+ // Could not stat. Conservatively tell the system to delete the image.
+ *error_msg = "Could not stat the filesystem, assuming low-memory situation.";
+ return false;
+ }
+
+ uint64_t fs_overall_size = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
+ // Zygote is privileged, but other things are not. Use bavail.
+ uint64_t fs_free_size = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
+
+ // Take the overall size as an indicator for a tmpfs, which is being used for the decryption
+ // environment. We do not want to fail quickening the boot image there, as it is beneficial
+ // for time-to-UI.
+ if (fs_overall_size > kTmpFsSentinelValue) {
+ if (fs_free_size < kLowSpaceValue) {
+ *error_msg = StringPrintf("Low-memory situation: only %4.2f megabytes available after image"
+ " generation, need at least %" PRIu64 ".",
+ static_cast<double>(fs_free_size) / MB,
+ kLowSpaceValue / MB);
+ return false;
+ }
+ }
+ return true;
+}
+
ImageSpace* ImageSpace::Create(const char* image_location,
const InstructionSet image_isa,
std::string* error_msg) {
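For a sense of scale: with f_bsize = 4096, f_blocks = 262144, and f_bavail = 10240, the overall size is 1 GiB (so the 384 MB tmpfs sentinel does not exempt the partition) while the free size is only 40 MiB, below kLowSpaceValue (50 MB); CheckSpace then returns false and the freshly generated image is pruned rather than left to starve the partition.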
@@ -390,6 +449,10 @@
&has_system, &cache_filename, &dalvik_cache_exists,
&has_cache, &is_global_cache);
+ if (Runtime::Current()->IsZygote()) {
+ MarkZygoteStart(image_isa);
+ }
+
ImageSpace* space;
bool relocate = Runtime::Current()->ShouldRelocate();
bool can_compile = Runtime::Current()->IsImageDex2OatEnabled();
@@ -434,6 +497,11 @@
*error_msg = StringPrintf("Unable to relocate image '%s' from '%s' to '%s': %s",
image_location, system_filename.c_str(),
cache_filename.c_str(), reason.c_str());
+ // We failed to create files, remove any possibly garbage output.
+ // Since ImageCreationAllowed was true above, we are the zygote
+ // and therefore the only process expected to generate these for
+ // the device.
+ PruneDalvikCache(image_isa);
return nullptr;
}
}
@@ -480,19 +548,23 @@
return space;
}
- // If the /system file exists, it should be up-to-date, don't try to generate it. Same if it is
- // a relocated copy from something in /system (i.e. checksum's match).
- // Otherwise, log a warning and fall through to GenerateImage.
if (relocated_version_used) {
- LOG(FATAL) << "Attempted to use relocated version of " << image_location << " "
- << "at " << cache_filename << " generated from " << system_filename << " "
- << "but image failed to load: " << error_msg;
+ // Something is wrong with the relocated copy (even though checksums match). Cleanup.
+ // This can happen if the .oat is corrupt, since the above only checks the .art checksums.
+ // TODO: Check the oat file validity earlier.
+ *error_msg = StringPrintf("Attempted to use relocated version of %s at %s generated from %s "
+ "but image failed to load: %s",
+ image_location, cache_filename.c_str(), system_filename.c_str(),
+ error_msg->c_str());
+ PruneDalvikCache(image_isa);
return nullptr;
} else if (is_system) {
+ // If the /system file exists, it should be up-to-date, don't try to generate it.
*error_msg = StringPrintf("Failed to load /system image '%s': %s",
image_filename->c_str(), error_msg->c_str());
return nullptr;
} else {
+ // Otherwise, log a warning and fall through to GenerateImage.
LOG(WARNING) << *error_msg;
}
}
@@ -508,8 +580,20 @@
} else if (!GenerateImage(cache_filename, image_isa, error_msg)) {
*error_msg = StringPrintf("Failed to generate image '%s': %s",
cache_filename.c_str(), error_msg->c_str());
+ // We failed to create files, remove any possibly garbage output.
+ // Since ImageCreationAllowed was true above, we are the zygote
+ // and therefore the only process expected to generate these for
+ // the device.
+ PruneDalvikCache(image_isa);
return nullptr;
} else {
+ // Check whether there is enough space left over after we have generated the image.
+ if (!CheckSpace(cache_filename, error_msg)) {
+ // No. Delete the generated image and try to run out of the dex files.
+ PruneDalvikCache(image_isa);
+ return nullptr;
+ }
+
// Note that we must not use the file descriptor associated with
// ScopedFlock::GetFile to Init the image file. We want the file
// descriptor (and the associated exclusive lock) to be released when
@@ -629,6 +713,9 @@
runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
+ mirror::Object* imt_unimplemented_method =
+ image_header.GetImageRoot(ImageHeader::kImtUnimplementedMethod);
+ runtime->SetImtUnimplementedMethod(down_cast<mirror::ArtMethod*>(imt_unimplemented_method));
mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
@@ -653,7 +740,10 @@
const ImageHeader& image_header = GetImageHeader();
std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_path);
+ CHECK(image_header.GetOatDataBegin() != nullptr);
+
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
+ image_header.GetOatFileBegin(),
!Runtime::Current()->IsCompiler(), error_msg);
if (oat_file == NULL) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
@@ -669,7 +759,7 @@
}
int32_t image_patch_delta = image_header.GetPatchDelta();
int32_t oat_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
- if (oat_patch_delta != image_patch_delta) {
+ if (oat_patch_delta != image_patch_delta && !image_header.CompilePic()) {
// We should have already relocated by this point. Bail out.
*error_msg = StringPrintf("Failed to match oat file patch delta %d to expected patch delta %d "
"in image %s", oat_patch_delta, image_patch_delta, GetName());
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 9434bfe..c0c6444 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -159,7 +159,11 @@
MutexLock mu(Thread::Current(), lock_);
auto found = mem_maps_.find(obj);
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
- return found->second->BaseSize();
+ size_t alloc_size = found->second->BaseSize();
+ if (usable_size != nullptr) {
+ *usable_size = alloc_size;
+ }
+ return alloc_size;
}
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 9d1fbbe..43a2c59 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -198,7 +198,7 @@
if (capacity > initial_size_) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size_, PROT_NONE), alloc_space_name);
}
- *out_malloc_space = CreateInstance(alloc_space_name, mem_map.release(), allocator, End(), end,
+ *out_malloc_space = CreateInstance(mem_map.release(), alloc_space_name, allocator, End(), end,
limit_, growth_limit, CanMoveObjects());
SetLimit(End());
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 7230116..2fbd5f0 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -19,7 +19,7 @@
#include "space.h"
-#include <iostream>
+#include <ostream>
#include <valgrind.h>
#include <memcheck/memcheck.h>
@@ -114,9 +114,9 @@
void SetGrowthLimit(size_t growth_limit);
- virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- bool can_move_objects) = 0;
+ virtual MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
+ uint8_t* begin, uint8_t* end, uint8_t* limit,
+ size_t growth_limit, bool can_move_objects) = 0;
// Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
// the low memory mode argument specifies that the heap wishes the created space to be more
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index fbfef45..5d6642d 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
#include "gc/allocator/rosalloc-inl.h"
+#include "gc/space/valgrind_settings.h"
#include "rosalloc_space.h"
#include "thread.h"
@@ -26,13 +27,19 @@
namespace space {
inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
- void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
// obj is a valid object. Use its class in the header to get the size.
// Don't use verification since the object may be dead if we are sweeping.
size_t size = obj->SizeOf<kVerifyNone>();
+ bool running_on_valgrind = RUNNING_ON_VALGRIND != 0;
+ if (running_on_valgrind) {
+ size += 2 * kDefaultValgrindRedZoneBytes;
+ }
size_t size_by_size = rosalloc_->UsableSize(size);
if (kIsDebugBuild) {
- size_t size_by_ptr = rosalloc_->UsableSize(obj_ptr);
+ // On valgrind, the red zone has an impact...
+ const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj);
+ size_t size_by_ptr = rosalloc_->UsableSize(
+ obj_ptr - (running_on_valgrind ? kDefaultValgrindRedZoneBytes : 0));
if (size_by_size != size_by_ptr) {
LOG(INFO) << "Found a bad sized obj of size " << size
<< " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index d25694a..74d1a2b 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -17,6 +17,9 @@
#include "rosalloc_space-inl.h"
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
@@ -41,10 +44,10 @@
// TODO: Fix
// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
-RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
+RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
uint8_t* limit, size_t growth_limit, bool can_move_objects,
- size_t starting_size, size_t initial_size, bool low_memory_mode)
+ size_t starting_size, bool low_memory_mode)
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
starting_size, initial_size),
rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
@@ -56,8 +59,11 @@
size_t growth_limit, size_t capacity,
bool low_memory_mode, bool can_move_objects) {
DCHECK(mem_map != nullptr);
+
+ bool running_on_valgrind = Runtime::Current()->RunningOnValgrind();
+
allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
- capacity, low_memory_mode);
+ capacity, low_memory_mode, running_on_valgrind);
if (rosalloc == NULL) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
return NULL;
@@ -73,11 +79,13 @@
uint8_t* begin = mem_map->Begin();
// TODO: Fix RosAllocSpace to support valgrind. There is currently some issues with
// AllocationSize caused by redzones. b/12944686
- if (false && Runtime::Current()->GetHeap()->RunningOnValgrind()) {
- LOG(FATAL) << "Unimplemented";
+ if (running_on_valgrind) {
+ return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+ mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
+ can_move_objects, starting_size, low_memory_mode);
} else {
- return new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit,
- can_move_objects, starting_size, initial_size, low_memory_mode);
+ return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
+ growth_limit, can_move_objects, starting_size, low_memory_mode);
}
}
@@ -124,7 +132,8 @@
allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
size_t initial_size,
- size_t maximum_size, bool low_memory_mode) {
+ size_t maximum_size, bool low_memory_mode,
+ bool running_on_valgrind) {
// clear errno to allow PLOG on error
errno = 0;
// create rosalloc using our backing storage starting at begin and
@@ -134,7 +143,8 @@
begin, morecore_start, maximum_size,
low_memory_mode ?
art::gc::allocator::RosAlloc::kPageReleaseModeAll :
- art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd);
+ art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
+ running_on_valgrind);
if (rosalloc != NULL) {
rosalloc->SetFootprintLimit(initial_size);
} else {
@@ -163,12 +173,19 @@
return result;
}
-MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
+MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
+ void* allocator, uint8_t* begin, uint8_t* end,
+ uint8_t* limit, size_t growth_limit,
bool can_move_objects) {
- return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
- begin, end, limit, growth_limit, can_move_objects, starting_size_,
- initial_size_, low_memory_mode_);
+ if (Runtime::Current()->RunningOnValgrind()) {
+ return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+ mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
+ limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+ } else {
+ return new RosAllocSpace(mem_map, initial_size_, name,
+ reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
+ growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+ }
}
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
@@ -224,15 +241,6 @@
return bytes_freed;
}
-// Callback from rosalloc when it needs to increase the footprint
-extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
- Heap* heap = Runtime::Current()->GetHeap();
- RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
- DCHECK(rosalloc_space != nullptr);
- DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
- return rosalloc_space->MoreCore(increment);
-}
-
size_t RosAllocSpace::Trim() {
VLOG(heap) << "RosAllocSpace::Trim() ";
{
@@ -358,10 +366,24 @@
SetEnd(begin_ + starting_size_);
delete rosalloc_;
rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_, Capacity(),
- low_memory_mode_);
+ low_memory_mode_, Runtime::Current()->RunningOnValgrind());
SetFootprintLimit(footprint_limit);
}
} // namespace space
+
+namespace allocator {
+
+// Callback from rosalloc when it needs to increase the footprint.
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
+ DCHECK(rosalloc_space != nullptr);
+ DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
+ return rosalloc_space->MoreCore(increment);
+}
+
+} // namespace allocator
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 46fffaa..c856e95 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -92,7 +92,7 @@
void Clear() OVERRIDE;
- MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
+ MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
bool can_move_objects) OVERRIDE;
@@ -126,9 +126,10 @@
}
protected:
- RosAllocSpace(const std::string& name, MemMap* mem_map, allocator::RosAlloc* rosalloc,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit, bool can_move_objects,
- size_t starting_size, size_t initial_size, bool low_memory_mode);
+ RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
+ allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, uint8_t* limit,
+ size_t growth_limit, bool can_move_objects, size_t starting_size,
+ bool low_memory_mode);
private:
template<bool kThreadSafe = true>
@@ -137,10 +138,12 @@
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
size_t maximum_size, bool low_memory_mode) OVERRIDE {
- return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode);
+ return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
+ RUNNING_ON_VALGRIND != 0);
}
static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
- size_t maximum_size, bool low_memory_mode);
+ size_t maximum_size, bool low_memory_mode,
+ bool running_on_valgrind);
void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
void* arg, bool do_null_callback_at_end)
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index bff28f6..486d79a 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -39,33 +39,33 @@
}
DlMallocSpace* Space::AsDlMallocSpace() {
- LOG(FATAL) << "Unreachable";
- return nullptr;
+ UNIMPLEMENTED(FATAL) << "Unreachable";
+ UNREACHABLE();
}
RosAllocSpace* Space::AsRosAllocSpace() {
- LOG(FATAL) << "Unreachable";
- return nullptr;
+ UNIMPLEMENTED(FATAL) << "Unreachable";
+ UNREACHABLE();
}
ZygoteSpace* Space::AsZygoteSpace() {
- LOG(FATAL) << "Unreachable";
- return nullptr;
+ UNIMPLEMENTED(FATAL) << "Unreachable";
+ UNREACHABLE();
}
BumpPointerSpace* Space::AsBumpPointerSpace() {
- LOG(FATAL) << "Unreachable";
- return nullptr;
+ UNIMPLEMENTED(FATAL) << "Unreachable";
+ UNREACHABLE();
}
AllocSpace* Space::AsAllocSpace() {
- LOG(FATAL) << "Unimplemented";
- return nullptr;
+ UNIMPLEMENTED(FATAL) << "Unreachable";
+ UNREACHABLE();
}
ContinuousMemMapAllocSpace* Space::AsContinuousMemMapAllocSpace() {
- LOG(FATAL) << "Unimplemented";
- return nullptr;
+ UNIMPLEMENTED(FATAL) << "Unreachable";
+ UNREACHABLE();
}
DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
@@ -133,8 +133,8 @@
mark_bitmap_->SetName(temp_name);
}
-AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
- : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()) {
+AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps_in, space::Space* space_in)
+ : swap_bitmaps(swap_bitmaps_in), space(space_in), self(Thread::Current()) {
}
} // namespace space
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index a6b837c..793d798 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -21,68 +21,165 @@
#include <memcheck/memcheck.h>
+#include "valgrind_settings.h"
+
namespace art {
namespace gc {
namespace space {
-// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
-// after each allocation. 8 bytes provides long/double alignment.
-static constexpr size_t kValgrindRedZoneBytes = 8;
+namespace valgrind_details {
-template <typename S, typename A>
-mirror::Object* ValgrindMallocSpace<S, A>::AllocWithGrowth(Thread* self, size_t num_bytes,
- size_t* bytes_allocated,
- size_t* usable_size) {
+template <size_t kValgrindRedZoneBytes, bool kUseObjSizeForUsable>
+inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
+ size_t bytes_allocated, size_t usable_size,
+ size_t* bytes_allocated_out, size_t* usable_size_out) {
+ if (bytes_allocated_out != nullptr) {
+ *bytes_allocated_out = bytes_allocated;
+ }
+
+ // This cuts over-provision and is a trade-off between testing the over-provisioning code paths
+ // vs checking overflows in the regular paths.
+ if (usable_size_out != nullptr) {
+ if (kUseObjSizeForUsable) {
+ *usable_size_out = num_bytes;
+ } else {
+ *usable_size_out = usable_size - 2 * kValgrindRedZoneBytes;
+ }
+ }
+
+ // Left redzone.
+ VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
+
+ // Make requested memory readable.
+ // (If the allocator assumes memory is zeroed out, we might get UNDEFINED warnings, so make
+ // everything DEFINED initially.)
+ mirror::Object* result = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_DEFINED(result, num_bytes);
+
+ // Right redzone. Assumes that if bytes_allocated > usable_size, then the difference is
+ // management data at the upper end, and for simplicity we will not protect that.
+ // At the moment, this fits RosAlloc (no management data in a slot, usable_size == alloc_size)
+ // and DlMalloc (allocation_size = (usable_size == num_bytes) + 4, 4 is management)
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes,
+ usable_size - (num_bytes + kValgrindRedZoneBytes));
+
+ return result;
+}
+
+inline size_t GetObjSizeNoThreadSafety(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+ return obj->SizeOf<kVerifyNone>();
+}
+
+} // namespace valgrind_details
+
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+mirror::Object*
+ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::AllocWithGrowth(
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ size_t bytes_allocated;
+ size_t usable_size;
void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
- bytes_allocated, usable_size);
+ &bytes_allocated, &usable_size);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- mirror::Object* result = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
- // Make redzones as no access.
- VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
- return result;
+
+ return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
+ kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
+ bytes_allocated, usable_size,
+ bytes_allocated_out,
+ usable_size_out);
}
-template <typename S, typename A>
-mirror::Object* ValgrindMallocSpace<S, A>::Alloc(Thread* self, size_t num_bytes,
- size_t* bytes_allocated,
- size_t* usable_size) {
- void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes, bytes_allocated,
- usable_size);
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+mirror::Object* ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::Alloc(
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ size_t bytes_allocated;
+ size_t usable_size;
+ void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
+ &bytes_allocated, &usable_size);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- mirror::Object* result = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
- // Make redzones as no access.
- VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
- VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, kValgrindRedZoneBytes);
- return result;
+
+ return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
+ kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
+ bytes_allocated, usable_size,
+ bytes_allocated_out,
+ usable_size_out);
}
-template <typename S, typename A>
-size_t ValgrindMallocSpace<S, A>::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+size_t ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::AllocationSize(
+ mirror::Object* obj, size_t* usable_size) {
size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uint8_t*>(obj) - kValgrindRedZoneBytes), usable_size);
+ reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kValgrindRedZoneBytes : 0)),
+ usable_size);
+ if (usable_size != nullptr) {
+ if (kUseObjSizeForUsable) {
+ *usable_size = valgrind_details::GetObjSizeNoThreadSafety(obj);
+ } else {
+ *usable_size = *usable_size - 2 * kValgrindRedZoneBytes;
+ }
+ }
return result;
}
-template <typename S, typename A>
-size_t ValgrindMallocSpace<S, A>::Free(Thread* self, mirror::Object* ptr) {
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+size_t ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::Free(
+ Thread* self, mirror::Object* ptr) {
void* obj_after_rdz = reinterpret_cast<void*>(ptr);
- void* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
+ uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
// Make redzones undefined.
- size_t usable_size = 0;
- AllocationSize(ptr, &usable_size);
- VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, usable_size);
+ size_t usable_size;
+ size_t allocation_size = AllocationSize(ptr, &usable_size);
+
+ // Unprotect the allocation.
+ // Use the obj-size-for-usable flag to determine whether usable_size is the more important one,
+ // e.g., whether there's data in the allocation_size (and usable_size can't be trusted).
+ if (kUseObjSizeForUsable) {
+ VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
+ } else {
+ VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, usable_size + 2 * kValgrindRedZoneBytes);
+ }
+
return S::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
}
-template <typename S, typename A>
-size_t ValgrindMallocSpace<S, A>::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+size_t ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::FreeList(
+ Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
size_t freed = 0;
for (size_t i = 0; i < num_ptrs; i++) {
freed += Free(self, ptrs[i]);
@@ -91,15 +188,18 @@
return freed;
}
-template <typename S, typename A>
-ValgrindMallocSpace<S, A>::ValgrindMallocSpace(const std::string& name, MemMap* mem_map,
- A allocator, uint8_t* begin,
- uint8_t* end, uint8_t* limit, size_t growth_limit,
- size_t initial_size,
- bool can_move_objects, size_t starting_size) :
- S(name, mem_map, allocator, begin, end, limit, growth_limit, can_move_objects, starting_size,
- initial_size) {
- VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+template <typename... Params>
+ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::ValgrindMallocSpace(
+ MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
+ VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size,
+ mem_map->Size() - initial_size);
}
} // namespace space
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index eb6fe9c..d102f49 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -27,7 +27,10 @@
// A specialization of DlMallocSpace/RosAllocSpace that places valgrind red zones around
// allocations.
-template <typename BaseMallocSpaceType, typename AllocatorType>
+template <typename BaseMallocSpaceType,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -44,11 +47,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
+ UNUSED(ptr);
}
- ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
- uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
- size_t initial_size, bool can_move_objects, size_t starting_size);
+ template <typename... Params>
+ explicit ValgrindMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
virtual ~ValgrindMallocSpace() {}
private:
diff --git a/runtime/gc/space/valgrind_settings.h b/runtime/gc/space/valgrind_settings.h
new file mode 100644
index 0000000..73da0fd
--- /dev/null
+++ b/runtime/gc/space/valgrind_settings.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+#define ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+
+namespace art {
+namespace gc {
+namespace space {
+
+// Default number of bytes to use as a red zone (rdz). A red zone of this size will be placed before
+// and after each allocation. 8 bytes provides long/double alignment.
+static constexpr size_t kDefaultValgrindRedZoneBytes = 8;
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
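The wrapper changed above keeps callers unaware of the red zones by shifting pointers and sizes at the boundary. A minimal standalone sketch of that arithmetic, assuming the 8-byte default from valgrind_settings.h (the helper names here are illustrative, not ART's):

#include <cstddef>
#include <cstdint>

static constexpr size_t kRedZoneBytes = 8;  // mirrors kDefaultValgrindRedZoneBytes above

// Pointer handed to callers: past the leading red zone of the raw allocation.
inline uint8_t* UserPointer(uint8_t* obj_with_rdz) {
  return obj_with_rdz + kRedZoneBytes;
}

// Recover the start of the raw allocation from a caller-visible pointer,
// as Free() and AllocationSize() do before delegating to the base space.
inline uint8_t* AllocationStart(uint8_t* obj_after_rdz) {
  return obj_after_rdz - kRedZoneBytes;
}

// Usable size reported to callers: the raw size minus both red zones.
inline size_t UsableSize(size_t raw_size) {
  return raw_size - 2 * kRedZoneBytes;
}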
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 51d84f5..a868e68 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -32,6 +32,7 @@
: objects_allocated_(objects_allocated) {}
void operator()(mirror::Object* obj) const {
+ UNUSED(obj);
++*objects_allocated_;
}
@@ -58,7 +59,8 @@
}
void ZygoteSpace::Clear() {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
@@ -75,30 +77,29 @@
<< ",name=\"" << GetName() << "\"]";
}
-mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*) {
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+size_t ZygoteSpace::AllocationSize(mirror::Object*, size_t*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
+size_t ZygoteSpace::Free(Thread*, mirror::Object*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+size_t ZygoteSpace::FreeList(Thread*, size_t, mirror::Object**) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
- size_t /*failed_alloc_bytes*/) {
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream&, size_t) {
UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
diff --git a/runtime/gc_root-inl.h b/runtime/gc_root-inl.h
index 482f7bc..a42ec08 100644
--- a/runtime/gc_root-inl.h
+++ b/runtime/gc_root-inl.h
@@ -25,7 +25,7 @@
template<class MirrorType>
template<ReadBarrierOption kReadBarrierOption>
-inline MirrorType* GcRoot<MirrorType>::Read() {
+inline MirrorType* GcRoot<MirrorType>::Read() const {
return ReadBarrier::BarrierForRoot<MirrorType, kReadBarrierOption>(&root_);
}
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index b10a55c..aee5586 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -27,10 +27,12 @@
class PACKED(4) GcRoot {
public:
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE MirrorType* Read() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE MirrorType* Read() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
+ void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) const {
+ DCHECK(!IsNull());
callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
+ DCHECK(!IsNull());
}
// This is only used by IrtIterator.
@@ -51,7 +53,7 @@
}
private:
- MirrorType* root_;
+ mutable MirrorType* root_;
};
} // namespace art
diff --git a/runtime/globals.h b/runtime/globals.h
index b7bd44d..4d33196 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -112,6 +112,8 @@
static constexpr bool kDefaultMustRelocate = true;
+static constexpr bool kArm32QuickCodeUseSoftFloat = false;
+
} // namespace art
#endif // ART_RUNTIME_GLOBALS_H_
diff --git a/runtime/handle.h b/runtime/handle.h
index addb663..6af3220 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -20,6 +20,7 @@
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/value_object.h"
#include "stack.h"
namespace art {
@@ -33,7 +34,7 @@
// a wrap pointer. Handles are generally allocated within HandleScopes. Handle is a super-class
// of MutableHandle and doesn't support assignment operations.
template<class T>
-class Handle {
+class Handle : public ValueObject {
public:
Handle() : reference_(nullptr) {
}
@@ -58,7 +59,7 @@
}
ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return reference_->AsMirrorPtr();
+ return down_cast<T*>(reference_->AsMirrorPtr());
}
ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -70,25 +71,25 @@
}
protected:
- StackReference<T>* reference_;
-
template<typename S>
explicit Handle(StackReference<S>* reference)
- : reference_(reinterpret_cast<StackReference<T>*>(reference)) {
+ : reference_(reference) {
}
template<typename S>
explicit Handle(const Handle<S>& handle)
- : reference_(reinterpret_cast<StackReference<T>*>(handle.reference_)) {
+ : reference_(handle.reference_) {
}
- StackReference<T>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
return reference_;
}
- ALWAYS_INLINE const StackReference<T>* GetReference() const
+ ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reference_;
}
+ StackReference<mirror::Object>* reference_;
+
private:
friend class BuildGenericJniFrameVisitor;
template<class S> friend class Handle;
@@ -121,8 +122,8 @@
}
ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackReference<T>* ref = Handle<T>::GetReference();
- T* const old = ref->AsMirrorPtr();
+ StackReference<mirror::Object>* ref = Handle<T>::GetReference();
+ T* old = down_cast<T*>(ref->AsMirrorPtr());
ref->Assign(reference);
return old;
}
@@ -132,7 +133,6 @@
: Handle<T>(handle) {
}
- protected:
template<typename S>
explicit MutableHandle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: Handle<T>(reference) {
@@ -153,7 +153,7 @@
}
private:
- StackReference<T> null_ref_;
+ StackReference<mirror::Object> null_ref_;
};
} // namespace art
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 2717180..9ddaf61 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -25,13 +25,13 @@
namespace art {
template<size_t kNumReferences>
-inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self)
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
: HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
- COMPILE_ASSERT(kNumReferences >= 1, stack_handle_scope_must_contain_at_least_1_reference);
+ static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference");
// TODO: Figure out how to use a compile assert.
CHECK_EQ(&storage_[0], GetReferences());
for (size_t i = 0; i < kNumReferences; ++i) {
- SetReference(i, nullptr);
+ SetReference(i, fill_value);
}
self_->PushHandleScope(this);
}
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index f795e38..2c4f0f9 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -100,7 +100,7 @@
}
// Offset of link within HandleScope, used by generated code.
- static size_t LinkOffset(size_t pointer_size) {
+ static size_t LinkOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
return 0;
}
@@ -127,6 +127,10 @@
return reinterpret_cast<StackReference<mirror::Object>*>(address);
}
+ explicit HandleScope(size_t number_of_references) :
+ link_(nullptr), number_of_references_(number_of_references) {
+ }
+
// Semi-hidden constructor. Construction expected by generated code and StackHandleScope.
explicit HandleScope(HandleScope* link, uint32_t num_references) :
link_(link), number_of_references_(num_references) {
@@ -159,29 +163,31 @@
}
private:
- T** obj_;
+ T** const obj_;
};
// Scoped handle storage of a fixed size that is usually stack allocated.
template<size_t kNumReferences>
class PACKED(4) StackHandleScope FINAL : public HandleScope {
public:
- explicit StackHandleScope(Thread* self);
- ~StackHandleScope();
+ explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
+ ALWAYS_INLINE ~StackHandleScope();
- // Currently unused, using this GetReference instead of the one in HandleScope is preferred to
- // avoid compiler optimizations incorrectly optimizing out of bound array accesses.
- // TODO: Remove this when it is un-necessary.
- ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, kNumReferences);
- return GetReferences()[i].AsMirrorPtr();
+ template<class T>
+ ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetReference(pos_, object);
+ MutableHandle<T> h(GetHandle<T>(pos_));
+ pos_++;
+ return h;
}
- ALWAYS_INLINE MutableHandle<mirror::Object> GetHandle(size_t i)
+ template<class T>
+ ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, kNumReferences);
- return MutableHandle<mirror::Object>(&GetReferences()[i]);
+ SetReference(pos_, *object);
+ MutableHandle<T> h(GetHandle<T>(pos_));
+ pos_++;
+ return HandleWrapper<T>(object, h);
}
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
@@ -190,23 +196,13 @@
GetReferences()[i].Assign(object);
}
- template<class T>
- MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetReference(pos_, object);
- MutableHandle<T> h(GetHandle(pos_));
- pos_++;
- return h;
- }
-
- template<class T>
- HandleWrapper<T> NewHandleWrapper(T** object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetReference(pos_, *object);
- MutableHandle<T> h(GetHandle(pos_));
- pos_++;
- return HandleWrapper<T>(object, h);
- }
-
private:
+ template<class T>
+ ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, kNumReferences);
+ return MutableHandle<T>(&GetReferences()[i]);
+ }
+
// Reference storage needs to be first as expected by the HandleScope layout.
StackReference<mirror::Object> storage_[kNumReferences];
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index a2d37b3..3069581 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -475,9 +475,14 @@
}
}
- std::unique_ptr<File> file(new File(out_fd, filename_));
+ std::unique_ptr<File> file(new File(out_fd, filename_, true));
okay = file->WriteFully(header_data_ptr_, header_data_size_) &&
- file->WriteFully(body_data_ptr_, body_data_size_);
+ file->WriteFully(body_data_ptr_, body_data_size_);
+ if (okay) {
+ okay = file->FlushCloseOrErase() == 0;
+ } else {
+ file->Erase();
+ }
if (!okay) {
std::string msg(StringPrintf("Couldn't dump heap; writing \"%s\" failed: %s",
filename_.c_str(), strerror(errno)));
@@ -637,8 +642,8 @@
// U4: size of identifiers. We're using addresses as IDs and our heap references are stored
// as uint32_t.
// Note of warning: hprof-conv hard-codes the size of identifiers to 4.
- COMPILE_ASSERT(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
- UnexpectedHeapReferenceSize);
+ static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
+ "Unexpected HeapReference size");
U4_TO_BUF_BE(buf, 0, sizeof(uint32_t));
fwrite(buf, 1, sizeof(uint32_t), header_fp_);
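The identifier-size record written above must be big-endian; the real code uses the U4_TO_BUF_BE macro. A self-contained sketch of that byte layout (the helper name is made up for illustration):

#include <cstddef>
#include <cstdint>

// Store a 32-bit value at buf[offset..offset+3] in big-endian order,
// which is what the hprof header format expects for its U4 fields.
inline void StoreU4BigEndian(uint8_t* buf, size_t offset, uint32_t value) {
  buf[offset + 0] = static_cast<uint8_t>(value >> 24);
  buf[offset + 1] = static_cast<uint8_t>(value >> 16);
  buf[offset + 2] = static_cast<uint8_t>(value >> 8);
  buf[offset + 3] = static_cast<uint8_t>(value);
}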
diff --git a/runtime/image.cc b/runtime/image.cc
index c065d8e..b83eeb1 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '0', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '3', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -35,7 +35,8 @@
uint32_t oat_file_begin,
uint32_t oat_data_begin,
uint32_t oat_data_end,
- uint32_t oat_file_end)
+ uint32_t oat_file_end,
+ bool compile_pic)
: image_begin_(image_begin),
image_size_(image_size),
image_bitmap_offset_(image_bitmap_offset),
@@ -46,7 +47,8 @@
oat_data_end_(oat_data_end),
oat_file_end_(oat_file_end),
patch_delta_(0),
- image_roots_(image_roots) {
+ image_roots_(image_roots),
+ compile_pic_(compile_pic) {
CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
diff --git a/runtime/image.h b/runtime/image.h
index ec95d01..7e2b847 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -28,7 +28,7 @@
// header of image files written by ImageWriter, read and validated by Space.
class PACKED(4) ImageHeader {
public:
- ImageHeader() {}
+ ImageHeader() : compile_pic_(0) {}
ImageHeader(uint32_t image_begin,
uint32_t image_size_,
@@ -39,7 +39,8 @@
uint32_t oat_file_begin,
uint32_t oat_data_begin,
uint32_t oat_data_end,
- uint32_t oat_file_end);
+ uint32_t oat_file_end,
+ bool compile_pic_);
bool IsValid() const;
const char* GetMagic() const;
@@ -105,6 +106,7 @@
enum ImageRoot {
kResolutionMethod,
kImtConflictMethod,
+ kImtUnimplementedMethod,
kDefaultImt,
kCalleeSaveMethod,
kRefsOnlySaveMethod,
@@ -120,6 +122,10 @@
void RelocateImage(off_t delta);
+ bool CompilePic() const {
+ return compile_pic_ != 0;
+ }
+
private:
static const uint8_t kImageMagic[4];
static const uint8_t kImageVersion[4];
@@ -161,6 +167,9 @@
// Absolute address of an Object[] of objects needed to reinitialize from an image.
uint32_t image_roots_;
+ // Boolean (0 or 1) denoting whether the image was compiled with the --compile-pic option.
+ const uint32_t compile_pic_;
+
friend class ImageWriter;
};
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c1455fd..4d177a3 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -126,7 +126,7 @@
}
table_[index].Add(obj);
result = ToIndirectRef(index);
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
<< " holes=" << segment_state_.parts.numHoles;
}
@@ -193,7 +193,7 @@
int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
if (numHoles != 0) {
while (--topIndex > bottomIndex && numHoles != 0) {
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ checking for hole at " << topIndex - 1
<< " (cookie=" << cookie << ") val="
<< table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
@@ -201,7 +201,7 @@
if (!table_[topIndex - 1].GetReference()->IsNull()) {
break;
}
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ ate hole at " << (topIndex - 1);
}
numHoles--;
@@ -210,7 +210,7 @@
segment_state_.parts.topIndex = topIndex;
} else {
segment_state_.parts.topIndex = topIndex-1;
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ ate last entry " << topIndex - 1;
}
}
@@ -228,7 +228,7 @@
*table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
segment_state_.parts.numHoles++;
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
}
}
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
deleted file mode 100644
index 0ca32fe..0000000
--- a/runtime/instruction_set.cc
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set.h"
-
-#include <signal.h>
-#include <fstream>
-
-#include "base/casts.h"
-#include "base/stringprintf.h"
-#include "utils.h"
-
-namespace art {
-
-const char* GetInstructionSetString(const InstructionSet isa) {
- switch (isa) {
- case kArm:
- case kThumb2:
- return "arm";
- case kArm64:
- return "arm64";
- case kX86:
- return "x86";
- case kX86_64:
- return "x86_64";
- case kMips:
- return "mips";
- case kNone:
- return "none";
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
- }
-}
-
-InstructionSet GetInstructionSetFromString(const char* isa_str) {
- CHECK(isa_str != nullptr);
-
- if (strcmp("arm", isa_str) == 0) {
- return kArm;
- } else if (strcmp("arm64", isa_str) == 0) {
- return kArm64;
- } else if (strcmp("x86", isa_str) == 0) {
- return kX86;
- } else if (strcmp("x86_64", isa_str) == 0) {
- return kX86_64;
- } else if (strcmp("mips", isa_str) == 0) {
- return kMips;
- }
-
- return kNone;
-}
-
-size_t GetInstructionSetAlignment(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return kArmAlignment;
- case kArm64:
- return kArm64Alignment;
- case kX86:
- // Fall-through.
- case kX86_64:
- return kX86Alignment;
- case kMips:
- return kMipsAlignment;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have alignment.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-
-static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
-static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
-
-static constexpr size_t kArmStackOverflowReservedBytes = 8 * KB;
-static constexpr size_t kArm64StackOverflowReservedBytes = 8 * KB;
-static constexpr size_t kX86StackOverflowReservedBytes = 8 * KB;
-static constexpr size_t kX86_64StackOverflowReservedBytes = 8 * KB;
-
-size_t GetStackOverflowReservedBytes(InstructionSet isa) {
- switch (isa) {
- case kArm: // Intentional fall-through.
- case kThumb2:
- return kArmStackOverflowReservedBytes;
-
- case kArm64:
- return kArm64StackOverflowReservedBytes;
-
- case kMips:
- return kMipsStackOverflowReservedBytes;
-
- case kX86:
- return kX86StackOverflowReservedBytes;
-
- case kX86_64:
- return kX86_64StackOverflowReservedBytes;
-
- case kNone:
- LOG(FATAL) << "kNone has no stack overflow size";
- return 0;
-
- default:
- LOG(FATAL) << "Unknown instruction set" << isa;
- return 0;
- }
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg) {
- const InstructionSetFeatures* result;
- switch (isa) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(isa);
- break;
- }
- CHECK_EQ(result == nullptr, error_msg->size() != 0);
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromFeatureString(InstructionSet isa,
- const std::string& feature_list,
- std::string* error_msg) {
- const InstructionSetFeatures* result;
- switch (isa) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromFeatureString(feature_list, error_msg);
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(isa);
- break;
- }
- // TODO: warn if feature_list doesn't agree with result's GetFeatureList().
- CHECK_EQ(result == nullptr, error_msg->size() != 0);
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
- uint32_t bitmap) {
- const InstructionSetFeatures* result;
- switch (isa) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromBitmap(bitmap);
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(isa);
- break;
- }
- CHECK_EQ(bitmap, result->AsBitmap());
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromCppDefines();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-
-const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromCpuInfo();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromHwcap();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromAssembly();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
- DCHECK_EQ(kArm, GetInstructionSet());
- return down_cast<const ArmInstructionSetFeatures*>(this);
-}
-
-std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs) {
- os << "ISA: " << rhs.GetInstructionSet() << " Feature string: " << rhs.GetFeatureString();
- return os;
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromFeatureString(
- const std::string& feature_list, std::string* error_msg) {
- std::vector<std::string> features;
- Split(feature_list, ',', &features);
- bool has_lpae = false;
- bool has_div = false;
- for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = Trim(*i);
- if (feature == "default" || feature == "none") {
- // Nothing to do.
- } else if (feature == "div") {
- has_div = true;
- } else if (feature == "nodiv") {
- has_div = false;
- } else if (feature == "lpae") {
- has_lpae = true;
- } else if (feature == "nolpae") {
- has_lpae = false;
- } else {
- *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
- return nullptr;
- }
- }
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
- const std::string& variant, std::string* error_msg) {
- // Look for variants that have divide support.
- bool has_div = false;
- {
- static const char* arm_variants_with_div[] = {
- "cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57",
- "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
- "cyclone", "denver", "krait", "swift"
- };
- for (const char* div_variant : arm_variants_with_div) {
- if (variant == div_variant) {
- has_div = true;
- break;
- }
- }
- }
- // Look for variants that have LPAE support.
- bool has_lpae = false;
- {
- static const char* arm_variants_with_lpae[] = {
- "cortex-a7", "cortex-a15", "krait", "denver"
- };
- for (const char* lpae_variant : arm_variants_with_lpae) {
- if (variant == lpae_variant) {
- has_lpae = true;
- break;
- }
- }
- }
- if (has_div == false && has_lpae == false) {
- // Avoid unsupported variants.
- static const char* unsupported_arm_variants[] = {
- // ARM processors that aren't ARMv7 compatible aren't supported.
- "arm2", "arm250", "arm3", "arm6", "arm60", "arm600", "arm610", "arm620",
- "cortex-m0", "cortex-m0plus", "cortex-m1",
- "fa526", "fa626", "fa606te", "fa626te", "fmp626", "fa726te",
- "iwmmxt", "iwmmxt2",
- "strongarm", "strongarm110", "strongarm1100", "strongarm1110",
- "xscale"
- };
- for (const char* us_variant : unsupported_arm_variants) {
- if (variant == us_variant) {
- *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", us_variant);
- return nullptr;
- }
- }
- // Warn if the variant is unknown.
- // TODO: some of the variants below may have feature support, but that support is currently
- // unknown so we'll choose conservative (sub-optimal) defaults without warning.
- // TODO: some of the architectures may not support all features required by ART and should be
- // moved to unsupported_arm_variants[] above.
- static const char* arm_variants_without_known_features[] = {
- "arm7", "arm7m", "arm7d", "arm7dm", "arm7di", "arm7dmi", "arm70", "arm700", "arm700i",
- "arm710", "arm710c", "arm7100", "arm720", "arm7500", "arm7500fe", "arm7tdmi", "arm7tdmi-s",
- "arm710t", "arm720t", "arm740t",
- "arm8", "arm810",
- "arm9", "arm9e", "arm920", "arm920t", "arm922t", "arm946e-s", "arm966e-s", "arm968e-s",
- "arm926ej-s", "arm940t", "arm9tdmi",
- "arm10tdmi", "arm1020t", "arm1026ej-s", "arm10e", "arm1020e", "arm1022e",
- "arm1136j-s", "arm1136jf-s",
- "arm1156t2-s", "arm1156t2f-s", "arm1176jz-s", "arm1176jzf-s",
- "cortex-a5", "cortex-a8", "cortex-a9", "cortex-a9-mp", "cortex-r4f",
- "marvell-pj4", "mpcore", "mpcorenovfp"
- };
- bool found = false;
- for (const char* ff_variant : arm_variants_without_known_features) {
- if (variant == ff_variant) {
- found = true;
- break;
- }
- }
- if (!found) {
- LOG(WARNING) << "Unknown instruction set features for ARM CPU variant (" << variant
- << ") using conservative defaults";
- }
- }
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
- bool has_lpae = (bitmap & kLpaeBitfield) != 0;
- bool has_div = (bitmap & kDivBitfield) != 0;
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
-#if defined(__ARM_ARCH_EXT_IDIV__)
- bool has_div = true;
-#else
- bool has_div = false;
-#endif
-#if defined(__ARM_FEATURE_LPAE)
- bool has_lpae = true;
-#else
- bool has_lpae = false;
-#endif
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
- // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
- // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
- bool has_lpae = false;
- bool has_div = false;
-
- std::ifstream in("/proc/cpuinfo");
- if (!in.fail()) {
- while (!in.eof()) {
- std::string line;
- std::getline(in, line);
- if (!in.eof()) {
- LOG(INFO) << "cpuinfo line: " << line;
- if (line.find("Features") != std::string::npos) {
- LOG(INFO) << "found features";
- if (line.find("idivt") != std::string::npos) {
- // We always expect both ARM and Thumb divide instructions to be available or not
- // available.
- CHECK_NE(line.find("idiva"), std::string::npos);
- has_div = true;
- }
- if (line.find("lpae") != std::string::npos) {
- has_lpae = true;
- }
- }
- }
- }
- in.close();
- } else {
- LOG(INFO) << "Failed to open /proc/cpuinfo";
- }
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-#if defined(HAVE_ANDROID_OS) && defined(__arm__)
-#include <sys/auxv.h>
-#include <asm/hwcap.h>
-#endif
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
- bool has_lpae = false;
- bool has_div = false;
-
-#if defined(HAVE_ANDROID_OS) && defined(__arm__)
- uint64_t hwcaps = getauxval(AT_HWCAP);
- LOG(INFO) << "hwcaps=" << hwcaps;
- if ((hwcaps & HWCAP_IDIVT) != 0) {
- // We always expect both ARM and Thumb divide instructions to be available or not
- // available.
- CHECK_NE(hwcaps & HWCAP_IDIVA, 0U);
- has_div = true;
- }
- if ((hwcaps & HWCAP_LPAE) != 0) {
- has_lpae = true;
- }
-#endif
-
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-// A signal handler invoked on a fault caused by an illegal instruction. We record the fact in r0
-// and then increment the PC in the signal context to return to the next instruction. We know the
-// instruction is an sdiv (4 bytes long).
-static void bad_divide_inst_handle(int signo, siginfo_t* si, void* data) {
- UNUSED(signo);
- UNUSED(si);
-#if defined(__arm__)
- struct ucontext *uc = (struct ucontext *)data;
- struct sigcontext *sc = &uc->uc_mcontext;
- sc->arm_r0 = 0; // Set R0 to #0 to signal error.
- sc->arm_pc += 4; // Skip offending instruction.
-#else
- UNUSED(data);
-#endif
-}
-
-#if defined(__arm__)
-extern "C" bool artCheckForARMSDIVInstruction();
-#endif
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
- // See if we have an sdiv instruction. Register a signal handler and try to execute an sdiv
- // instruction. If we get a SIGILL then it's not supported.
- struct sigaction sa, osa;
- sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
- sa.sa_sigaction = bad_divide_inst_handle;
- sigaction(SIGILL, &sa, &osa);
-
- bool has_div = false;
-#if defined(__arm__)
- if (artCheckForARMSDIVInstruction()) {
- has_div = true;
- }
-#endif
-
- // Restore the signal handler.
- sigaction(SIGILL, &osa, nullptr);
-
- // Use compile time features to "detect" LPAE support.
- // TODO: write an assembly LPAE support test.
-#if defined(__ARM_FEATURE_LPAE)
- bool has_lpae = true;
-#else
- bool has_lpae = false;
-#endif
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-
-bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (kArm != other->GetInstructionSet()) {
- return false;
- }
- const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
- return has_lpae_ == other_as_arm->has_lpae_ && has_div_ == other_as_arm->has_div_;
-}
-
-uint32_t ArmInstructionSetFeatures::AsBitmap() const {
- return (has_lpae_ ? kLpaeBitfield : 0) | (has_div_ ? kDivBitfield : 0);
-}
-
-std::string ArmInstructionSetFeatures::GetFeatureString() const {
- std::string result;
- if (has_div_) {
- result += ",div";
- }
- if (has_lpae_) {
- result += ",lpae";
- }
- if (result.size() == 0) {
- return "none";
- } else {
- // Strip leading comma.
- return result.substr(1, result.size());
- }
-}
-
-} // namespace art
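The removed ArmInstructionSetFeatures code encodes its two capabilities as a bitmap with kDivBitfield = 1 and kLpaeBitfield = 2. A small sketch of the round-trip, assuming only what the deleted code above shows (the standalone helpers are illustrative):

#include <cstdint>

constexpr uint32_t kDivBitfield = 1;
constexpr uint32_t kLpaeBitfield = 2;

constexpr uint32_t EncodeArmFeatures(bool has_lpae, bool has_div) {
  return (has_lpae ? kLpaeBitfield : 0u) | (has_div ? kDivBitfield : 0u);
}

constexpr bool HasDiv(uint32_t bitmap) { return (bitmap & kDivBitfield) != 0; }
constexpr bool HasLpae(uint32_t bitmap) { return (bitmap & kLpaeBitfield) != 0; }

// A variant with both div and LPAE (e.g. "krait") therefore encodes to 3.
static_assert(EncodeArmFeatures(/*has_lpae=*/true, /*has_div=*/true) == 3u, "expected bitmap 3");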
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
deleted file mode 100644
index 529fa0c..0000000
--- a/runtime/instruction_set.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INSTRUCTION_SET_H_
-#define ART_RUNTIME_INSTRUCTION_SET_H_
-
-#include <iosfwd>
-#include <string>
-
-#include "base/logging.h" // Logging is required for FATAL in the helper functions.
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "globals.h" // For KB.
-
-namespace art {
-
-enum InstructionSet {
- kNone,
- kArm,
- kArm64,
- kThumb2,
- kX86,
- kX86_64,
- kMips,
- kMips64
-};
-std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
-
-#if defined(__arm__)
-static constexpr InstructionSet kRuntimeISA = kArm;
-#elif defined(__aarch64__)
-static constexpr InstructionSet kRuntimeISA = kArm64;
-#elif defined(__mips__)
-static constexpr InstructionSet kRuntimeISA = kMips;
-#elif defined(__i386__)
-static constexpr InstructionSet kRuntimeISA = kX86;
-#elif defined(__x86_64__)
-static constexpr InstructionSet kRuntimeISA = kX86_64;
-#else
-static constexpr InstructionSet kRuntimeISA = kNone;
-#endif
-
-// Architecture-specific pointer sizes
-static constexpr size_t kArmPointerSize = 4;
-static constexpr size_t kArm64PointerSize = 8;
-static constexpr size_t kMipsPointerSize = 4;
-static constexpr size_t kMips64PointerSize = 8;
-static constexpr size_t kX86PointerSize = 4;
-static constexpr size_t kX86_64PointerSize = 8;
-
-// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
-// but ARM ELF requires 8.
-static constexpr size_t kArmAlignment = 8;
-
-// ARM64 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kArm64Alignment = 16;
-
-// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned.
-// TODO: Can this be 4?
-static constexpr size_t kMipsAlignment = 8;
-
-// X86 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kX86Alignment = 16;
-
-
-const char* GetInstructionSetString(InstructionSet isa);
-
-// Note: Returns kNone when the string cannot be parsed to a known value.
-InstructionSet GetInstructionSetFromString(const char* instruction_set);
-
-static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return kArmPointerSize;
- case kArm64:
- return kArm64PointerSize;
- case kX86:
- return kX86PointerSize;
- case kX86_64:
- return kX86_64PointerSize;
- case kMips:
- return kMipsPointerSize;
- case kMips64:
- return kMips64PointerSize;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have pointer size.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-size_t GetInstructionSetAlignment(InstructionSet isa);
-
-static inline bool Is64BitInstructionSet(InstructionSet isa) {
- switch (isa) {
- case kArm:
- case kThumb2:
- case kX86:
- case kMips:
- return false;
-
- case kArm64:
- case kX86_64:
- case kMips64:
- return true;
-
- case kNone:
- LOG(FATAL) << "ISA kNone does not have bit width.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return 4;
- case kArm64:
- return 8;
- case kX86:
- return 4;
- case kX86_64:
- return 8;
- case kMips:
- return 4;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have spills.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-static inline size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return 4;
- case kArm64:
- return 8;
- case kX86:
- return 8;
- case kX86_64:
- return 8;
- case kMips:
- return 4;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have spills.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-size_t GetStackOverflowReservedBytes(InstructionSet isa);
-
-class ArmInstructionSetFeatures;
-
-// Abstraction used to describe the features of different instruction sets.
-class InstructionSetFeatures {
- public:
- // Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg);
-
- // Parse a string of the form "div,lpae" and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromFeatureString(InstructionSet isa,
- const std::string& feature_list,
- std::string* error_msg);
-
- // Parse a bitmap for the given isa and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
-
- // Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
- static const InstructionSetFeatures* FromCppDefines();
-
- // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const InstructionSetFeatures* FromCpuInfo();
-
- // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
- // InstructionSetFeatures.
- static const InstructionSetFeatures* FromHwcap();
-
- // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
- // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const InstructionSetFeatures* FromAssembly();
-
- // Are these features the same as the other given features?
- virtual bool Equals(const InstructionSetFeatures* other) const = 0;
-
- // Return the ISA these features relate to.
- virtual InstructionSet GetInstructionSet() const = 0;
-
- // Return a bitmap that represents the features. ISA specific.
- virtual uint32_t AsBitmap() const = 0;
-
- // Return a string of the form "div,lpae" or "none".
- virtual std::string GetFeatureString() const = 0;
-
- // Down cast this to an ArmInstructionSetFeatures.
- const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const;
-
- virtual ~InstructionSetFeatures() {}
-
- protected:
- InstructionSetFeatures() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
-};
-std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs);
-
-// Instruction set features relevant to the ARM architecture.
-class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
- public:
- // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
-
- // Parse a string of the form "div,lpae" and create an InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromFeatureString(const std::string& feature_list,
- std::string* error_msg);
-
- // Parse a bitmap and create an InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
-
- // Turn C pre-processor #defines into the equivalent instruction set features.
- static const ArmInstructionSetFeatures* FromCppDefines();
-
- // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromCpuInfo();
-
- // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
- // InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromHwcap();
-
- // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
- // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const ArmInstructionSetFeatures* FromAssembly();
-
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
-
- InstructionSet GetInstructionSet() const OVERRIDE {
- return kArm;
- }
-
- uint32_t AsBitmap() const OVERRIDE;
-
- // Return a string of the form "div,lpae" or "none".
- std::string GetFeatureString() const OVERRIDE;
-
- // Is the divide instruction feature enabled?
- bool HasDivideInstruction() const {
- return has_div_;
- }
-
- // Is the Large Physical Address Extension (LPAE) instruction feature enabled? When true, code may
- // assume that double-register loads and stores (ldrd, strd) don't tear.
- bool HasLpae() const {
- return has_lpae_;
- }
-
- virtual ~ArmInstructionSetFeatures() {}
-
- private:
- ArmInstructionSetFeatures(bool has_lpae, bool has_div)
- : has_lpae_(has_lpae), has_div_(has_div) {
- }
-
- // Bitmap positions for encoding features as a bitmap.
- enum {
- kDivBitfield = 1,
- kLpaeBitfield = 2,
- };
-
- const bool has_lpae_;
- const bool has_div_;
-
- DISALLOW_COPY_AND_ASSIGN(ArmInstructionSetFeatures);
-};
-
-// A class used for instruction set features on ISAs that don't yet have any features defined.
-class UnknownInstructionSetFeatures FINAL : public InstructionSetFeatures {
- public:
- static const UnknownInstructionSetFeatures* Unknown(InstructionSet isa) {
- return new UnknownInstructionSetFeatures(isa);
- }
-
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE {
- return isa_ == other->GetInstructionSet();
- }
-
- InstructionSet GetInstructionSet() const OVERRIDE {
- return isa_;
- }
-
- uint32_t AsBitmap() const OVERRIDE {
- return 0;
- }
-
- std::string GetFeatureString() const OVERRIDE {
- return "none";
- }
-
- virtual ~UnknownInstructionSetFeatures() {}
-
- private:
- explicit UnknownInstructionSetFeatures(InstructionSet isa) : isa_(isa) {}
-
- const InstructionSet isa_;
-
- DISALLOW_COPY_AND_ASSIGN(UnknownInstructionSetFeatures);
-};
-
-// The following definitions create return types for two word-sized entities that will be passed
-// in registers so that memory operations for the interface trampolines can be avoided. The entities
-// are the resolved method and the pointer to the code to be invoked.
-//
-// On x86, ARM32 and MIPS, this is returned as a *scalar* 64bit value. The definition thus *must* be
-// uint64_t or long long int.
-//
-// On x86_64 and ARM64, structs are decomposed for allocation, so we can create a struct of two
-// size_t-sized values.
-//
-// We need two operations:
-//
-// 1) A flag value that signals failure. The assembly stubs expect the lower part to be "0".
-// GetTwoWordFailureValue() will return a value that has lower part == 0.
-//
-// 2) A value that combines two word-sized values.
-// GetTwoWordSuccessValue() constructs this.
-//
-// IMPORTANT: If you use this to transfer object pointers, it is your responsibility to ensure
-// that the object does not move or the value is updated. Simple use of this is NOT SAFE
-// when the garbage collector can move objects concurrently. Ensure that required locks
-// are held when using!
-
-#if defined(__i386__) || defined(__arm__) || defined(__mips__)
-typedef uint64_t TwoWordReturn;
-
-// Encodes method_ptr==nullptr and code_ptr==nullptr
-static inline constexpr TwoWordReturn GetTwoWordFailureValue() {
- return 0;
-}
-
-// Use the lower 32b for the method pointer and the upper 32b for the code pointer.
-static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
- uint32_t lo32 = static_cast<uint32_t>(lo);
- uint64_t hi64 = static_cast<uint64_t>(hi);
- return ((hi64 << 32) | lo32);
-}
-
-#elif defined(__x86_64__) || defined(__aarch64__)
-struct TwoWordReturn {
- uintptr_t lo;
- uintptr_t hi;
-};
-
-// Encodes method_ptr==nullptr. Leaves random value in code pointer.
-static inline TwoWordReturn GetTwoWordFailureValue() {
- TwoWordReturn ret;
- ret.lo = 0;
- return ret;
-}
-
-// Write values into their respective members.
-static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
- TwoWordReturn ret;
- ret.lo = lo;
- ret.hi = hi;
- return ret;
-}
-#else
-#error "Unsupported architecture"
-#endif
-
-} // namespace art
-
-#endif // ART_RUNTIME_INSTRUCTION_SET_H_
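The comment block in the deleted header explains the two-word return convention: on 32-bit ISAs the pair is packed into a scalar uint64_t with the method pointer in the low word and the code pointer in the high word, and failure is signalled by a zero low word. A sketch of that packing, with unpack helpers added here purely for illustration (the real stubs read the halves in assembly):

#include <cstdint>

using TwoWordReturn32 = uint64_t;

// Pack code pointer (hi) and method pointer (lo) into one scalar,
// as GetTwoWordSuccessValue() did for 32-bit targets.
inline TwoWordReturn32 Pack(uintptr_t hi, uintptr_t lo) {
  return (static_cast<uint64_t>(hi) << 32) | static_cast<uint32_t>(lo);
}

inline uint32_t UnpackLo(TwoWordReturn32 v) { return static_cast<uint32_t>(v); }
inline uint32_t UnpackHi(TwoWordReturn32 v) { return static_cast<uint32_t>(v >> 32); }

// Failure (GetTwoWordFailureValue) is a value whose low word is zero.
inline bool IsFailure(TwoWordReturn32 v) { return UnpackLo(v) == 0u; }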
diff --git a/runtime/instruction_set_test.cc b/runtime/instruction_set_test.cc
deleted file mode 100644
index 3f2d16b..0000000
--- a/runtime/instruction_set_test.cc
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set.h"
-
-#include "base/stringprintf.h"
-#include "common_runtime_test.h"
-
-namespace art {
-
-class InstructionSetTest : public CommonRuntimeTest {};
-
-TEST_F(InstructionSetTest, GetInstructionSetFromString) {
- EXPECT_EQ(kArm, GetInstructionSetFromString("arm"));
- EXPECT_EQ(kArm64, GetInstructionSetFromString("arm64"));
- EXPECT_EQ(kX86, GetInstructionSetFromString("x86"));
- EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
- EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
- EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
- EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
-}
-
-TEST_F(InstructionSetTest, GetInstructionSetString) {
- EXPECT_STREQ("arm", GetInstructionSetString(kArm));
- EXPECT_STREQ("arm", GetInstructionSetString(kThumb2));
- EXPECT_STREQ("arm64", GetInstructionSetString(kArm64));
- EXPECT_STREQ("x86", GetInstructionSetString(kX86));
- EXPECT_STREQ("x86_64", GetInstructionSetString(kX86_64));
- EXPECT_STREQ("mips", GetInstructionSetString(kMips));
- EXPECT_STREQ("none", GetInstructionSetString(kNone));
-}
-
-TEST_F(InstructionSetTest, TestRoundTrip) {
- EXPECT_EQ(kRuntimeISA, GetInstructionSetFromString(GetInstructionSetString(kRuntimeISA)));
-}
-
-TEST_F(InstructionSetTest, PointerSize) {
- EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
-}
-
-TEST_F(InstructionSetTest, X86Features) {
- // Build features for a 32-bit x86 atom processor.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> x86_features(
- InstructionSetFeatures::FromVariant(kX86, "atom", &error_msg));
- ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
- EXPECT_TRUE(x86_features->Equals(x86_features.get()));
- EXPECT_STREQ("none", x86_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_features->AsBitmap(), 0U);
-
- // Build features for a 32-bit x86 default processor.
- std::unique_ptr<const InstructionSetFeatures> x86_default_features(
- InstructionSetFeatures::FromFeatureString(kX86, "default", &error_msg));
- ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
- EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
- EXPECT_STREQ("none", x86_default_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_default_features->AsBitmap(), 0U);
-
- // Build features for a 64-bit x86-64 atom processor.
- std::unique_ptr<const InstructionSetFeatures> x86_64_features(
- InstructionSetFeatures::FromVariant(kX86_64, "atom", &error_msg));
- ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
- EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
- EXPECT_STREQ("none", x86_64_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_64_features->AsBitmap(), 0U);
-
- EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
- EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
- EXPECT_TRUE(x86_features->Equals(x86_default_features.get()));
-}
-
-TEST_F(InstructionSetTest, ArmFeaturesFromVariant) {
- // Build features for a 32-bit ARM krait processor.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> krait_features(
- InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
- ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
-
- ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
- EXPECT_TRUE(krait_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", krait_features->GetFeatureString().c_str());
- EXPECT_EQ(krait_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit ARM denver processor.
- std::unique_ptr<const InstructionSetFeatures> denver_features(
- InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
- ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit ARMv7 processor.
- std::unique_ptr<const InstructionSetFeatures> arm7_features(
- InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
- ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
- EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("none", arm7_features->GetFeatureString().c_str());
- EXPECT_EQ(arm7_features->AsBitmap(), 0U);
-
- // ARM6 is not a supported architecture variant.
- std::unique_ptr<const InstructionSetFeatures> arm6_features(
- InstructionSetFeatures::FromVariant(kArm, "arm6", &error_msg));
- EXPECT_TRUE(arm6_features.get() == nullptr);
- EXPECT_NE(error_msg.size(), 0U);
-}
-
-TEST_F(InstructionSetTest, ArmFeaturesFromString) {
- // Build features for a 32-bit ARM with LPAE and div processor.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> krait_features(
- InstructionSetFeatures::FromFeatureString(kArm, "lpae,div", &error_msg));
- ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
-
- ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
- EXPECT_TRUE(krait_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", krait_features->GetFeatureString().c_str());
- EXPECT_EQ(krait_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit ARM processor with LPAE and div flipped.
- std::unique_ptr<const InstructionSetFeatures> denver_features(
- InstructionSetFeatures::FromFeatureString(kArm, "div,lpae", &error_msg));
- ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit default ARM processor.
- std::unique_ptr<const InstructionSetFeatures> arm7_features(
- InstructionSetFeatures::FromFeatureString(kArm, "default", &error_msg));
- ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
- EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("none", arm7_features->GetFeatureString().c_str());
- EXPECT_EQ(arm7_features->AsBitmap(), 0U);
-}
-
-#ifdef HAVE_ANDROID_OS
-#include "cutils/properties.h"
-
-TEST_F(InstructionSetTest, FeaturesFromSystemPropertyVariant) {
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Read the features property.
- std::string key = StringPrintf("dalvik.vm.isa.%s.variant", GetInstructionSetString(kRuntimeISA));
- char dex2oat_isa_variant[PROPERTY_VALUE_MAX];
- if (property_get(key.c_str(), dex2oat_isa_variant, nullptr) > 0) {
- // Use features from property to build InstructionSetFeatures and check against build's
- // features.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> property_features(
- InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
- ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
- << "System property features: " << *property_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
- }
-}
-
-TEST_F(InstructionSetTest, FeaturesFromSystemPropertyString) {
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Read the features property.
- std::string key = StringPrintf("dalvik.vm.isa.%s.features", GetInstructionSetString(kRuntimeISA));
- char dex2oat_isa_features[PROPERTY_VALUE_MAX];
- if (property_get(key.c_str(), dex2oat_isa_features, nullptr) > 0) {
- // Use features from property to build InstructionSetFeatures and check against build's
- // features.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> property_features(
- InstructionSetFeatures::FromFeatureString(kRuntimeISA, dex2oat_isa_features, &error_msg));
- ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
- << "System property features: " << *property_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
- }
-}
-#endif
-
-#if defined(__arm__)
-TEST_F(InstructionSetTest, DISABLED_FeaturesFromCpuInfo) {
- LOG(WARNING) << "Test disabled due to buggy ARM kernels";
-#else
-TEST_F(InstructionSetTest, FeaturesFromCpuInfo) {
-#endif
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Check we get the same instruction set features using /proc/cpuinfo.
- std::unique_ptr<const InstructionSetFeatures> cpuinfo_features(
- InstructionSetFeatures::FromCpuInfo());
- EXPECT_TRUE(cpuinfo_features->Equals(instruction_set_features.get()))
- << "CPU Info features: " << *cpuinfo_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
-}
-
-#if defined(__arm__)
-TEST_F(InstructionSetTest, DISABLED_FeaturesFromHwcap) {
- LOG(WARNING) << "Test disabled due to buggy ARM kernels";
-#else
-TEST_F(InstructionSetTest, FeaturesFromHwcap) {
-#endif
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Check we get the same instruction set features using AT_HWCAP.
- std::unique_ptr<const InstructionSetFeatures> hwcap_features(
- InstructionSetFeatures::FromHwcap());
- EXPECT_TRUE(hwcap_features->Equals(instruction_set_features.get()))
- << "Hwcap features: " << *hwcap_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
-}
-
-
-#if defined(__arm__)
-TEST_F(InstructionSetTest, DISABLED_FeaturesFromAssembly) {
- LOG(WARNING) << "Test disabled due to buggy ARM kernels";
-#else
-TEST_F(InstructionSetTest, FeaturesFromAssembly) {
-#endif
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Check we get the same instruction set features using assembly tests.
- std::unique_ptr<const InstructionSetFeatures> assembly_features(
- InstructionSetFeatures::FromAssembly());
- EXPECT_TRUE(assembly_features->Equals(instruction_set_features.get()))
- << "Assembly features: " << *assembly_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
-}
-
-} // namespace art
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 6c6058f..639b0f0 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -18,12 +18,15 @@
#include <sys/uio.h>
+#include <sstream>
+
#include "arch/context.h"
#include "atomic.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_root-inl.h"
@@ -34,9 +37,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
-#if !defined(ART_USE_PORTABLE_COMPILER)
-#include "entrypoints/quick/quick_entrypoints.h"
-#endif
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
@@ -177,8 +177,9 @@
static void InstrumentationInstallStack(Thread* thread, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct InstallStackVisitor : public StackVisitor {
- InstallStackVisitor(Thread* thread, Context* context, uintptr_t instrumentation_exit_pc)
- : StackVisitor(thread, context), instrumentation_stack_(thread->GetInstrumentationStack()),
+ InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
+ : StackVisitor(thread_in, context),
+ instrumentation_stack_(thread_in->GetInstrumentationStack()),
instrumentation_exit_pc_(instrumentation_exit_pc),
reached_existing_instrumentation_frames_(false), instrumentation_stack_depth_(0),
last_return_pc_(0) {
@@ -316,12 +317,12 @@
static void InstrumentationRestoreStack(Thread* thread, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct RestoreStackVisitor : public StackVisitor {
- RestoreStackVisitor(Thread* thread, uintptr_t instrumentation_exit_pc,
+ RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
- : StackVisitor(thread, NULL), thread_(thread),
+ : StackVisitor(thread_in, NULL), thread_(thread_in),
instrumentation_exit_pc_(instrumentation_exit_pc),
instrumentation_(instrumentation),
- instrumentation_stack_(thread->GetInstrumentationStack()),
+ instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -595,6 +596,7 @@
}
static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg) {
+ UNUSED(arg);
thread->ResetQuickAllocEntryPointsForThread();
}
@@ -633,7 +635,6 @@
SetEntrypointsInstrumented(true);
}
++quick_alloc_entry_points_instrumentation_counter_;
- LOG(INFO) << "Counter: " << quick_alloc_entry_points_instrumentation_counter_;
}
void Instrumentation::UninstrumentQuickAllocEntryPointsLocked() {
@@ -643,7 +644,6 @@
if (quick_alloc_entry_points_instrumentation_counter_ == 0) {
SetEntrypointsInstrumented(false);
}
- LOG(INFO) << "Counter: " << quick_alloc_entry_points_instrumentation_counter_;
}
void Instrumentation::ResetQuickAllocEntryPoints() {
@@ -869,10 +869,10 @@
ConfigureStubs(false, false);
}
-const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method) const {
+const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const {
Runtime* runtime = Runtime::Current();
if (LIKELY(!instrumentation_stubs_installed_)) {
- const void* code = method->GetEntryPointFromQuickCompiledCode();
+ const void* code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
DCHECK(code != nullptr);
ClassLinker* class_linker = runtime->GetClassLinker();
if (LIKELY(!class_linker->IsQuickResolutionStub(code) &&
@@ -1016,6 +1016,7 @@
// Set return PC and check the sanity of the stack.
*return_pc = instrumentation_frame.return_pc_;
CheckStackDepth(self, instrumentation_frame, 0);
+ self->VerifyStack();
mirror::ArtMethod* method = instrumentation_frame.method_;
uint32_t length;
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3017bf6..effa9f7 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -21,8 +21,8 @@
#include <list>
#include <map>
+#include "arch/instruction_set.h"
#include "atomic.h"
-#include "instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
@@ -103,13 +103,13 @@
class Instrumentation {
public:
enum InstrumentationEvent {
- kMethodEntered = 1 << 0,
- kMethodExited = 1 << 1,
- kMethodUnwind = 1 << 2,
- kDexPcMoved = 1 << 3,
- kFieldRead = 1 << 4,
- kFieldWritten = 1 << 5,
- kExceptionCaught = 1 << 6,
+ kMethodEntered = 1, // 1 << 0
+ kMethodExited = 2, // 1 << 1
+ kMethodUnwind = 4, // 1 << 2
+ kDexPcMoved = 8, // 1 << 3
+ kFieldRead = 16, // 1 << 4
+ kFieldWritten = 32, // 1 << 5
+ kExceptionCaught = 64, // 1 << 6
};
Instrumentation();
@@ -200,7 +200,7 @@
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
- const void* GetQuickCodeFor(mirror::ArtMethod* method) const
+ const void* GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ForceInterpretOnly() {
@@ -464,6 +464,7 @@
DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
+std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationEvent& rhs);
// An element in the instrumentation side stack maintained in art::Thread.
struct InstrumentationStackFrame {
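The enum values above are now spelled out as explicit powers of two, but they are still intended to be combined as a bitmask. The fragment below is an illustration only, a standalone mirror of the enum rather than code from this patch, showing how such event masks are typically built and tested:

  // Illustrative only: standalone mirror of InstrumentationEvent, not ART code.
  #include <cstdint>

  enum InstrumentationEvent {
    kMethodEntered = 1,     // 1 << 0
    kMethodExited = 2,      // 1 << 1
    kMethodUnwind = 4,      // 1 << 2
    kDexPcMoved = 8,        // 1 << 3
    kFieldRead = 16,        // 1 << 4
    kFieldWritten = 32,     // 1 << 5
    kExceptionCaught = 64,  // 1 << 6
  };

  // Events are OR-ed into a mask and later tested with bitwise AND.
  uint32_t listened_events = kMethodEntered | kMethodExited;
  bool wants_entry = (listened_events & kMethodEntered) != 0;   // true
  bool wants_field_read = (listened_events & kFieldRead) != 0;  // false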
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index f6e6661..7ecb58e 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -29,56 +29,48 @@
namespace art {
InternTable::InternTable()
- : log_new_roots_(false), allow_new_interns_(true),
+ : image_added_to_intern_table_(false), log_new_roots_(false),
+ allow_new_interns_(true),
new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
}
size_t InternTable::Size() const {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return strong_interns_.size() + weak_interns_.size();
+ return strong_interns_.Size() + weak_interns_.Size();
}
size_t InternTable::StrongSize() const {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return strong_interns_.size();
+ return strong_interns_.Size();
}
size_t InternTable::WeakSize() const {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return weak_interns_.size();
+ return weak_interns_.Size();
}
void InternTable::DumpForSigQuit(std::ostream& os) const {
- MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- os << "Intern table: " << strong_interns_.size() << " strong; "
- << weak_interns_.size() << " weak\n";
+ os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}
void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (auto& strong_intern : strong_interns_) {
- const_cast<GcRoot<mirror::String>&>(strong_intern).
- VisitRoot(callback, arg, 0, kRootInternedString);
- DCHECK(!strong_intern.IsNull());
- }
+ strong_interns_.VisitRoots(callback, arg);
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_strong_intern_roots_) {
mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
root.VisitRoot(callback, arg, 0, kRootInternedString);
mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
- if (UNLIKELY(new_ref != old_ref)) {
+ if (new_ref != old_ref) {
// The GC moved a root in the log. Need to search the strong interns and update the
// corresponding object. This is slow, but luckily for us, this may only happen with a
// concurrent moving GC.
- auto it = strong_interns_.find(GcRoot<mirror::String>(old_ref));
- DCHECK(it != strong_interns_.end());
- strong_interns_.erase(it);
- strong_interns_.insert(GcRoot<mirror::String>(new_ref));
+ strong_interns_.Remove(old_ref);
+ strong_interns_.Insert(new_ref);
}
}
}
-
if ((flags & kVisitRootFlagClearRootLog) != 0) {
new_strong_intern_roots_.clear();
}
@@ -91,21 +83,17 @@
}
mirror::String* InternTable::LookupStrong(mirror::String* s) {
- return Lookup(&strong_interns_, s);
+ return strong_interns_.Find(s);
}
mirror::String* InternTable::LookupWeak(mirror::String* s) {
- // Weak interns need a read barrier because they are weak roots.
- return Lookup(&weak_interns_, s);
+ return weak_interns_.Find(s);
}
-mirror::String* InternTable::Lookup(Table* table, mirror::String* s) {
- Locks::intern_table_lock_->AssertHeld(Thread::Current());
- auto it = table->find(GcRoot<mirror::String>(s));
- if (LIKELY(it != table->end())) {
- return const_cast<GcRoot<mirror::String>&>(*it).Read();
- }
- return nullptr;
+void InternTable::SwapPostZygoteWithPreZygote() {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ weak_interns_.SwapPostZygoteWithPreZygote();
+ strong_interns_.SwapPostZygoteWithPreZygote();
}
mirror::String* InternTable::InsertStrong(mirror::String* s) {
@@ -116,7 +104,7 @@
if (log_new_roots_) {
new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
}
- strong_interns_.insert(GcRoot<mirror::String>(s));
+ strong_interns_.Insert(s);
return s;
}
@@ -125,12 +113,12 @@
if (runtime->IsActiveTransaction()) {
runtime->RecordWeakStringInsertion(s);
}
- weak_interns_.insert(GcRoot<mirror::String>(s));
+ weak_interns_.Insert(s);
return s;
}
void InternTable::RemoveStrong(mirror::String* s) {
- Remove(&strong_interns_, s);
+ strong_interns_.Remove(s);
}
void InternTable::RemoveWeak(mirror::String* s) {
@@ -138,13 +126,7 @@
if (runtime->IsActiveTransaction()) {
runtime->RecordWeakStringRemoval(s);
}
- Remove(&weak_interns_, s);
-}
-
-void InternTable::Remove(Table* table, mirror::String* s) {
- auto it = table->find(GcRoot<mirror::String>(s));
- DCHECK(it != table->end());
- table->erase(it);
+ weak_interns_.Remove(s);
}
// Insert/remove methods used to undo changes made during an aborted transaction.
@@ -165,11 +147,40 @@
RemoveWeak(s);
}
-static mirror::String* LookupStringFromImage(mirror::String* s)
+void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
+ CHECK(image_space != nullptr);
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ if (!image_added_to_intern_table_) {
+ mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+ mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
+ for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ const size_t num_strings = dex_file->NumStringIds();
+ for (size_t j = 0; j < num_strings; ++j) {
+ mirror::String* image_string = dex_cache->GetResolvedString(j);
+ if (image_string != nullptr) {
+ mirror::String* found = LookupStrong(image_string);
+ if (found == nullptr) {
+ InsertStrong(image_string);
+ } else {
+ DCHECK_EQ(found, image_string);
+ }
+ }
+ }
+ }
+ image_added_to_intern_table_ = true;
+ }
+}
+
+mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (image_added_to_intern_table_) {
+ return nullptr;
+ }
gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
- if (image == NULL) {
- return NULL; // No image present.
+ if (image == nullptr) {
+ return nullptr; // No image present.
}
mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
@@ -179,15 +190,15 @@
const DexFile* dex_file = dex_cache->GetDexFile();
// Binary search the dex file for the string index.
const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
- if (string_id != NULL) {
+ if (string_id != nullptr) {
uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
- mirror::String* image = dex_cache->GetResolvedString(string_idx);
- if (image != NULL) {
- return image;
+ mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
+ if (image_string != NULL) {
+ return image_string;
}
}
}
- return NULL;
+ return nullptr;
}
void InternTable::AllowNewInterns() {
@@ -204,58 +215,36 @@
}
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
+ if (s == nullptr) {
+ return nullptr;
+ }
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::intern_table_lock_);
-
- DCHECK(s != NULL);
-
while (UNLIKELY(!allow_new_interns_)) {
new_intern_condition_.WaitHoldingLocks(self);
}
-
- if (is_strong) {
- // Check the strong table for a match.
- mirror::String* strong = LookupStrong(s);
- if (strong != NULL) {
- return strong;
- }
-
- // Check the image for a match.
- mirror::String* image = LookupStringFromImage(s);
- if (image != NULL) {
- return InsertStrong(image);
- }
-
- // There is no match in the strong table, check the weak table.
- mirror::String* weak = LookupWeak(s);
- if (weak != NULL) {
- // A match was found in the weak table. Promote to the strong table.
- RemoveWeak(weak);
- return InsertStrong(weak);
- }
-
- // No match in the strong table or the weak table. Insert into the strong
- // table.
- return InsertStrong(s);
- }
-
// Check the strong table for a match.
mirror::String* strong = LookupStrong(s);
- if (strong != NULL) {
+ if (strong != nullptr) {
return strong;
}
// Check the image for a match.
mirror::String* image = LookupStringFromImage(s);
- if (image != NULL) {
- return InsertWeak(image);
+ if (image != nullptr) {
+ return is_strong ? InsertStrong(image) : InsertWeak(image);
}
- // Check the weak table for a match.
+ // There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
- if (weak != NULL) {
+ if (weak != nullptr) {
+ if (is_strong) {
+ // A match was found in the weak table. Promote to the strong table.
+ RemoveWeak(weak);
+ return InsertStrong(weak);
+ }
return weak;
}
- // Insert into the weak table.
- return InsertWeak(s);
+ // No match in the strong table or the weak table. Insert into the strong / weak table.
+ return is_strong ? InsertStrong(s) : InsertWeak(s);
}
mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
@@ -270,55 +259,104 @@
}
mirror::String* InternTable::InternStrong(mirror::String* s) {
- if (s == nullptr) {
- return nullptr;
- }
return Insert(s, true);
}
mirror::String* InternTable::InternWeak(mirror::String* s) {
- if (s == nullptr) {
- return nullptr;
- }
return Insert(s, false);
}
bool InternTable::ContainsWeak(mirror::String* s) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- const mirror::String* found = LookupWeak(s);
- return found == s;
+ return LookupWeak(s) == s;
}
void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
+ weak_interns_.SweepWeaks(callback, arg);
+}
+
+std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
+ return static_cast<size_t>(root.Read()->GetHashCode());
+}
+
+bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
+ const GcRoot<mirror::String>& b) const {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
+ return a.Read()->Equals(b.Read());
+}
+
+void InternTable::Table::Remove(mirror::String* s) {
+ auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
+ if (it != post_zygote_table_.end()) {
+ post_zygote_table_.Erase(it);
+ } else {
+ it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
+ DCHECK(it != pre_zygote_table_.end());
+ pre_zygote_table_.Erase(it);
+ }
+}
+
+mirror::String* InternTable::Table::Find(mirror::String* s) {
+ Locks::intern_table_lock_->AssertHeld(Thread::Current());
+ auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
+ if (it != pre_zygote_table_.end()) {
+ return it->Read();
+ }
+ it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
+ if (it != post_zygote_table_.end()) {
+ return it->Read();
+ }
+ return nullptr;
+}
+
+void InternTable::Table::SwapPostZygoteWithPreZygote() {
+ CHECK(pre_zygote_table_.Empty());
+ std::swap(pre_zygote_table_, post_zygote_table_);
+ VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
+}
+
+void InternTable::Table::Insert(mirror::String* s) {
+ // Always insert into the post zygote table; this table is swapped to become the pre zygote
+ // table when the zygote is created.
+ post_zygote_table_.Insert(GcRoot<mirror::String>(s));
+}
+
+void InternTable::Table::VisitRoots(RootCallback* callback, void* arg) {
+ for (auto& intern : pre_zygote_table_) {
+ intern.VisitRoot(callback, arg, 0, kRootInternedString);
+ }
+ for (auto& intern : post_zygote_table_) {
+ intern.VisitRoot(callback, arg, 0, kRootInternedString);
+ }
+}
+
+void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) {
+ SweepWeaks(&pre_zygote_table_, callback, arg);
+ SweepWeaks(&post_zygote_table_, callback, arg);
+}
+
+void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
+ for (auto it = set->begin(), end = set->end(); it != end;) {
// This does not need a read barrier because this is called by GC.
- GcRoot<mirror::String>& root = const_cast<GcRoot<mirror::String>&>(*it);
- mirror::Object* object = root.Read<kWithoutReadBarrier>();
+ mirror::Object* object = it->Read<kWithoutReadBarrier>();
mirror::Object* new_object = callback(object, arg);
if (new_object == nullptr) {
- it = weak_interns_.erase(it);
+ it = set->Erase(it);
} else {
- root = GcRoot<mirror::String>(down_cast<mirror::String*>(new_object));
+ *it = GcRoot<mirror::String>(new_object->AsString());
++it;
}
}
}
-std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) {
- if (kIsDebugBuild) {
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- }
- return static_cast<size_t>(const_cast<GcRoot<mirror::String>&>(root).Read()->GetHashCode());
-}
-
-bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
- const GcRoot<mirror::String>& b) {
- if (kIsDebugBuild) {
- Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
- }
- return const_cast<GcRoot<mirror::String>&>(a).Read()->Equals(
- const_cast<GcRoot<mirror::String>&>(b).Read());
+size_t InternTable::Table::Size() const {
+ return pre_zygote_table_.Size() + post_zygote_table_.Size();
}
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index e3223c8..371d3f7 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -20,12 +20,19 @@
#include <unordered_set>
#include "base/allocator.h"
+#include "base/hash_set.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"
namespace art {
+namespace gc {
+namespace space {
+class ImageSpace;
+} // namespace space
+} // namespace gc
+
enum VisitRootFlags : uint8_t;
namespace mirror {
@@ -66,9 +73,12 @@
bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t Size() const;
- size_t StrongSize() const;
- size_t WeakSize() const;
+ // Total number of interned strings.
+ size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Total number of strongly live interned strings.
+ size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Total number of weakly live interned strings.
+ size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -78,29 +88,83 @@
void DisallowNewInterns() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Adds all of the resolved image strings from the image space into the intern table. This
+ // avoids expensive DexFile::FindStringId calls when those strings are interned later.
+ void AddImageStringsToTable(gc::space::ImageSpace* image_space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Move the post zygote tables to the pre zygote tables to save memory by preventing dirty pages.
+ void SwapPostZygoteWithPreZygote()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
private:
class StringHashEquals {
public:
- std::size_t operator()(const GcRoot<mirror::String>& root) NO_THREAD_SAFETY_ANALYSIS;
- bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b)
+ std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
+ bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
NO_THREAD_SAFETY_ANALYSIS;
};
- typedef std::unordered_set<GcRoot<mirror::String>, StringHashEquals, StringHashEquals,
- TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> Table;
+ class GcRootEmptyFn {
+ public:
+ void MakeEmpty(GcRoot<mirror::String>& item) const {
+ item = GcRoot<mirror::String>();
+ }
+ bool IsEmpty(const GcRoot<mirror::String>& item) const {
+ return item.IsNull();
+ }
+ };
+ // Table which holds pre zygote and post zygote interned strings. There is one instance each
+ // for the weak interns and the strong interns.
+ class Table {
+ public:
+ mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void Remove(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void VisitRoots(RootCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void SweepWeaks(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+
+ private:
+ typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
+ TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
+
+ void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+
+ // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
+ // caused by modifying the zygote intern table hash table. The pre zygote table holds the
+ // strings interned before the zygote space was created; the post zygote table holds those
+ // interned afterwards.
+ UnorderedSet pre_zygote_table_;
+ UnorderedSet post_zygote_table_;
+ };
+
+ // Insert if non-null, otherwise return nullptr.
mirror::String* Insert(mirror::String* s, bool is_strong)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::String* LookupStrong(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* LookupWeak(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::String* Lookup(Table* table, mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertStrong(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertWeak(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void RemoveStrong(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -108,14 +172,16 @@
void RemoveWeak(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void Remove(Table* table, mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
// Transaction rollback access.
+ mirror::String* LookupStringFromImage(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void RemoveStrongFromTransaction(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -125,6 +191,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
friend class Transaction;
+ bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
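The pre zygote / post zygote split introduced above can be summarized with a much simpler standalone sketch. This is not ART's implementation: it uses std::unordered_set<std::string> instead of the GcRoot hash set and ignores locking and read barriers, but it shows the same insertion target, lookup order, and the swap performed when the zygote is created:

  #include <string>
  #include <unordered_set>
  #include <utility>

  // Simplified stand-in for InternTable::Table (illustrative only).
  class TwoGenerationSet {
   public:
    // New strings always go into the post zygote set; after the swap the pre zygote set is
    // no longer written, so its pages stay clean and shared across zygote children.
    void Insert(const std::string& s) { post_zygote_.insert(s); }

    bool Find(const std::string& s) const {
      return pre_zygote_.count(s) != 0 || post_zygote_.count(s) != 0;
    }

    // Called once when the zygote is created, mirroring SwapPostZygoteWithPreZygote.
    void SwapPostZygoteWithPreZygote() {
      if (pre_zygote_.empty()) {  // The real code CHECKs that the pre zygote table is empty.
        std::swap(pre_zygote_, post_zygote_);
      }
    }

   private:
    std::unordered_set<std::string> pre_zygote_;
    std::unordered_set<std::string> post_zygote_;
  };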
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index dfb03cd..b17f303 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -138,7 +138,7 @@
if (method->IsStatic()) {
if (shorty == "L") {
typedef jobject (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
jobject jresult;
@@ -149,35 +149,35 @@
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get());
} else if (shorty == "Z") {
typedef jboolean (fntype)(JNIEnv*, jclass);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get()));
} else if (shorty == "BI") {
typedef jbyte (fntype)(JNIEnv*, jclass, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetB(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "II") {
typedef jint (fntype)(JNIEnv*, jclass, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "LL") {
typedef jobject (fntype)(JNIEnv*, jclass, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -190,14 +190,15 @@
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "IIZ") {
typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "ILI") {
typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(
+ method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -206,21 +207,21 @@
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "VIZ") {
typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], args[1]);
} else if (shorty == "ZLL") {
typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -231,7 +232,7 @@
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -242,7 +243,7 @@
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -251,7 +252,7 @@
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -267,7 +268,7 @@
} else {
if (shorty == "L") {
typedef jobject (fntype)(JNIEnv*, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
jobject jresult;
@@ -278,14 +279,14 @@
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), rcvr.get());
} else if (shorty == "LL") {
typedef jobject (fntype)(JNIEnv*, jobject, jobject);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -299,7 +300,7 @@
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+ fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
@@ -315,6 +316,10 @@
kSwitchImpl, // Switch-based interpreter implementation.
kComputedGotoImplKind // Computed-goto-based interpreter implementation.
};
+static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
+ os << ((rhs == kSwitchImpl) ? "Switch-based interpreter" : "Computed-goto-based interpreter");
+ return os;
+}
#if !defined(__clang__)
static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
@@ -322,10 +327,9 @@
// Clang 3.4 fails to build the goto interpreter implementation.
static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+JValue ExecuteGotoImpl(Thread*, MethodHelper&, const DexFile::CodeItem*, ShadowFrame&, JValue) {
LOG(FATAL) << "UNREACHABLE";
- exit(0);
+ UNREACHABLE();
}
// Explicit definitions of ExecuteGotoImpl.
template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3ccdd03..eb80c30 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -80,6 +80,7 @@
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
+ UNREACHABLE();
}
return true;
}
@@ -153,6 +154,7 @@
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
+ UNREACHABLE();
}
return true;
}
@@ -195,7 +197,7 @@
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
- break;
+ UNREACHABLE();
}
return field_value;
}
@@ -285,6 +287,7 @@
}
default:
LOG(FATAL) << "Unreachable: " << field_type;
+ UNREACHABLE();
}
return true;
}
@@ -369,6 +372,7 @@
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
+ UNREACHABLE();
}
return true;
}
@@ -851,12 +855,12 @@
// Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
// going the reflective Dex way.
Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
ArtField* found = NULL;
ObjectArray<ArtField>* fields = klass->GetIFields();
for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
ArtField* f = fields->Get(i);
- if (name->Equals(f->GetName())) {
+ if (name2->Equals(f->GetName())) {
found = f;
}
}
@@ -864,14 +868,14 @@
fields = klass->GetSFields();
for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
ArtField* f = fields->Get(i);
- if (name->Equals(f->GetName())) {
+ if (name2->Equals(f->GetName())) {
found = f;
}
}
}
CHECK(found != NULL)
<< "Failed to find field in Class.getDeclaredField in un-started runtime. name="
- << name->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
+ << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
// TODO: getDeclaredField calls GetType once the field is found to ensure a
// NoClassDefFoundError is thrown if the field's type cannot be resolved.
Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
@@ -887,9 +891,8 @@
Object* obj = shadow_frame->GetVRegReference(arg_offset);
result->SetI(obj->IdentityHashCode());
} else if (name == "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)") {
- StackHandleScope<1> hs(self);
- MethodHelper mh(hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsArtMethod()));
- result->SetL(mh.GetNameAsString(self));
+ mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
+ result->SetL(method->GetNameAsString(self));
} else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
// Special case array copying without initializing System.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a8345ad..7f6303a 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -21,6 +21,9 @@
#include <math.h>
+#include <iostream>
+#include <sstream>
+
#include "base/logging.h"
#include "class_linker-inl.h"
#include "common_throws.h"
@@ -184,7 +187,7 @@
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
-static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx)
+static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(!kMovingMethods);
Class* java_lang_string_class = String::GetJavaLangString();
@@ -197,7 +200,15 @@
return nullptr;
}
}
- return mh.ResolveString(string_idx);
+ mirror::ArtMethod* method = shadow_frame.GetMethod();
+ mirror::String* s = method->GetDexCacheStrings()->Get(string_idx);
+ if (UNLIKELY(s == nullptr)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+ s = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), string_idx,
+ dex_cache);
+ }
+ return s;
}
// Handles div-int, div-int/2addr, div-int/li16 and div-int/lit8 instructions.
@@ -205,7 +216,7 @@
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const int32_t kMinInt = std::numeric_limits<int32_t>::min();
+ constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
return false;
@@ -223,7 +234,7 @@
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const int32_t kMinInt = std::numeric_limits<int32_t>::min();
+ constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
return false;
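The reworked ResolveString above replaces the MethodHelper call with a direct probe of the method's dex cache followed by a slow-path ClassLinker::ResolveString. Reduced to a generic cache-then-resolve sketch, with names invented here for illustration rather than taken from ART APIs:

  #include <functional>
  #include <unordered_map>

  // Illustrative fast-path cache with a slow-path resolver; not part of the patch.
  template <typename Key, typename Value>
  Value* CacheThenResolve(std::unordered_map<Key, Value*>& cache, const Key& key,
                          const std::function<Value*(const Key&)>& resolve) {
    auto it = cache.find(key);
    if (it != cache.end() && it->second != nullptr) {
      return it->second;  // Fast path: already resolved.
    }
    Value* resolved = resolve(key);  // Slow path; may return nullptr on failure.
    if (resolved != nullptr) {
      cache[key] = resolved;  // The real code lets ClassLinker populate the dex cache instead.
    }
    return resolved;
  }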
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 88d6544..6350c56 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -15,6 +15,7 @@
*/
#include "interpreter_common.h"
+#include "safe_math.h"
namespace art {
namespace interpreter {
@@ -321,9 +322,7 @@
const uint8_t vreg_index = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
if (do_assignability_check && obj_result != NULL) {
- StackHandleScope<1> hs(self);
- MethodHelper mh(hs.NewHandle(shadow_frame.GetMethod()));
- Class* return_type = mh.GetReturnType();
+ Class* return_type = shadow_frame.GetMethod()->GetReturnType();
obj_result = shadow_frame.GetVRegReference(vreg_index);
if (return_type == NULL) {
// Return the pending exception.
@@ -420,7 +419,7 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(CONST_STRING) {
- String* s = ResolveString(self, mh, inst->VRegB_21c());
+ String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
if (UNLIKELY(s == NULL)) {
HANDLE_PENDING_EXCEPTION();
} else {
@@ -431,7 +430,7 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
- String* s = ResolveString(self, mh, inst->VRegB_31c());
+ String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
if (UNLIKELY(s == NULL)) {
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1636,22 +1635,22 @@
HANDLE_INSTRUCTION_START(ADD_INT)
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) +
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SUB_INT)
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) -
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_INT)
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) *
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
@@ -1715,22 +1714,22 @@
HANDLE_INSTRUCTION_START(ADD_LONG)
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) +
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SUB_LONG)
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) -
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_LONG)
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) *
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
@@ -1865,8 +1864,8 @@
HANDLE_INSTRUCTION_START(ADD_INT_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) +
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeAdd(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1874,8 +1873,8 @@
HANDLE_INSTRUCTION_START(SUB_INT_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) -
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1883,8 +1882,8 @@
HANDLE_INSTRUCTION_START(MUL_INT_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) *
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1962,8 +1961,8 @@
HANDLE_INSTRUCTION_START(ADD_LONG_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) +
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeAdd(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1971,8 +1970,8 @@
HANDLE_INSTRUCTION_START(SUB_LONG_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) -
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1980,8 +1979,8 @@
HANDLE_INSTRUCTION_START(MUL_LONG_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) *
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -2148,22 +2147,22 @@
HANDLE_INSTRUCTION_START(ADD_INT_LIT16)
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
- inst->VRegC_22s());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(RSUB_INT)
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- inst->VRegC_22s() -
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ SafeSub(inst->VRegC_22s(),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_INT_LIT16)
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
- inst->VRegC_22s());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
@@ -2204,22 +2203,22 @@
HANDLE_INSTRUCTION_START(ADD_INT_LIT8)
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) +
- inst->VRegC_22b());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()),
+ inst->VRegC_22b()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(RSUB_INT_LIT8)
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- inst->VRegC_22b() -
- shadow_frame.GetVReg(inst->VRegB_22b()));
+ SafeSub(inst->VRegC_22b(),
+ shadow_frame.GetVReg(inst->VRegB_22b())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_INT_LIT8)
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) *
- inst->VRegC_22b());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()),
+ inst->VRegC_22b()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 14e8a52..1b6f53e 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -15,6 +15,7 @@
*/
#include "interpreter_common.h"
+#include "safe_math.h"
namespace art {
namespace interpreter {
@@ -233,9 +234,7 @@
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != NULL) {
- StackHandleScope<1> hs(self);
- MethodHelper mhs(hs.NewHandle(shadow_frame.GetMethod()));
- Class* return_type = mhs.GetReturnType();
+ Class* return_type = shadow_frame.GetMethod()->GetReturnType();
// Re-load since it might have moved.
obj_result = shadow_frame.GetVRegReference(ref_idx);
if (return_type == NULL) {
@@ -331,7 +330,7 @@
break;
case Instruction::CONST_STRING: {
PREAMBLE();
- String* s = ResolveString(self, mh, inst->VRegB_21c());
+ String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
if (UNLIKELY(s == NULL)) {
HANDLE_PENDING_EXCEPTION();
} else {
@@ -342,7 +341,7 @@
}
case Instruction::CONST_STRING_JUMBO: {
PREAMBLE();
- String* s = ResolveString(self, mh, inst->VRegB_31c());
+ String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
if (UNLIKELY(s == NULL)) {
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1497,25 +1496,26 @@
static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
- case Instruction::ADD_INT:
+ case Instruction::ADD_INT: {
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) +
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
+ }
case Instruction::SUB_INT:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) -
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::MUL_INT:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) *
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::DIV_INT: {
@@ -1579,29 +1579,29 @@
case Instruction::ADD_LONG:
PREAMBLE();
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) +
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::SUB_LONG:
PREAMBLE();
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) -
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::MUL_LONG:
PREAMBLE();
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) *
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::DIV_LONG:
PREAMBLE();
DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
break;
case Instruction::REM_LONG:
@@ -1726,9 +1726,8 @@
case Instruction::ADD_INT_2ADDR: {
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) +
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(vregA, SafeAdd(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1736,8 +1735,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) -
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1745,8 +1744,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) *
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1824,8 +1823,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) +
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeAdd(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1833,8 +1832,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) -
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1842,8 +1841,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) *
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -2010,22 +2009,22 @@
case Instruction::ADD_INT_LIT16:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
- inst->VRegC_22s());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
inst = inst->Next_2xx();
break;
- case Instruction::RSUB_INT:
+ case Instruction::RSUB_INT_LIT16:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- inst->VRegC_22s() -
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ SafeSub(inst->VRegC_22s(),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
inst = inst->Next_2xx();
break;
case Instruction::MUL_INT_LIT16:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
- inst->VRegC_22s());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
inst = inst->Next_2xx();
break;
case Instruction::DIV_INT_LIT16: {
@@ -2066,22 +2065,19 @@
case Instruction::ADD_INT_LIT8:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) +
- inst->VRegC_22b());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
inst = inst->Next_2xx();
break;
case Instruction::RSUB_INT_LIT8:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- inst->VRegC_22b() -
- shadow_frame.GetVReg(inst->VRegB_22b()));
+ SafeSub(inst->VRegC_22b(), shadow_frame.GetVReg(inst->VRegB_22b())));
inst = inst->Next_2xx();
break;
case Instruction::MUL_INT_LIT8:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) *
- inst->VRegC_22b());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
inst = inst->Next_2xx();
break;
case Instruction::DIV_INT_LIT8: {
diff --git a/runtime/interpreter/safe_math.h b/runtime/interpreter/safe_math.h
new file mode 100644
index 0000000..78b3539
--- /dev/null
+++ b/runtime/interpreter/safe_math.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_SAFE_MATH_H_
+#define ART_RUNTIME_INTERPRETER_SAFE_MATH_H_
+
+#include <functional>
+#include <type_traits>
+
+namespace art {
+namespace interpreter {
+
+// Declares a type which is the larger in bit size of the two template parameters.
+template <typename T1, typename T2>
+struct select_bigger {
+ typedef typename std::conditional<sizeof(T1) >= sizeof(T2), T1, T2>::type type;
+};
+
+// Perform signed arithmetic Op on 'a' and 'b' with defined wrapping behavior.
+template<template <typename OpT> class Op, typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeMath(T1 a, T2 b) {
+ typedef typename select_bigger<T1, T2>::type biggest_T;
+ typedef typename std::make_unsigned<biggest_T>::type unsigned_biggest_T;
+ static_assert(std::is_signed<T1>::value, "Expected T1 to be signed");
+ static_assert(std::is_signed<T2>::value, "Expected T2 to be signed");
+ unsigned_biggest_T val1 = static_cast<unsigned_biggest_T>(static_cast<biggest_T>(a));
+ unsigned_biggest_T val2 = static_cast<unsigned_biggest_T>(b);
+ return static_cast<biggest_T>(Op<unsigned_biggest_T>()(val1, val2));
+}
+
+// Perform a signed add on 'a' and 'b' with defined wrapping behavior.
+template<typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeAdd(T1 a, T2 b) {
+ return SafeMath<std::plus>(a, b);
+}
+
+// Perform a signed subtract on 'a' and 'b' with defined wrapping behavior.
+template<typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeSub(T1 a, T2 b) {
+ return SafeMath<std::minus>(a, b);
+}
+
+// Perform a signed multiply on 'a' and 'b' with defined wrapping behavior.
+template<typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeMul(T1 a, T2 b) {
+ return SafeMath<std::multiplies>(a, b);
+}
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_SAFE_MATH_H_
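A hypothetical caller of the header above, assuming it is on the include path; it shows the wrapping result and the width promotion performed by select_bigger:

#include <cstdint>
#include <iostream>
#include <limits>
#include "safe_math.h"  // assumed include path

int main() {
  using art::interpreter::SafeAdd;
  using art::interpreter::SafeMul;
  // Wraps instead of triggering undefined signed overflow:
  std::cout << SafeAdd(std::numeric_limits<int32_t>::max(), 1) << std::endl;  // -2147483648
  // Mixed operand widths promote to the wider type via select_bigger (int64_t here):
  int64_t wide = SafeMul(int64_t(1) << 40, int32_t(2));
  std::cout << wide << std::endl;  // 2199023255552
  return 0;
}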
diff --git a/runtime/interpreter/safe_math_test.cc b/runtime/interpreter/safe_math_test.cc
new file mode 100644
index 0000000..28087a3
--- /dev/null
+++ b/runtime/interpreter/safe_math_test.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "safe_math.h"
+
+#include <limits>
+
+#include "gtest/gtest.h"
+
+namespace art {
+namespace interpreter {
+
+TEST(SafeMath, Add) {
+ // Adding 1 overflows 0x7ff... to 0x800... aka max and min.
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::max(), 1),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::max(), 1),
+ std::numeric_limits<int64_t>::min());
+
+ // Vanilla arithmetic should work too.
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::max() - 1, 1),
+ std::numeric_limits<int32_t>::max());
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::max() - 1, 1),
+ std::numeric_limits<int64_t>::max());
+
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::min() + 1, -1),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::min() + 1, -1),
+ std::numeric_limits<int64_t>::min());
+
+ EXPECT_EQ(SafeAdd(int32_t(-1), -1), -2);
+ EXPECT_EQ(SafeAdd(int64_t(-1), -1), -2);
+
+ EXPECT_EQ(SafeAdd(int32_t(1), 1), 2);
+ EXPECT_EQ(SafeAdd(int64_t(1), 1), 2);
+
+ EXPECT_EQ(SafeAdd(int32_t(-1), 1), 0);
+ EXPECT_EQ(SafeAdd(int64_t(-1), 1), 0);
+
+ EXPECT_EQ(SafeAdd(int32_t(1), -1), 0);
+ EXPECT_EQ(SafeAdd(int64_t(1), -1), 0);
+
+ // Test sign extension of smaller operand sizes.
+ EXPECT_EQ(SafeAdd(int32_t(1), int8_t(-1)), 0);
+ EXPECT_EQ(SafeAdd(int64_t(1), int8_t(-1)), 0);
+}
+
+TEST(SafeMath, Sub) {
+ // Subtracting 1 underflows 0x800... to 0x7ff... aka min and max.
+ EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::min(), 1),
+ std::numeric_limits<int32_t>::max());
+ EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::min(), 1),
+ std::numeric_limits<int64_t>::max());
+
+ // Vanilla arithmetic should work too.
+ EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::max() - 1, -1),
+ std::numeric_limits<int32_t>::max());
+ EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::max() - 1, -1),
+ std::numeric_limits<int64_t>::max());
+
+ EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::min() + 1, 1),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::min() + 1, 1),
+ std::numeric_limits<int64_t>::min());
+
+ EXPECT_EQ(SafeSub(int32_t(-1), -1), 0);
+ EXPECT_EQ(SafeSub(int64_t(-1), -1), 0);
+
+ EXPECT_EQ(SafeSub(int32_t(1), 1), 0);
+ EXPECT_EQ(SafeSub(int64_t(1), 1), 0);
+
+ EXPECT_EQ(SafeSub(int32_t(-1), 1), -2);
+ EXPECT_EQ(SafeSub(int64_t(-1), 1), -2);
+
+ EXPECT_EQ(SafeSub(int32_t(1), -1), 2);
+ EXPECT_EQ(SafeSub(int64_t(1), -1), 2);
+
+ // Test sign extension of smaller operand sizes.
+ EXPECT_EQ(SafeSub(int32_t(1), int8_t(-1)), 2);
+ EXPECT_EQ(SafeSub(int64_t(1), int8_t(-1)), 2);
+}
+
+TEST(SafeMath, Mul) {
+ // Multiplying by 2 overflows 0x7ff...f to 0xfff...e aka max and -2.
+ EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::max(), 2),
+ -2);
+ EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::max(), 2),
+ -2);
+
+ // Vanilla arithmetic should work too.
+ EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::max() / 2, 2),
+ std::numeric_limits<int32_t>::max() - 1); // -1 as LSB is lost by division.
+ EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::max() / 2, 2),
+ std::numeric_limits<int64_t>::max() - 1); // -1 as LSB is lost by division.
+
+ EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::min() / 2, 2),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::min() / 2, 2),
+ std::numeric_limits<int64_t>::min());
+
+ EXPECT_EQ(SafeMul(int32_t(-1), -1), 1);
+ EXPECT_EQ(SafeMul(int64_t(-1), -1), 1);
+
+ EXPECT_EQ(SafeMul(int32_t(1), 1), 1);
+ EXPECT_EQ(SafeMul(int64_t(1), 1), 1);
+
+ EXPECT_EQ(SafeMul(int32_t(-1), 1), -1);
+ EXPECT_EQ(SafeMul(int64_t(-1), 1), -1);
+
+ EXPECT_EQ(SafeMul(int32_t(1), -1), -1);
+ EXPECT_EQ(SafeMul(int64_t(1), -1), -1);
+
+ // Test sign extension of smaller operand sizes.
+ EXPECT_EQ(SafeMul(int32_t(1), int8_t(-1)), -1);
+ EXPECT_EQ(SafeMul(int64_t(1), int8_t(-1)), -1);
+}
+
+} // namespace interpreter
+} // namespace art
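As a quick sanity check of the Mul expectations above, the overflow arithmetic can be reproduced directly (illustrative snippet, not part of the test):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t wrapped = 0x7FFFFFFFu * 2u;  // 0xFFFFFFFE, arithmetic modulo 2^32
  // 0xFFFFFFFE reinterpreted as a two's-complement int32_t is -2.
  std::printf("0x%08" PRIX32 " -> %" PRId32 "\n", wrapped, static_cast<int32_t>(wrapped));
  return 0;
}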
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index fed8bf0..a5abce6 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -18,6 +18,7 @@
#include <dlfcn.h>
+#include "base/dumpable.h"
#include "base/mutex.h"
#include "base/stl_util.h"
#include "check_jni.h"
@@ -794,13 +795,13 @@
return JNI_OK;
}
-extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
+extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms_buf, jsize buf_len, jsize* vm_count) {
Runtime* runtime = Runtime::Current();
- if (runtime == nullptr) {
+ if (runtime == nullptr || buf_len == 0) {
*vm_count = 0;
} else {
*vm_count = 1;
- vms[0] = runtime->GetJavaVM();
+ vms_buf[0] = runtime->GetJavaVM();
}
return JNI_OK;
}
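With buf_len now honored, a zero-length buffer is never written to. A hypothetical out-of-tree caller of this invocation-API entry point might look like the following sketch (standard JNI calls only; names are illustrative):

#include <jni.h>
#include <cstdio>

int main() {
  JavaVM* vms_buf[1];
  jsize num_vms = 0;
  // Ask for at most one VM; ART only ever reports a single VM per process.
  if (JNI_GetCreatedJavaVMs(vms_buf, 1, &num_vms) == JNI_OK && num_vms > 0) {
    JNIEnv* env = nullptr;
    if (vms_buf[0]->AttachCurrentThread(&env, nullptr) == JNI_OK) {
      // ... use env ...
      vms_buf[0]->DetachCurrentThread();
    }
  } else {
    std::printf("no VM created in this process\n");
  }
  return 0;
}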
diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc
new file mode 100644
index 0000000..60c6a5c
--- /dev/null
+++ b/runtime/java_vm_ext_test.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+
+#include <pthread.h>
+
+#include "common_runtime_test.h"
+#include "java_vm_ext.h"
+#include "runtime.h"
+
+namespace art {
+
+class JavaVmExtTest : public CommonRuntimeTest {
+ protected:
+ virtual void SetUp() {
+ CommonRuntimeTest::SetUp();
+
+ vm_ = Runtime::Current()->GetJavaVM();
+ }
+
+
+ virtual void TearDown() OVERRIDE {
+ CommonRuntimeTest::TearDown();
+ }
+
+ JavaVMExt* vm_;
+};
+
+TEST_F(JavaVmExtTest, JNI_GetDefaultJavaVMInitArgs) {
+ jint err = JNI_GetDefaultJavaVMInitArgs(nullptr);
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+TEST_F(JavaVmExtTest, JNI_GetCreatedJavaVMs) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ EXPECT_EQ(1, num_vms);
+ EXPECT_EQ(vms_buf[0], vm_);
+}
+
+static bool gSmallStack = false;
+static bool gAsDaemon = false;
+
+static void* attach_current_thread_callback(void* arg ATTRIBUTE_UNUSED) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ JNIEnv* env;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ if (ok == JNI_OK) {
+ if (!gAsDaemon) {
+ ok = vms_buf[0]->AttachCurrentThread(&env, nullptr);
+ } else {
+ ok = vms_buf[0]->AttachCurrentThreadAsDaemon(&env, nullptr);
+ }
+ EXPECT_EQ(gSmallStack ? JNI_ERR : JNI_OK, ok);
+ if (ok == JNI_OK) {
+ ok = vms_buf[0]->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+ }
+ }
+ return nullptr;
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThreadAsDaemon) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = true;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread_SmallStack) {
+ pthread_t pthread;
+ pthread_attr_t attr;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = true;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, PTHREAD_STACK_MIN), reason);
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, &attr, attach_current_thread_callback,
+ nullptr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, DetachCurrentThread) {
+ JNIEnv* env;
+ jint ok = vm_->AttachCurrentThread(&env, nullptr);
+ ASSERT_EQ(JNI_OK, ok);
+ ok = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+
+ jint err = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+} // namespace art
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index fe91bb6..df7d068 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -84,13 +84,13 @@
shutting_down_ = true;
int control_sock = this->control_sock_;
- int clientSock = this->clientSock;
+ int local_clientSock = this->clientSock;
/* clear these out so it doesn't wake up and try to reuse them */
this->control_sock_ = this->clientSock = -1;
- if (clientSock != -1) {
- shutdown(clientSock, SHUT_RDWR);
+ if (local_clientSock != -1) {
+ shutdown(local_clientSock, SHUT_RDWR);
}
if (control_sock != -1) {
diff --git a/runtime/jdwp/jdwp_bits.h b/runtime/jdwp/jdwp_bits.h
index 9f80cbe..f9cf9ca 100644
--- a/runtime/jdwp/jdwp_bits.h
+++ b/runtime/jdwp/jdwp_bits.h
@@ -68,7 +68,7 @@
// @deprecated
static inline void Set1(uint8_t* buf, uint8_t val) {
- *buf = (uint8_t)(val);
+ *buf = val;
}
// @deprecated
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index d1229b2..1e0a2d2 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -125,6 +125,10 @@
};
static bool NeedsFullDeoptimization(JdwpEventKind eventKind) {
+ if (!Dbg::RequiresDeoptimization()) {
+ // We don't need deoptimization for debugging.
+ return false;
+ }
switch (eventKind) {
case EK_METHOD_ENTRY:
case EK_METHOD_EXIT:
@@ -138,7 +142,7 @@
}
}
-uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
+static uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
switch (eventKind) {
case EK_BREAKPOINT:
case EK_SINGLE_STEP:
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 7fdc18e..be34bd3 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1126,6 +1126,7 @@
static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index 4a80957..7119ce5 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -170,20 +170,20 @@
* for an open port.)
*/
void JdwpSocketState::Shutdown() {
- int listenSock = this->listenSock;
- int clientSock = this->clientSock;
+ int local_listenSock = this->listenSock;
+ int local_clientSock = this->clientSock;
/* clear these out so it doesn't wake up and try to reuse them */
this->listenSock = this->clientSock = -1;
/* "shutdown" dislodges blocking read() and accept() calls */
- if (listenSock != -1) {
- shutdown(listenSock, SHUT_RDWR);
- close(listenSock);
+ if (local_listenSock != -1) {
+ shutdown(local_listenSock, SHUT_RDWR);
+ close(local_listenSock);
}
- if (clientSock != -1) {
- shutdown(clientSock, SHUT_RDWR);
- close(clientSock);
+ if (local_clientSock != -1) {
+ shutdown(local_clientSock, SHUT_RDWR);
+ close(local_clientSock);
}
WakePipe();
@@ -272,7 +272,7 @@
/*
* Start by resolving the host name.
*/
-#ifdef HAVE_GETHOSTBYNAME_R
+#if defined(__linux__)
hostent he;
char auxBuf[128];
int error;
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 35aaf0a..9123994 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -16,6 +16,8 @@
#include "object_registry.h"
+#include "handle_scope-inl.h"
+#include "jni_internal.h"
#include "mirror/class.h"
#include "scoped_thread_state_change.h"
@@ -46,12 +48,17 @@
return 0;
}
+ Thread* const self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> obj_h(hs.NewHandle(o));
+
// Call IdentityHashCode here to avoid a lock level violation between lock_ and monitor_lock.
- int32_t identity_hash_code = o->IdentityHashCode();
- ScopedObjectAccessUnchecked soa(Thread::Current());
+ int32_t identity_hash_code = obj_h->IdentityHashCode();
+
+ ScopedObjectAccessUnchecked soa(self);
MutexLock mu(soa.Self(), lock_);
ObjectRegistryEntry* entry = nullptr;
- if (ContainsLocked(soa.Self(), o, identity_hash_code, &entry)) {
+ if (ContainsLocked(soa.Self(), obj_h.Get(), identity_hash_code, &entry)) {
// This object was already in our map.
++entry->reference_count;
} else {
@@ -66,7 +73,7 @@
// This object isn't in the registry yet, so add it.
JNIEnv* env = soa.Env();
- jobject local_reference = soa.AddLocalReference<jobject>(o);
+ jobject local_reference = soa.AddLocalReference<jobject>(obj_h.Get());
entry->jni_reference_type = JNIWeakGlobalRefType;
entry->jni_reference = env->NewWeakGlobalRef(local_reference);
@@ -80,17 +87,6 @@
return entry->id;
}
-bool ObjectRegistry::Contains(mirror::Object* o, ObjectRegistryEntry** out_entry) {
- if (o == nullptr) {
- return false;
- }
- // Call IdentityHashCode here to avoid a lock level violation between lock_ and monitor_lock.
- int32_t identity_hash_code = o->IdentityHashCode();
- Thread* self = Thread::Current();
- MutexLock mu(self, lock_);
- return ContainsLocked(self, o, identity_hash_code, out_entry);
-}
-
bool ObjectRegistry::ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
ObjectRegistryEntry** out_entry) {
DCHECK(o != nullptr);
@@ -218,10 +214,10 @@
// Erase the object from the maps. Note object may be null if it's
// a weak ref and the GC has cleared it.
int32_t hash_code = entry->identity_hash_code;
- for (auto it = object_to_entry_.lower_bound(hash_code), end = object_to_entry_.end();
- it != end && it->first == hash_code; ++it) {
- if (entry == it->second) {
- object_to_entry_.erase(it);
+ for (auto inner_it = object_to_entry_.lower_bound(hash_code), end = object_to_entry_.end();
+ inner_it != end && inner_it->first == hash_code; ++inner_it) {
+ if (entry == inner_it->second) {
+ object_to_entry_.erase(inner_it);
break;
}
}
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index faddff1..0693f33 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -75,10 +75,6 @@
return down_cast<T>(InternalGet(id, error));
}
- bool Contains(mirror::Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return Contains(o, nullptr);
- }
-
void Clear() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DisableCollection(JDWP::ObjectId id)
@@ -114,9 +110,6 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- bool Contains(mirror::Object* o, ObjectRegistryEntry** out_entry)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
-
bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
ObjectRegistryEntry** out_entry)
EXCLUSIVE_LOCKS_REQUIRED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 180e3d7..b2d3835 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -28,9 +28,9 @@
static constexpr size_t kLocalsInitial = 64; // Arbitrary.
-JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
- : self(self),
- vm(vm),
+JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
+ : self(self_in),
+ vm(vm_in),
local_ref_cookie(IRT_FIRST_SEGMENT),
locals(kLocalsInitial, kLocalsMax, kLocal),
check_jni(false),
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 70754f2..1dcfcab 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -45,6 +45,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
+#include "nativebridge/native_bridge.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
@@ -196,8 +197,8 @@
// Failed to find type from the signature of the field.
DCHECK(soa.Self()->IsExceptionPending());
ThrowLocation throw_location;
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::Throwable> cause(hs.NewHandle(soa.Self()->GetException(&throw_location)));
+ StackHandleScope<1> hs2(soa.Self());
+ Handle<mirror::Throwable> cause(hs2.NewHandle(soa.Self()->GetException(&throw_location)));
soa.Self()->ClearException();
std::string temp;
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
@@ -359,7 +360,7 @@
ScopedObjectAccess soa(env);
mirror::ArtMethod* m = soa.DecodeMethod(mid);
CHECK(!kMovingMethods);
- jobject art_method = soa.AddLocalReference<jobject>(m);
+ ScopedLocalRef<jobject> art_method(env, soa.AddLocalReference<jobject>(m));
jobject reflect_method;
if (m->IsConstructor()) {
reflect_method = env->AllocObject(WellKnownClasses::java_lang_reflect_Constructor);
@@ -370,7 +371,7 @@
return nullptr;
}
SetObjectField(env, reflect_method,
- WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod, art_method);
+ WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod, art_method.get());
return reflect_method;
}
@@ -378,13 +379,13 @@
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::ArtField* f = soa.DecodeField(fid);
- jobject art_field = soa.AddLocalReference<jobject>(f);
+ ScopedLocalRef<jobject> art_field(env, soa.AddLocalReference<jobject>(f));
jobject reflect_field = env->AllocObject(WellKnownClasses::java_lang_reflect_Field);
if (env->ExceptionCheck()) {
return nullptr;
}
SetObjectField(env, reflect_field,
- WellKnownClasses::java_lang_reflect_Field_artField, art_field);
+ WellKnownClasses::java_lang_reflect_Field_artField, art_field.get());
return reflect_field;
}
@@ -1749,6 +1750,7 @@
}
static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
+ UNUSED(chars);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1777,7 +1779,7 @@
return bytes;
}
- static void ReleaseStringUTFChars(JNIEnv* env, jstring, const char* chars) {
+ static void ReleaseStringUTFChars(JNIEnv*, jstring, const char* chars) {
delete[] chars;
}
@@ -2705,7 +2707,7 @@
os << "JNIWeakGlobalRefType";
return os;
default:
- LOG(FATAL) << "jobjectRefType[" << static_cast<int>(rhs) << "]";
- return os;
+ LOG(::art::FATAL) << "jobjectRefType[" << static_cast<int>(rhs) << "]";
+ UNREACHABLE();
}
}
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index cab907c..62b6b34 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -390,59 +390,72 @@
void ReleasePrimitiveArrayElementsOfWrongType(bool check_jni) {
bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
CheckJniAbortCatcher jni_abort_catcher;
+ {
+ jbooleanArray array = env_->NewBooleanArray(10);
+ ASSERT_TRUE(array != nullptr);
+ jboolean is_copy;
+ jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
+ ASSERT_TRUE(elements != nullptr);
+ env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
+ reinterpret_cast<jbyte*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to release byte primitive array elements with an object of type boolean[]");
+ env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
+ reinterpret_cast<jshort*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to release short primitive array elements with an object of type boolean[]");
+ env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
+ reinterpret_cast<jchar*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to release char primitive array elements with an object of type boolean[]");
+ env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
+ reinterpret_cast<jint*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to release int primitive array elements with an object of type boolean[]");
+ env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
+ reinterpret_cast<jlong*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to release long primitive array elements with an object of type boolean[]");
+ env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
+ reinterpret_cast<jfloat*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to release float primitive array elements with an object of type boolean[]");
+ env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
+ reinterpret_cast<jdouble*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to release double primitive array elements with an object of type boolean[]");
- jbooleanArray array = env_->NewBooleanArray(10);
- ASSERT_TRUE(array != nullptr);
- jboolean is_copy;
- jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
- ASSERT_TRUE(elements != nullptr);
- env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
- reinterpret_cast<jbyte*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected byte[]"
- : "attempt to release byte primitive array elements with an object of type boolean[]");
- env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
- reinterpret_cast<jshort*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected short[]"
- : "attempt to release short primitive array elements with an object of type boolean[]");
- env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
- reinterpret_cast<jchar*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected char[]"
- : "attempt to release char primitive array elements with an object of type boolean[]");
- env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
- reinterpret_cast<jint*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected int[]"
- : "attempt to release int primitive array elements with an object of type boolean[]");
- env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
- reinterpret_cast<jlong*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected long[]"
- : "attempt to release long primitive array elements with an object of type boolean[]");
- env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
- reinterpret_cast<jfloat*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected float[]"
- : "attempt to release float primitive array elements with an object of type boolean[]");
- env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
- reinterpret_cast<jdouble*>(elements), 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type boolean[] expected double[]"
- : "attempt to release double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
- jni_abort_catcher.Check(
- check_jni ? "incompatible array type byte[] expected boolean[]"
- : "attempt to release boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
- jni_abort_catcher.Check(
- check_jni ? "jarray argument has non-array type: java.lang.String"
- : "attempt to release boolean primitive array elements with an object of type "
+ // Don't leak the elements array.
+ env_->ReleaseBooleanArrayElements(array, elements, 0);
+ }
+ {
+ jbyteArray array = env_->NewByteArray(10);
+ jboolean is_copy;
+ jbyte* elements = env_->GetByteArrayElements(array, &is_copy);
+
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array),
+ reinterpret_cast<jboolean*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to release boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object),
+ reinterpret_cast<jboolean*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to release boolean primitive array elements with an object of type "
"java.lang.String");
+ // Don't leak the elements array.
+ env_->ReleaseByteArrayElements(array, elements, 0);
+ }
EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
}
@@ -799,6 +812,11 @@
ASSERT_NE(fid, nullptr);
// Turn the fid into a java.lang.reflect.Field...
jobject field = env_->ToReflectedField(c, fid, JNI_FALSE);
+ for (size_t i = 0; i <= kLocalsMax; ++i) {
+ // Regression test for b/18396311: ToReflectedField leaking local refs and causing the local
+ // reference table to overflow with 512 references to ArtField.
+ env_->DeleteLocalRef(env_->ToReflectedField(c, fid, JNI_FALSE));
+ }
ASSERT_NE(c, nullptr);
ASSERT_TRUE(env_->IsInstanceOf(field, jlrField));
// ...and back again.
@@ -825,6 +843,11 @@
ASSERT_NE(mid, nullptr);
// Turn the mid into a java.lang.reflect.Constructor...
jobject method = env_->ToReflectedMethod(c, mid, JNI_FALSE);
+ for (size_t i = 0; i <= kLocalsMax; ++i) {
+ // Regression test for b/18396311: ToReflectedMethod leaking local refs and causing the local
+ // reference table to overflow with 512 references to ArtMethod.
+ env_->DeleteLocalRef(env_->ToReflectedMethod(c, mid, JNI_FALSE));
+ }
ASSERT_NE(method, nullptr);
ASSERT_TRUE(env_->IsInstanceOf(method, jlrConstructor));
// ...and back again.
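The two loops added above guard against leaking one local reference per ToReflectedField/ToReflectedMethod call. The same hygiene applies to any JNI loop that creates local references; a generic sketch with illustrative names:

#include <jni.h>

// When a loop creates JNI local references, delete each one promptly or the
// bounded local reference table eventually overflows and aborts the runtime.
static void ProcessAll(JNIEnv* env, jobjectArray array) {
  jsize len = env->GetArrayLength(array);
  for (jsize i = 0; i < len; ++i) {
    jobject element = env->GetObjectArrayElement(array, i);
    // ... use element ...
    env->DeleteLocalRef(element);  // Release the slot before the next iteration.
  }
}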
@@ -1170,7 +1193,15 @@
}
TEST_F(JniInternalTest, GetArrayLength) {
- // Already tested in NewObjectArray/NewPrimitiveArray.
+ // Already tested in NewObjectArray/NewPrimitiveArray except for NULL.
+ CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ EXPECT_EQ(0, env_->GetArrayLength(nullptr));
+ jni_abort_catcher.Check("java_array == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(JNI_ERR, env_->GetArrayLength(nullptr));
+ jni_abort_catcher.Check("jarray was NULL");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, GetObjectClass) {
@@ -2011,14 +2042,4 @@
}
}
-TEST_F(JniInternalTest, DetachCurrentThread) {
- CleanUpJniEnv(); // cleanup now so TearDown won't have junk from wrong JNIEnv
- jint ok = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_OK, ok);
-
- jint err = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_ERR, err);
- vm_->AttachCurrentThread(&env_, nullptr); // need attached thread for CommonRuntimeTest::TearDown
-}
-
} // namespace art
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index cf6f83c..c52578f 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -34,13 +34,13 @@
inline Monitor* LockWord::FatLockMonitor() const {
DCHECK_EQ(GetState(), kFatLocked);
- MonitorId mon_id = static_cast<MonitorId>(value_ & ~(kStateMask << kStateShift));
+ MonitorId mon_id = value_ & ~(kStateMask << kStateShift);
return MonitorPool::MonitorFromMonitorId(mon_id);
}
inline size_t LockWord::ForwardingAddress() const {
DCHECK_EQ(GetState(), kForwardingAddress);
- return static_cast<size_t>(value_ << kStateSize);
+ return value_ << kStateSize;
}
inline LockWord::LockWord() : value_(0) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 13cc3b0..2d5c71b 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -52,7 +52,7 @@
*/
class LockWord {
public:
- enum {
+ enum SizeShiftsAndMasks { // private marker to prevent generate-operator-out.py from processing.
// Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
kStateSize = 2,
// Number of bits to encode the thin lock owner.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 3144ce1..8303f84 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -15,11 +15,12 @@
*/
#include "mem_map.h"
-#include "thread-inl.h"
-#include <inttypes.h>
#include <backtrace/BacktraceMap.h>
+#include <inttypes.h>
+
#include <memory>
+#include <sstream>
// See CreateStartPos below.
#ifdef __BIONIC__
@@ -27,7 +28,13 @@
#endif
#include "base/stringprintf.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
+#pragma GCC diagnostic pop
+
+#include "thread-inl.h"
#include "utils.h"
#define USE_ASHMEM 1
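The pragma pair added above silences -Wshadow only for the ScopedFd include and then restores the previous diagnostic state. The pattern in isolation (self-contained example, not the ART code):

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Code that would otherwise warn under -Wshadow (a parameter shadowing a global);
// in mem_map.cc the offending code lives in a header ART does not control.
static int value = 0;
static int Set(int value) { return value; }
#pragma GCC diagnostic pop

int main() { return Set(value); }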
@@ -236,6 +243,9 @@
MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
+#ifndef __LP64__
+ UNUSED(low_4gb);
+#endif
if (byte_count == 0) {
return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index df1222c..9b003aa 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -175,6 +175,7 @@
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
+std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps);
} // namespace art
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index 4eb6d47..b3820be 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -32,7 +32,7 @@
class MemoryRegion FINAL : public ValueObject {
public:
MemoryRegion() : pointer_(nullptr), size_(0) {}
- MemoryRegion(void* pointer, uintptr_t size) : pointer_(pointer), size_(size) {}
+ MemoryRegion(void* pointer_in, uintptr_t size_in) : pointer_(pointer_in), size_(size_in) {}
void* pointer() const { return pointer_; }
size_t size() const { return size_; }
@@ -78,10 +78,10 @@
void CopyFrom(size_t offset, const MemoryRegion& from) const;
// Compute a sub memory region based on an existing one.
- MemoryRegion Subregion(uintptr_t offset, uintptr_t size) const {
- CHECK_GE(this->size(), size);
- CHECK_LE(offset, this->size() - size);
- return MemoryRegion(reinterpret_cast<void*>(start() + offset), size);
+ MemoryRegion Subregion(uintptr_t offset, uintptr_t size_in) const {
+ CHECK_GE(this->size(), size_in);
+ CHECK_LE(offset, this->size() - size_in);
+ return MemoryRegion(reinterpret_cast<void*>(start() + offset), size_in);
}
// Compute an extended memory region based on an existing one.
diff --git a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h
index 143f4bc..7a7949e 100644
--- a/runtime/method_helper-inl.h
+++ b/runtime/method_helper-inl.h
@@ -57,29 +57,6 @@
return type;
}
-template <template <class T> class HandleKind>
-inline mirror::Class* MethodHelperT<HandleKind>::GetReturnType(bool resolve) {
- mirror::ArtMethod* method = GetMethod();
- const DexFile* dex_file = method->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
- const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
- uint16_t return_type_idx = proto_id.return_type_idx_;
- return GetClassFromTypeIdx(return_type_idx, resolve);
-}
-
-template <template <class T> class HandleKind>
-inline mirror::String* MethodHelperT<HandleKind>::ResolveString(uint32_t string_idx) {
- mirror::ArtMethod* method = GetMethod();
- mirror::String* s = method->GetDexCacheStrings()->Get(string_idx);
- if (UNLIKELY(s == nullptr)) {
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
- s = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), string_idx,
- dex_cache);
- }
- return s;
-}
-
} // namespace art
#endif // ART_RUNTIME_METHOD_HELPER_INL_H_
diff --git a/runtime/method_helper.cc b/runtime/method_helper.cc
index 79c2b91..81e1794 100644
--- a/runtime/method_helper.cc
+++ b/runtime/method_helper.cc
@@ -26,23 +26,15 @@
namespace art {
template <template <class T> class HandleKind>
-mirror::String* MethodHelperT<HandleKind>::GetNameAsString(Thread* self) {
- const DexFile* dex_file = method_->GetDexFile();
- mirror::ArtMethod* method = method_->GetInterfaceMethodIfProxy();
- uint32_t dex_method_idx = method->GetDexMethodIndex();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
- StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
- return Runtime::Current()->GetClassLinker()->ResolveString(*dex_file, method_id.name_idx_,
- dex_cache);
-}
-
-template <template <class T> class HandleKind>
template <template <class T2> class HandleKind2>
-bool MethodHelperT<HandleKind>::HasSameSignatureWithDifferentClassLoaders(
+bool MethodHelperT<HandleKind>::HasSameSignatureWithDifferentClassLoaders(Thread* self,
MethodHelperT<HandleKind2>* other) {
- if (UNLIKELY(GetReturnType() != other->GetReturnType())) {
- return false;
+ {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> return_type(hs.NewHandle(GetMethod()->GetReturnType()));
+ if (UNLIKELY(other->GetMethod()->GetReturnType() != return_type.Get())) {
+ return false;
+ }
}
const DexFile::TypeList* types = method_->GetParameterTypeList();
const DexFile::TypeList* other_types = other->method_->GetParameterTypeList();
@@ -140,10 +132,6 @@
}
// Instantiate methods.
-template mirror::String* MethodHelperT<Handle>::GetNameAsString(Thread* self);
-
-template mirror::String* MethodHelperT<MutableHandle>::GetNameAsString(Thread* self);
-
template
uint32_t MethodHelperT<Handle>::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile);
template
@@ -158,19 +146,19 @@
const DexFile& other_dexfile, uint32_t name_and_signature_idx);
template
-bool MethodHelperT<Handle>::HasSameSignatureWithDifferentClassLoaders<Handle>(
+bool MethodHelperT<Handle>::HasSameSignatureWithDifferentClassLoaders<Handle>(Thread* self,
MethodHelperT<Handle>* other);
template
-bool MethodHelperT<Handle>::HasSameSignatureWithDifferentClassLoaders<MutableHandle>(
+bool MethodHelperT<Handle>::HasSameSignatureWithDifferentClassLoaders<MutableHandle>(Thread* self,
MethodHelperT<MutableHandle>* other);
template
-bool MethodHelperT<MutableHandle>::HasSameSignatureWithDifferentClassLoaders<Handle>(
+bool MethodHelperT<MutableHandle>::HasSameSignatureWithDifferentClassLoaders<Handle>(Thread* self,
MethodHelperT<Handle>* other);
template
bool MethodHelperT<MutableHandle>::HasSameSignatureWithDifferentClassLoaders<MutableHandle>(
- MethodHelperT<MutableHandle>* other);
+ Thread* self, MethodHelperT<MutableHandle>* other);
} // namespace art
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index fe364d3..dc305d5 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -40,8 +40,6 @@
return method_.Get();
}
- mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const char* result = shorty_;
if (result == nullptr) {
@@ -72,10 +70,6 @@
return refs;
}
- // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType this caused a large
- // number of bugs at call sites.
- mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// "1 +" because the first in Args is the receiver.
// "- 1" because we don't count the return type.
@@ -109,14 +103,12 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <template <class T> class HandleKind2>
- bool HasSameSignatureWithDifferentClassLoaders(MethodHelperT<HandleKind2>* other)
+ bool HasSameSignatureWithDifferentClassLoaders(Thread* self, MethodHelperT<HandleKind2>* other)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* GetClassFromTypeIdx(uint16_t type_idx, bool resolve = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7e1ad78..13f881d 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -81,6 +81,7 @@
// 64-bit. No overflow as component_count is 32-bit and the maximum
// component size is 8.
DCHECK_LE((1U << component_size_shift), 8U);
+ UNUSED(self);
#else
// 32-bit.
DCHECK_NE(header_size, 0U);
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 636be33..b92f017 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -58,8 +58,8 @@
if (current_dimension + 1 < dimensions->GetLength()) {
// Create a new sub-array in every element of the array.
for (int32_t i = 0; i < array_length; i++) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_component_type(hs.NewHandle(array_class->GetComponentType()));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> h_component_type(hs2.NewHandle(array_class->GetComponentType()));
Array* sub_array = RecursiveCreateMultiArray(self, h_component_type,
current_dimension + 1, dimensions);
if (UNLIKELY(sub_array == nullptr)) {
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 12bec89..83e3688 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -49,7 +49,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
}
@@ -82,7 +82,7 @@
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CheckIsValidIndex(int32_t index) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 664a412..b936511 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -68,6 +68,10 @@
return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_));
}
+inline uint16_t ArtMethod::GetMethodIndexDuringLinking() {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_));
+}
+
inline uint32_t ArtMethod::GetDexMethodIndex() {
#ifdef ART_SEA_IR_MODE
// TODO: Re-add this check for (PORTABLE + SMALL + ) SEA IR when PORTABLE IS fixed!
@@ -171,7 +175,7 @@
}
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
- return true;
+ UNREACHABLE();
}
}
@@ -195,17 +199,17 @@
SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(code_offset));
}
-inline const uint8_t* ArtMethod::GetMappingTable() {
- const void* code_pointer = GetQuickOatCodePointer();
+inline const uint8_t* ArtMethod::GetMappingTable(size_t pointer_size) {
+ const void* code_pointer = GetQuickOatCodePointer(pointer_size);
if (code_pointer == nullptr) {
return nullptr;
}
- return GetMappingTable(code_pointer);
+ return GetMappingTable(code_pointer, pointer_size);
}
-inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer) {
+inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer, size_t pointer_size) {
DCHECK(code_pointer != nullptr);
- DCHECK(code_pointer == GetQuickOatCodePointer());
+ DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
if (UNLIKELY(offset == 0u)) {
@@ -214,20 +218,18 @@
return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
}
-inline const uint8_t* ArtMethod::GetVmapTable() {
- const void* code_pointer = GetQuickOatCodePointer();
+inline const uint8_t* ArtMethod::GetVmapTable(size_t pointer_size) {
+ const void* code_pointer = GetQuickOatCodePointer(pointer_size);
if (code_pointer == nullptr) {
return nullptr;
}
- return GetVmapTable(code_pointer);
+ return GetVmapTable(code_pointer, pointer_size);
}
-inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer) {
- if (IsOptimized()) {
- LOG(FATAL) << "Unimplemented vmap table for optimized compiler";
- }
+inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer, size_t pointer_size) {
+ CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
DCHECK(code_pointer != nullptr);
- DCHECK(code_pointer == GetQuickOatCodePointer());
+ DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
if (UNLIKELY(offset == 0u)) {
@@ -241,8 +243,8 @@
}
inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
- DCHECK(IsOptimized());
- const void* code_pointer = GetQuickOatCodePointer();
+ DCHECK(IsOptimized(sizeof(void*)));
+ const void* code_pointer = GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
@@ -293,25 +295,27 @@
return result;
}
-inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
- return pc - reinterpret_cast<uintptr_t>(code);
+inline bool ArtMethod::IsImtUnimplementedMethod() {
+ bool result = this == Runtime::Current()->GetImtUnimplementedMethod();
+ // Check that, if we do think it is phony, it looks like the imt unimplemented method.
+ DCHECK(!result || IsRuntimeMethod());
+ return result;
}
-template<VerifyObjectFlags kVerifyFlags>
-inline void ArtMethod::SetNativeMethod(const void* native_method) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
+inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
+ const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(
+ this, sizeof(void*));
+ return pc - reinterpret_cast<uintptr_t>(code);
}
inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer());
+ DCHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*)));
return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
}
inline const DexFile* ArtMethod::GetDexFile() {
- return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ return GetDexCache()->GetDexFile();
}
inline const char* ArtMethod::GetDeclaringClassDescriptor() {
@@ -431,11 +435,15 @@
return GetInterfaceMethodIfProxy()->GetDeclaringClass()->GetDexCache();
}
+inline bool ArtMethod::IsProxyMethod() {
+ return GetDeclaringClass()->IsProxyClass();
+}
+
inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy() {
- mirror::Class* klass = GetDeclaringClass();
- if (LIKELY(!klass->IsProxyClass())) {
+ if (LIKELY(!IsProxyMethod())) {
return this;
}
+ mirror::Class* klass = GetDeclaringClass();
mirror::ArtMethod* interface_method = GetDexCacheResolvedMethods()->Get(GetDexMethodIndex());
DCHECK(interface_method != nullptr);
DCHECK_EQ(interface_method,
@@ -443,6 +451,41 @@
return interface_method;
}
+inline void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_),
+ new_dex_cache_strings);
+}
+
+inline void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) {
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_),
+ new_dex_cache_methods);
+}
+
+inline void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_classes) {
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_),
+ new_dex_cache_classes);
+}
+
+inline mirror::Class* ArtMethod::GetReturnType(bool resolve) {
+ DCHECK(!IsProxyMethod());
+ const DexFile* dex_file = GetDexFile();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
+ const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ uint16_t return_type_idx = proto_id.return_type_idx_;
+ mirror::Class* type = GetDexCacheResolvedType(return_type_idx);
+ if (type == nullptr && resolve) {
+ type = Runtime::Current()->GetClassLinker()->ResolveType(return_type_idx, this);
+ CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ }
+ return type;
+}
+
+inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() {
+ // Using the default, check the class object size to make sure it matches the size of the
+ // object.
+ DCHECK_EQ(GetClass()->GetObjectSize(), sizeof(*this));
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index aa10a17..4f5ca3f 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -43,7 +43,7 @@
extern "C" void art_portable_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*, char);
extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
-#ifdef __LP64__
+#if defined(__LP64__) || defined(__arm__)
extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
#endif
@@ -67,6 +67,17 @@
}
}
+mirror::String* ArtMethod::GetNameAsString(Thread* self) {
+ mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
+ const DexFile* dex_file = method->GetDexFile();
+ uint32_t dex_method_idx = method->GetDexMethodIndex();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+ return Runtime::Current()->GetClassLinker()->ResolveString(*dex_file, method_id.name_idx_,
+ dex_cache);
+}
+
InvokeType ArtMethod::GetInvokeType() {
// TODO: kSuper?
if (GetDeclaringClass()->IsInterface()) {
@@ -91,21 +102,6 @@
java_lang_reflect_ArtMethod_ = GcRoot<Class>(nullptr);
}
-void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_),
- new_dex_cache_strings);
-}
-
-void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_),
- new_dex_cache_methods);
-}
-
-void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_classes) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_),
- new_dex_cache_classes);
-}
-
size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) {
CHECK_LE(1U, shorty.length());
uint32_t num_registers = 0;
@@ -120,10 +116,6 @@
return num_registers;
}
-bool ArtMethod::IsProxyMethod() {
- return GetDeclaringClass()->IsProxyClass();
-}
-
ArtMethod* ArtMethod::FindOverriddenMethod() {
if (IsStatic()) {
return NULL;
@@ -173,9 +165,9 @@
// Portable doesn't use the machine pc, we just use dex pc instead.
return static_cast<uint32_t>(pc);
}
- const void* entry_point = GetQuickOatEntryPoint();
- MappingTable table(
- entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ MappingTable table(entry_point != nullptr ?
+ GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
if (table.TotalSize() == 0) {
// NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
// but they have no suspend checks and, consequently, we never call ToDexPc() for them.
@@ -205,10 +197,10 @@
return DexFile::kDexNoIndex;
}
-uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc) {
- const void* entry_point = GetQuickOatEntryPoint();
- MappingTable table(
- entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
+uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure) {
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ MappingTable table(entry_point != nullptr ?
+ GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
if (table.TotalSize() == 0) {
DCHECK_EQ(dex_pc, 0U);
return 0; // Special no mapping/pc == 0 case
@@ -227,9 +219,11 @@
return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
}
}
- LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
- << " in " << PrettyMethod(this);
- return 0;
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+ << " in " << PrettyMethod(this);
+ }
+ return UINTPTR_MAX;
}
uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> exception_type,
@@ -315,24 +309,24 @@
bool ArtMethod::IsEntrypointInterpreter() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
- const void* oat_portable_code = class_linker->GetOatMethodPortableCodeFor(this);
if (!IsPortableCompiled()) { // Quick.
+ const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
return oat_quick_code == nullptr ||
oat_quick_code != GetEntryPointFromQuickCompiledCode();
} else { // Portable.
+ const void* oat_portable_code = class_linker->GetOatMethodPortableCodeFor(this);
return oat_portable_code == nullptr ||
oat_portable_code != GetEntryPointFromPortableCompiledCode();
}
}
-const void* ArtMethod::GetQuickOatEntryPoint() {
+const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
if (IsPortableCompiled() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
return nullptr;
}
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
// On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods.
@@ -346,7 +340,7 @@
#ifndef NDEBUG
uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
- CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this));
+ CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)));
return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
}
#endif
@@ -396,7 +390,7 @@
}
if (!IsPortableCompiled()) {
-#ifdef __LP64__
+#if defined(__LP64__) || defined(__arm__)
if (!IsStatic()) {
(*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
} else {
@@ -414,7 +408,7 @@
// stack. Continue execution in the interpreter.
self->ClearException();
ShadowFrame* shadow_frame = self->GetAndClearDeoptimizationShadowFrame(result);
- self->SetTopOfStack(nullptr, 0);
+ self->SetTopOfStack(nullptr);
self->SetTopOfShadowStack(shadow_frame);
interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
}
@@ -467,7 +461,7 @@
return runtime->GetRuntimeMethodFrameInfo(this);
}
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
ClassLinker* class_linker = runtime->GetClassLinker();
// On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
@@ -503,7 +497,7 @@
if (is_fast) {
SetAccessFlags(GetAccessFlags() | kAccFastNative);
}
- SetNativeMethod(native_method);
+ SetEntryPointFromJni(native_method);
}
void ArtMethod::UnregisterNative() {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 1dbfe5d..d292552 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -48,11 +48,6 @@
// Size of java.lang.reflect.ArtMethod.class.
static uint32_t ClassSize();
- // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
- static constexpr uint32_t InstanceSize() {
- return sizeof(ArtMethod);
- }
-
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -65,7 +60,7 @@
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
- uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
@@ -151,13 +146,13 @@
SetAccessFlags(GetAccessFlags() | kAccPreverified);
}
- bool IsOptimized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsOptimized(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Temporary solution for detecting if a method has been optimized: the compiler
// does not create a GC map. Instead, the vmap table contains the stack map
// (as in stack_map.h).
- return (GetEntryPointFromQuickCompiledCode() != nullptr)
- && (GetQuickOatCodePointer() != nullptr)
- && (GetNativeGcMap() == nullptr);
+ return GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
+ && GetQuickOatCodePointer(pointer_size) != nullptr
+ && GetNativeGcMapPtrSize(pointer_size) == nullptr;
}
bool IsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -178,6 +173,9 @@
uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Doesn't do erroneous / unresolved class checks.
+ uint16_t GetMethodIndexDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetMethodIndex();
}
@@ -203,7 +201,7 @@
// Number of 32bit registers that would be required to hold all the arguments
static size_t NumArgRegisters(const StringPiece& shorty);
- uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDexMethodIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
@@ -226,11 +224,11 @@
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
}
- ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
+ ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
+ ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
+ ALWAYS_INLINE void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedMethods(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -255,49 +253,92 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
EntryPointFromInterpreter* GetEntryPointFromInterpreter()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<EntryPointFromInterpreter*, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_));
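+ // The argument-free accessors assume the runtime's native pointer size matches this mirror
+ // class's layout; CheckObjectSizeEqualsMirrorSize() verifies that before delegating to the
+ // PtrSize variants.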
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromInterpreterPtrSize(sizeof(void*));
+ }
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<EntryPointFromInterpreter*, kVerifyFlags>(
+ EntryPointFromInterpreterOffset(pointer_size), pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
- entry_point_from_interpreter);
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter,
+ size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size);
}
- static MemberOffset EntryPointFromPortableCompiledCodeOffset() {
- return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_portable_compiled_code_));
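+ // The field's byte offset inside PtrSizedFields is divided by sizeof(void*) to get its index,
+ // then rescaled by the requested pointer size so 32-bit and 64-bit images agree on the layout.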
+ ALWAYS_INLINE static MemberOffset EntryPointFromPortableCompiledCodeOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_portable_compiled_code_) / sizeof(void*) * pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- const void* GetEntryPointFromPortableCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset());
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ const void* GetEntryPointFromPortableCompiledCode()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromPortableCompiledCodePtrSize(sizeof(void*));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const void* GetEntryPointFromPortableCompiledCodePtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(pointer_size), pointer_size);
+ }
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code);
+ CheckObjectSizeEqualsMirrorSize();
+ return SetEntryPointFromPortableCompiledCodePtrSize(entry_point_from_portable_compiled_code,
+ sizeof(void*));
}
- static MemberOffset EntryPointFromQuickCompiledCodeOffset() {
- return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_quick_compiled_code_));
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromPortableCompiledCodePtrSize(
+ const void* entry_point_from_portable_compiled_code, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(pointer_size),
+ entry_point_from_portable_compiled_code, pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*, kVerifyFlags>(EntryPointFromQuickCompiledCodeOffset());
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+ EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code);
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
+ sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
+ const void* entry_point_from_quick_compiled_code, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code,
+ pointer_size);
}
uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -334,37 +375,54 @@
ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
- code &= ~0x1; // TODO: Make this Thumb2 specific.
+ // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
+ // least 2-byte aligned.
+ code &= ~0x1;
return reinterpret_cast<const void*>(code);
}
- // Actual entry point pointer to compiled oat code or nullptr if method has no compiled code.
- const void* GetQuickOatEntryPoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ // Actual entry point pointer to compiled oat code or nullptr.
+ const void* GetQuickOatEntryPoint(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Actual pointer to compiled oat code or nullptr.
- const void* GetQuickOatCodePointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return EntryPointToCodePointer(GetQuickOatEntryPoint());
+ const void* GetQuickOatCodePointer(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
}
// Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
- const uint8_t* GetMappingTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const uint8_t* GetMappingTable(const void* code_pointer)
+ const uint8_t* GetMappingTable(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
- const uint8_t* GetVmapTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const uint8_t* GetVmapTable(const void* code_pointer)
+ const uint8_t* GetVmapTable(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
StackMap GetStackMap(uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
+ CheckObjectSizeEqualsMirrorSize();
+ return GetNativeGcMapPtrSize(sizeof(void*));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE const uint8_t* GetNativeGcMapPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<uint8_t*>(GcMapOffset(pointer_size), pointer_size);
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetNativeGcMap(const uint8_t* data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtr<false, true, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data);
+ CheckObjectSizeEqualsMirrorSize();
+ SetNativeGcMapPtrSize(data, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetNativeGcMapPtrSize(const uint8_t* data, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(GcMapOffset(pointer_size), data,
+ pointer_size);
}
// When building the oat need a convenient place to stuff the offset of the native GC map.
@@ -404,16 +462,46 @@
void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static MemberOffset NativeMethodOffset() {
- return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
+ static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size);
}
- const void* GetNativeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtr<const void*>(NativeMethodOffset());
+ static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetNativeMethod(const void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
+ }
+
+ static MemberOffset GcMapOffset(size_t pointer_size) {
+ return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+ PtrSizedFields, gc_map_) / sizeof(void*) * pointer_size);
+ }
+
+ void* GetEntryPointFromJni() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ return GetEntryPointFromJniPtrSize(sizeof(void*));
+ }
+ ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtrWithSize<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
+ }
+
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckObjectSizeEqualsMirrorSize();
+ SetEntryPointFromJniPtrSize<kVerifyFlags>(entrypoint, sizeof(void*));
+ }
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtrWithSize<false, true, kVerifyFlags>(
+ EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
+ }
static MemberOffset GetMethodIndexOffset() {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
@@ -430,6 +518,8 @@
bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#ifdef NDEBUG
uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
@@ -446,7 +536,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Converts a dex PC to a native PC.
- uintptr_t ToNativeQuickPc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
@@ -480,6 +571,8 @@
ALWAYS_INLINE const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
const DexFile::CodeItem* GetCodeItem() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -501,12 +594,25 @@
const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this has caused a
+ // large number of bugs at call sites.
+ mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static size_t SizeWithoutPointerFields() {
+ return sizeof(ArtMethod) - sizeof(PtrSizedFields);
+ }
+
+ // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
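+ // The fixed-size prefix is followed by one slot per PtrSizedFields member, each sized by the
+ // requested pointer size rather than the host's.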
+ static size_t InstanceSize(size_t pointer_size) {
+ return SizeWithoutPointerFields() + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
+ }
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
@@ -521,26 +627,6 @@
// Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
HeapReference<ObjectArray<String>> dex_cache_strings_;
- // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
- // compiled code.
- uint64_t entry_point_from_interpreter_;
-
- // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
- uint64_t entry_point_from_jni_;
-
- // Method dispatch from portable compiled code invokes this pointer which may cause bridging into
- // quick compiled code or the interpreter.
- uint64_t entry_point_from_portable_compiled_code_;
-
- // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
- // portable compiled code or the interpreter.
- uint64_t entry_point_from_quick_compiled_code_;
-
- // Pointer to a data structure created by the compiler and used by the garbage collector to
- // determine which registers hold live references to objects within the heap. Keyed by native PC
- // offsets for the quick compiler and dex PCs for the portable.
- uint64_t gc_map_;
-
// Access flags; low 16 bits are defined by spec.
uint32_t access_flags_;
@@ -559,12 +645,45 @@
// ifTable.
uint32_t method_index_;
+ // Add alignment word here if necessary.
+
+ // Must be the last fields in the method.
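+ // Keeping them last lets InstanceSize(pointer_size) shrink or grow this tail for 32-bit and
+ // 64-bit images without disturbing the offsets of the fields above.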
+ struct PACKED(4) PtrSizedFields {
+ // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
+ // compiled code.
+ void* entry_point_from_interpreter_;
+
+ // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
+ void* entry_point_from_jni_;
+
+ // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
+ // portable compiled code or the interpreter.
+ void* entry_point_from_quick_compiled_code_;
+
+ // Pointer to a data structure created by the compiler and used by the garbage collector to
+ // determine which registers hold live references to objects within the heap. Keyed by native PC
+ // offsets for the quick compiler and dex PCs for the portable compiler.
+ void* gc_map_;
+
+ // Method dispatch from portable compiled code invokes this pointer which may cause bridging
+ // into quick compiled code or the interpreter. Last to simplify entrypoint logic.
+ void* entry_point_from_portable_compiled_code_;
+ } ptr_sized_fields_;
+
static GcRoot<Class> java_lang_reflect_ArtMethod_;
private:
- ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<Class>* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static size_t PtrSizedFieldsOffset() {
+ return OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_);
+ }
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 3d3ae16..a69d37e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -112,20 +112,21 @@
template<VerifyObjectFlags kVerifyFlags>
inline ArtMethod* Class::GetVirtualMethod(uint32_t i) {
- DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
- return GetVirtualMethods()->Get(i);
+ DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>())
+ << PrettyClass(this) << " status=" << GetStatus();
+ return GetVirtualMethods()->GetWithoutChecks(i);
}
inline ArtMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) {
DCHECK(IsLoaded() || IsErroneous());
- return GetVirtualMethods()->Get(i);
+ return GetVirtualMethods()->GetWithoutChecks(i);
}
inline void Class::SetVirtualMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* virtual_methods =
GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_));
- virtual_methods->Set<false>(i, f);
+ virtual_methods->SetWithoutChecks<false>(i, f);
}
inline ObjectArray<ArtMethod>* Class::GetVTable() {
@@ -142,14 +143,6 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
}
-inline ObjectArray<ArtMethod>* Class::GetImTable() {
- return GetFieldObject<ObjectArray<ArtMethod>>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_));
-}
-
-inline void Class::SetImTable(ObjectArray<ArtMethod>* new_imtable) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), new_imtable);
-}
-
inline ArtMethod* Class::GetEmbeddedImTableEntry(uint32_t i) {
uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry);
return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset));
@@ -158,7 +151,6 @@
inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) {
uint32_t offset = EmbeddedImTableOffset().Uint32Value() + i * sizeof(ImTableEntry);
SetFieldObject<false>(MemberOffset(offset), method);
- CHECK(method == GetImTable()->Get(i));
}
inline bool Class::HasVTable() {
@@ -287,7 +279,7 @@
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
inline bool Class::ResolvedMethodAccessTest(Class* access_to, ArtMethod* method,
uint32_t method_idx, DexCache* dex_cache) {
- COMPILE_ASSERT(throw_on_failure || throw_invoke_type == kStatic, non_default_throw_invoke_type);
+ static_assert(throw_on_failure || throw_invoke_type == kStatic, "Non-default throw invoke type");
DCHECK_EQ(use_referrers_cache, dex_cache == nullptr);
if (UNLIKELY(!this->CanAccess(access_to))) {
// The referrer class can't access the method's declaring class but may still be able
@@ -410,6 +402,36 @@
return GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
}
+inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
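+ // Reference instance fields are laid out first in each class, immediately after the
+ // superclass's instance data (rounded up to reference alignment). With no superclass
+ // (java.lang.Object) the only reference is the class pointer at ClassOffset().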
+ Class* super_class = GetSuperClass();
+ return (super_class != nullptr)
+ ? MemberOffset(RoundUp(super_class->GetObjectSize(),
+ sizeof(mirror::HeapReference<mirror::Object>)))
+ : ClassOffset();
+}
+
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffset() {
+ DCHECK(IsResolved());
+ uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ // Static fields come after the embedded tables.
+ base = mirror::Class::ComputeClassSize(true, GetEmbeddedVTableLength(),
+ 0, 0, 0, 0, 0);
+ }
+ return MemberOffset(base);
+}
+
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking() {
+ DCHECK(IsLoaded());
+ uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ // Static fields come after the embedded tables.
+ base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
+ 0, 0, 0, 0, 0);
+ }
+ return MemberOffset(base);
+}
+
inline void Class::SetIFields(ObjectArray<ArtField>* new_ifields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(NULL == GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)));
@@ -615,12 +637,11 @@
template <bool kVisitClass, typename Visitor>
inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
- // Visit the static fields first so that we don't overwrite the SFields / IFields instance
- // fields.
VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
- if (!IsTemp()) {
+ if (!IsTemp() && IsResolved()) {
// Temp classes don't ever populate imt/vtable or static fields and they are not even
- // allocated with the right size for those.
+ // allocated with the right size for those. Also, unresolved classes don't have fields
+ // linked yet.
VisitStaticFieldsReferences<kVisitClass>(this, visitor);
if (ShouldHaveEmbeddedImtAndVTable()) {
VisitEmbeddedImtAndVTable(visitor);
@@ -761,6 +782,24 @@
}
}
+inline uint32_t Class::NumDirectInterfaces() {
+ if (IsPrimitive()) {
+ return 0;
+ } else if (IsArrayClass()) {
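+ // Array classes implement exactly two interfaces: Cloneable and Serializable.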
+ return 2;
+ } else if (IsProxyClass()) {
+ mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces();
+ return interfaces != nullptr ? interfaces->GetLength() : 0;
+ } else {
+ const DexFile::TypeList* interfaces = GetInterfaceTypeList();
+ if (interfaces == nullptr) {
+ return 0;
+ } else {
+ return interfaces->Size();
+ }
+ }
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 6df7204..5665059 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -117,7 +117,7 @@
self->SetException(gc_safe_throw_location, old_exception.Get());
self->SetExceptionReportedToInstrumentation(is_exception_reported);
}
- COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32);
+ static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
SetField32Volatile<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
} else {
@@ -489,7 +489,10 @@
if (GetDexCache() == dex_cache) {
for (size_t i = 0; i < NumVirtualMethods(); ++i) {
ArtMethod* method = GetVirtualMethod(i);
- if (method->GetDexMethodIndex() == dex_method_idx) {
+ if (method->GetDexMethodIndex() == dex_method_idx &&
+ // A miranda method may have a different DexCache and is always created by linking,
+ // never *declared* in the class.
+ !method->IsMiranda()) {
return method;
}
}
@@ -625,8 +628,8 @@
HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
@@ -649,8 +652,8 @@
HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = FindStaticField(self, interface, dex_cache, dex_field_idx);
if (f != nullptr) {
return f;
@@ -677,8 +680,8 @@
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = interface->FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
@@ -738,24 +741,6 @@
return &GetDexFile().GetClassDef(class_def_idx);
}
-uint32_t Class::NumDirectInterfaces() {
- if (IsPrimitive()) {
- return 0;
- } else if (IsArrayClass()) {
- return 2;
- } else if (IsProxyClass()) {
- mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces();
- return interfaces != nullptr ? interfaces->GetLength() : 0;
- } else {
- const DexFile::TypeList* interfaces = GetInterfaceTypeList();
- if (interfaces == nullptr) {
- return 0;
- } else {
- return interfaces->Size();
- }
- }
-}
-
uint16_t Class::GetDirectInterfaceTypeIdx(uint32_t idx) {
DCHECK(!IsPrimitive());
DCHECK(!IsArrayClass());
@@ -817,22 +802,21 @@
return GetDexFile().GetInterfacesList(*class_def);
}
-void Class::PopulateEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ObjectArray<ArtMethod>* table = GetImTable();
- if (table != nullptr) {
- for (uint32_t i = 0; i < kImtSize; i++) {
- SetEmbeddedImTableEntry(i, table->Get(i));
- }
+void Class::PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope) {
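+ // The IMT entries arrive via a handle scope rather than a raw ObjectArray so they stay valid
+ // across any GC that moves objects while the new class object is being set up.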
+ for (uint32_t i = 0; i < kImtSize; i++) {
+ // Replace null with conflict.
+ mirror::Object* obj = imt_handle_scope->GetReference(i);
+ DCHECK(obj != nullptr);
+ SetEmbeddedImTableEntry(i, obj->AsArtMethod());
}
- table = GetVTableDuringLinking();
+ ObjectArray<ArtMethod>* table = GetVTableDuringLinking();
CHECK(table != nullptr) << PrettyClass(this);
SetEmbeddedVTableLength(table->GetLength());
for (int32_t i = 0; i < table->GetLength(); i++) {
- SetEmbeddedVTableEntry(i, table->Get(i));
+ SetEmbeddedVTableEntry(i, table->GetWithoutChecks(i));
}
- SetImTable(nullptr);
// Keep java.lang.Object class's vtable around for since it's easier
// to be reused by array classes during their linking.
if (!IsObjectClass()) {
@@ -844,9 +828,10 @@
class CopyClassVisitor {
public:
explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig,
- size_t new_length, size_t copy_bytes)
+ size_t new_length, size_t copy_bytes,
+ StackHandleScope<mirror::Class::kImtSize>* imt_handle_scope)
: self_(self), orig_(orig), new_length_(new_length),
- copy_bytes_(copy_bytes) {
+ copy_bytes_(copy_bytes), imt_handle_scope_(imt_handle_scope) {
}
void operator()(Object* obj, size_t usable_size) const
@@ -855,7 +840,7 @@
mirror::Class* new_class_obj = obj->AsClass();
mirror::Object::CopyObject(self_, new_class_obj, orig_->Get(), copy_bytes_);
new_class_obj->SetStatus(Class::kStatusResolving, self_);
- new_class_obj->PopulateEmbeddedImtAndVTable();
+ new_class_obj->PopulateEmbeddedImtAndVTable(imt_handle_scope_);
new_class_obj->SetClassSize(new_length_);
}
@@ -864,10 +849,12 @@
Handle<mirror::Class>* const orig_;
const size_t new_length_;
const size_t copy_bytes_;
+ StackHandleScope<mirror::Class::kImtSize>* const imt_handle_scope_;
DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
};
-Class* Class::CopyOf(Thread* self, int32_t new_length) {
+Class* Class::CopyOf(Thread* self, int32_t new_length,
+ StackHandleScope<kImtSize>* imt_handle_scope) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
@@ -875,17 +862,15 @@
gc::Heap* heap = Runtime::Current()->GetHeap();
// The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
// to skip copying the tail part that we will overwrite here.
- CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class));
-
+ CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt_handle_scope);
mirror::Object* new_class =
kMovingClasses
? heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor)
: heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
if (UNLIKELY(new_class == nullptr)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
- return NULL;
+ return nullptr;
}
-
return new_class->AsClass();
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 4383993..82425b5 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -27,6 +27,7 @@
#include "object_callbacks.h"
#include "primitive.h"
#include "read_barrier_option.h"
+#include "utils.h"
namespace art {
@@ -35,6 +36,7 @@
template<class T> class Handle;
class Signature;
class StringPiece;
+template<size_t kNumReferences> class PACKED(4) StackHandleScope;
namespace mirror {
@@ -129,7 +131,7 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32);
+ static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
return static_cast<Status>(
GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
}
@@ -195,46 +197,46 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if the class is an interface.
- bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccInterface) != 0;
}
// Returns true if the class is declared public.
- bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
// Returns true if the class is declared final.
- bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
}
- void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
}
// Returns true if the class is abstract.
- bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAbstract) != 0;
}
// Returns true if the class is an annotation.
- bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAnnotation) != 0;
}
// Returns true if the class is synthetic.
- bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
@@ -526,6 +528,13 @@
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
+ void SetObjectSizeWithoutChecks(uint32_t new_object_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Not called within a transaction.
+ return SetField32<false, false, kVerifyNone>(
+ OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
+ }
+
// Returns true if this class is in the same packages as that class.
bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -591,7 +600,7 @@
// downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
- inline bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(src != NULL);
if (this == src) {
// Can always assign to things of the same type.
@@ -608,7 +617,7 @@
}
}
- Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
@@ -647,12 +656,13 @@
void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<ArtMethod>* GetDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ObjectArray<ArtMethod>* GetDirectMethods()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ArtMethod* GetDirectMethod(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArtMethod* GetDirectMethod(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -661,13 +671,14 @@
uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ObjectArray<ArtMethod>* GetVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ObjectArray<ArtMethod>* GetVirtualMethods()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods)
+ ALWAYS_INLINE void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods.
- uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* GetVirtualMethod(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -677,9 +688,10 @@
void SetVirtualMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<ArtMethod>* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<ArtMethod>* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE ObjectArray<ArtMethod>* GetVTableDuringLinking()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetVTable(ObjectArray<ArtMethod>* new_vtable)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -688,13 +700,6 @@
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
}
- void SetImTable(ObjectArray<ArtMethod>* new_imtable)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static MemberOffset ImTableOffset() {
- return OFFSET_OF_OBJECT_MEMBER(Class, imtable_);
- }
-
static MemberOffset EmbeddedImTableOffset() {
return MemberOffset(sizeof(Class));
}
@@ -704,7 +709,7 @@
}
static MemberOffset EmbeddedVTableOffset() {
- return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry) + sizeof(int32_t));
+ return MemberOffset(sizeof(Class) + kImtSize * sizeof(ImTableEntry) + sizeof(int32_t));
}
bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -729,7 +734,8 @@
void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PopulateEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PopulateEmbeddedImtAndVTable(StackHandleScope<kImtSize>* imt_handle_scope)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
@@ -797,11 +803,11 @@
ArtMethod* FindClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE int32_t GetIfTableCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
ObjectArray<ArtField>* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -838,6 +844,9 @@
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Get the offset of the first reference instance field. Other reference instance fields follow.
+ MemberOffset GetFirstReferenceInstanceFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
@@ -854,6 +863,13 @@
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
}
+ // Get the offset of the first reference static field. Other reference static fields follow.
+ MemberOffset GetFirstReferenceStaticFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // As above, but valid while the class is still being linked (before it is resolved).
+ MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Gets the static fields of the class.
ObjectArray<ArtField>* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -969,7 +985,7 @@
const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -989,7 +1005,7 @@
void AssertInitializedOrInitializingInThread(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* CopyOf(Thread* self, int32_t new_length)
+ Class* CopyOf(Thread* self, int32_t new_length, StackHandleScope<kImtSize>* imt_handle_scope)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// For proxy class only.
@@ -1038,8 +1054,6 @@
void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<ArtMethod>* GetImTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// defining class loader, or NULL for the "bootstrap" system loader
HeapReference<ClassLoader> class_loader_;
@@ -1079,9 +1093,6 @@
// methods for the methods in the interface.
HeapReference<IfTable> iftable_;
- // Interface method table (imt), for quick "invoke-interface".
- HeapReference<ObjectArray<ArtMethod>> imtable_;
-
// Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
HeapReference<String> name_;
@@ -1150,11 +1161,11 @@
// The following data exist in real class objects.
// Embedded Imtable, for class object that's not an interface, fixed size.
- ImTableEntry embedded_imtable_[0];
+ // ImTableEntry embedded_imtable_[0];
// Embedded Vtable, for class object that's not an interface, variable size.
- VTableEntry embedded_vtable_[0];
+ // VTableEntry embedded_vtable_[0];
// Static fields, variable size.
- uint32_t fields_[0];
+ // uint32_t fields_[0];
// java.lang.Class
static GcRoot<Class> java_lang_Class_;
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index 3f20bf4..d1309d2 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -27,7 +27,7 @@
DCHECK(interface->IsInterface());
const size_t idx = i * kMax + kInterface;
DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
- Set<false>(idx, interface);
+ SetWithoutChecks<false>(idx, interface);
}
} // namespace mirror
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 5feb602..4d899d2 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,13 +25,14 @@
class MANAGED IfTable FINAL : public ObjectArray<Object> {
public:
- Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Class* interface = Get((i * kMax) + kInterface)->AsClass();
+ ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
DCHECK(interface != NULL);
return interface;
}
- void SetInterface(int32_t i, Class* interface) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* method_array =
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index b89da9d..121947d 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -121,7 +121,7 @@
OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_));
#else
LOG(FATAL) << "Unreachable";
- return nullptr;
+ UNREACHABLE();
#endif
}
@@ -134,6 +134,8 @@
OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_), rb_ptr);
#else
LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ UNUSED(rb_ptr);
#endif
}
@@ -155,8 +157,9 @@
DCHECK_EQ(new_ref.reference_, atomic_rb_ptr->LoadRelaxed());
return true;
#else
+ UNUSED(expected_rb_ptr, rb_ptr);
LOG(FATAL) << "Unreachable";
- return false;
+ UNREACHABLE();
#endif
}
@@ -166,13 +169,12 @@
DCHECK(obj->GetReadBarrierPointer() == nullptr)
<< "Bad Baker pointer: obj=" << reinterpret_cast<void*>(obj)
<< " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
- } else if (kUseBrooksReadBarrier) {
+ } else {
+ CHECK(kUseBrooksReadBarrier);
Object* obj = const_cast<Object*>(this);
DCHECK_EQ(obj, obj->GetReadBarrierPointer())
<< "Bad Brooks pointer: obj=" << reinterpret_cast<void*>(obj)
<< " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
- } else {
- LOG(FATAL) << "Unreachable";
}
}
@@ -402,8 +404,7 @@
}
DCHECK_GE(result, sizeof(Object))
<< " class=" << PrettyTypeOf(GetClass<kNewFlags, kReadBarrierOption>());
- DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
- DCHECK(!(IsArtMethod<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtMethod));
+ DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
return result;
}
@@ -909,13 +910,19 @@
klass = kIsStatic ? nullptr : klass->GetSuperClass()) {
size_t num_reference_fields =
kIsStatic ? klass->NumReferenceStaticFields() : klass->NumReferenceInstanceFields();
+ if (num_reference_fields == 0u) {
+ continue;
+ }
+ MemberOffset field_offset = kIsStatic
+ ? klass->GetFirstReferenceStaticFieldOffset()
+ : klass->GetFirstReferenceInstanceFieldOffset();
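+ // Reference fields are contiguous, so step through them by reference size instead of loading
+ // each ArtField to ask for its offset.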
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = kIsStatic ? klass->GetStaticField(i) : klass->GetInstanceField(i);
- MemberOffset field_offset = field->GetOffset();
// TODO: Do a simpler check?
if (kVisitClass || field_offset.Uint32Value() != ClassOffset().Uint32Value()) {
visitor(this, field_offset, kIsStatic);
}
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
}
}
@@ -954,7 +961,6 @@
}
}
}
-
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 9578c97..fa1f226 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -135,11 +135,11 @@
return copy;
}
-int32_t Object::GenerateIdentityHashCode() {
- static AtomicInteger seed(987654321 + std::time(nullptr));
- int32_t expected_value, new_value;
+uint32_t Object::GenerateIdentityHashCode() {
+ static Atomic<uint32_t> seed(987654321U + std::time(nullptr));
+ uint32_t expected_value, new_value;
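+ // Advance a linear congruential generator (the classic C library rand() constants) until the
+ // bits under LockWord::kHashMask are non-zero and the compare-and-swap publishes the new seed.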
do {
- expected_value = static_cast<uint32_t>(seed.LoadRelaxed());
+ expected_value = seed.LoadRelaxed();
new_value = expected_value * 1103515245 + 12345;
} while ((expected_value & LockWord::kHashMask) == 0 ||
!seed.CompareExchangeWeakRelaxed(expected_value, new_value));
@@ -200,10 +200,11 @@
for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
ObjectArray<ArtField>* fields = cur->GetIFields();
if (fields != NULL) {
- size_t num_ref_ifields = cur->NumReferenceInstanceFields();
- for (size_t i = 0; i < num_ref_ifields; ++i) {
+ size_t num_ifields = fields->GetLength();
+ for (size_t i = 0; i < num_ifields; ++i) {
ArtField* field = fields->Get(i);
if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
+ CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
StackHandleScope<1> hs(Thread::Current());
FieldHelper fh(hs.NewHandle(field));
CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass()));
@@ -219,10 +220,11 @@
if (IsClass()) {
ObjectArray<ArtField>* fields = AsClass()->GetSFields();
if (fields != NULL) {
- size_t num_ref_sfields = AsClass()->NumReferenceStaticFields();
- for (size_t i = 0; i < num_ref_sfields; ++i) {
+ size_t num_sfields = fields->GetLength();
+ for (size_t i = 0; i < num_sfields; ++i) {
ArtField* field = fields->Get(i);
if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
+ CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
StackHandleScope<1> hs(Thread::Current());
FieldHelper fh(hs.NewHandle(field));
CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass()));
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 1bbcf8e..221feca 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -392,15 +392,26 @@
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr(MemberOffset field_offset, T new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
- SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int32_t>(new_value));
-#else
- SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int64_t>(new_value));
-#endif
+ SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, new_value, sizeof(void*));
}
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
+ ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
+ size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+ if (pointer_size == 4) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
+ DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we don't lose any non-zero bits.
+ SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, static_cast<int32_t>(ptr));
+ } else {
+ SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
+ }
+ }
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -413,11 +424,21 @@
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
- return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
-#else
- return reinterpret_cast<T>(GetField64<kVerifyFlags, kIsVolatile>(field_offset));
-#endif
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
+ }
+
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+ if (pointer_size == 4) {
+ return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
+ } else {
+ int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
+ // Check that we don't lose any non-zero bits.
+ DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v);
+ return reinterpret_cast<T>(v);
+ }
}
// TODO: Fixme when anotatalysis works with visitors.
@@ -451,7 +472,7 @@
}
// Generate an identity hash code.
- static int32_t GenerateIdentityHashCode();
+ static uint32_t GenerateIdentityHashCode();
// A utility function that copies an object in a read barrier and
// write barrier-aware way. This is internally used by Clone() and
@@ -478,6 +499,7 @@
friend struct art::ObjectOffsets; // for verifying offset information
friend class CopyObjectVisitor; // for CopyObject().
friend class CopyClassVisitor; // for CopyObject().
+ DISALLOW_ALLOCATION();
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 7012b19..6404faf 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -45,11 +45,11 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
- void Set(int32_t i, T* object) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void Set(int32_t i, T* object) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
+ ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// Set element without bound and element type checks, to be used in limited
// circumstances, such as during boot image writing.
@@ -57,15 +57,15 @@
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetWithoutChecks(int32_t i, T* object) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
+ ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetWithoutChecksAndWriteBarrier(int32_t i, T* object) ALWAYS_INLINE
+ ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
NO_THREAD_SAFETY_ANALYSIS;
- T* GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index a2a0626..4402031 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -78,26 +78,14 @@
TEST_F(ObjectTest, Constants) {
EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
-}
-
-// Keep the assembly code constats in sync.
-TEST_F(ObjectTest, AsmConstants) {
- EXPECT_EQ(CLASS_OFFSET, Object::ClassOffset().Int32Value());
- EXPECT_EQ(LOCK_WORD_OFFSET, Object::MonitorOffset().Int32Value());
-
- EXPECT_EQ(CLASS_COMPONENT_TYPE_OFFSET, Class::ComponentTypeOffset().Int32Value());
-
- EXPECT_EQ(ARRAY_LENGTH_OFFSET, Array::LengthOffset().Int32Value());
- EXPECT_EQ(OBJECT_ARRAY_DATA_OFFSET, Array::DataOffset(sizeof(HeapReference<Object>)).Int32Value());
-
- EXPECT_EQ(STRING_VALUE_OFFSET, String::ValueOffset().Int32Value());
- EXPECT_EQ(STRING_COUNT_OFFSET, String::CountOffset().Int32Value());
- EXPECT_EQ(STRING_OFFSET_OFFSET, String::OffsetOffset().Int32Value());
- EXPECT_EQ(STRING_DATA_OFFSET, Array::DataOffset(sizeof(uint16_t)).Int32Value());
-
- EXPECT_EQ(METHOD_DEX_CACHE_METHODS_OFFSET, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
- EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET, ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value());
- EXPECT_EQ(METHOD_QUICK_CODE_OFFSET, ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
+ ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
+ ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
+ EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
}
TEST_F(ObjectTest, IsInSamePackage) {
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 64408a6..30b8aa3 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -109,6 +109,14 @@
int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetOffset(int32_t new_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Offset is only used during testing so use non-transactional mode.
+ DCHECK_LE(0, new_offset);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset);
+ }
+
+ void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!java_lang_String_.IsNull());
return java_lang_String_.Read();
@@ -134,21 +142,12 @@
SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
}
- void SetOffset(int32_t new_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Offset is only used during testing so use non-transactional mode.
- DCHECK_LE(0, new_offset);
- DCHECK_GE(GetLength(), new_offset);
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset);
- }
-
static String* Alloc(Thread* self, int32_t utf16_length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static String* Alloc(Thread* self, Handle<CharArray> array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
HeapReference<CharArray> array_;
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 6123934..233267b 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -36,6 +36,8 @@
namespace art {
+static constexpr uint64_t kLongWaitMs = 100;
+
/*
* Every Object has a monitor associated with it, but not every Object is actually locked. Even
* the ones that are locked do not need a full-fledged monitor until a) there is actual contention
@@ -242,6 +244,7 @@
mirror::ArtMethod* owners_method = locking_method_;
uint32_t owners_dex_pc = locking_dex_pc_;
// Do this before releasing the lock so that we don't get deflated.
+ size_t num_waiters = num_waiters_;
++num_waiters_;
monitor_lock_.Unlock(self); // Let go of locks in order.
self->SetMonitorEnterObject(GetObject());
@@ -263,6 +266,12 @@
const char* owners_filename;
uint32_t owners_line_number;
TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
+ if (wait_ms > kLongWaitMs && owners_method != nullptr) {
+ LOG(WARNING) << "Long monitor contention event with owner method="
+ << PrettyMethod(owners_method) << " from " << owners_filename << ":"
+ << owners_line_number << " waiters=" << num_waiters << " for "
+ << PrettyDuration(MsToNs(wait_ms));
+ }
LogContentionEvent(self, wait_ms, sample_percent, owners_filename, owners_line_number);
}
}
@@ -543,7 +552,7 @@
thread->SetWaitNext(nullptr);
// Check to see if the thread is still waiting.
- MutexLock mu(self, *thread->GetWaitMutex());
+ MutexLock wait_mu(self, *thread->GetWaitMutex());
if (thread->GetWaitMonitor() != nullptr) {
thread->GetWaitConditionVariable()->Signal(self);
return;
@@ -646,8 +655,6 @@
Thread* owner;
{
ScopedThreadStateChange tsc(self, kBlocked);
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
}
if (owner != nullptr) {
@@ -877,7 +884,7 @@
}
default: {
LOG(FATAL) << "Unreachable";
- return ThreadList::kInvalidThreadId;
+ UNREACHABLE();
}
}
}
@@ -992,12 +999,12 @@
for (size_t i = 0; i < monitor_enter_dex_pcs.size(); ++i) {
// The verifier works in terms of the dex pcs of the monitor-enter instructions.
// We want the registers used by those instructions (so we can read the values out of them).
- uint32_t dex_pc = monitor_enter_dex_pcs[i];
- uint16_t monitor_enter_instruction = code_item->insns_[dex_pc];
+ uint32_t monitor_dex_pc = monitor_enter_dex_pcs[i];
+ uint16_t monitor_enter_instruction = code_item->insns_[monitor_dex_pc];
// Quick sanity check.
if ((monitor_enter_instruction & 0xff) != Instruction::MONITOR_ENTER) {
- LOG(FATAL) << "expected monitor-enter @" << dex_pc << "; was "
+ LOG(FATAL) << "expected monitor-enter @" << monitor_dex_pc << "; was "
<< reinterpret_cast<void*>(monitor_enter_instruction);
}
@@ -1032,7 +1039,7 @@
return true;
default:
LOG(FATAL) << "Unreachable";
- return false;
+ UNREACHABLE();
}
}
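
The monitor.cc hunks above add a fixed threshold (kLongWaitMs) and emit a one-shot warning when a contended lock acquisition takes longer than that. A minimal standalone sketch of the same pattern, with std::mutex and std::cerr standing in for ART's Monitor and LOG(WARNING) machinery (an illustration only, not ART code):

#include <chrono>
#include <iostream>
#include <mutex>

constexpr std::chrono::milliseconds kLongWaitThreshold{100};  // mirrors kLongWaitMs above

void LockWithContentionWarning(std::mutex& m, const char* where) {
  const auto start = std::chrono::steady_clock::now();
  m.lock();  // may block while another thread owns the lock
  const auto waited = std::chrono::duration_cast<std::chrono::milliseconds>(
      std::chrono::steady_clock::now() - start);
  if (waited > kLongWaitThreshold) {
    std::cerr << "Long monitor contention at " << where << ", waited "
              << waited.count() << "ms" << std::endl;
  }
}

Note also that the num_waiters snapshot earlier in the hunk is taken while monitor_lock_ is still held, so the later warning can report the waiter count without re-reading shared state after the lock has been released.
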
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 5b92093..27678dc 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -53,6 +53,7 @@
static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
+ UNUSED(self);
delete monitor;
#else
GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
@@ -61,6 +62,7 @@
static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
+ UNUSED(self);
STLDeleteElements(monitors);
#else
GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
@@ -85,6 +87,7 @@
static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
+ UNUSED(self);
return MonitorIdFromMonitor(mon);
#else
return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
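
The monitor_pool.h hunks mark self as UNUSED on the 32-bit (#ifndef __LP64__) side of each function, where only the 64-bit pool path actually consumes it. A small sketch of the idea; SKETCH_UNUSED, USE_POOL, and PoolRelease are stand-ins invented for the sketch, not ART's real macro or functions:

#include <cstddef>

#define SKETCH_UNUSED(x) static_cast<void>(x)

void PoolRelease(void* pool, char* buffer);  // hypothetical pooled-release path

void ReleaseBuffer(void* pool, char* buffer) {
#ifdef USE_POOL
  PoolRelease(pool, buffer);
#else
  SKETCH_UNUSED(pool);  // the simple path never touches the pool, so mark the parameter as used
  delete[] buffer;
#endif
}
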
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 704e041..adc7848 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -341,8 +341,7 @@
// Wake the watchdog.
{
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
+ ScopedObjectAccess soa(Thread::Current());
test->watchdog_object_.Get()->MonitorEnter(self); // Lock the object.
test->watchdog_object_.Get()->NotifyAll(self); // Wake up waiting parties.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 11e9efc..f37312e 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_DexFile.h"
+
#include <algorithm>
#include <set>
#include <fcntl.h>
@@ -43,13 +45,17 @@
#include "profiler.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "ScopedFd.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "utils.h"
#include "well_known_classes.h"
#include "zip_archive.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "ScopedFd.h"
+#pragma GCC diagnostic pop
+
namespace art {
// A smart pointer that provides read-only access to a Java string's UTF chars.
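
The pragma block added in the hunk above suppresses -Wshadow only around the ScopedFd.h include, a header ART does not own, while leaving the warning active for the rest of the translation unit. A self-contained sketch of the push/ignore/pop pattern, wrapping local code here instead of an include purely for illustration:

#include <cstdio>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Inside the push/pop region, declarations that shadow outer names no longer warn.
int ShadowedSum(int value) {
  int total = value;
  {
    int value = total + 1;  // shadows the parameter; -Wshadow would flag this
    total += value;
  }
  return total;
}
#pragma GCC diagnostic pop
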
@@ -179,9 +185,9 @@
return NULL;
}
const std::string descriptor(DotToDescriptor(class_name.c_str()));
-
+ const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
for (const DexFile* dex_file : *dex_files) {
- const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor.c_str());
+ const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor.c_str(), hash);
if (dex_class_def != nullptr) {
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -189,7 +195,7 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
- mirror::Class* result = class_linker->DefineClass(soa.Self(), descriptor.c_str(),
+ mirror::Class* result = class_linker->DefineClass(soa.Self(), descriptor.c_str(), hash,
class_loader, *dex_file, *dex_class_def);
if (result != nullptr) {
VLOG(class_linker) << "DexFile_defineClassNative returning " << result
@@ -288,9 +294,11 @@
template <const bool kVerboseLogging, const bool kReasonLogging>
static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char* filename,
- InstructionSet target_instruction_set) {
+ InstructionSet target_instruction_set,
+ bool* oat_is_pic) {
std::string error_msg;
std::unique_ptr<const OatFile> oat_file(OatFile::Open(oat_filename, oat_filename, nullptr,
+ nullptr,
false, &error_msg));
if (oat_file.get() == nullptr) {
if (kReasonLogging) {
@@ -300,6 +308,11 @@
error_msg.clear();
return kDexoptNeeded;
}
+
+ // Pass up to the caller whether this oat file is PIC.
+ // TODO: Refactor this function to be less complicated.
+ *oat_is_pic = oat_file->IsPic();
+
bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
uint32_t location_checksum = 0;
const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, nullptr,
@@ -527,10 +540,16 @@
// Let's try the cache first (we want to load from there, since that's where the relocated
// versions will be).
if (have_cache_filename && !force_system_only) {
+ bool oat_is_pic;
// We can use the dalvik-cache if we find a good file.
dalvik_cache_decision =
IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(cache_filename, filename,
- target_instruction_set);
+ target_instruction_set, &oat_is_pic);
+
+ // Apps that are compiled with --compile-pic never need to be patched by patchoat.
+ if (oat_is_pic && dalvik_cache_decision == kPatchoatNeeded) {
+ dalvik_cache_decision = kUpToDate;
+ }
// We will only return DexOptNeeded if both the cache and system return it.
if (dalvik_cache_decision != kDexoptNeeded && !require_system_version) {
CHECK(!(dalvik_cache_decision == kPatchoatNeeded && !should_relocate_if_possible))
@@ -540,12 +559,18 @@
// We couldn't find one that's easy. We should now try the system.
}
+ bool oat_is_pic;
jbyte system_decision =
IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(odex_filename, filename,
- target_instruction_set);
+ target_instruction_set, &oat_is_pic);
CHECK(!(system_decision == kPatchoatNeeded && !should_relocate_if_possible))
<< "May not return PatchoatNeeded when patching is disabled.";
+ // Apps that are compiled with --compile-pic never need to be patched by patchoat.
+ if (oat_is_pic && system_decision == kPatchoatNeeded) {
+ system_decision = kUpToDate;
+ }
+
if (require_system_version && system_decision == kPatchoatNeeded
&& dalvik_cache_decision == kUpToDate) {
// We have a version from system relocated to the cache. Return it.
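
Besides the PIC handling, the dalvik_system_DexFile.cc hunks hoist ComputeModifiedUtf8Hash(descriptor) out of the per-dex-file loop and thread the result through FindClassDef and DefineClass, so the descriptor is hashed once per lookup rather than once per file. A rough standalone analogue of that shape; std::hash and the FakeDexFile container below are stand-ins, not ART types:

#include <cstddef>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// Stand-in for a dex file's class_def table: (descriptor, precomputed hash) pairs.
struct FakeDexFile {
  std::vector<std::pair<std::string, size_t>> class_defs;

  bool FindClassDef(const std::string& descriptor, size_t hash) const {
    for (const auto& entry : class_defs) {
      // Cheap hash comparison first; full string comparison only on a hash match.
      if (entry.second == hash && entry.first == descriptor) {
        return true;
      }
    }
    return false;
  }
};

const FakeDexFile* FindDefiningFile(const std::vector<FakeDexFile>& files,
                                    const std::string& descriptor) {
  const size_t hash = std::hash<std::string>{}(descriptor);  // computed once, outside the loop
  for (const FakeDexFile& file : files) {
    if (file.FindClassDef(descriptor, hash)) {
      return &file;
    }
  }
  return nullptr;
}
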
diff --git a/runtime/log_severity.h b/runtime/native/dalvik_system_DexFile.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/dalvik_system_DexFile.h
index 31682df..487df05 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/dalvik_system_DexFile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_dalvik_system_DexFile(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
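
This is the first of a long series of small headers introduced below; each one exposes a single register_<class>(JNIEnv*) entry point so the code that registers all runtime natives can include a narrow header instead of declaring the function ad hoc. A hedged sketch of what such a registration function typically looks like in plain JNI; the class, method name, signature, and native body are invented for the sketch, and ART's real implementation goes through its own internal registration helpers:

#include <jni.h>

namespace art {

// Hypothetical native method body, just to have something to register.
static jint Example_nativeAnswer(JNIEnv*, jclass) {
  return 42;
}

// General shape of a register_* function in plain JNI.
void register_example_natives(JNIEnv* env) {
  static const JNINativeMethod gMethods[] = {
      {"nativeAnswer", "()I", reinterpret_cast<void*>(Example_nativeAnswer)},
  };
  jclass clazz = env->FindClass("com/example/Example");
  if (clazz != nullptr) {
    env->RegisterNatives(clazz, gMethods,
                         static_cast<jint>(sizeof(gMethods) / sizeof(gMethods[0])));
  }
}

}  // namespace art
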
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index ceff206..6c82eb2 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_VMDebug.h"
+
#include <string.h>
#include <unistd.h>
diff --git a/runtime/log_severity.h b/runtime/native/dalvik_system_VMDebug.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/dalvik_system_VMDebug.h
index 31682df..b7eb8a8 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/dalvik_system_VMDebug.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_dalvik_system_VMDebug(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index ec7d82d..d40d64b 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -14,9 +14,17 @@
* limitations under the License.
*/
-#include <limits.h>
+#include "dalvik_system_VMRuntime.h"
-#include "ScopedUtfChars.h"
+#include <limits.h>
+#include <ScopedUtfChars.h>
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "toStringArray.h"
+#pragma GCC diagnostic pop
+
+#include "arch/instruction_set.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "debugger.h"
@@ -26,7 +34,6 @@
#include "gc/heap.h"
#include "gc/space/dlmalloc_space.h"
#include "gc/space/image_space.h"
-#include "instruction_set.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mirror/art_method-inl.h"
@@ -38,7 +45,6 @@
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "thread_list.h"
-#include "toStringArray.h"
namespace art {
@@ -166,13 +172,13 @@
return env->NewStringUTF(isa_string);
}
-static jboolean VMRuntime_is64Bit(JNIEnv* env, jobject) {
+static jboolean VMRuntime_is64Bit(JNIEnv*, jobject) {
bool is64BitMode = (sizeof(void*) == sizeof(uint64_t));
return is64BitMode ? JNI_TRUE : JNI_FALSE;
}
static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
- return Runtime::Current()->GetJavaVM()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
+ return down_cast<JNIEnvExt*>(env)->vm->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sdk_version) {
@@ -201,9 +207,10 @@
Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
}
-static void VMRuntime_updateProcessState(JNIEnv* env, jobject, jint process_state) {
- Runtime::Current()->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
- Runtime::Current()->UpdateProfilerState(process_state);
+static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
+ Runtime* runtime = Runtime::Current();
+ runtime->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
+ runtime->UpdateProfilerState(process_state);
}
static void VMRuntime_trimHeap(JNIEnv*, jobject) {
@@ -255,7 +262,7 @@
if (class_name[1] == '\0') {
klass = linker->FindPrimitiveClass(class_name[0]);
} else {
- klass = linker->LookupClass(self, class_name, NULL);
+ klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), NULL);
}
if (klass == NULL) {
return;
@@ -326,6 +333,7 @@
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
+ UNREACHABLE();
}
if (method == NULL) {
return;
@@ -384,26 +392,26 @@
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != NULL);
mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
- for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
- mirror::String* string = dex_cache->GetResolvedString(i);
+ for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
+ mirror::String* string = dex_cache->GetResolvedString(j);
if (string != NULL) {
filled->num_strings++;
}
}
- for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- mirror::Class* klass = dex_cache->GetResolvedType(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
+ mirror::Class* klass = dex_cache->GetResolvedType(j);
if (klass != NULL) {
filled->num_types++;
}
}
- for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- mirror::ArtField* field = dex_cache->GetResolvedField(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
+ mirror::ArtField* field = dex_cache->GetResolvedField(j);
if (field != NULL) {
filled->num_fields++;
}
}
- for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
+ mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j);
if (method != NULL) {
filled->num_methods++;
}
@@ -448,14 +456,14 @@
Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
if (kPreloadDexCachesStrings) {
- for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
- PreloadDexCachesResolveString(dex_cache, i, strings);
+ for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
+ PreloadDexCachesResolveString(dex_cache, j, strings);
}
}
if (kPreloadDexCachesTypes) {
- for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), i);
+ for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
+ PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), j);
}
}
@@ -513,8 +521,9 @@
* for ART.
*/
static void VMRuntime_registerAppInfo(JNIEnv* env, jclass, jstring pkgName,
- jstring appDir, jstring procName) {
- const char *pkgNameChars = env->GetStringUTFChars(pkgName, NULL);
+ jstring appDir ATTRIBUTE_UNUSED,
+ jstring procName ATTRIBUTE_UNUSED) {
+ const char *pkgNameChars = env->GetStringUTFChars(pkgName, nullptr);
std::string profileFile = StringPrintf("/data/dalvik-cache/profiles/%s", pkgNameChars);
Runtime::Current()->StartProfiler(profileFile.c_str());
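
Several recurring cleanups land in dalvik_system_VMRuntime.cc: inner loop counters are renamed from i to j so they no longer shadow the outer index, unused parameters are either left unnamed or tagged ATTRIBUTE_UNUSED, and dead code after LOG(FATAL) becomes UNREACHABLE(). A compact sketch of the LOG(FATAL) plus UNREACHABLE() pairing, with simplified stand-in macros (an assumption; ART's real macros carry more machinery):

#include <cstdio>
#include <cstdlib>

// Simplified stand-ins for ART's LOG(FATAL) and UNREACHABLE().
#define SKETCH_FATAL(msg) do { std::fprintf(stderr, "FATAL: %s\n", msg); std::abort(); } while (false)
#define SKETCH_UNREACHABLE() __builtin_unreachable()

// Every case either returns or aborts. Marking the default branch unreachable
// documents that fact and removes the placeholder return value that the old
// code carried after LOG(FATAL), which could otherwise mask bugs.
int ComponentSize(char primitive_kind) {
  switch (primitive_kind) {
    case 'B': return 1;  // byte
    case 'I': return 4;  // int
    case 'J': return 8;  // long
    default:
      SKETCH_FATAL("unknown primitive kind");
      SKETCH_UNREACHABLE();
  }
}
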
diff --git a/runtime/log_severity.h b/runtime/native/dalvik_system_VMRuntime.h
similarity index 63%
rename from runtime/log_severity.h
rename to runtime/native/dalvik_system_VMRuntime.h
index 31682df..795caa5 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/dalvik_system_VMRuntime.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_dalvik_system_VMRuntime(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index eef1c46..2cdc68f 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_VMStack.h"
+
#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "mirror/art_method-inl.h"
@@ -36,12 +38,7 @@
soa.Self()->TransitionFromRunnableToSuspended(kNative);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
- Thread* thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
if (thread != nullptr) {
// Must be runnable to create returned array.
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
@@ -87,8 +84,10 @@
static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap,
jobject javaSystem) {
struct ClosestUserClassLoaderVisitor : public StackVisitor {
- ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap, mirror::Object* system)
- : StackVisitor(thread, NULL), bootstrap(bootstrap), system(system), class_loader(NULL) {}
+ ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap_in,
+ mirror::Object* system_in)
+ : StackVisitor(thread, NULL), bootstrap(bootstrap_in), system(system_in),
+ class_loader(NULL) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(class_loader == NULL);
diff --git a/runtime/log_severity.h b/runtime/native/dalvik_system_VMStack.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/dalvik_system_VMStack.h
index 31682df..5638f99 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/dalvik_system_VMStack.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_dalvik_system_VMStack(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e469126..f1a04cb 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -14,10 +14,12 @@
* limitations under the License.
*/
+#include "dalvik_system_ZygoteHooks.h"
+
#include <stdlib.h>
+#include "arch/instruction_set.h"
#include "debugger.h"
-#include "instruction_set.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "JNIHelp.h"
@@ -100,8 +102,7 @@
runtime->PreZygoteFork();
// Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
- Thread* self = Thread::Current();
- return reinterpret_cast<jlong>(self);
+ return reinterpret_cast<jlong>(ThreadForEnv(env));
}
static void ZygoteHooks_nativePostForkChild(JNIEnv* env, jclass, jlong token, jint debug_flags,
diff --git a/runtime/log_severity.h b/runtime/native/dalvik_system_ZygoteHooks.h
similarity index 62%
copy from runtime/log_severity.h
copy to runtime/native/dalvik_system_ZygoteHooks.h
index 31682df..ca0658d 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/dalvik_system_ZygoteHooks.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_dalvik_system_ZygoteHooks(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index b11cbdf..1ea75f3 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Class.h"
+
#include "class_linker.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_Class.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_Class.h
index 31682df..8f769c3 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_Class.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_Class(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index c1c6c26..27eae46 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_DexCache.h"
+
#include "dex_file.h"
#include "jni_internal.h"
#include "mirror/dex_cache.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_DexCache.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_DexCache.h
index 31682df..b1c1f5e 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_DexCache.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_DexCache(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
diff --git a/runtime/native/java_lang_Object.cc b/runtime/native/java_lang_Object.cc
index 4768f48..49cacdf 100644
--- a/runtime/native/java_lang_Object.cc
+++ b/runtime/native/java_lang_Object.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Object.h"
+
#include "jni_internal.h"
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_Object.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_Object.h
index 31682df..c860571 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_Object.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_Object(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index f9a1cee..dc0cb7b 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Runtime.h"
+
#include <dlfcn.h>
#include <limits.h>
#include <unistd.h>
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_Runtime.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_Runtime.h
index 31682df..ceda06b 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_Runtime.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_Runtime(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index d6b47eb..4ea2546 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_String.h"
+
#include "common_throws.h"
#include "jni_internal.h"
#include "mirror/string-inl.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_String.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_String.h
index 31682df..357eb3d 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_String.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_String(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index ee99e78..f79be56 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_System.h"
+
#include "common_throws.h"
#include "gc/accounting/card_table-inl.h"
#include "jni_internal.h"
@@ -93,7 +95,7 @@
switch (dstComponentPrimitiveType) {
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable, cannot have arrays of type void";
- return;
+ UNREACHABLE();
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 1U);
@@ -122,7 +124,7 @@
}
default:
LOG(FATAL) << "Unknown array type: " << PrettyTypeOf(srcArray);
- return;
+ UNREACHABLE();
}
}
// If one of the arrays holds a primitive type the other array must hold the exact same type.
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_System.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_System.h
index 31682df..e371fa5 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_System.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_System(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index c0c7265..420e9df 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Thread.h"
+
#include "common_throws.h"
#include "debugger.h"
#include "jni_internal.h"
@@ -131,11 +133,7 @@
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
- Thread* thread;
- {
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
if (thread != NULL) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_Thread.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_Thread.h
index 31682df..7700ce2 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_Thread.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_Thread(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_
diff --git a/runtime/native/java_lang_Throwable.cc b/runtime/native/java_lang_Throwable.cc
index 3ed4cfe..cb8a869 100644
--- a/runtime/native/java_lang_Throwable.cc
+++ b/runtime/native/java_lang_Throwable.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Throwable.h"
+
#include "jni_internal.h"
#include "scoped_fast_native_object_access.h"
#include "thread.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_Throwable.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_Throwable.h
index 31682df..f9aea84 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_Throwable.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_Throwable(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index f6a46bd..35932e0 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_VMClassLoader.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
@@ -34,14 +36,16 @@
}
ClassLinker* cl = Runtime::Current()->GetClassLinker();
std::string descriptor(DotToDescriptor(name.c_str()));
- mirror::Class* c = cl->LookupClass(soa.Self(), descriptor.c_str(), loader);
+ const size_t descriptor_hash = ComputeModifiedUtf8Hash(descriptor.c_str());
+ mirror::Class* c = cl->LookupClass(soa.Self(), descriptor.c_str(), descriptor_hash, loader);
if (c != nullptr && c->IsResolved()) {
return soa.AddLocalReference<jclass>(c);
}
if (loader != nullptr) {
// Try the common case.
StackHandleScope<1> hs(soa.Self());
- c = cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), hs.NewHandle(loader));
+ c = cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
+ hs.NewHandle(loader));
if (c != nullptr) {
return soa.AddLocalReference<jclass>(c);
}
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_VMClassLoader.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_VMClassLoader.h
index 31682df..bf8d94f 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_VMClassLoader.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_VMClassLoader(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_
diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc
index ad48ec0..0532c35 100644
--- a/runtime/native/java_lang_ref_FinalizerReference.cc
+++ b/runtime/native/java_lang_ref_FinalizerReference.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_ref_FinalizerReference.h"
+
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "jni_internal.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_ref_FinalizerReference.h
similarity index 60%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_ref_FinalizerReference.h
index 31682df..848a7ad 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_ref_FinalizerReference.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_ref_FinalizerReference(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 4f04d60..d232059 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_ref_Reference.h"
+
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "jni_internal.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_ref_Reference.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_ref_Reference.h
index 31682df..0cbf116 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_ref_Reference.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_ref_Reference(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index 763a664..1ffcbdf 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Array.h"
+
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file-inl.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_reflect_Array.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_reflect_Array.h
index 31682df..805bf79 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_reflect_Array.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_reflect_Array(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 0542aeb..3121a90 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Constructor.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/art_method.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_reflect_Constructor.h
similarity index 61%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_reflect_Constructor.h
index 31682df..7baae97 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_reflect_Constructor.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_reflect_Constructor(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 7f5a611..a042620 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Field.h"
+
#include "class_linker.h"
#include "class_linker-inl.h"
#include "common_throws.h"
@@ -57,9 +59,8 @@
}
template<bool kAllowReferences>
-ALWAYS_INLINE inline static bool GetFieldValue(
- const ScopedFastNativeObjectAccess& soa, mirror::Object* o, mirror::ArtField* f,
- Primitive::Type field_type, JValue* value)
+ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::ArtField* f,
+ Primitive::Type field_type, JValue* value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
switch (field_type) {
@@ -148,7 +149,7 @@
// Get the field's value, boxing if necessary.
Primitive::Type field_type = f->GetTypeAsPrimitiveType();
JValue value;
- if (!GetFieldValue<true>(soa, o, f, field_type, &value)) {
+ if (!GetFieldValue<true>(o, f, field_type, &value)) {
DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
@@ -178,13 +179,13 @@
JValue field_value;
if (field_type == kPrimitiveType) {
// This if statement should get optimized out since we only pass in valid primitive types.
- if (UNLIKELY(!GetFieldValue<false>(soa, o, f, kPrimitiveType, &field_value))) {
+ if (UNLIKELY(!GetFieldValue<false>(o, f, kPrimitiveType, &field_value))) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
return field_value;
}
- if (!GetFieldValue<false>(soa, o, f, field_type, &field_value)) {
+ if (!GetFieldValue<false>(o, f, field_type, &field_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
@@ -232,9 +233,8 @@
return GetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, accessible).GetS();
}
-static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
- mirror::ArtField* f, Primitive::Type field_type, bool allow_references,
- const JValue& new_value)
+static void SetFieldValue(mirror::Object* o, mirror::ArtField* f, Primitive::Type field_type,
+ bool allow_references, const JValue& new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
switch (field_type) {
@@ -317,7 +317,7 @@
DCHECK(soa.Self()->IsExceptionPending());
return;
}
- SetFieldValue(soa, o, f, field_prim_type, true, unboxed_value);
+ SetFieldValue(o, f, field_prim_type, true, unboxed_value);
}
template<Primitive::Type kPrimitiveType>
@@ -350,7 +350,7 @@
}
// Write the value.
- SetFieldValue(soa, o, f, field_type, false, wide_value);
+ SetFieldValue(o, f, field_type, false, wide_value);
}
static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z,
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_reflect_Field.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_reflect_Field.h
index 31682df..1739711 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_reflect_Field.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_reflect_Field(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index f029b16..9859746 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Method.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/art_method.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_reflect_Method.h
similarity index 62%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_reflect_Method.h
index 31682df..3a93cd0 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_reflect_Method.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_reflect_Method(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index 07d670d..baf8b24 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Proxy.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
diff --git a/runtime/log_severity.h b/runtime/native/java_lang_reflect_Proxy.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/java_lang_reflect_Proxy.h
index 31682df..e25f0f7 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/java_lang_reflect_Proxy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_java_lang_reflect_Proxy(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
index bf92e12..04f0ba0 100644
--- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_util_concurrent_atomic_AtomicLong.h"
+
#include "atomic.h"
#include "jni_internal.h"
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.h b/runtime/native/java_util_concurrent_atomic_AtomicLong.h
new file mode 100644
index 0000000..990dc86
--- /dev/null
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_
+#define ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_util_concurrent_atomic_AtomicLong(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index 8b2aecb..0ab2979 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "org_apache_harmony_dalvik_ddmc_DdmServer.h"
+
#include "base/logging.h"
#include "debugger.h"
#include "jni_internal.h"
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h
new file mode 100644
index 0000000..9a4645c
--- /dev/null
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_
+#define ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_org_apache_harmony_dalvik_ddmc_DdmServer(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 45ef9ae..987427e 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
+
#include "base/logging.h"
#include "base/mutex.h"
#include "debugger.h"
@@ -61,12 +63,7 @@
}
// Suspend thread to build stack trace.
- Thread* thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h
new file mode 100644
index 0000000..736e4c8
--- /dev/null
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
+#define ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 606d62d..dfabff5 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_
#define ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 65dece0..17ebdff 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "sun_misc_Unsafe.h"
+
#include "gc/accounting/card_table-inl.h"
#include "jni_internal.h"
#include "mirror/array.h"
diff --git a/runtime/log_severity.h b/runtime/native/sun_misc_Unsafe.h
similarity index 63%
copy from runtime/log_severity.h
copy to runtime/native/sun_misc_Unsafe.h
index 31682df..93194f4 100644
--- a/runtime/log_severity.h
+++ b/runtime/native/sun_misc_Unsafe.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+#ifndef ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
+#define ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
-typedef int LogSeverity;
+#include <jni.h>
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+namespace art {
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+void register_sun_misc_Unsafe(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index bc191b4..ffadfc6 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -19,13 +19,14 @@
#include "nativebridge/native_bridge.h"
#include "base/logging.h"
+#include "base/macros.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
-const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
ScopedObjectAccess soa(env);
StackHandleScope<1> scope(soa.Self());
mirror::ArtMethod* m = soa.DecodeMethod(mid);
@@ -33,7 +34,7 @@
return mh.GetShorty();
}
-uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
if (clazz == nullptr)
return 0;
@@ -56,8 +57,8 @@
return native_method_count;
}
-uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count) {
+static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count) {
if ((clazz == nullptr) || (methods == nullptr)) {
return 0;
}
@@ -71,7 +72,7 @@
if (count < method_count) {
methods[count].name = m->GetName();
methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ methods[count].fnPtr = m->GetEntryPointFromJni();
count++;
} else {
LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
@@ -84,7 +85,7 @@
if (count < method_count) {
methods[count].name = m->GetName();
methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ methods[count].fnPtr = m->GetEntryPointFromJni();
count++;
} else {
LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
@@ -107,10 +108,11 @@
GetMethodShorty, GetNativeMethodCount, GetNativeMethods
};
-void LoadNativeBridge(std::string& native_bridge_library_filename) {
- android::LoadNativeBridge(native_bridge_library_filename.c_str(), &native_bridge_art_callbacks_);
+bool LoadNativeBridge(std::string& native_bridge_library_filename) {
VLOG(startup) << "Runtime::Setup native bridge library: "
<< (native_bridge_library_filename.empty() ? "(empty)" : native_bridge_library_filename);
+ return android::LoadNativeBridge(native_bridge_library_filename.c_str(),
+ &native_bridge_art_callbacks_);
}
void PreInitializeNativeBridge(std::string dir) {
@@ -118,9 +120,10 @@
#ifndef __APPLE__ // Mac OS does not support CLONE_NEWNS.
if (unshare(CLONE_NEWNS) == -1) {
LOG(WARNING) << "Could not create mount namespace.";
- return;
}
android::PreInitializeNativeBridge(dir.c_str(), GetInstructionSetString(kRuntimeISA));
+#else
+ UNUSED(dir);
#endif
}
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
index 026cd82..090cddb 100644
--- a/runtime/native_bridge_art_interface.h
+++ b/runtime/native_bridge_art_interface.h
@@ -26,7 +26,7 @@
// Mirror libnativebridge interface. Done to have the ART callbacks out of line, and not require
// the system/core header file in other files.
-void LoadNativeBridge(std::string& native_bridge_library_filename);
+bool LoadNativeBridge(std::string& native_bridge_library_filename);
// This is mostly for testing purposes, as in a full system this is called by Zygote code.
void PreInitializeNativeBridge(std::string dir);
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index e9ad353..300abc9 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -26,11 +26,11 @@
NoopCompilerCallbacks() {}
~NoopCompilerCallbacks() {}
- bool MethodVerified(verifier::MethodVerifier* verifier) OVERRIDE {
+ bool MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
return true;
}
- void ClassRejected(ClassReference ref) OVERRIDE {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
// This is only used by compilers which need to be able to run without relocation even when it
// would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 374a80e..a851f21 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -26,9 +26,9 @@
// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct NthCallerVisitor : public StackVisitor {
- NthCallerVisitor(Thread* thread, size_t n, bool include_runtime_and_upcalls = false)
- : StackVisitor(thread, NULL), n(n), include_runtime_and_upcalls_(include_runtime_and_upcalls),
- count(0), caller(NULL) {}
+ NthCallerVisitor(Thread* thread, size_t n_in, bool include_runtime_and_upcalls = false)
+ : StackVisitor(thread, NULL), n(n_in),
+ include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(NULL) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/runtime/oat.cc b/runtime/oat.cc
index a237bf6..bfb27dd 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -15,15 +15,17 @@
*/
#include "oat.h"
-#include "utils.h"
#include <string.h>
#include <zlib.h>
+#include "arch/instruction_set_features.h"
+#include "utils.h"
+
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '4', '2', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '4', '8', '\0' };
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
@@ -470,6 +472,12 @@
return sizeof(OatHeader) + key_value_store_size_;
}
+bool OatHeader::IsPic() const {
+ const char* pic_string = GetStoreValueByKey(OatHeader::kPicKey);
+ static const char kTrue[] = "true";
+ return (pic_string != nullptr && strncmp(pic_string, kTrue, sizeof(kTrue)) == 0);
+}
+
void OatHeader::Flatten(const SafeMap<std::string, std::string>* key_value_store) {
char* data_ptr = reinterpret_cast<char*>(&key_value_store_);
if (key_value_store != nullptr) {
diff --git a/runtime/oat.h b/runtime/oat.h
index 92b98b1..8fb02b8 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -19,14 +19,16 @@
#include <vector>
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "dex_file.h"
-#include "instruction_set.h"
#include "quick/quick_method_frame_info.h"
#include "safe_map.h"
namespace art {
+class InstructionSetFeatures;
+
class PACKED(4) OatHeader {
public:
static const uint8_t kOatMagic[4];
@@ -35,6 +37,7 @@
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDex2OatHostKey = "dex2oat-host";
+ static constexpr const char* kPicKey = "pic";
static OatHeader* Create(InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
@@ -103,6 +106,7 @@
bool GetStoreKeyValuePairByIndex(size_t index, const char** key, const char** value) const;
size_t GetHeaderSize() const;
+ bool IsPic() const;
private:
OatHeader(InstructionSet instruction_set,
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 03a398e..54f5eab 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -69,10 +69,11 @@
OatFile* OatFile::Open(const std::string& filename,
const std::string& location,
uint8_t* requested_base,
+ uint8_t* oat_file_begin,
bool executable,
std::string* error_msg) {
CHECK(!filename.empty()) << location;
- CheckLocation(filename);
+ CheckLocation(location);
std::unique_ptr<OatFile> ret;
if (kUsePortableCompiler && executable) {
// If we are using PORTABLE, use dlopen to deal with relocations.
@@ -93,7 +94,8 @@
*error_msg = StringPrintf("Failed to open oat filename for reading: %s", strerror(errno));
return nullptr;
}
- ret.reset(OpenElfFile(file.get(), location, requested_base, false, executable, error_msg));
+ ret.reset(OpenElfFile(file.get(), location, requested_base, oat_file_begin, false, executable,
+ error_msg));
// It would be nice to unlink here. But we might have opened the file created by the
// ScopedLock, which we better not delete to avoid races. TODO: Investigate how to fix the API
@@ -104,12 +106,12 @@
OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::string* error_msg) {
CheckLocation(location);
- return OpenElfFile(file, location, NULL, true, false, error_msg);
+ return OpenElfFile(file, location, nullptr, nullptr, true, false, error_msg);
}
OatFile* OatFile::OpenReadable(File* file, const std::string& location, std::string* error_msg) {
CheckLocation(location);
- return OpenElfFile(file, location, NULL, false, false, error_msg);
+ return OpenElfFile(file, location, nullptr, nullptr, false, false, error_msg);
}
OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
@@ -127,11 +129,13 @@
OatFile* OatFile::OpenElfFile(File* file,
const std::string& location,
uint8_t* requested_base,
+ uint8_t* oat_file_begin,
bool writable,
bool executable,
std::string* error_msg) {
std::unique_ptr<OatFile> oat_file(new OatFile(location, executable));
- bool success = oat_file->ElfFileOpen(file, requested_base, writable, executable, error_msg);
+ bool success = oat_file->ElfFileOpen(file, requested_base, oat_file_begin, writable, executable,
+ error_msg);
if (!success) {
CHECK(!error_msg->empty());
return nullptr;
@@ -190,9 +194,12 @@
return Setup(error_msg);
}
-bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, bool writable, bool executable,
+bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file_begin,
+ bool writable, bool executable,
std::string* error_msg) {
- elf_file_.reset(ElfFile::Open(file, writable, true, error_msg));
+ // TODO: rename requested_base to oat_data_begin
+ elf_file_.reset(ElfFile::Open(file, writable, /*program_header_only*/true, error_msg,
+ oat_file_begin));
if (elf_file_.get() == nullptr) {
DCHECK(!error_msg->empty());
return false;
@@ -295,7 +302,7 @@
oat += sizeof(dex_file_offset);
if (UNLIKELY(oat > End())) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated "
- " after dex file offsets", GetLocation().c_str(), i,
+ "after dex file offsets", GetLocation().c_str(), i,
dex_file_location.c_str());
return false;
}
@@ -303,13 +310,13 @@
const uint8_t* dex_file_pointer = Begin() + dex_file_offset;
if (UNLIKELY(!DexFile::IsMagicValid(dex_file_pointer))) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
- " dex file magic '%s'", GetLocation().c_str(), i,
+ "dex file magic '%s'", GetLocation().c_str(), i,
dex_file_location.c_str(), dex_file_pointer);
return false;
}
if (UNLIKELY(!DexFile::IsVersionValid(dex_file_pointer))) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
- " dex file version '%s'", GetLocation().c_str(), i,
+ "dex file version '%s'", GetLocation().c_str(), i,
dex_file_location.c_str(), dex_file_pointer);
return false;
}
@@ -319,7 +326,7 @@
oat += (sizeof(*methods_offsets_pointer) * header->class_defs_size_);
if (UNLIKELY(oat > End())) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
- " method offsets", GetLocation().c_str(), i,
+ "method offsets", GetLocation().c_str(), i,
dex_file_location.c_str());
return false;
}
@@ -592,4 +599,9 @@
method->SetNativeGcMap(GetNativeGcMap()); // Used by native methods in work around JNI mode.
}
+bool OatFile::IsPic() const {
+ return GetOatHeader().IsPic();
+ // TODO: Check against oat_patches. b/18144996
+}
+
} // namespace art
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 734b9b3..2b94249 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -49,6 +49,7 @@
static OatFile* Open(const std::string& filename,
const std::string& location,
uint8_t* requested_base,
+ uint8_t* oat_file_begin,
bool executable,
std::string* error_msg);
@@ -72,6 +73,8 @@
return is_executable_;
}
+ bool IsPic() const;
+
ElfFile* GetElfFile() const {
CHECK_NE(reinterpret_cast<uintptr_t>(elf_file_.get()), reinterpret_cast<uintptr_t>(nullptr))
<< "Cannot get an elf file from " << GetLocation();
@@ -313,13 +316,16 @@
static OatFile* OpenElfFile(File* file,
const std::string& location,
uint8_t* requested_base,
+ uint8_t* oat_file_begin, // Override base if not null
bool writable,
bool executable,
std::string* error_msg);
explicit OatFile(const std::string& filename, bool executable);
bool Dlopen(const std::string& elf_filename, uint8_t* requested_base, std::string* error_msg);
- bool ElfFileOpen(File* file, uint8_t* requested_base, bool writable, bool executable,
+ bool ElfFileOpen(File* file, uint8_t* requested_base,
+ uint8_t* oat_file_begin, // Override where the file is loaded to if not null
+ bool writable, bool executable,
std::string* error_msg);
bool Setup(std::string* error_msg);
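Note on the two API additions above: OatFile::Open() now threads an optional oat_file_begin address down to ElfFile::Open() so a caller can override where the oat file is mapped, and IsPic() surfaces the new "pic" entry from the oat header's key-value store. A minimal caller sketch, assuming a hypothetical oat path, no address overrides, and the surrounding ART headers; this is illustration only, not code from this patch:

    std::string error_msg;
    std::unique_ptr<OatFile> oat_file(OatFile::Open(
        "/data/dalvik-cache/example.oat",  // filename (hypothetical)
        "/data/dalvik-cache/example.oat",  // location (hypothetical)
        nullptr,                           // requested_base: let the loader pick
        nullptr,                           // oat_file_begin: no mapping override
        false,                             // executable
        &error_msg));
    if (oat_file == nullptr) {
      LOG(ERROR) << "Failed to open oat file: " << error_msg;
    } else if (oat_file->IsPic()) {
      // The oat code was compiled position-independent (dex2oat stored "pic" = "true").
    }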
diff --git a/runtime/offsets.cc b/runtime/offsets.cc
index 3691401..f59ed88 100644
--- a/runtime/offsets.cc
+++ b/runtime/offsets.cc
@@ -16,7 +16,7 @@
#include "offsets.h"
-#include <iostream> // NOLINT
+#include <ostream>
namespace art {
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 72a6b0f..9d5063f 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -17,7 +17,8 @@
#ifndef ART_RUNTIME_OFFSETS_H_
#define ART_RUNTIME_OFFSETS_H_
-#include <iostream> // NOLINT
+#include <ostream>
+
#include "globals.h"
namespace art {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index dcca9d3..3e6c86b 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -16,6 +16,8 @@
#include "parsed_options.h"
+#include <sstream>
+
#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif
@@ -103,11 +105,7 @@
profile_clock_source_(kDefaultTraceClockSource),
verify_(true),
image_isa_(kRuntimeISA),
- use_homogeneous_space_compaction_for_oom_(false), // If we are using homogeneous space
- // compaction then default background
- // compaction to off since homogeneous
- // space compactions when we transition
- // to not jank perceptible.
+ use_homogeneous_space_compaction_for_oom_(true), // Enable hspace compaction on OOM by default.
min_interval_homogeneous_space_compaction_by_oom_(MsToNs(100 * 1000)) // 100s.
{}
@@ -502,41 +500,38 @@
} else if (StartsWith(option, "-verbose:")) {
std::vector<std::string> verbose_options;
Split(option.substr(strlen("-verbose:")), ',', &verbose_options);
- for (size_t i = 0; i < verbose_options.size(); ++i) {
- if (verbose_options[i] == "class") {
+ for (size_t j = 0; j < verbose_options.size(); ++j) {
+ if (verbose_options[j] == "class") {
gLogVerbosity.class_linker = true;
- } else if (verbose_options[i] == "compiler") {
+ } else if (verbose_options[j] == "compiler") {
gLogVerbosity.compiler = true;
- } else if (verbose_options[i] == "gc") {
+ } else if (verbose_options[j] == "gc") {
gLogVerbosity.gc = true;
- } else if (verbose_options[i] == "heap") {
+ } else if (verbose_options[j] == "heap") {
gLogVerbosity.heap = true;
- } else if (verbose_options[i] == "jdwp") {
+ } else if (verbose_options[j] == "jdwp") {
gLogVerbosity.jdwp = true;
- } else if (verbose_options[i] == "jni") {
+ } else if (verbose_options[j] == "jni") {
gLogVerbosity.jni = true;
- } else if (verbose_options[i] == "monitor") {
+ } else if (verbose_options[j] == "monitor") {
gLogVerbosity.monitor = true;
- } else if (verbose_options[i] == "profiler") {
+ } else if (verbose_options[j] == "profiler") {
gLogVerbosity.profiler = true;
- } else if (verbose_options[i] == "signals") {
+ } else if (verbose_options[j] == "signals") {
gLogVerbosity.signals = true;
- } else if (verbose_options[i] == "startup") {
+ } else if (verbose_options[j] == "startup") {
gLogVerbosity.startup = true;
- } else if (verbose_options[i] == "third-party-jni") {
+ } else if (verbose_options[j] == "third-party-jni") {
gLogVerbosity.third_party_jni = true;
- } else if (verbose_options[i] == "threads") {
+ } else if (verbose_options[j] == "threads") {
gLogVerbosity.threads = true;
- } else if (verbose_options[i] == "verifier") {
+ } else if (verbose_options[j] == "verifier") {
gLogVerbosity.verifier = true;
} else {
- Usage("Unknown -verbose option %s\n", verbose_options[i].c_str());
+ Usage("Unknown -verbose option %s\n", verbose_options[j].c_str());
return false;
}
}
- } else if (StartsWith(option, "-verbose-methods:")) {
- gLogVerbosity.compiler = false;
- Split(option.substr(strlen("-verbose-methods:")), ',', &gVerboseMethods);
} else if (StartsWith(option, "-Xlockprofthreshold:")) {
if (!ParseUnsignedInteger(option, ':', &lock_profiling_threshold_)) {
return false;
@@ -742,7 +737,7 @@
}
void ParsedOptions::UsageMessageV(FILE* stream, const char* fmt, va_list ap) {
- hook_vfprintf_(stderr, fmt, ap);
+ hook_vfprintf_(stream, fmt, ap);
}
void ParsedOptions::UsageMessage(FILE* stream, const char* fmt, ...) {
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 26a2f31..9294868 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -25,7 +25,7 @@
#include "globals.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
-#include "instruction_set.h"
+#include "arch/instruction_set.h"
#include "profiler_options.h"
namespace art {
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 1d06d35..e399195 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -97,7 +97,7 @@
switch (profile_options.GetProfileType()) {
case kProfilerMethod: {
mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
- if (false && method == nullptr) {
+ if ((false) && method == nullptr) {
LOG(INFO) << "No current method available";
std::ostringstream os;
thread->Dump(os);
diff --git a/runtime/profiler_options.h b/runtime/profiler_options.h
index e3ef697..1db2f05 100644
--- a/runtime/profiler_options.h
+++ b/runtime/profiler_options.h
@@ -26,6 +26,7 @@
kProfilerMethod, // Method only
kProfilerBoundedStack, // Methods with Dex PC on top of the stack
};
+std::ostream& operator<<(std::ostream& os, const ProfileDataType& rhs);
class ProfilerOptions {
public:
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index d8fc277..3415e8f 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -35,50 +35,38 @@
namespace art {
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET),
- check_iget_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE),
- check_iget_wide_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
- check_iget_object_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN),
- check_iget_boolean_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE),
- check_iget_byte_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR),
- check_iget_char_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT),
- check_iget_short_type);
-
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT),
- check_iput_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE),
- check_iput_wide_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT),
- check_iput_object_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN),
- check_iput_boolean_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE),
- check_iput_byte_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR),
- check_iput_char_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT),
- check_iput_short_type);
-
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT), check_iget_iput_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), check_iget_iput_wide_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), check_iget_iput_object_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), check_iget_iput_boolean_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), check_iget_iput_byte_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), check_iget_iput_char_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), check_iget_iput_short_variant);
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
+ "iget_object type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN),
+ "iget_boolean type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE), "iget_byte type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR), "iget_char type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT), "iget_short type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT), "iput type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE), "iput_wide type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT),
+ "iput_object type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN),
+ "iput_boolean type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE), "iput_byte type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR), "iput_char type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT), "iput_short type");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT), "iget/iput variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), "iget/iput_wide variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), "iget/iput_object variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), "iget/iput_boolean variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), "iget/iput_byte variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), "iget/iput_char variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant");
// This is used by compiler and debugger. We look into the dex cache for resolved methods and
// fields. However, in the context of the debugger, not all methods and fields are resolved. Since
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index a2ae397..a8d4308 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -118,7 +118,7 @@
uint32_t is_volatile : 1;
uint32_t field_offset : 31;
};
-COMPILE_ASSERT(sizeof(InlineIGetIPutData) == sizeof(uint64_t), InvalidSizeOfInlineIGetIPutData);
+static_assert(sizeof(InlineIGetIPutData) == sizeof(uint64_t), "Invalid size of InlineIGetIPutData");
struct InlineReturnArgData {
uint16_t arg;
@@ -127,7 +127,8 @@
uint16_t reserved : 14;
uint32_t reserved2;
};
-COMPILE_ASSERT(sizeof(InlineReturnArgData) == sizeof(uint64_t), InvalidSizeOfInlineReturnArgData);
+static_assert(sizeof(InlineReturnArgData) == sizeof(uint64_t),
+ "Invalid size of InlineReturnArgData");
struct InlineMethod {
InlineMethodOpcode opcode;
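The COMPILE_ASSERT conversions above (and in inline_method_analyser.cc) rely on C++11 static_assert, whose second argument is an ordinary string literal rather than an identifier that had to double as a dummy type name. Side by side, using the struct from this header:

    // Old macro form removed by this change; the "message" had to be a valid identifier:
    //   COMPILE_ASSERT(sizeof(InlineIGetIPutData) == sizeof(uint64_t), InvalidSizeOfInlineIGetIPutData);
    // C++11 replacement; checked at compile time, with a plain string message:
    static_assert(sizeof(InlineIGetIPutData) == sizeof(uint64_t),
                  "Invalid size of InlineIGetIPutData");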
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8e57837..90c9fe7 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -297,10 +297,10 @@
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
class InstrumentationStackVisitor : public StackVisitor {
public:
- InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_depth)
+ InstrumentationStackVisitor(Thread* self, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, nullptr),
- self_(self), frame_depth_(frame_depth),
+ frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
@@ -324,7 +324,6 @@
}
private:
- Thread* const self_;
const size_t frame_depth_;
size_t instrumentation_frames_to_pop_;
@@ -333,7 +332,7 @@
void QuickExceptionHandler::UpdateInstrumentationStack() {
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_depth_);
+ InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
visitor.WalkStack(true);
size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index b93769c..cf1ecbf 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -40,6 +40,7 @@
~QuickExceptionHandler() {
LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
+ UNREACHABLE();
}
void FindCatch(const ThrowLocation& throw_location, mirror::Throwable* exception,
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index fd43d78..0dc31e7 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -27,9 +27,7 @@
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
// Unused for now.
- UNUSED(obj);
- UNUSED(offset);
- UNUSED(ref_addr);
+ UNUSED(obj, offset, ref_addr);
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
// To be implemented.
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index b57e48f..44d1bc4 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -219,8 +219,7 @@
PrettyDescriptor(found_descriptor).c_str()).c_str());
}
- bool BuildArgArrayFromObjectArray(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::Object* receiver,
+ bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
mirror::ObjectArray<mirror::Object>* args, MethodHelper& mh)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::TypeList* classes = mh.GetMethod()->GetParameterTypeList();
@@ -613,7 +612,7 @@
ArgArray arg_array(shorty, shorty_len);
StackHandleScope<1> hs(soa.Self());
MethodHelper mh(hs.NewHandle(m));
- if (!arg_array.BuildArgArrayFromObjectArray(soa, receiver, objects, mh)) {
+ if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, mh)) {
CHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
@@ -632,8 +631,8 @@
}
// Box if necessary and return.
- return soa.AddLocalReference<jobject>(BoxPrimitive(mh.GetReturnType()->GetPrimitiveType(),
- result));
+ return soa.AddLocalReference<jobject>(
+ BoxPrimitive(Primitive::GetType(mh.GetMethod()->GetReturnTypeDescriptor()[0]), result));
}
bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index f8e0f47..eca1800 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -115,8 +115,8 @@
*receiver = nullptr;
} else {
// Ensure class is initialized before allocating object
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(c));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> h_class(hs2.NewHandle(c));
bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
CHECK(initialized);
*receiver = c->AllocObject(self);
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index fe05073..cdf8d54 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -59,6 +59,11 @@
return imt_conflict_method_.Read();
}
+inline mirror::ArtMethod* Runtime::GetImtUnimplementedMethod() {
+ CHECK(!imt_unimplemented_method_.IsNull());
+ return imt_unimplemented_method_.Read();
+}
+
inline mirror::ObjectArray<mirror::ArtMethod>* Runtime::GetDefaultImt()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(HasDefaultImt());
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index adf0994..078e7d2 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -37,14 +37,17 @@
#include "arch/arm/registers_arm.h"
#include "arch/arm64/quick_method_frame_info_arm64.h"
#include "arch/arm64/registers_arm64.h"
+#include "arch/instruction_set_features.h"
#include "arch/mips/quick_method_frame_info_mips.h"
#include "arch/mips/registers_mips.h"
#include "arch/x86/quick_method_frame_info_x86.h"
#include "arch/x86/registers_x86.h"
#include "arch/x86_64/quick_method_frame_info_x86_64.h"
#include "arch/x86_64/registers_x86_64.h"
-#include "base/unix_file/fd_file.h"
+#include "asm_support.h"
#include "atomic.h"
+#include "base/dumpable.h"
+#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "debugger.h"
#include "elf_file.h"
@@ -67,6 +70,31 @@
#include "mirror/throwable.h"
#include "monitor.h"
#include "native_bridge_art_interface.h"
+#include "native/dalvik_system_DexFile.h"
+#include "native/dalvik_system_VMDebug.h"
+#include "native/dalvik_system_VMRuntime.h"
+#include "native/dalvik_system_VMStack.h"
+#include "native/dalvik_system_ZygoteHooks.h"
+#include "native/java_lang_Class.h"
+#include "native/java_lang_DexCache.h"
+#include "native/java_lang_Object.h"
+#include "native/java_lang_ref_FinalizerReference.h"
+#include "native/java_lang_reflect_Array.h"
+#include "native/java_lang_reflect_Constructor.h"
+#include "native/java_lang_reflect_Field.h"
+#include "native/java_lang_reflect_Method.h"
+#include "native/java_lang_reflect_Proxy.h"
+#include "native/java_lang_ref_Reference.h"
+#include "native/java_lang_Runtime.h"
+#include "native/java_lang_String.h"
+#include "native/java_lang_System.h"
+#include "native/java_lang_Thread.h"
+#include "native/java_lang_Throwable.h"
+#include "native/java_lang_VMClassLoader.h"
+#include "native/java_util_concurrent_atomic_AtomicLong.h"
+#include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
+#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
+#include "native/sun_misc_Unsafe.h"
#include "parsed_options.h"
#include "oat_file.h"
#include "os.h"
@@ -145,10 +173,15 @@
target_sdk_version_(0),
implicit_null_checks_(false),
implicit_so_checks_(false),
- implicit_suspend_checks_(false) {
+ implicit_suspend_checks_(false),
+ is_native_bridge_loaded_(false) {
+ CheckAsmSupportOffsetsAndSizes();
}
Runtime::~Runtime() {
+ if (is_native_bridge_loaded_) {
+ UnloadNativeBridge();
+ }
if (dump_gc_performance_on_shutdown_) {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
@@ -170,9 +203,6 @@
BackgroundMethodSamplingProfiler::Shutdown();
}
- // Shutdown the fault manager if it was initialized.
- fault_manager.Shutdown();
-
Trace::Shutdown();
// Make sure to let the GC complete if it is running.
@@ -185,6 +215,10 @@
// Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
delete thread_list_;
+
+ // Shutdown the fault manager if it was initialized.
+ fault_manager.Shutdown();
+
delete monitor_list_;
delete monitor_pool_;
delete class_linker_;
@@ -201,7 +235,7 @@
}
struct AbortState {
- void Dump(std::ostream& os) {
+ void Dump(std::ostream& os) const {
if (gAborting > 1) {
os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
return;
@@ -232,7 +266,7 @@
}
// No thread-safety analysis as we do explicitly test for holding the mutator lock.
- void DumpThread(std::ostream& os, Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+ void DumpThread(std::ostream& os, Thread* self) const NO_THREAD_SAFETY_ANALYSIS {
DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
self->Dump(os);
if (self->IsExceptionPending()) {
@@ -244,7 +278,7 @@
}
}
- void DumpAllThreads(std::ostream& os, Thread* self) {
+ void DumpAllThreads(std::ostream& os, Thread* self) const {
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
ThreadList* thread_list = runtime->GetThreadList();
@@ -336,7 +370,7 @@
return true;
}
-jobject CreateSystemClassLoader() {
+static jobject CreateSystemClassLoader() {
if (Runtime::Current()->UseCompileTimeClassPath()) {
return NULL;
}
@@ -380,9 +414,9 @@
if (!patchoat_executable_.empty()) {
return patchoat_executable_;
}
- std::string patchoat_executable_(GetAndroidRoot());
- patchoat_executable_ += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
- return patchoat_executable_;
+ std::string patchoat_executable(GetAndroidRoot());
+ patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
+ return patchoat_executable;
}
std::string Runtime::GetCompilerExecutable() const {
@@ -404,8 +438,18 @@
started_ = true;
+ // Use !IsCompiler so that we get test coverage; tests are never the zygote.
+ if (!IsCompiler()) {
+ ScopedObjectAccess soa(self);
+ gc::space::ImageSpace* image_space = heap_->GetImageSpace();
+ if (image_space != nullptr) {
+ Runtime::Current()->GetInternTable()->AddImageStringsToTable(image_space);
+ Runtime::Current()->GetClassLinker()->MoveImageClassesToClassTable();
+ }
+ }
+
if (!IsImageDex2OatEnabled() || !Runtime::Current()->GetHeap()->HasImageSpace()) {
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
@@ -427,12 +471,11 @@
return false;
}
} else {
- bool have_native_bridge = !native_bridge_library_filename_.empty();
- if (have_native_bridge) {
+ if (is_native_bridge_loaded_) {
PreInitializeNativeBridge(".");
}
- DidForkFromZygote(self->GetJniEnv(), have_native_bridge ? NativeBridgeAction::kInitialize :
- NativeBridgeAction::kUnload, GetInstructionSetString(kRuntimeISA));
+ DidForkFromZygote(self->GetJniEnv(), NativeBridgeAction::kInitialize,
+ GetInstructionSetString(kRuntimeISA));
}
StartDaemonThreads();
@@ -511,14 +554,17 @@
void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa) {
is_zygote_ = false;
- switch (action) {
- case NativeBridgeAction::kUnload:
- UnloadNativeBridge();
- break;
+ if (is_native_bridge_loaded_) {
+ switch (action) {
+ case NativeBridgeAction::kUnload:
+ UnloadNativeBridge();
+ is_native_bridge_loaded_ = false;
+ break;
- case NativeBridgeAction::kInitialize:
- InitializeNativeBridge(env, isa);
- break;
+ case NativeBridgeAction::kInitialize:
+ InitializeNativeBridge(env, isa);
+ break;
+ }
}
// Create the thread pool.
@@ -561,8 +607,7 @@
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-static bool OpenDexFilesFromImage(const std::vector<std::string>& dex_filenames,
- const std::string& image_location,
+static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<const DexFile*>& dex_files,
size_t* failures) {
std::string system_filename;
@@ -624,8 +669,7 @@
const std::string& image_location,
std::vector<const DexFile*>& dex_files) {
size_t failure_count = 0;
- if (!image_location.empty() && OpenDexFilesFromImage(dex_filenames, image_location, dex_files,
- &failure_count)) {
+ if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
}
failure_count = 0;
@@ -813,7 +857,7 @@
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!HasCalleeSaveMethod(type)) {
- SetCalleeSaveMethod(CreateCalleeSaveMethod(type), type);
+ SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
}
}
} else {
@@ -848,14 +892,14 @@
// Pre-allocate an OutOfMemoryError for the double-OOME case.
self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;",
"OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
- "no stack available");
+ "no stack trace available");
pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
self->ClearException();
// Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
// ahead of checking the application's class loader.
self->ThrowNewException(ThrowLocation(), "Ljava/lang/NoClassDefFoundError;",
- "Class not found using the boot class loader; no stack available");
+ "Class not found using the boot class loader; no stack trace available");
pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
self->ClearException();
@@ -886,8 +930,7 @@
// Runtime::Start():
// DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
// No-op wrt native bridge.
- native_bridge_library_filename_ = options->native_bridge_library_filename_;
- LoadNativeBridge(native_bridge_library_filename_);
+ is_native_bridge_loaded_ = LoadNativeBridge(options->native_bridge_library_filename_);
VLOG(startup) << "Runtime::Init exiting";
return true;
@@ -957,34 +1000,31 @@
}
void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
-#define REGISTER(FN) extern void FN(JNIEnv*); FN(env)
- // Register Throwable first so that registration of other native methods can throw exceptions
- REGISTER(register_java_lang_Throwable);
- REGISTER(register_dalvik_system_DexFile);
- REGISTER(register_dalvik_system_VMDebug);
- REGISTER(register_dalvik_system_VMRuntime);
- REGISTER(register_dalvik_system_VMStack);
- REGISTER(register_dalvik_system_ZygoteHooks);
- REGISTER(register_java_lang_Class);
- REGISTER(register_java_lang_DexCache);
- REGISTER(register_java_lang_Object);
- REGISTER(register_java_lang_Runtime);
- REGISTER(register_java_lang_String);
- REGISTER(register_java_lang_System);
- REGISTER(register_java_lang_Thread);
- REGISTER(register_java_lang_VMClassLoader);
- REGISTER(register_java_lang_ref_FinalizerReference);
- REGISTER(register_java_lang_ref_Reference);
- REGISTER(register_java_lang_reflect_Array);
- REGISTER(register_java_lang_reflect_Constructor);
- REGISTER(register_java_lang_reflect_Field);
- REGISTER(register_java_lang_reflect_Method);
- REGISTER(register_java_lang_reflect_Proxy);
- REGISTER(register_java_util_concurrent_atomic_AtomicLong);
- REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmServer);
- REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmVmInternal);
- REGISTER(register_sun_misc_Unsafe);
-#undef REGISTER
+ register_dalvik_system_DexFile(env);
+ register_dalvik_system_VMDebug(env);
+ register_dalvik_system_VMRuntime(env);
+ register_dalvik_system_VMStack(env);
+ register_dalvik_system_ZygoteHooks(env);
+ register_java_lang_Class(env);
+ register_java_lang_DexCache(env);
+ register_java_lang_Object(env);
+ register_java_lang_ref_FinalizerReference(env);
+ register_java_lang_reflect_Array(env);
+ register_java_lang_reflect_Constructor(env);
+ register_java_lang_reflect_Field(env);
+ register_java_lang_reflect_Method(env);
+ register_java_lang_reflect_Proxy(env);
+ register_java_lang_ref_Reference(env);
+ register_java_lang_Runtime(env);
+ register_java_lang_String(env);
+ register_java_lang_System(env);
+ register_java_lang_Thread(env);
+ register_java_lang_Throwable(env);
+ register_java_lang_VMClassLoader(env);
+ register_java_util_concurrent_atomic_AtomicLong(env);
+ register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
+ register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
+ register_sun_misc_Unsafe(env);
}
void Runtime::DumpForSigQuit(std::ostream& os) {
@@ -1160,6 +1200,9 @@
if (HasImtConflictMethod()) {
imt_conflict_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
}
+ if (!imt_unimplemented_method_.IsNull()) {
+ imt_unimplemented_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
+ }
if (HasDefaultImt()) {
default_imt_.VisitRoot(callback, arg, 0, kRootVMInternal);
}
@@ -1243,7 +1286,7 @@
return method.Get();
}
-mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(CalleeSaveType type) {
+mirror::ArtMethod* Runtime::CreateCalleeSaveMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
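The runtime.cc hunk above drops the REGISTER macro (which extern-declared each function inline) in favor of plain calls whose declarations come from the new per-class headers under runtime/native/. The register_* bodies themselves are untouched by this patch; for orientation only, a generic JNI registration function follows the pattern below — the class name, native method, and table are invented for this sketch and are not ART code:

    #include <jni.h>

    static void MyClass_nativeFoo(JNIEnv*, jclass) {
      // Native implementation would go here.
    }

    static const JNINativeMethod gMethods[] = {
      { "nativeFoo", "()V", reinterpret_cast<void*>(MyClass_nativeFoo) },
    };

    void register_com_example_MyClass(JNIEnv* env) {
      jclass clazz = env->FindClass("com/example/MyClass");
      if (clazz == nullptr ||
          env->RegisterNatives(clazz, gMethods, sizeof(gMethods) / sizeof(gMethods[0])) != 0) {
        env->FatalError("Failed to register com.example.MyClass natives");
      }
    }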
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f3bea17..39fd910 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -26,11 +26,11 @@
#include <utility>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/allocator.h"
#include "compiler_callbacks.h"
#include "gc_root.h"
#include "instrumentation.h"
-#include "instruction_set.h"
#include "jobject_comparator.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -55,8 +55,8 @@
class Throwable;
} // namespace mirror
namespace verifier {
-class MethodVerifier;
-}
+ class MethodVerifier;
+} // namespace verifier
class ClassLinker;
class DexFile;
class InternTable;
@@ -312,6 +312,7 @@
// Returns a special method that calls into a trampoline for runtime imt conflicts.
mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasImtConflictMethod() const {
return !imt_conflict_method_.IsNull();
@@ -320,6 +321,9 @@
void SetImtConflictMethod(mirror::ArtMethod* method) {
imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
}
+ void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
+ imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
+ }
mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -375,8 +379,7 @@
void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
- mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -511,6 +514,9 @@
GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
GcRoot<mirror::ArtMethod> resolution_method_;
GcRoot<mirror::ArtMethod> imt_conflict_method_;
+ // The unresolved method has the same behavior as the conflict method; the class linker uses it
+ // to differentiate between unfilled imt slots and conflict slots in superclasses.
+ GcRoot<mirror::ArtMethod> imt_unimplemented_method_;
GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;
// Special sentinel object used to invalid conditions in JNI (cleared weak references) and
@@ -635,17 +641,20 @@
bool implicit_so_checks_; // StackOverflow checks are implicit.
bool implicit_suspend_checks_; // Thread suspension checks are implicit.
- // The filename to the native bridge library. If this is not empty the native bridge will be
- // initialized and loaded from the given file (initialized and available). An empty value means
- // that there's no native bridge (initialized but not available).
+ // Whether or not a native bridge has been loaded.
//
// The native bridge allows running native code compiled for a foreign ISA. The way it works is,
// if standard dlopen fails to load native library associated with native activity, it calls to
// the native bridge to load it and then gets the trampoline for the entry to native activity.
- std::string native_bridge_library_filename_;
+ //
+ // The option 'native_bridge_library_filename' specifies the name of the native bridge.
+ // When non-empty the native bridge will be loaded from the given file. An empty value means
+ // that there's no native bridge.
+ bool is_native_bridge_loaded_;
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
+std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
} // namespace art
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 079d7e5..33600dd 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -32,13 +32,12 @@
struct sigaction old_action;
void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
- static bool handlingUnexpectedSignal = false;
- if (handlingUnexpectedSignal) {
- LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
- LogMessage::LogLine(data, "HandleUnexpectedSignal reentered\n");
+ static bool handling_unexpected_signal = false;
+ if (handling_unexpected_signal) {
+ LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL, "HandleUnexpectedSignal reentered\n");
_exit(1);
}
- handlingUnexpectedSignal = true;
+ handling_unexpected_signal = true;
gAborting++; // set before taking any locks
MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 46ee274..1de035c 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -21,6 +21,9 @@
#include <sys/utsname.h>
#include <inttypes.h>
+#include <sstream>
+
+#include "base/dumpable.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/stringprintf.h"
@@ -32,13 +35,13 @@
static constexpr bool kDumpHeapObjectOnSigsevg = false;
struct Backtrace {
- void Dump(std::ostream& os) {
+ void Dump(std::ostream& os) const {
DumpNativeStack(os, GetTid(), "\t");
}
};
struct OsInfo {
- void Dump(std::ostream& os) {
+ void Dump(std::ostream& os) const {
utsname info;
uname(&info);
// Linux 2.6.38.8-gg784 (x86_64)
@@ -132,9 +135,11 @@
}
struct UContext {
- explicit UContext(void* raw_context) : context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {}
+ explicit UContext(void* raw_context) :
+ context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {
+ }
- void Dump(std::ostream& os) {
+ void Dump(std::ostream& os) const {
// TODO: support non-x86 hosts (not urgent because this code doesn't run on targets).
#if defined(__APPLE__) && defined(__i386__)
DumpRegister32(os, "eax", context->__ss.__eax);
@@ -228,15 +233,15 @@
#endif
}
- void DumpRegister32(std::ostream& os, const char* name, uint32_t value) {
+ void DumpRegister32(std::ostream& os, const char* name, uint32_t value) const {
os << StringPrintf(" %6s: 0x%08x", name, value);
}
- void DumpRegister64(std::ostream& os, const char* name, uint64_t value) {
+ void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const {
os << StringPrintf(" %6s: 0x%016" PRIx64, name, value);
}
- void DumpX86Flags(std::ostream& os, uint32_t flags) {
+ void DumpX86Flags(std::ostream& os, uint32_t flags) const {
os << " [";
if ((flags & (1 << 0)) != 0) {
os << " CF";
@@ -274,8 +279,7 @@
void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
static bool handlingUnexpectedSignal = false;
if (handlingUnexpectedSignal) {
- LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
- LogMessage::LogLine(data, "HandleUnexpectedSignal reentered\n");
+ LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL, "HandleUnexpectedSignal reentered\n");
_exit(1);
}
handlingUnexpectedSignal = true;
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 336340e..e377542 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -25,10 +25,12 @@
#include <sys/types.h>
#include <unistd.h>
+#include <sstream>
+
+#include "arch/instruction_set.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "gc/heap.h"
-#include "instruction_set.h"
#include "os.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -108,11 +110,17 @@
PLOG(ERROR) << "Unable to open stack trace file '" << stack_trace_file_ << "'";
return;
}
- std::unique_ptr<File> file(new File(fd, stack_trace_file_));
- if (!file->WriteFully(s.data(), s.size())) {
- PLOG(ERROR) << "Failed to write stack traces to '" << stack_trace_file_ << "'";
+ std::unique_ptr<File> file(new File(fd, stack_trace_file_, true));
+ bool success = file->WriteFully(s.data(), s.size());
+ if (success) {
+ success = file->FlushCloseOrErase() == 0;
} else {
+ file->Erase();
+ }
+ if (success) {
LOG(INFO) << "Wrote stack traces to '" << stack_trace_file_ << "'";
+ } else {
+ PLOG(ERROR) << "Failed to write stack traces to '" << stack_trace_file_ << "'";
}
}
@@ -131,7 +139,7 @@
runtime->DumpForSigQuit(os);
- if (false) {
+ if ((false)) {
std::string maps;
if (ReadFileToString("/proc/self/maps", &maps)) {
os << "/proc/self/maps:\n" << maps;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index b4e85e2..43714b9 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -125,9 +125,10 @@
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
- } else if (m->IsOptimized()) {
+ } else if (m->IsOptimized(sizeof(void*))) {
// TODO: Implement, currently only used for exceptions when jdwp is enabled.
- LOG(WARNING) << "StackVisitor::GetThisObject is unimplemented with the optimizing compiler";
+ UNIMPLEMENTED(WARNING)
+ << "StackVisitor::GetThisObject is unimplemented with the optimizing compiler";
return nullptr;
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
@@ -152,9 +153,9 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
@@ -206,9 +207,9 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
@@ -253,9 +254,9 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
@@ -317,9 +318,9 @@
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
@@ -408,8 +409,8 @@
size_t StackVisitor::ComputeNumFrames(Thread* thread) {
struct NumFramesVisitor : public StackVisitor {
- explicit NumFramesVisitor(Thread* thread)
- : StackVisitor(thread, NULL), frames(0) {}
+ explicit NumFramesVisitor(Thread* thread_in)
+ : StackVisitor(thread_in, NULL), frames(0) {}
bool VisitFrame() OVERRIDE {
frames++;
@@ -460,8 +461,8 @@
void StackVisitor::DescribeStack(Thread* thread) {
struct DescribeStackVisitor : public StackVisitor {
- explicit DescribeStackVisitor(Thread* thread)
- : StackVisitor(thread, NULL) {}
+ explicit DescribeStackVisitor(Thread* thread_in)
+ : StackVisitor(thread_in, NULL) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
@@ -526,7 +527,7 @@
current_fragment = current_fragment->GetLink()) {
cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
cur_quick_frame_ = current_fragment->GetTopQuickFrame();
- cur_quick_frame_pc_ = current_fragment->GetTopQuickFramePc();
+ cur_quick_frame_pc_ = 0;
if (cur_quick_frame_ != NULL) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
diff --git a/runtime/stack.h b/runtime/stack.h
index 25e50a1..1d772e6 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,8 +20,8 @@
#include <stdint.h>
#include <string>
+#include "arch/instruction_set.h"
#include "dex_file.h"
-#include "instruction_set.h"
#include "mirror/object_reference.h"
#include "throw_location.h"
#include "utils.h"
@@ -53,6 +53,7 @@
kImpreciseConstant,
kUndefined,
};
+std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
@@ -336,9 +337,7 @@
}
#if defined(ART_USE_PORTABLE_COMPILER)
- enum ShadowFrameFlag {
- kHasReferenceArray = 1ul << 31
- };
+ static constexpr uint32_t kHasReferenceArray = 1ul << 31;
// TODO: make const in the portable case.
uint32_t number_of_vregs_;
#else
@@ -360,7 +359,7 @@
class PACKED(4) ManagedStack {
public:
ManagedStack()
- : link_(NULL), top_shadow_frame_(NULL), top_quick_frame_(NULL), top_quick_frame_pc_(0) {}
+ : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}
void PushManagedStackFragment(ManagedStack* fragment) {
// Copy this top fragment into given fragment.
@@ -386,29 +385,16 @@
}
void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
- DCHECK(top_shadow_frame_ == NULL);
+ DCHECK(top_shadow_frame_ == nullptr);
top_quick_frame_ = top;
}
- uintptr_t GetTopQuickFramePc() const {
- return top_quick_frame_pc_;
- }
-
- void SetTopQuickFramePc(uintptr_t pc) {
- DCHECK(top_shadow_frame_ == NULL);
- top_quick_frame_pc_ = pc;
- }
-
static size_t TopQuickFrameOffset() {
return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
}
- static size_t TopQuickFramePcOffset() {
- return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_pc_);
- }
-
ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
- DCHECK(top_quick_frame_ == NULL);
+ DCHECK(top_quick_frame_ == nullptr);
ShadowFrame* old_frame = top_shadow_frame_;
top_shadow_frame_ = new_top_frame;
new_top_frame->SetLink(old_frame);
@@ -416,8 +402,8 @@
}
ShadowFrame* PopShadowFrame() {
- DCHECK(top_quick_frame_ == NULL);
- CHECK(top_shadow_frame_ != NULL);
+ DCHECK(top_quick_frame_ == nullptr);
+ CHECK(top_shadow_frame_ != nullptr);
ShadowFrame* frame = top_shadow_frame_;
top_shadow_frame_ = frame->GetLink();
return frame;
@@ -428,7 +414,7 @@
}
void SetTopShadowFrame(ShadowFrame* top) {
- DCHECK(top_quick_frame_ == NULL);
+ DCHECK(top_quick_frame_ == nullptr);
top_shadow_frame_ = top;
}
@@ -441,10 +427,9 @@
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
private:
+ StackReference<mirror::ArtMethod>* top_quick_frame_;
ManagedStack* link_;
ShadowFrame* top_shadow_frame_;
- StackReference<mirror::ArtMethod>* top_quick_frame_;
- uintptr_t top_quick_frame_pc_;
};
class StackVisitor {
@@ -647,6 +632,7 @@
}
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
+ UNUSED(isa);
// According to stack model, the first out is above the Method reference.
return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
}
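Note on the ManagedStack hunks above: with top_quick_frame_pc_ gone and top_quick_frame_ moved to be the first member, the quick-frame pointer now sits at offset 0 of ManagedStack, so TopQuickFrameOffset() returns 0. A minimal way to pin that layout down is sketched below; the static_assert itself is only illustrative, not part of the change.

  // Illustrative: the reordered ManagedStack puts the quick frame pointer first,
  // so its offset within the struct is zero.
  static_assert(OFFSETOF_MEMBER(ManagedStack, top_quick_frame_) == 0,
                "top_quick_frame_ expected at offset 0");
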
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 9b49d31..a58ecab 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -88,6 +88,7 @@
kNone,
kInStack,
kInRegister,
+ kInFpuRegister,
kConstant
};
@@ -285,7 +286,7 @@
}
}
LOG(FATAL) << "Unreachable";
- return StackMap(MemoryRegion());
+ UNREACHABLE();
}
StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) {
@@ -297,7 +298,7 @@
}
}
LOG(FATAL) << "Unreachable";
- return StackMap(MemoryRegion());
+ UNREACHABLE();
}
private:
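The two hunks above replace a fabricated return StackMap(MemoryRegion()) after LOG(FATAL) with UNREACHABLE(). Assuming UNREACHABLE() wraps __builtin_unreachable() (as in base/macros.h), the compiler knows the fatal path never falls through, so no dummy value has to be constructed. A small sketch of the pattern, with IndexOf as a placeholder function:

  #include <vector>

  // Sketch: a value-returning function may end on a fatal path without a
  // placeholder return once that path is marked unreachable.
  int IndexOf(const std::vector<int>& values, int key) {
    for (size_t i = 0; i < values.size(); ++i) {
      if (values[i] == key) {
        return static_cast<int>(i);
      }
    }
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();  // replaces a fabricated "return -1;"
  }
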
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index e1b5b91..e30e745 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -78,12 +78,14 @@
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
if (kIsDebugBuild) {
- CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ if (gAborting == 0) {
+ CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ }
if (check_locks) {
bool bad_mutexes_held = false;
for (int i = kLockLevelCount - 1; i >= 0; --i) {
// We expect no locks except the mutator_lock_ or thread list suspend thread lock.
- if (i != kMutatorLock && i != kThreadListSuspendThreadLock) {
+ if (i != kMutatorLock) {
BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
if (held_mutex != NULL) {
LOG(ERROR) << "holding \"" << held_mutex->GetName()
@@ -92,7 +94,9 @@
}
}
}
- CHECK(!bad_mutexes_held);
+ if (gAborting == 0) {
+ CHECK(!bad_mutexes_held);
+ }
}
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index efe27ee..cd47b5e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -29,9 +29,11 @@
#include <cerrno>
#include <iostream>
#include <list>
+#include <sstream>
#include "arch/context.h"
#include "base/mutex.h"
+#include "base/to_str.h"
#include "class_linker-inl.h"
#include "class_linker.h"
#include "debugger.h"
@@ -156,7 +158,7 @@
// Check that if we got here we cannot be shutting down (as shutdown should never have started
// while threads are being born).
CHECK(!runtime->IsShuttingDownLocked());
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM()));
Runtime::Current()->EndThreadBirth();
}
{
@@ -168,6 +170,9 @@
self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
self->tlsPtr_.jpeer = nullptr;
self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
+
+ mirror::ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
+ self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
Dbg::PostThreadStart(self);
// Invoke the 'run' method of our java.lang.Thread.
@@ -343,40 +348,46 @@
}
}
-void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
+bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
// This function does all the initialization that must be run by the native thread it applies to.
// (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
// we can handshake with the corresponding native thread when it's ready.) Check this native
// thread hasn't been through here already...
CHECK(Thread::Current() == nullptr);
+
+ // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current work; this
+ // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
+ tlsPtr_.pthread_self = pthread_self();
+ CHECK(is_started_);
+
SetUpAlternateSignalStack();
+ if (!InitStackHwm()) {
+ return false;
+ }
InitCpu();
InitTlsEntryPoints();
RemoveSuspendTrigger();
InitCardTable();
InitTid();
- // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
- // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
- tlsPtr_.pthread_self = pthread_self();
- CHECK(is_started_);
+
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
DCHECK_EQ(Thread::Current(), this);
tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
- InitStackHwm();
tlsPtr_.jni_env = new JNIEnvExt(this, java_vm);
thread_list->Register(this);
+ return true;
}
Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- Thread* self;
Runtime* runtime = Runtime::Current();
if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
return nullptr;
}
+ Thread* self;
{
MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
@@ -385,8 +396,12 @@
} else {
Runtime::Current()->StartThreadBirth();
self = new Thread(as_daemon);
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
Runtime::Current()->EndThreadBirth();
+ if (!init_success) {
+ delete self;
+ return nullptr;
+ }
}
}
@@ -409,6 +424,11 @@
}
}
+ {
+ ScopedObjectAccess soa(self);
+ Dbg::PostThreadStart(self);
+ }
+
return self;
}
@@ -421,6 +441,11 @@
thread_group = runtime->GetMainThreadGroup();
}
ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
+ // Add missing null check in case of OOM b/18297817
+ if (name != nullptr && thread_name.get() == nullptr) {
+ CHECK(IsExceptionPending());
+ return;
+ }
jint thread_priority = GetNativePriority();
jboolean thread_is_daemon = as_daemon;
@@ -484,7 +509,7 @@
Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}
-void Thread::InitStackHwm() {
+bool Thread::InitStackHwm() {
void* read_stack_base;
size_t read_stack_size;
size_t read_guard_size;
@@ -506,8 +531,10 @@
uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
+ 4 * KB;
if (read_stack_size <= min_stack) {
- LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
- << " bytes)";
+ // Note, as we know the stack is small, avoid operations that could use a lot of stack.
+ LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
+ "Attempt to attach a thread with a too-small stack");
+ return false;
}
// Set stack_end_ to the bottom of the stack saving space of stack overflows
@@ -532,6 +559,8 @@
// Sanity check.
int stack_variable;
CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
+
+ return true;
}
void Thread::ShortDump(std::ostream& os) const {
@@ -830,10 +859,11 @@
}
struct StackDumpVisitor : public StackVisitor {
- StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
+ StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
- last_method(nullptr), last_line_number(0), repetition_count(0), frame_count(0) {
+ : StackVisitor(thread_in, context), os(os_in), thread(thread_in),
+ can_allocate(can_allocate_in), last_method(nullptr), last_line_number(0),
+ repetition_count(0), frame_count(0) {
}
virtual ~StackDumpVisitor() {
@@ -1031,7 +1061,8 @@
}
// Allocate a TLS slot.
- CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
+ CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
+ "self key");
// Double-check the TLS slot allocation.
if (pthread_getspecific(pthread_key_self_) != nullptr) {
@@ -1812,7 +1843,6 @@
DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
- DO_THREAD_OFFSET(TopOfManagedStackPcOffset<ptr_size>(), "top_quick_frame_pc")
DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
@@ -1965,6 +1995,7 @@
exception_handler.UpdateInstrumentationStack();
exception_handler.DoLongJump();
LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
Context* Thread::GetLongJumpContext() {
@@ -2105,9 +2136,9 @@
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- if (m->IsOptimized()) {
+ if (m->IsOptimized(sizeof(void*))) {
Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m);
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
StackMap map = m->GetStackMap(native_pc_offset);
MemoryRegion mask = map.GetStackMask();
@@ -2136,15 +2167,14 @@
static_cast<size_t>(code_item->registers_size_));
if (num_regs > 0) {
Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m);
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
// For all dex registers in the bitmap
- StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
// Does this register hold a reference?
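Note on the Thread::Init()/InitStackHwm() hunks above: attaching a thread now fails gracefully (InitStackHwm() returns false and Attach() deletes the half-built Thread) instead of aborting when the native stack cannot hold the reserved overflow region plus a 4 KB cushion. A rough sketch of that size test, with made-up constants standing in for GetStackOverflowReservedBytes(kRuntimeISA) and kStackOverflowProtectedSize:

  // Sketch only: placeholder byte counts; the real values are per-ISA.
  constexpr size_t kReservedBytes  = 16 * 1024;  // stand-in for GetStackOverflowReservedBytes()
  constexpr size_t kProtectedBytes = 4 * 1024;   // stand-in for kStackOverflowProtectedSize

  bool StackLargeEnough(size_t read_stack_size) {
    const size_t min_stack = kReservedBytes + kProtectedBytes + 4 * 1024;
    return read_stack_size > min_stack;  // InitStackHwm() returns false when this is false
  }
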
diff --git a/runtime/thread.h b/runtime/thread.h
index 32ed758..b69d2f4 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -25,6 +25,7 @@
#include <setjmp.h>
#include <string>
+#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -34,7 +35,6 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
-#include "instruction_set.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -365,9 +365,8 @@
ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
+ void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
- tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
}
void SetTopOfShadowStack(ShadowFrame* top) {
@@ -637,13 +636,6 @@
ManagedStack::TopQuickFrameOffset());
}
- template<size_t pointer_size>
- static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
- ManagedStack::TopQuickFramePcOffset());
- }
-
const ManagedStack* GetManagedStack() const {
return &tlsPtr_.managed_stack;
}
@@ -901,14 +893,14 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ bool Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
void InitCardTable();
void InitCpu();
void CleanupCpu();
void InitTlsEntryPoints();
void InitTid();
void InitPthreadKeySelf();
- void InitStackHwm();
+ bool InitStackHwm();
void SetUpAlternateSignalStack();
void TearDownAlternateSignalStack();
@@ -935,7 +927,7 @@
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
};
- COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size);
+ static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
static void ThreadExitCallback(void* arg);
@@ -971,8 +963,8 @@
}
union StateAndFlags state_and_flags;
- COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
- sizeof_state_and_flags_and_int32_are_different);
+ static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
+ "Size of state_and_flags and int32 are different");
// A non-zero value is used to tell the current thread to enter a safe point
// at the next poll.
@@ -1201,7 +1193,6 @@
};
std::ostream& operator<<(std::ostream& os, const Thread& thread);
-std::ostream& operator<<(std::ostream& os, const ThreadState& state);
} // namespace art
diff --git a/runtime/thread_android.cc b/runtime/thread_android.cc
index 73a9e54..d5db983 100644
--- a/runtime/thread_android.cc
+++ b/runtime/thread_android.cc
@@ -55,6 +55,13 @@
int newNice = kNiceValues[newPriority-1];
pid_t tid = GetTid();
+ // TODO: b/18249098 The code below is broken. It uses getpriority() as a proxy for whether a
+ // thread is already in the SP_FOREGROUND cgroup. This is not necessarily true for background
+ // processes, where all threads are in the SP_BACKGROUND cgroup. This means that callers will
+ // have to call setPriority twice to do what they want :
+ //
+ // Thread.setPriority(Thread.MIN_PRIORITY); // no-op wrt to cgroups
+ // Thread.setPriority(Thread.MAX_PRIORITY); // will actually change cgroups.
if (newNice >= ANDROID_PRIORITY_BACKGROUND) {
set_sched_policy(tid, SP_BACKGROUND);
} else if (getpriority(PRIO_PROCESS, tid) >= ANDROID_PRIORITY_BACKGROUND) {
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index 1254056..0284364 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -16,6 +16,8 @@
#include "thread.h"
+#include <signal.h>
+
namespace art {
void Thread::SetNativePriority(int) {
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 646830a..5ff90d6 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -25,6 +25,8 @@
#include <sys/types.h>
#include <unistd.h>
+#include <sstream>
+
#include "base/mutex.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
@@ -218,7 +220,7 @@
// individual thread requires polling. delay_us is the requested sleep and total_delay_us
// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
// subsequent sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
+static void ThreadSuspendSleep(useconds_t* delay_us, useconds_t* total_delay_us) {
useconds_t new_delay_us = (*delay_us) * 2;
CHECK_GE(new_delay_us, *delay_us);
if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
@@ -239,7 +241,7 @@
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking) {
+ if (kDebugLocking && gAborting == 0) {
CHECK_NE(self->GetState(), kRunnable);
}
@@ -283,7 +285,7 @@
useconds_t total_delay_us = 0;
do {
useconds_t delay_us = 100;
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
} while (!thread->IsSuspended());
// Shouldn't need to wait for longer than 1000 microseconds.
constexpr useconds_t kLongWaitThresholdUS = 1000;
@@ -480,17 +482,18 @@
VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}
-static void ThreadSuspendByPeerWarning(Thread* self, int level, const char* message, jobject peer) {
+static void ThreadSuspendByPeerWarning(Thread* self, LogSeverity severity, const char* message,
+ jobject peer) {
JNIEnvExt* env = self->GetJniEnv();
ScopedLocalRef<jstring>
scoped_name_string(env, (jstring)env->GetObjectField(peer,
WellKnownClasses::java_lang_Thread_name));
ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
if (scoped_name_chars.c_str() == NULL) {
- LOG(level) << message << ": " << peer;
+ LOG(severity) << message << ": " << peer;
env->ExceptionClear();
} else {
- LOG(level) << message << ": " << peer << ":" << scoped_name_chars.c_str();
+ LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
}
}
@@ -512,7 +515,7 @@
// than request thread suspension, to avoid potential cycles in threads requesting each other
// suspend.
ScopedObjectAccess soa(self);
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
thread = Thread::FromManagedThread(soa, peer);
if (thread == nullptr) {
ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
@@ -525,8 +528,14 @@
}
VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
{
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (request_suspension) {
+ if (self->GetSuspendCount() > 0) {
+ // We hold the suspend count lock but another thread is trying to suspend us. It's not
+ // safe to try to suspend another thread in case we get a cycle. Start the loop again
+ // which will allow this thread to be suspended.
+ continue;
+ }
thread->ModifySuspendCount(self, +1, debug_suspension);
request_suspension = false;
did_suspend_request = true;
@@ -558,12 +567,13 @@
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByPeer sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
-static void ThreadSuspendByThreadIdWarning(int level, const char* message, uint32_t thread_id) {
- LOG(level) << StringPrintf("%s: %d", message, thread_id);
+static void ThreadSuspendByThreadIdWarning(LogSeverity severity, const char* message,
+ uint32_t thread_id) {
+ LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}
Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension,
@@ -584,7 +594,7 @@
// than request thread suspension, to avoid potential cycles in threads requesting each other
// suspend.
ScopedObjectAccess soa(self);
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Thread* thread = nullptr;
for (const auto& it : list_) {
if (it->GetThreadId() == thread_id) {
@@ -602,8 +612,14 @@
VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
DCHECK(Contains(thread));
{
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (suspended_thread == nullptr) {
+ if (self->GetSuspendCount() > 0) {
+ // We hold the suspend count lock but another thread is trying to suspend us. It's not
+ // safe to try to suspend another thread in case we get a cycle. Start the loop again
+ // which will allow this thread to be suspended.
+ continue;
+ }
thread->ModifySuspendCount(self, +1, debug_suspension);
suspended_thread = thread;
} else {
@@ -635,7 +651,7 @@
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByThreadId sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
@@ -658,10 +674,11 @@
VLOG(threads) << *self << " SuspendAllForDebugger starting...";
{
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
{
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
// Update global suspend all state for attaching threads.
+ DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
++suspend_all_count_;
++debug_suspend_all_count_;
// Increment everybody's suspend count (except our own).
@@ -753,6 +770,55 @@
VLOG(threads) << *self << " self-reviving (debugger)";
}
+void ThreadList::ResumeAllForDebugger() {
+ Thread* self = Thread::Current();
+ Thread* debug_thread = Dbg::GetDebugThread();
+ bool needs_resume = false;
+
+ VLOG(threads) << *self << " ResumeAllForDebugger starting...";
+
+ // Threads can't resume if we exclusively hold the mutator lock.
+ Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+
+ {
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
+ {
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
+ // Update global suspend all state for attaching threads.
+ DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
+ needs_resume = (debug_suspend_all_count_ > 0);
+ if (needs_resume) {
+ --suspend_all_count_;
+ --debug_suspend_all_count_;
+ // Decrement everybody's suspend count (except our own).
+ for (const auto& thread : list_) {
+ if (thread == self || thread == debug_thread) {
+ continue;
+ }
+ if (thread->GetDebugSuspendCount() == 0) {
+ // This thread may have been individually resumed with ThreadReference.Resume.
+ continue;
+ }
+ VLOG(threads) << "requesting thread resume: " << *thread;
+ thread->ModifySuspendCount(self, -1, true);
+ }
+ } else {
+ // We've been asked to resume all threads without being asked to
+ // suspend them all before. Let's print a warning.
+ LOG(WARNING) << "Debugger attempted to resume all threads without "
+ << "having suspended them all before.";
+ }
+ }
+ }
+
+ if (needs_resume) {
+ MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ Thread::resume_cond_->Broadcast(self);
+ }
+
+ VLOG(threads) << *self << " ResumeAllForDebugger complete";
+}
+
void ThreadList::UndoDebuggerSuspensions() {
Thread* self = Thread::Current();
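ThreadSuspendSleep() above drops its unused Thread* parameter but keeps the contract spelled out in its comment: the first wait is only a yield, after which each sleep doubles from 1 ms up to a 0.5 s cap while total_delay_us accumulates the time actually slept. A self-contained sketch of that backoff (SuspendSleep is a stand-in name, not the exact function body):

  #include <sched.h>
  #include <unistd.h>

  // Sketch of the suspend-wait backoff: yield first, then sleep 1ms, 2ms, ...
  // capping any single sleep at 0.5s and tracking the total time slept.
  void SuspendSleep(useconds_t* delay_us, useconds_t* total_delay_us) {
    if (*delay_us == 0) {
      sched_yield();          // first wait is just a yield
      *delay_us = 1000;       // subsequent waits start at 1ms
      return;
    }
    usleep(*delay_us);
    *total_delay_us += *delay_us;
    useconds_t doubled = (*delay_us) * 2;
    if (doubled < 500000) {   // don't let a single sleep exceed 0.5s
      *delay_us = doubled;
    }
  }
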
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 9f47f9f..13684c7 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,7 +68,6 @@
// is set to true.
Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -78,7 +77,6 @@
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -105,6 +103,11 @@
void SuspendSelfForDebugger()
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
+ // Resume all threads
+ void ResumeAllForDebugger()
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
+
void UndoDebuggerSuspensions()
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index e8c9ff8..587eb32 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -42,14 +42,14 @@
}
ThreadPoolWorker::~ThreadPoolWorker() {
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "thread pool worker shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "thread pool worker shutdown");
}
void ThreadPoolWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
thread_pool_->creation_barier_.Wait(self);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -58,11 +58,11 @@
void* ThreadPoolWorker::Callback(void* arg) {
ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
Runtime* runtime = Runtime::Current();
- CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, NULL, false));
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, nullptr, false));
// Do work until its time to shut down.
worker->Run();
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
void ThreadPool::AddTask(Thread* self, Task* task) {
@@ -89,8 +89,9 @@
max_active_workers_(num_threads) {
Thread* self = Thread::Current();
while (GetThreadCount() < num_threads) {
- const std::string name = StringPrintf("%s worker thread %zu", name_.c_str(), GetThreadCount());
- threads_.push_back(new ThreadPoolWorker(this, name, ThreadPoolWorker::kDefaultStackSize));
+ const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
+ GetThreadCount());
+ threads_.push_back(new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
}
// Wait for all of the threads to attach.
creation_barier_.Wait(self);
@@ -137,8 +138,8 @@
const size_t active_threads = thread_count - waiting_count_;
// <= since self is considered an active worker.
if (active_threads <= max_active_workers_) {
- Task* task = TryGetTaskLocked(self);
- if (task != NULL) {
+ Task* task = TryGetTaskLocked();
+ if (task != nullptr) {
return task;
}
}
@@ -157,28 +158,28 @@
--waiting_count_;
}
- // We are shutting down, return NULL to tell the worker thread to stop looping.
- return NULL;
+ // We are shutting down, return nullptr to tell the worker thread to stop looping.
+ return nullptr;
}
Task* ThreadPool::TryGetTask(Thread* self) {
MutexLock mu(self, task_queue_lock_);
- return TryGetTaskLocked(self);
+ return TryGetTaskLocked();
}
-Task* ThreadPool::TryGetTaskLocked(Thread* self) {
+Task* ThreadPool::TryGetTaskLocked() {
if (started_ && !tasks_.empty()) {
Task* task = tasks_.front();
tasks_.pop_front();
return task;
}
- return NULL;
+ return nullptr;
}
void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
if (do_work) {
- Task* task = NULL;
- while ((task = TryGetTask(self)) != NULL) {
+ Task* task = nullptr;
+ while ((task = TryGetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -201,17 +202,17 @@
WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
size_t stack_size)
- : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {}
+ : ThreadPoolWorker(thread_pool, name, stack_size), task_(nullptr) {}
void WorkStealingWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
WorkStealingThreadPool* thread_pool = down_cast<WorkStealingThreadPool*>(thread_pool_);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
WorkStealingTask* stealing_task = down_cast<WorkStealingTask*>(task);
{
- CHECK(task_ == NULL);
+ CHECK(task_ == nullptr);
MutexLock mu(self, thread_pool->work_steal_lock_);
// Register that we are running the task
++stealing_task->ref_count_;
@@ -221,7 +222,7 @@
// Mark ourselves as not running a task so that nobody tries to steal from us.
// There is a race condition that someone starts stealing from us at this point. This is okay
// due to the reference counting.
- task_ = NULL;
+ task_ = nullptr;
bool finalize;
@@ -229,13 +230,13 @@
// all that happens when the race occurs is that we steal some work instead of processing a
// task from the queue.
while (thread_pool->GetTaskCount(self) == 0) {
- WorkStealingTask* steal_from_task = NULL;
+ WorkStealingTask* steal_from_task = nullptr;
{
MutexLock mu(self, thread_pool->work_steal_lock_);
// Try finding a task to steal from.
- steal_from_task = thread_pool->FindTaskToStealFrom(self);
- if (steal_from_task != NULL) {
+ steal_from_task = thread_pool->FindTaskToStealFrom();
+ if (steal_from_task != nullptr) {
CHECK_NE(stealing_task, steal_from_task)
<< "Attempting to steal from completed self task";
steal_from_task->ref_count_++;
@@ -244,7 +245,7 @@
}
}
- if (steal_from_task != NULL) {
+ if (steal_from_task != nullptr) {
// Task which completed earlier is going to steal some work.
stealing_task->StealFrom(self, steal_from_task);
@@ -279,12 +280,13 @@
work_steal_lock_("work stealing lock"),
steal_index_(0) {
while (GetThreadCount() < num_threads) {
- const std::string name = StringPrintf("Work stealing worker %zu", GetThreadCount());
- threads_.push_back(new WorkStealingWorker(this, name, ThreadPoolWorker::kDefaultStackSize));
+ const std::string worker_name = StringPrintf("Work stealing worker %zu", GetThreadCount());
+ threads_.push_back(new WorkStealingWorker(this, worker_name,
+ ThreadPoolWorker::kDefaultStackSize));
}
}
-WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) {
+WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom() {
const size_t thread_count = GetThreadCount();
for (size_t i = 0; i < thread_count; ++i) {
// TODO: Use CAS instead of lock.
@@ -301,7 +303,7 @@
}
}
// Couldn't find something to steal.
- return NULL;
+ return nullptr;
}
WorkStealingThreadPool::~WorkStealingThreadPool() {}
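TryGetTaskLocked() and FindTaskToStealFrom() above lose their unused Thread* parameters while keeping the lock annotations declared in thread_pool.h. The shape is the usual lock split: the public method acquires task_queue_lock_ and then calls the *Locked helper, which assumes the lock is already held. A condensed sketch (member declarations trimmed):

  // The public entry point takes the lock; the *Locked helper, whose
  // declaration carries EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_), does not.
  Task* ThreadPool::TryGetTask(Thread* self) {
    MutexLock mu(self, task_queue_lock_);  // held across the *Locked call below
    return TryGetTaskLocked();
  }

  Task* ThreadPool::TryGetTaskLocked() {
    if (started_ && !tasks_.empty()) {     // caller must hold task_queue_lock_
      Task* task = tasks_.front();
      tasks_.pop_front();
      return task;
    }
    return nullptr;
  }
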
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index c816c84..d6330c8 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -101,7 +101,7 @@
// Try to get a task, returning NULL if there is none available.
Task* TryGetTask(Thread* self);
- Task* TryGetTaskLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
+ Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
// Are we shutting down?
bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) {
@@ -178,7 +178,7 @@
size_t steal_index_;
// Find a task to steal from
- WorkStealingTask* FindTaskToStealFrom(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
+ WorkStealingTask* FindTaskToStealFrom() EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
friend class WorkStealingWorker;
};
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 4bd44dc..d5f17d1 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -95,11 +95,7 @@
EXPECT_EQ(0, bad_count.LoadSequentiallyConsistent());
// Allow tasks to finish up and delete themselves.
thread_pool.StartWorkers(self);
- while (count.LoadSequentiallyConsistent() != num_tasks &&
- bad_count.LoadSequentiallyConsistent() != 1) {
- usleep(200);
- }
- thread_pool.StopWorkers(self);
+ thread_pool.Wait(self, false, false);
}
class TreeTask : public Task {
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index 0e47d21..6e5deeb 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_THREAD_STATE_H_
#define ART_RUNTIME_THREAD_STATE_H_
+#include <ostream>
+
namespace art {
enum ThreadState {
@@ -43,6 +45,7 @@
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
};
+std::ostream& operator<<(std::ostream& os, const ThreadState& rhs);
} // namespace art
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 91a37fd..2cc50b3 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -18,6 +18,9 @@
#include <sys/uio.h>
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
@@ -241,7 +244,8 @@
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
-static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg) {
+static void ClearThreadStackTraceAndClockBase(Thread* thread,
+ void* arg ATTRIBUTE_UNUSED) {
thread->SetTraceClockBase(0);
std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
thread->SetStackTraceSample(NULL);
@@ -427,6 +431,15 @@
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind);
}
+ if (the_trace->trace_file_.get() != nullptr) {
+ // Do not try to erase, so flush and close explicitly.
+ if (the_trace->trace_file_->Flush() != 0) {
+ PLOG(ERROR) << "Could not flush trace file.";
+ }
+ if (the_trace->trace_file_->Close() != 0) {
+ PLOG(ERROR) << "Could not close trace file.";
+ }
+ }
delete the_trace;
}
runtime->GetThreadList()->ResumeAll();
@@ -558,27 +571,30 @@
void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t new_dex_pc) {
+ UNUSED(thread, this_object, method, new_dex_pc);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -586,10 +602,9 @@
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
- const JValue& return_value) {
- UNUSED(return_value);
+void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
+ const JValue& return_value ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -597,8 +612,8 @@
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -610,6 +625,7 @@
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, throw_location, catch_method, catch_dex_pc, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index b496f25..478066f 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -144,7 +144,7 @@
LogInternedString(log);
}
-void Transaction::LogInternedString(InternStringLog& log) {
+void Transaction::LogInternedString(const InternStringLog& log) {
Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
MutexLock mu(Thread::Current(), log_lock_);
intern_string_logs_.push_front(log);
@@ -384,7 +384,7 @@
}
break;
default:
- LOG(FATAL) << "Unknown value kind " << field_value.kind;
+ LOG(FATAL) << "Unknown value kind " << static_cast<int>(field_value.kind);
break;
}
}
@@ -406,38 +406,38 @@
void Transaction::InternStringLog::Undo(InternTable* intern_table) {
DCHECK(intern_table != nullptr);
switch (string_op_) {
- case InternStringLog::kInsert: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->RemoveStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->RemoveWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
+ case InternStringLog::kInsert: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->RemoveStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->RemoveWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
}
- case InternStringLog::kRemove: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->InsertStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->InsertWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
- }
- default:
- LOG(FATAL) << "Unknown interned string op";
- break;
+ break;
}
+ case InternStringLog::kRemove: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->InsertStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->InsertWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown interned string op";
+ break;
+ }
}
void Transaction::InternStringLog::VisitRoots(RootCallback* callback, void* arg) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 21d3c98..566f231 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -19,6 +19,7 @@
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/value_object.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "primitive.h"
@@ -35,7 +36,7 @@
}
class InternTable;
-class Transaction {
+class Transaction FINAL {
public:
Transaction();
~Transaction();
@@ -92,7 +93,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- class ObjectLog {
+ class ObjectLog : public ValueObject {
public:
void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
@@ -119,7 +120,7 @@
k64Bits,
kReference
};
- struct FieldValue {
+ struct FieldValue : public ValueObject {
// TODO use JValue instead ?
uint64_t value;
FieldValueKind kind;
@@ -134,7 +135,7 @@
std::map<uint32_t, FieldValue> field_values_;
};
- class ArrayLog {
+ class ArrayLog : public ValueObject {
public:
void LogValue(size_t index, uint64_t value);
@@ -153,7 +154,7 @@
std::map<size_t, uint64_t> array_values_;
};
- class InternStringLog {
+ class InternStringLog : public ValueObject {
public:
enum StringKind {
kStrongString,
@@ -175,11 +176,11 @@
private:
mirror::String* str_;
- StringKind string_kind_;
- StringOp string_op_;
+ const StringKind string_kind_;
+ const StringOp string_op_;
};
- void LogInternedString(InternStringLog& log)
+ void LogInternedString(const InternStringLog& log)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
LOCKS_EXCLUDED(log_lock_);
diff --git a/runtime/utf.cc b/runtime/utf.cc
index 02cbe3b..05b847b 100644
--- a/runtime/utf.cc
+++ b/runtime/utf.cc
@@ -70,27 +70,27 @@
int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset,
size_t char_count) {
- int32_t hash = 0;
+ uint32_t hash = 0;
for (size_t i = 0; i < char_count; i++) {
hash = hash * 31 + chars->Get(offset + i);
}
- return hash;
+ return static_cast<int32_t>(hash);
}
int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count) {
- int32_t hash = 0;
+ uint32_t hash = 0;
while (char_count--) {
hash = hash * 31 + *chars++;
}
- return hash;
+ return static_cast<int32_t>(hash);
}
-int32_t ComputeUtf8Hash(const char* chars) {
- int32_t hash = 0;
+size_t ComputeModifiedUtf8Hash(const char* chars) {
+ size_t hash = 0;
while (*chars != '\0') {
- hash = hash * 31 + GetUtf16FromUtf8(&chars);
+ hash = hash * 31 + *chars++;
}
- return hash;
+ return hash;
}
int CompareModifiedUtf8ToUtf16AsCodePointValues(const char* utf8_1, const uint16_t* utf8_2) {
diff --git a/runtime/utf.h b/runtime/utf.h
index a09db9d..b55227b 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -79,8 +79,9 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count);
-// Compute a hash code of a UTF-8 string.
-int32_t ComputeUtf8Hash(const char* chars);
+// Compute a hash code of a modified UTF-8 string. Not the standard java hash since it returns a
+// size_t and hashes individual chars instead of codepoint words.
+size_t ComputeModifiedUtf8Hash(const char* chars);
/*
* Retrieve the next UTF-16 character from a UTF-8 string.
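As the new utf.h comment above notes, ComputeModifiedUtf8Hash() is not the standard Java hash: it hashes the raw modified-UTF-8 bytes instead of decoded UTF-16 code units and returns a size_t. A standalone sketch of that byte-wise hash (HashBytes is a stand-in name):

  // Sketch: 31-based hash over the raw bytes of a modified-UTF-8 string.
  // For ASCII input this matches hashing the decoded UTF-16 chars; for
  // multi-byte sequences it intentionally does not.
  size_t HashBytes(const char* chars) {
    size_t hash = 0;
    while (*chars != '\0') {
      hash = hash * 31 + *chars++;
    }
    return hash;
  }
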
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 9c94f6c..ad46be6 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1109,8 +1109,8 @@
Split(cgroup_lines[i], ':', &cgroup_fields);
std::vector<std::string> cgroups;
Split(cgroup_fields[1], ',', &cgroups);
- for (size_t i = 0; i < cgroups.size(); ++i) {
- if (cgroups[i] == "cpu") {
+ for (size_t j = 0; j < cgroups.size(); ++j) {
+ if (cgroups[j] == "cpu") {
return cgroup_fields[2].substr(1); // Skip the leading slash.
}
}
@@ -1120,7 +1120,20 @@
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
mirror::ArtMethod* current_method) {
-#ifdef __linux__
+#if __linux__
+ // b/18119146
+ if (RUNNING_ON_VALGRIND != 0) {
+ return;
+ }
+
+#if !defined(HAVE_ANDROID_OS)
+ if (GetTid() != tid) {
+ // TODO: dumping of other threads is disabled to avoid crashes during stress testing.
+ // b/15446488.
+ return;
+ }
+#endif
+
std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
if (!backtrace->Unwind(0)) {
os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
@@ -1164,6 +1177,8 @@
}
os << "\n";
}
+#else
+ UNUSED(os, tid, prefix, current_method);
#endif
}
diff --git a/runtime/utils.h b/runtime/utils.h
index b7daa64..668c897 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -24,10 +24,10 @@
#include <string>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "globals.h"
-#include "instruction_set.h"
#include "primitive.h"
namespace art {
@@ -84,7 +84,7 @@
template<int n, typename T>
static inline bool IsAligned(T x) {
- COMPILE_ASSERT((n & (n - 1)) == 0, n_not_power_of_two);
+ static_assert((n & (n - 1)) == 0, "n is not a power of two");
return (x & (n - 1)) == 0;
}
@@ -115,6 +115,20 @@
return (-limit <= value) && (value < limit);
}
+static inline bool IsInt32(int N, int32_t value) {
+ CHECK_LT(0, N);
+ CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int32_t));
+ int32_t limit = static_cast<int32_t>(1) << (N - 1);
+ return (-limit <= value) && (value < limit);
+}
+
+static inline bool IsInt64(int N, int64_t value) {
+ CHECK_LT(0, N);
+ CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int64_t));
+ int64_t limit = static_cast<int64_t>(1) << (N - 1);
+ return (-limit <= value) && (value < limit);
+}
+
static inline bool IsUint(int N, intptr_t value) {
CHECK_LT(0, N);
CHECK_LT(N, kBitsPerIntPtrT);
@@ -151,6 +165,18 @@
typedef T type;
};
+// Like sizeof, but count how many bits a type takes. Pass type explicitly.
+template <typename T>
+static constexpr size_t BitSizeOf() {
+ return sizeof(T) * CHAR_BIT;
+}
+
+// Like sizeof, but count how many bits a type takes. Infers type from parameter.
+template <typename T>
+static constexpr size_t BitSizeOf(T /*x*/) {
+ return sizeof(T) * CHAR_BIT;
+}
+
// For rounding integers.
template<typename T>
static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
@@ -187,10 +213,39 @@
return reinterpret_cast<T*>(RoundUp(reinterpret_cast<uintptr_t>(x), n));
}
+namespace utils {
+namespace detail { // Private, implementation-specific namespace. Do not poke outside of this file.
+template <typename T>
+static constexpr inline T RoundUpToPowerOfTwoRecursive(T x, size_t bit) {
+ return bit == (BitSizeOf<T>()) ? x : RoundUpToPowerOfTwoRecursive(x | x >> bit, bit << 1);
+}
+} // namespace detail
+} // namespace utils
+
+// Recursive implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
+// figure 3-3, page 48, where the function is called clp2.
+template <typename T>
+static constexpr inline T RoundUpToPowerOfTwo(T x) {
+ return art::utils::detail::RoundUpToPowerOfTwoRecursive(x - 1, 1) + 1;
+}
+
+// Find the bit position of the most significant bit (0-based), or -1 if there were no bits set.
+template <typename T>
+static constexpr ssize_t MostSignificantBit(T value) {
+ return (value == 0) ? -1 : (MostSignificantBit(value >> 1) + 1);
+}
+
+// How many bits (minimally) does it take to store the constant 'value'? i.e. 1 for 1, 3 for 5, etc.
+template <typename T>
+static constexpr size_t MinimumBitsToStore(T value) {
+ return static_cast<size_t>(MostSignificantBit(value) + 1);
+}
+
template<typename T>
static constexpr int CLZ(T x) {
+ static_assert(sizeof(T) <= sizeof(long long), "T too large, must be smaller than long long"); // NOLINT [runtime/int] [4]
return (sizeof(T) == sizeof(uint32_t))
- ? __builtin_clz(x)
+ ? __builtin_clz(x) // TODO: __builtin_clz[ll] has undefined behavior for x=0
: __builtin_clzll(x);
}
@@ -222,7 +277,7 @@
// of V >= size of U (compile-time checked).
template<typename U, typename V>
static inline V bit_cast(U in) {
- COMPILE_ASSERT(sizeof(U) <= sizeof(V), size_of_u_not_le_size_of_v);
+ static_assert(sizeof(U) <= sizeof(V), "Size of U not <= size of V");
union {
U u;
V v;
@@ -467,15 +522,12 @@
template <typename A, typename B>
inline void operator() (A a, B b) const {
- UNUSED(a);
- UNUSED(b);
+ UNUSED(a, b);
}
template <typename A, typename B, typename C>
inline void operator() (A a, B b, C c) const {
- UNUSED(a);
- UNUSED(b);
- UNUSED(c);
+ UNUSED(a, b, c);
}
};
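The IsInt32()/IsInt64() helpers added above test whether a value fits in an N-bit two's-complement integer, i.e. -2^(N-1) <= value < 2^(N-1). A few worked checks, written as assertions purely for illustration:

  // For N == 8 the representable signed range is [-128, 127].
  CHECK(IsInt32(8, 127));                    // 2^7 - 1 fits
  CHECK(IsInt32(8, -128));                   // -2^7 is the most negative 8-bit value
  CHECK(!IsInt32(8, 128));                   // 128 needs 9 signed bits
  CHECK(IsInt64(33, INT64_C(0xFFFFFFFF)));   // 2^32 - 1 fits in 33 signed bits
  CHECK(!IsInt64(32, INT64_C(0x80000000)));  // 2^31 does not fit in 32 signed bits
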
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 92323da..a98bc90 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -402,4 +402,36 @@
}
}
+TEST_F(UtilsTest, RoundUpToPowerOfTwo) {
+ // Tests the constexpr variant since all the parameters are constexpr
+ EXPECT_EQ(0, RoundUpToPowerOfTwo(0));
+ EXPECT_EQ(1, RoundUpToPowerOfTwo(1));
+ EXPECT_EQ(2, RoundUpToPowerOfTwo(2));
+ EXPECT_EQ(4, RoundUpToPowerOfTwo(3));
+ EXPECT_EQ(8, RoundUpToPowerOfTwo(7));
+
+ EXPECT_EQ(0b10000L, RoundUpToPowerOfTwo(0b01101L));
+ EXPECT_EQ(1ULL << 63, RoundUpToPowerOfTwo(1ULL << 62 | 1ULL));
+}
+
+TEST_F(UtilsTest, MostSignificantBit) {
+ EXPECT_EQ(-1, MostSignificantBit(0));
+ EXPECT_EQ(0, MostSignificantBit(1));
+ EXPECT_EQ(31, MostSignificantBit(~static_cast<uint32_t>(0)));
+ EXPECT_EQ(2, MostSignificantBit(0b110));
+ EXPECT_EQ(2, MostSignificantBit(0b100));
+}
+
+TEST_F(UtilsTest, MinimumBitsToStore) {
+ EXPECT_EQ(0u, MinimumBitsToStore(0));
+ EXPECT_EQ(1u, MinimumBitsToStore(1));
+ EXPECT_EQ(2u, MinimumBitsToStore(0b10));
+ EXPECT_EQ(2u, MinimumBitsToStore(0b11));
+ EXPECT_EQ(3u, MinimumBitsToStore(0b100));
+ EXPECT_EQ(3u, MinimumBitsToStore(0b110));
+ EXPECT_EQ(3u, MinimumBitsToStore(0b101));
+ EXPECT_EQ(8u, MinimumBitsToStore(0xFF));
+ EXPECT_EQ(32u, MinimumBitsToStore(~static_cast<uint32_t>(0)));
+}
+
} // namespace art
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index 36a6e55..e67067c 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -130,7 +130,8 @@
uint8_t flags_;
};
-COMPILE_ASSERT(sizeof(InstructionFlags) == sizeof(uint8_t), err);
+static_assert(sizeof(InstructionFlags) == sizeof(uint8_t),
+ "Size of InstructionFlags not equal to uint8_t");
} // namespace verifier
} // namespace art
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index fb07ba0..a10c7cb 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -88,6 +88,23 @@
}
}
+// Note: returns true on failure.
+ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
+ const char* error_msg, uint32_t work_insn_idx) {
+ if (kIsDebugBuild) {
+ // In a debug build, abort if the error condition is wrong.
+ DCHECK(condition) << error_msg << work_insn_idx;
+ } else {
+ // In a non-debug build, just fail the class.
+ if (!condition) {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << error_msg << work_insn_idx;
+ return true;
+ }
+ }
+
+ return false;
+}
+
MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
mirror::Class* klass,
bool allow_soft_failures,
@@ -1174,11 +1191,6 @@
return os;
}
-extern "C" void MethodVerifierGdbDump(MethodVerifier* v)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- v->Dump(std::cerr);
-}
-
void MethodVerifier::Dump(std::ostream& os) {
if (code_item_ == nullptr) {
os << "Native method\n";
@@ -2014,7 +2026,11 @@
while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
instance_of_idx--;
}
- CHECK(insn_flags_[instance_of_idx].IsOpcode());
+ if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+ "Unable to get previous instruction of if-eqz/if-nez for work index ",
+ work_insn_idx_)) {
+ break;
+ }
} else {
break;
}
@@ -2072,7 +2088,11 @@
while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
move_idx--;
}
- CHECK(insn_flags_[move_idx].IsOpcode());
+ if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+ "Unable to get previous instruction of if-eqz/if-nez for work index ",
+ work_insn_idx_)) {
+ break;
+ }
const Instruction* move_inst = Instruction::At(code_item_->insns_ + move_idx);
switch (move_inst->Opcode()) {
case Instruction::MOVE_OBJECT:
@@ -2155,91 +2175,95 @@
break;
case Instruction::IGET_BOOLEAN:
- VerifyISGet(inst, reg_types_.Boolean(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Boolean(), true, false);
break;
case Instruction::IGET_BYTE:
- VerifyISGet(inst, reg_types_.Byte(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Byte(), true, false);
break;
case Instruction::IGET_CHAR:
- VerifyISGet(inst, reg_types_.Char(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Char(), true, false);
break;
case Instruction::IGET_SHORT:
- VerifyISGet(inst, reg_types_.Short(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Short(), true, false);
break;
case Instruction::IGET:
- VerifyISGet(inst, reg_types_.Integer(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true, false);
break;
case Instruction::IGET_WIDE:
- VerifyISGet(inst, reg_types_.LongLo(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.LongLo(), true, false);
break;
case Instruction::IGET_OBJECT:
- VerifyISGet(inst, reg_types_.JavaLangObject(false), false, false);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false,
+ false);
break;
case Instruction::IPUT_BOOLEAN:
- VerifyISPut(inst, reg_types_.Boolean(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Boolean(), true, false);
break;
case Instruction::IPUT_BYTE:
- VerifyISPut(inst, reg_types_.Byte(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Byte(), true, false);
break;
case Instruction::IPUT_CHAR:
- VerifyISPut(inst, reg_types_.Char(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Char(), true, false);
break;
case Instruction::IPUT_SHORT:
- VerifyISPut(inst, reg_types_.Short(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Short(), true, false);
break;
case Instruction::IPUT:
- VerifyISPut(inst, reg_types_.Integer(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true, false);
break;
case Instruction::IPUT_WIDE:
- VerifyISPut(inst, reg_types_.LongLo(), true, false);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.LongLo(), true, false);
break;
case Instruction::IPUT_OBJECT:
- VerifyISPut(inst, reg_types_.JavaLangObject(false), false, false);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.JavaLangObject(false), false,
+ false);
break;
case Instruction::SGET_BOOLEAN:
- VerifyISGet(inst, reg_types_.Boolean(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Boolean(), true, true);
break;
case Instruction::SGET_BYTE:
- VerifyISGet(inst, reg_types_.Byte(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Byte(), true, true);
break;
case Instruction::SGET_CHAR:
- VerifyISGet(inst, reg_types_.Char(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Char(), true, true);
break;
case Instruction::SGET_SHORT:
- VerifyISGet(inst, reg_types_.Short(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Short(), true, true);
break;
case Instruction::SGET:
- VerifyISGet(inst, reg_types_.Integer(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true, true);
break;
case Instruction::SGET_WIDE:
- VerifyISGet(inst, reg_types_.LongLo(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.LongLo(), true, true);
break;
case Instruction::SGET_OBJECT:
- VerifyISGet(inst, reg_types_.JavaLangObject(false), false, true);
+ VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false,
+ true);
break;
case Instruction::SPUT_BOOLEAN:
- VerifyISPut(inst, reg_types_.Boolean(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Boolean(), true, true);
break;
case Instruction::SPUT_BYTE:
- VerifyISPut(inst, reg_types_.Byte(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Byte(), true, true);
break;
case Instruction::SPUT_CHAR:
- VerifyISPut(inst, reg_types_.Char(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Char(), true, true);
break;
case Instruction::SPUT_SHORT:
- VerifyISPut(inst, reg_types_.Short(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Short(), true, true);
break;
case Instruction::SPUT:
- VerifyISPut(inst, reg_types_.Integer(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true, true);
break;
case Instruction::SPUT_WIDE:
- VerifyISPut(inst, reg_types_.LongLo(), true, true);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.LongLo(), true, true);
break;
case Instruction::SPUT_OBJECT:
- VerifyISPut(inst, reg_types_.JavaLangObject(false), false, true);
+ VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.JavaLangObject(false), false,
+ true);
break;
case Instruction::INVOKE_VIRTUAL:
@@ -2256,8 +2280,7 @@
if (called_method != nullptr) {
StackHandleScope<1> hs(self_);
Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
- MethodHelper mh(h_called_method);
- mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
+ mirror::Class* return_type_class = h_called_method->GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
return_type = &reg_types_.FromClass(h_called_method->GetReturnTypeDescriptor(),
return_type_class,
@@ -2301,8 +2324,7 @@
return_type_descriptor = called_method->GetReturnTypeDescriptor();
StackHandleScope<1> hs(self_);
Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
- MethodHelper mh(h_called_method);
- mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
+ mirror::Class* return_type_class = h_called_method->GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
return_type = &reg_types_.FromClass(return_type_descriptor,
return_type_class,
@@ -2627,7 +2649,7 @@
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
case Instruction::ADD_INT_LIT16:
- case Instruction::RSUB_INT:
+ case Instruction::RSUB_INT_LIT16:
case Instruction::MUL_INT_LIT16:
case Instruction::DIV_INT_LIT16:
case Instruction::REM_INT_LIT16:
@@ -2668,34 +2690,34 @@
// As such they use Class*/Field*/AbstractMethod* as these offsets only have
// meaning if the class linking and resolution were successful.
case Instruction::IGET_QUICK:
- VerifyIGetQuick(inst, reg_types_.Integer(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true);
break;
case Instruction::IGET_WIDE_QUICK:
- VerifyIGetQuick(inst, reg_types_.LongLo(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.LongLo(), true);
break;
case Instruction::IGET_OBJECT_QUICK:
- VerifyIGetQuick(inst, reg_types_.JavaLangObject(false), false);
+ VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false);
break;
case Instruction::IPUT_QUICK:
- VerifyIPutQuick(inst, reg_types_.Integer(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true);
break;
case Instruction::IPUT_BOOLEAN_QUICK:
- VerifyIPutQuick(inst, reg_types_.Boolean(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Boolean(), true);
break;
case Instruction::IPUT_BYTE_QUICK:
- VerifyIPutQuick(inst, reg_types_.Byte(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Byte(), true);
break;
case Instruction::IPUT_CHAR_QUICK:
- VerifyIPutQuick(inst, reg_types_.Char(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Char(), true);
break;
case Instruction::IPUT_SHORT_QUICK:
- VerifyIPutQuick(inst, reg_types_.Short(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Short(), true);
break;
case Instruction::IPUT_WIDE_QUICK:
- VerifyIPutQuick(inst, reg_types_.LongLo(), true);
+ VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.LongLo(), true);
break;
case Instruction::IPUT_OBJECT_QUICK:
- VerifyIPutQuick(inst, reg_types_.JavaLangObject(false), false);
+ VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.JavaLangObject(false), false);
break;
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
@@ -3037,7 +3059,12 @@
// odd case, but nothing to do
} else {
common_super = &common_super->Merge(exception, &reg_types_);
- CHECK(reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super));
+ if (FailOrAbort(this,
+ reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super),
+ "java.lang.Throwable is not assignable-from common_super at ",
+ work_insn_idx_)) {
+ break;
+ }
}
}
}
@@ -3362,18 +3389,32 @@
if (klass->IsInterface()) {
// Derive Object.class from Class.class.getSuperclass().
mirror::Class* object_klass = klass->GetClass()->GetSuperClass();
- CHECK(object_klass->IsObjectClass());
+ if (FailOrAbort(this, object_klass->IsObjectClass(),
+ "Failed to find Object class in quickened invoke receiver",
+ work_insn_idx_)) {
+ return nullptr;
+ }
dispatch_class = object_klass;
} else {
dispatch_class = klass;
}
- CHECK(dispatch_class->HasVTable()) << PrettyDescriptor(dispatch_class);
+ if (FailOrAbort(this, dispatch_class->HasVTable(),
+ "Receiver class has no vtable for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength())
- << PrettyDescriptor(klass) << " in method "
- << PrettyMethod(dex_method_idx_, *dex_file_, true);
+ if (FailOrAbort(this, static_cast<int32_t>(vtable_index) < dispatch_class->GetVTableLength(),
+ "Receiver class has not enough vtable slots for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
- CHECK(!self_->IsExceptionPending());
+ if (FailOrAbort(this, !Thread::Current()->IsExceptionPending(),
+ "Unexpected exception pending for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
return res_method;
}
@@ -3386,7 +3427,14 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
return nullptr;
}
- CHECK(!res_method->IsDirect() && !res_method->IsStatic());
+ if (FailOrAbort(this, !res_method->IsDirect(), "Quick-invoked method is direct at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
+ if (FailOrAbort(this, !res_method->IsStatic(), "Quick-invoked method is static at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
@@ -3751,8 +3799,9 @@
}
}
-void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
- bool is_primitive, bool is_static) {
+template <MethodVerifier::FieldAccessType kAccType>
+void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
@@ -3760,9 +3809,20 @@
} else {
const RegType& object_type = work_line_->GetRegisterType(this, inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
+ if (UNLIKELY(have_pending_hard_failure_)) {
+ return;
+ }
}
const RegType* field_type = nullptr;
if (field != nullptr) {
+ if (kAccType == FieldAccessType::kAccPut) {
+ if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
+ Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
+ << " from other class " << GetDeclaringClass();
+ return;
+ }
+ }
+
mirror::Class* field_type_class;
{
StackHandleScope<1> hs(self_);
@@ -3784,88 +3844,56 @@
}
DCHECK(field_type != nullptr);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
- if (is_primitive) {
- if (field_type->Equals(insn_type) ||
- (field_type->IsFloat() && insn_type.IsInteger()) ||
- (field_type->IsDouble() && insn_type.IsLong())) {
- // expected that read is of the correct primitive type or that int reads are reading
- // floats or long reads are reading doubles
+ static_assert(kAccType == FieldAccessType::kAccPut || kAccType == FieldAccessType::kAccGet,
+ "Unexpected third access type");
+ if (kAccType == FieldAccessType::kAccPut) {
+ // sput or iput.
+ if (is_primitive) {
+ VerifyPrimitivePut(*field_type, insn_type, vregA);
} else {
- // This is a global failure rather than a class change failure as the instructions and
- // the descriptors for the type should have been consistent within the same file at
- // compile time
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
- << " to be of type '" << insn_type
- << "' but found type '" << *field_type << "' in get";
- return;
+ if (!insn_type.IsAssignableFrom(*field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << *field_type
+ << "' in put-object";
+ return;
+ }
+ work_line_->VerifyRegisterType(this, vregA, *field_type);
}
- } else {
- if (!insn_type.IsAssignableFrom(*field_type)) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
- << " to be compatible with type '" << insn_type
- << "' but found type '" << *field_type
- << "' in Get-object";
- work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
- return;
- }
- }
- if (!field_type->IsLowHalf()) {
- work_line_->SetRegisterType(this, vregA, *field_type);
- } else {
- work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
- }
-}
-
-void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
- bool is_primitive, bool is_static) {
- uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- mirror::ArtField* field;
- if (is_static) {
- field = GetStaticField(field_idx);
- } else {
- const RegType& object_type = work_line_->GetRegisterType(this, inst->VRegB_22c());
- field = GetInstanceField(object_type, field_idx);
- }
- const RegType* field_type = nullptr;
- if (field != nullptr) {
- if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
- Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
- << " from other class " << GetDeclaringClass();
- return;
- }
- mirror::Class* field_type_class;
- {
- StackHandleScope<1> hs(self_);
- HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
- FieldHelper fh(h_field);
- field_type_class = fh.GetType(can_load_classes_);
- }
- if (field_type_class != nullptr) {
- field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
- field_type_class->CannotBeAssignedFromOtherTypes());
+ } else if (kAccType == FieldAccessType::kAccGet) {
+ // sget or iget.
+ if (is_primitive) {
+ if (field_type->Equals(insn_type) ||
+ (field_type->IsFloat() && insn_type.IsInteger()) ||
+ (field_type->IsDouble() && insn_type.IsLong())) {
+ // expected that read is of the correct primitive type or that int reads are reading
+ // floats or long reads are reading doubles
+ } else {
+ // This is a global failure rather than a class change failure as the instructions and
+ // the descriptors for the type should have been consistent within the same file at
+ // compile time
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
+ << " to be of type '" << insn_type
+ << "' but found type '" << *field_type << "' in get";
+ return;
+ }
} else {
- DCHECK(!can_load_classes_ || self_->IsExceptionPending());
- self_->ClearException();
+ if (!insn_type.IsAssignableFrom(*field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << *field_type
+ << "' in get-object";
+ work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+ return;
+ }
}
- }
- if (field_type == nullptr) {
- const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
- const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- field_type = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
- }
- DCHECK(field_type != nullptr);
- const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
- if (is_primitive) {
- VerifyPrimitivePut(*field_type, insn_type, vregA);
+ if (!field_type->IsLowHalf()) {
+ work_line_->SetRegisterType(this, vregA, *field_type);
+ } else {
+ work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
+ }
} else {
- if (!insn_type.IsAssignableFrom(*field_type)) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
- << " to be compatible with type '" << insn_type
- << "' but found type '" << *field_type
- << "' in put-object";
- return;
- }
- work_line_->VerifyRegisterType(this, vregA, *field_type);
+ LOG(FATAL) << "Unexpected case.";
}
}
@@ -3896,131 +3924,137 @@
return f;
}
-void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) {
+template <MethodVerifier::FieldAccessType kAccType>
+void MethodVerifier::VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
- mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
- if (field == nullptr) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
- return;
- }
- mirror::Class* field_type_class;
- {
- StackHandleScope<1> hs(self_);
- HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
- FieldHelper fh(h_field);
- field_type_class = fh.GetType(can_load_classes_);
- }
- const RegType* field_type;
- if (field_type_class != nullptr) {
- field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
- field_type_class->CannotBeAssignedFromOtherTypes());
- } else {
- DCHECK(!can_load_classes_ || self_->IsExceptionPending());
- self_->ClearException();
- field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
- field->GetTypeDescriptor(), false);
- }
- DCHECK(field_type != nullptr);
- const uint32_t vregA = inst->VRegA_22c();
- if (is_primitive) {
- if (field_type->Equals(insn_type) ||
- (field_type->IsFloat() && insn_type.IsIntegralTypes()) ||
- (field_type->IsDouble() && insn_type.IsLongTypes())) {
- // expected that read is of the correct primitive type or that int reads are reading
- // floats or long reads are reading doubles
- } else {
- // This is a global failure rather than a class change failure as the instructions and
- // the descriptors for the type should have been consistent within the same file at
- // compile time
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
- << " to be of type '" << insn_type
- << "' but found type '" << *field_type << "' in Get";
- return;
- }
- } else {
- if (!insn_type.IsAssignableFrom(*field_type)) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
- << " to be compatible with type '" << insn_type
- << "' but found type '" << *field_type
- << "' in get-object";
- work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
- return;
- }
- }
- if (!field_type->IsLowHalf()) {
- work_line_->SetRegisterType(this, vregA, *field_type);
- } else {
- work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
- }
-}
-void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) {
- DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
if (field == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
return;
}
- const char* descriptor = field->GetTypeDescriptor();
- mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
- const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
- if (field != nullptr) {
+
+ // For an IPUT_QUICK, we now test for the final flag of the field.
+ if (kAccType == FieldAccessType::kAccPut) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
<< " from other class " << GetDeclaringClass();
return;
}
}
- const uint32_t vregA = inst->VRegA_22c();
- if (is_primitive) {
- // Primitive field assignability rules are weaker than regular assignability rules
- bool instruction_compatible;
- bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(this, vregA);
- if (field_type.IsIntegralTypes()) {
- instruction_compatible = insn_type.IsIntegralTypes();
- value_compatible = value_type.IsIntegralTypes();
- } else if (field_type.IsFloat()) {
- instruction_compatible = insn_type.IsInteger(); // no [is]put-float, so expect [is]put-int
- value_compatible = value_type.IsFloatTypes();
- } else if (field_type.IsLong()) {
- instruction_compatible = insn_type.IsLong();
- value_compatible = value_type.IsLongTypes();
- } else if (field_type.IsDouble()) {
- instruction_compatible = insn_type.IsLong(); // no [is]put-double, so expect [is]put-long
- value_compatible = value_type.IsDoubleTypes();
+
+ // Get the field type.
+ const RegType* field_type;
+ {
+ mirror::Class* field_type_class;
+ {
+ StackHandleScope<1> hs(Thread::Current());
+ HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
+ field_type_class = FieldHelper(h_field).GetType(can_load_classes_);
+ }
+
+ if (field_type_class != nullptr) {
+ field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
+ field_type_class->CannotBeAssignedFromOtherTypes());
} else {
- instruction_compatible = false; // reference field with primitive store
- value_compatible = false; // unused
+ Thread* self = Thread::Current();
+ DCHECK(!can_load_classes_ || self->IsExceptionPending());
+ self->ClearException();
+ field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
+ field->GetTypeDescriptor(), false);
}
- if (!instruction_compatible) {
- // This is a global failure rather than a class change failure as the instructions and
- // the descriptors for the type should have been consistent within the same file at
- // compile time
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
- << " to be of type '" << insn_type
- << "' but found type '" << field_type
- << "' in put";
+ if (field_type == nullptr) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
return;
}
- if (!value_compatible) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
- << " of type " << value_type
- << " but expected " << field_type
- << " for store to " << PrettyField(field) << " in put";
- return;
+ }
+
+ const uint32_t vregA = inst->VRegA_22c();
+ static_assert(kAccType == FieldAccessType::kAccPut || kAccType == FieldAccessType::kAccGet,
+ "Unexpected third access type");
+ if (kAccType == FieldAccessType::kAccPut) {
+ if (is_primitive) {
+ // Primitive field assignability rules are weaker than regular assignability rules
+ bool instruction_compatible;
+ bool value_compatible;
+ const RegType& value_type = work_line_->GetRegisterType(this, vregA);
+ if (field_type->IsIntegralTypes()) {
+ instruction_compatible = insn_type.IsIntegralTypes();
+ value_compatible = value_type.IsIntegralTypes();
+ } else if (field_type->IsFloat()) {
+ instruction_compatible = insn_type.IsInteger(); // no [is]put-float, so expect [is]put-int
+ value_compatible = value_type.IsFloatTypes();
+ } else if (field_type->IsLong()) {
+ instruction_compatible = insn_type.IsLong();
+ value_compatible = value_type.IsLongTypes();
+ } else if (field_type->IsDouble()) {
+ instruction_compatible = insn_type.IsLong(); // no [is]put-double, so expect [is]put-long
+ value_compatible = value_type.IsDoubleTypes();
+ } else {
+ instruction_compatible = false; // reference field with primitive store
+ value_compatible = false; // unused
+ }
+ if (!instruction_compatible) {
+ // This is a global failure rather than a class change failure as the instructions and
+ // the descriptors for the type should have been consistent within the same file at
+ // compile time
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
+ << " to be of type '" << insn_type
+ << "' but found type '" << *field_type
+ << "' in put";
+ return;
+ }
+ if (!value_compatible) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
+ << " of type " << value_type
+ << " but expected " << *field_type
+ << " for store to " << PrettyField(field) << " in put";
+ return;
+ }
+ } else {
+ if (!insn_type.IsAssignableFrom(*field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << *field_type
+ << "' in put-object";
+ return;
+ }
+ work_line_->VerifyRegisterType(this, vregA, *field_type);
+ }
+ } else if (kAccType == FieldAccessType::kAccGet) {
+ if (is_primitive) {
+ if (field_type->Equals(insn_type) ||
+ (field_type->IsFloat() && insn_type.IsIntegralTypes()) ||
+ (field_type->IsDouble() && insn_type.IsLongTypes())) {
+ // expected that read is of the correct primitive type or that int reads are reading
+ // floats or long reads are reading doubles
+ } else {
+ // This is a global failure rather than a class change failure as the instructions and
+ // the descriptors for the type should have been consistent within the same file at
+ // compile time
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
+ << " to be of type '" << insn_type
+ << "' but found type '" << *field_type << "' in Get";
+ return;
+ }
+ } else {
+ if (!insn_type.IsAssignableFrom(*field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << *field_type
+ << "' in get-object";
+ work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+ return;
+ }
+ }
+ if (!field_type->IsLowHalf()) {
+ work_line_->SetRegisterType(this, vregA, *field_type);
+ } else {
+ work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
}
} else {
- if (!insn_type.IsAssignableFrom(field_type)) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
- << " to be compatible with type '" << insn_type
- << "' but found type '" << field_type
- << "' in put-object";
- return;
- }
- work_line_->VerifyRegisterType(this, vregA, field_type);
+ LOG(FATAL) << "Unexpected case.";
}
}
@@ -4112,9 +4146,7 @@
const RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
if (mirror_method_.Get() != nullptr) {
- StackHandleScope<1> hs(self_);
- mirror::Class* return_type_class =
- MethodHelper(hs.NewHandle(mirror_method_.Get())).GetReturnType(can_load_classes_);
+ mirror::Class* return_type_class = mirror_method_->GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
return_type_ = &reg_types_.FromClass(mirror_method_->GetReturnTypeDescriptor(),
return_type_class,
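The refactor in this file collapses VerifyISGet/VerifyISPut into a single VerifyISFieldAccess templated on the access type, so the get/put split is resolved at compile time while the shared field-resolution code is written once. The following standalone C++ sketch (illustrative names only, not part of the patch) shows the same dispatch pattern: an enum class template parameter, a static_assert guarding against unexpected enumerators, and constant-folded branches.

#include <iostream>

enum class FieldAccessType { kAccGet, kAccPut };

template <FieldAccessType kAccType>
void VerifyFieldAccess(int vreg) {
  static_assert(kAccType == FieldAccessType::kAccGet ||
                kAccType == FieldAccessType::kAccPut,
                "Unexpected third access type");
  // Shared work (field resolution, field-type lookup) would go here, written once.
  if (kAccType == FieldAccessType::kAccPut) {
    std::cout << "put: check the value in v" << vreg << " against the field type\n";
  } else {
    std::cout << "get: set v" << vreg << " to the field type\n";
  }
}

int main() {
  VerifyFieldAccess<FieldAccessType::kAccGet>(0);
  VerifyFieldAccess<FieldAccessType::kAccPut>(1);
  return 0;
}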
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 9f5efe8..0c4bf3c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -18,32 +18,26 @@
#define ART_RUNTIME_VERIFIER_METHOD_VERIFIER_H_
#include <memory>
-#include <set>
#include <vector>
-#include "base/casts.h"
#include "base/macros.h"
-#include "base/stl_util.h"
-#include "class_reference.h"
#include "dex_file.h"
-#include "dex_instruction.h"
#include "handle.h"
#include "instruction_flags.h"
#include "method_reference.h"
-#include "reg_type.h"
#include "reg_type_cache.h"
-#include "register_line.h"
-#include "safe_map.h"
namespace art {
+class Instruction;
struct ReferenceMap2Visitor;
-template<class T> class Handle;
namespace verifier {
-class MethodVerifier;
class DexPcToReferenceMap;
+class MethodVerifier;
+class RegisterLine;
+class RegType;
/*
* "Direct" and "virtual" methods are stored independently. The type of call used to invoke the
@@ -128,6 +122,8 @@
private:
std::unique_ptr<RegisterLine*[]> register_lines_;
size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
};
// The verifier
@@ -511,14 +507,14 @@
// Lookup static field and fail for resolution violations
mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Perform verification of an iget or sget instruction.
- void VerifyISGet(const Instruction* inst, const RegType& insn_type,
- bool is_primitive, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Perform verification of an iput or sput instruction.
- void VerifyISPut(const Instruction* inst, const RegType& insn_type,
- bool is_primitive, bool is_static)
+ // Perform verification of an iget/sget/iput/sput instruction.
+ enum class FieldAccessType { // private
+ kAccGet,
+ kAccPut
+ };
+ template <FieldAccessType kAccType>
+ void VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the access field of a quick field access (iget/iput-quick) or nullptr
@@ -526,14 +522,8 @@
mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Perform verification of an iget-quick instruction.
- void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
- bool is_primitive)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Perform verification of an iput-quick instruction.
- void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
- bool is_primitive)
+ template <FieldAccessType kAccType>
+ void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
@@ -739,6 +729,8 @@
// even though we might detect to be a compiler. Should only be set when running
// VerifyMethodAndDump.
const bool verify_to_dump_;
+
+ DISALLOW_COPY_AND_ASSIGN(MethodVerifier);
};
std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
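The header also gains DISALLOW_COPY_AND_ASSIGN on PcToRegisterLineTable and MethodVerifier. As a hedged reference only (the authoritative definition lives in art's base/macros.h), the macro is commonly defined along these lines, deleting the copy constructor and copy assignment so accidental copies fail to compile:

#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;      \
  void operator=(const TypeName&) = delete

class Verifier {
 public:
  Verifier() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(Verifier);  // copies of the verifier are now ill-formed
};

int main() {
  Verifier v;
  // Verifier copy(v);  // would not compile: copy constructor is deleted
  return 0;
}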
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index 480ed40..f445132 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -81,6 +81,9 @@
return rhs.IsLongTypes();
} else if (lhs.IsDoubleLo()) {
return rhs.IsDoubleTypes();
+ } else if (lhs.IsConflict()) {
+ LOG(WARNING) << "RegType::AssignableFrom lhs is Conflict!";
+ return false;
} else {
CHECK(lhs.IsReferenceTypes())
<< "Unexpected register type in IsAssignableFrom: '"
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 34d6caa..05958b5 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -17,17 +17,14 @@
#ifndef ART_RUNTIME_VERIFIER_REG_TYPE_H_
#define ART_RUNTIME_VERIFIER_REG_TYPE_H_
-#include <limits>
#include <stdint.h>
+#include <limits>
#include <set>
#include <string>
-#include "jni.h"
-
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
-#include "globals.h"
#include "object_callbacks.h"
#include "primitive.h"
@@ -35,6 +32,7 @@
namespace mirror {
class Class;
} // namespace mirror
+
namespace verifier {
class RegTypeCache;
@@ -578,17 +576,17 @@
bool IsConstantChar() const OVERRIDE {
return IsConstant() && ConstantValue() >= 0 &&
- ConstantValue() <= std::numeric_limits<jchar>::max();
+ ConstantValue() <= std::numeric_limits<uint16_t>::max();
}
bool IsConstantByte() const OVERRIDE {
return IsConstant() &&
- ConstantValue() >= std::numeric_limits<jbyte>::min() &&
- ConstantValue() <= std::numeric_limits<jbyte>::max();
+ ConstantValue() >= std::numeric_limits<int8_t>::min() &&
+ ConstantValue() <= std::numeric_limits<int8_t>::max();
}
bool IsConstantShort() const OVERRIDE {
return IsConstant() &&
- ConstantValue() >= std::numeric_limits<jshort>::min() &&
- ConstantValue() <= std::numeric_limits<jshort>::max();
+ ConstantValue() >= std::numeric_limits<int16_t>::min() &&
+ ConstantValue() <= std::numeric_limits<int16_t>::max();
}
virtual bool IsConstantTypes() const OVERRIDE { return true; }
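The jchar/jbyte/jshort limits above are replaced by uint16_t/int8_t/int16_t, which have identical ranges, so constant classification is unchanged while reg_type.h no longer needs jni.h. A minimal standalone sketch of the resulting range checks (function names invented for illustration):

#include <cstdint>
#include <iostream>
#include <limits>

bool IsConstantChar(int32_t v) {
  return v >= 0 && v <= std::numeric_limits<uint16_t>::max();  // 0 .. 65535
}

bool IsConstantByte(int32_t v) {
  return v >= std::numeric_limits<int8_t>::min() &&            // -128 .. 127
         v <= std::numeric_limits<int8_t>::max();
}

bool IsConstantShort(int32_t v) {
  return v >= std::numeric_limits<int16_t>::min() &&           // -32768 .. 32767
         v <= std::numeric_limits<int16_t>::max();
}

int main() {
  std::cout << IsConstantChar(65535) << IsConstantByte(-129) << IsConstantShort(32767) << "\n";
  return 0;  // prints 101
}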
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 7c40945..1dfbe51 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -148,7 +148,8 @@
if (can_load_classes_) {
klass = class_linker->FindClass(self, descriptor, class_loader);
} else {
- klass = class_linker->LookupClass(self, descriptor, loader);
+ klass = class_linker->LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor),
+ loader);
if (klass != nullptr && !klass->IsLoaded()) {
// We found the class but without it being loaded it's not safe for use.
klass = nullptr;
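LookupClass now takes a precomputed descriptor hash so the class-table probe does not rehash on every lookup. As a hedged illustration only (the real ComputeModifiedUtf8Hash lives elsewhere in the runtime and may differ), the hash amounts to a simple 31-based rolling hash over the descriptor bytes:

#include <cstddef>
#include <iostream>

// Illustrative stand-in for ComputeModifiedUtf8Hash; not the authoritative definition.
size_t DescriptorHash(const char* chars) {
  size_t hash = 0;
  while (*chars != '\0') {
    hash = hash * 31 + *chars++;
  }
  return hash;
}

int main() {
  std::cout << DescriptorHash("Ljava/lang/Object;") << "\n";
  return 0;
}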
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 219e687..244deed 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -114,6 +114,17 @@
}
}
+inline size_t RegisterLine::GetMaxNonZeroReferenceReg(MethodVerifier* verifier,
+ size_t max_ref_reg) const {
+ size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
+ for (; i < num_regs_; i++) {
+ if (GetRegisterType(verifier, i).IsNonZeroReferenceTypes()) {
+ max_ref_reg = i;
+ }
+ }
+ return max_ref_reg;
+}
+
inline bool RegisterLine::VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
const RegType& check_type) {
// Verify the src register type against the check type refining the type of the register
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 3139204..72d7938 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -310,8 +310,12 @@
verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: "
<< monitors_.size();
} else {
- SetRegToLockDepth(reg_idx, monitors_.size());
- monitors_.push_back(insn_idx);
+ if (SetRegToLockDepth(reg_idx, monitors_.size())) {
+ monitors_.push_back(insn_idx);
+ } else {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected monitor-enter on register v" <<
+ reg_idx;
+ }
}
}
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index c7fd369..52b5c13 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -20,14 +20,16 @@
#include <memory>
#include <vector>
-#include "dex_instruction.h"
-#include "reg_type.h"
#include "safe_map.h"
namespace art {
+
+class Instruction;
+
namespace verifier {
class MethodVerifier;
+class RegType;
/*
* Register type categories, for type checking.
@@ -118,9 +120,7 @@
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
- while (!monitors_.empty()) {
- monitors_.pop_back();
- }
+ monitors_.clear();
reg_to_lock_depths_.clear();
}
@@ -277,15 +277,7 @@
bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) {
- size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
- for (; i < num_regs_; i++) {
- if (GetRegisterType(verifier, i).IsNonZeroReferenceTypes()) {
- max_ref_reg = i;
- }
- }
- return max_ref_reg;
- }
+ size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) const;
// Write a bit at each register location that holds a reference.
void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
@@ -315,15 +307,18 @@
}
}
- void SetRegToLockDepth(size_t reg, size_t depth) {
+ bool SetRegToLockDepth(size_t reg, size_t depth) {
CHECK_LT(depth, 32u);
- DCHECK(!IsSetLockDepth(reg, depth));
+ if (IsSetLockDepth(reg, depth)) {
+ return false; // Register already holds lock so locking twice is erroneous.
+ }
auto it = reg_to_lock_depths_.find(reg);
if (it == reg_to_lock_depths_.end()) {
reg_to_lock_depths_.Put(reg, 1 << depth);
} else {
it->second |= (1 << depth);
}
+ return true;
}
void ClearRegToLockDepth(size_t reg, size_t depth) {
@@ -349,21 +344,23 @@
SetResultTypeToUnknown(verifier);
}
- // Storage for the result register's type, valid after an invocation
+ // Storage for the result register's type, valid after an invocation.
uint16_t result_[2];
// Length of reg_types_
const uint32_t num_regs_;
- // A stack of monitor enter locations
+ // A stack of monitor enter locations.
std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
// A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
// stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
- // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5
+ // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
// An array of RegType Ids associated with each dex register.
uint16_t line_[0];
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterLine);
};
} // namespace verifier
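SetRegToLockDepth above now reports an already-set lock depth to its caller instead of asserting, which lets MonitorEnter turn a doubly-locked register into a verification failure rather than a runtime abort. A standalone sketch of the bitmask bookkeeping (names invented for illustration):

#include <cstdint>
#include <iostream>
#include <map>

std::map<uint32_t, uint32_t> reg_to_lock_depths;

bool SetRegToLockDepth(uint32_t reg, uint32_t depth) {
  if (depth >= 32) {
    return false;  // depth does not fit in the 32-bit mask
  }
  uint32_t bit = 1u << depth;
  uint32_t& mask = reg_to_lock_depths[reg];
  if ((mask & bit) != 0) {
    return false;  // register already holds a lock at this depth
  }
  mask |= bit;
  return true;
}

int main() {
  std::cout << SetRegToLockDepth(5, 0) << SetRegToLockDepth(5, 0) << "\n";  // prints 10
  return 0;
}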
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 4a3c3ec..16338c4 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -18,6 +18,8 @@
#include <stdlib.h>
+#include <sstream>
+
#include "base/logging.h"
#include "mirror/class.h"
#include "ScopedLocalRef.h"
@@ -52,6 +54,7 @@
jclass WellKnownClasses::java_lang_ThreadGroup;
jclass WellKnownClasses::java_lang_Throwable;
jclass WellKnownClasses::java_nio_DirectByteBuffer;
+jclass WellKnownClasses::java_util_ArrayList;
jclass WellKnownClasses::java_util_Collections;
jclass WellKnownClasses::libcore_util_EmptyArray;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk;
@@ -95,8 +98,10 @@
jfieldID WellKnownClasses::java_lang_Thread_priority;
jfieldID WellKnownClasses::java_lang_Thread_uncaughtHandler;
jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
+jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
jfieldID WellKnownClasses::java_lang_ThreadGroup_name;
+jfieldID WellKnownClasses::java_lang_ThreadGroup_parent;
jfieldID WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup;
jfieldID WellKnownClasses::java_lang_Throwable_cause;
jfieldID WellKnownClasses::java_lang_Throwable_detailMessage;
@@ -108,6 +113,8 @@
jfieldID WellKnownClasses::java_lang_reflect_Proxy_h;
jfieldID WellKnownClasses::java_nio_DirectByteBuffer_capacity;
jfieldID WellKnownClasses::java_nio_DirectByteBuffer_effectiveDirectAddress;
+jfieldID WellKnownClasses::java_util_ArrayList_array;
+jfieldID WellKnownClasses::java_util_ArrayList_size;
jfieldID WellKnownClasses::java_util_Collections_EMPTY_LIST;
jfieldID WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT;
jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data;
@@ -187,6 +194,7 @@
java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
java_lang_Throwable = CacheClass(env, "java/lang/Throwable");
java_nio_DirectByteBuffer = CacheClass(env, "java/nio/DirectByteBuffer");
+ java_util_ArrayList = CacheClass(env, "java/util/ArrayList");
java_util_Collections = CacheClass(env, "java/util/Collections");
libcore_util_EmptyArray = CacheClass(env, "libcore/util/EmptyArray");
org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
@@ -225,8 +233,10 @@
java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
java_lang_Thread_uncaughtHandler = CacheField(env, java_lang_Thread, false, "uncaughtHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
+ java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "Ljava/util/List;");
java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
java_lang_ThreadGroup_name = CacheField(env, java_lang_ThreadGroup, false, "name", "Ljava/lang/String;");
+ java_lang_ThreadGroup_parent = CacheField(env, java_lang_ThreadGroup, false, "parent", "Ljava/lang/ThreadGroup;");
java_lang_ThreadGroup_systemThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "systemThreadGroup", "Ljava/lang/ThreadGroup;");
java_lang_Throwable_cause = CacheField(env, java_lang_Throwable, false, "cause", "Ljava/lang/Throwable;");
java_lang_Throwable_detailMessage = CacheField(env, java_lang_Throwable, false, "detailMessage", "Ljava/lang/String;");
@@ -238,6 +248,8 @@
java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;");
java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I");
java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "effectiveDirectAddress", "J");
+ java_util_ArrayList_array = CacheField(env, java_util_ArrayList, false, "array", "[Ljava/lang/Object;");
+ java_util_ArrayList_size = CacheField(env, java_util_ArrayList, false, "size", "I");
java_util_Collections_EMPTY_LIST = CacheField(env, java_util_Collections, true, "EMPTY_LIST", "Ljava/util/List;");
libcore_util_EmptyArray_STACK_TRACE_ELEMENT = CacheField(env, libcore_util_EmptyArray, true, "STACK_TRACE_ELEMENT", "[Ljava/lang/StackTraceElement;");
org_apache_harmony_dalvik_ddmc_Chunk_data = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "data", "[B");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 790d7f7..d651b90 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -64,6 +64,7 @@
static jclass java_lang_ThreadGroup;
static jclass java_lang_Thread__UncaughtExceptionHandler;
static jclass java_lang_Throwable;
+ static jclass java_util_ArrayList;
static jclass java_util_Collections;
static jclass java_nio_DirectByteBuffer;
static jclass libcore_util_EmptyArray;
@@ -111,8 +112,10 @@
static jfieldID java_lang_Thread_priority;
static jfieldID java_lang_Thread_uncaughtHandler;
static jfieldID java_lang_Thread_nativePeer;
+ static jfieldID java_lang_ThreadGroup_groups;
static jfieldID java_lang_ThreadGroup_mainThreadGroup;
static jfieldID java_lang_ThreadGroup_name;
+ static jfieldID java_lang_ThreadGroup_parent;
static jfieldID java_lang_ThreadGroup_systemThreadGroup;
static jfieldID java_lang_Throwable_cause;
static jfieldID java_lang_Throwable_detailMessage;
@@ -121,6 +124,8 @@
static jfieldID java_lang_Throwable_suppressedExceptions;
static jfieldID java_nio_DirectByteBuffer_capacity;
static jfieldID java_nio_DirectByteBuffer_effectiveDirectAddress;
+ static jfieldID java_util_ArrayList_array;
+ static jfieldID java_util_ArrayList_size;
static jfieldID java_util_Collections_EMPTY_LIST;
static jfieldID libcore_util_EmptyArray_STACK_TRACE_ELEMENT;
static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_data;
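The new java.util.ArrayList class and field IDs are cached once at startup through the same CacheClass/CacheField helpers as the other well-known classes. A hedged standalone JNI sketch of what that caching boils down to (plain JNI calls rather than ART's helpers):

#include <jni.h>

static jclass java_util_ArrayList = nullptr;
static jfieldID java_util_ArrayList_array = nullptr;
static jfieldID java_util_ArrayList_size = nullptr;

void CacheArrayListIds(JNIEnv* env) {
  jclass local = env->FindClass("java/util/ArrayList");
  java_util_ArrayList = static_cast<jclass>(env->NewGlobalRef(local));
  env->DeleteLocalRef(local);
  // Field signatures match libcore's ArrayList, as cached in well_known_classes.cc.
  java_util_ArrayList_array =
      env->GetFieldID(java_util_ArrayList, "array", "[Ljava/lang/Object;");
  java_util_ArrayList_size = env->GetFieldID(java_util_ArrayList, "size", "I");
}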
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 96abee2..70a4dda 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -41,7 +41,7 @@
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
- std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename()));
+ std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
ASSERT_TRUE(file.get() != NULL);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
index e52adfc..b7ff360 100644
--- a/sigchainlib/Android.mk
+++ b/sigchainlib/Android.mk
@@ -30,6 +30,18 @@
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
include $(BUILD_SHARED_LIBRARY)
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+LOCAL_SRC_FILES := sigchain.cc
+LOCAL_CLANG = $(ART_TARGET_CLANG)
+LOCAL_MODULE:= libsigchain
+LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
+include $(BUILD_STATIC_LIBRARY)
+
# Build host library.
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
@@ -43,3 +55,17 @@
LOCAL_LDLIBS = -ldl
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+LOCAL_MODULE_TAGS := optional
+LOCAL_IS_HOST_MODULE := true
+LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+LOCAL_CLANG = $(ART_HOST_CLANG)
+LOCAL_SRC_FILES := sigchain.cc
+LOCAL_MODULE:= libsigchain
+LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+LOCAL_LDLIBS = -ldl
+LOCAL_MULTILIB := both
+include external/libcxx/libcxx.mk
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index fbc8c3f..76779ab 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <stdio.h>
+#include <stdlib.h>
+
#ifdef HAVE_ANDROID_OS
#include <android/log.h>
#else
@@ -21,10 +24,10 @@
#include <iostream>
#endif
-#include <stdlib.h>
-
#include "sigchain.h"
+#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+
static void log(const char* format, ...) {
char buf[256];
va_list ap;
@@ -38,17 +41,23 @@
va_end(ap);
}
-extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+namespace art {
+
+
+extern "C" void ClaimSignalChain(int signal ATTRIBUTE_UNUSED,
+ struct sigaction* oldaction ATTRIBUTE_UNUSED) {
log("ClaimSignalChain is not exported by the main executable.");
abort();
}
-extern "C" void UnclaimSignalChain(int signal) {
+extern "C" void UnclaimSignalChain(int signal ATTRIBUTE_UNUSED) {
log("UnclaimSignalChain is not exported by the main executable.");
abort();
}
-extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+extern "C" void InvokeUserSignalHandler(int sig ATTRIBUTE_UNUSED,
+ siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
log("InvokeUserSignalHandler is not exported by the main executable.");
abort();
}
@@ -58,7 +67,10 @@
abort();
}
-extern "C" void EnsureFrontOfChain(int signal, struct sigaction* expected_action) {
+extern "C" void EnsureFrontOfChain(int signal ATTRIBUTE_UNUSED,
+ struct sigaction* expected_action ATTRIBUTE_UNUSED) {
log("EnsureFrontOfChain is not exported by the main executable.");
abort();
}
+
+} // namespace art
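The dummy sigchain stubs now mark their parameters ATTRIBUTE_UNUSED so the file builds cleanly under -Wunused-parameter with -Werror. A minimal sketch of the same idea (GCC/Clang attribute; the stub name below is illustrative):

#define ATTRIBUTE_UNUSED __attribute__((__unused__))

extern "C" void ClaimSignalChainStub(int signal ATTRIBUTE_UNUSED,
                                     void* oldaction ATTRIBUTE_UNUSED) {
  // Dummy implementation: the parameters are deliberately ignored.
}

int main() {
  ClaimSignalChainStub(0, nullptr);
  return 0;
}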
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 6fc4484..c2877be 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -172,7 +172,7 @@
constexpr size_t kByteReturnSize = 7;
jbyte byte_returns[kByteReturnSize] = { 0, 1, 2, 127, -1, -2, -128 };
-extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
+extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv*, jclass, jbyte b1, jbyte b2,
jbyte b3, jbyte b4, jbyte b5, jbyte b6,
jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
// We use b1 to drive the output.
@@ -197,7 +197,7 @@
static_cast<jshort>(0x8000) };
// The weird static_cast is because short int is only guaranteed down to -32767, not Java's -32768.
-extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
+extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv*, jclass, jshort s1, jshort s2,
jshort s3, jshort s4, jshort s5, jshort s6,
jshort s7, jshort s8, jshort s9, jshort s10) {
// We use s1 to drive the output.
@@ -217,7 +217,7 @@
return short_returns[s1];
}
-extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
+extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv*, jclass, jboolean b1,
jboolean b2, jboolean b3, jboolean b4,
jboolean b5, jboolean b6, jboolean b7,
jboolean b8, jboolean b9, jboolean b10) {
@@ -239,7 +239,7 @@
constexpr size_t kCharReturnSize = 8;
jchar char_returns[kCharReturnSize] = { 0, 1, 2, 127, 255, 256, 15000, 34000 };
-extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
+extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv*, jclass, jchar c1, jchar c2,
jchar c3, jchar c4, jchar c5, jchar c6, jchar c7,
jchar c8, jchar c9, jchar c10) {
// We use c1 to drive the output.
@@ -312,7 +312,7 @@
}
// http://b/16867274
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv* env,
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv*,
jclass) {
PthreadHelper(&testShallowGetCallingClassLoader);
}
@@ -350,7 +350,7 @@
// ourselves.
}
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv* env, jclass) {
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv*, jclass) {
PthreadHelper(&testShallowGetStackClass2);
}
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 291b45f..631c4be 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -19,10 +19,13 @@
namespace art {
-#define CHECK_REGS_CONTAIN_REFS(native_pc_offset, ...) do { \
+#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
int t[] = {__VA_ARGS__}; \
int t_size = sizeof(t) / sizeof(*t); \
- CheckReferences(t, t_size, m->NativeQuickPcOffset(m->ToNativeQuickPc(native_pc_offset))); \
+ uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, abort_if_not_found); \
+ if (native_quick_pc != UINTPTR_MAX) { \
+ CheckReferences(t, t_size, m->NativeQuickPcOffset(native_quick_pc)); \
+ } \
} while (false);
struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
@@ -40,31 +43,33 @@
// we know the Dex registers with live reference values. Assert that what we
// find is what is expected.
if (m_name.compare("f") == 0) {
- CHECK_REGS_CONTAIN_REFS(0x03U, 8); // v8: this
- CHECK_REGS_CONTAIN_REFS(0x06U, 8, 1); // v8: this, v1: x
- CHECK_REGS_CONTAIN_REFS(0x08U, 8, 3, 1); // v8: this, v3: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x0cU, 8, 3, 1); // v8: this, v3: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x0eU, 8, 3, 1); // v8: this, v3: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x10U, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x03U, true, 8); // v8: this
+ CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
// v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
// 0024: move-object v3, v2
// 0025: goto 0013
// Detailed dex instructions for ReferenceMap.java are at the end of this function.
// CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x
- // We eliminate the non-live registers at a return, so only v3 is live:
- CHECK_REGS_CONTAIN_REFS(0x13U); // v3: y
- CHECK_REGS_CONTAIN_REFS(0x18U, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x1aU, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x1dU, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+ // We eliminate the non-live registers at a return, so only v3 is live.
+ // Note that it is OK for a compiler to not have a dex map at this dex PC because
+ // a return is not a safepoint.
+ CHECK_REGS_CONTAIN_REFS(0x13U, false); // v3: y
+ CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
// v5 is removed from the root set because there is a "merge" operation.
// See 0015: if-nez v2, 001f.
- CHECK_REGS_CONTAIN_REFS(0x1fU, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x21U, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
- CHECK_REGS_CONTAIN_REFS(0x27U, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x29U, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x2cU, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x2fU, 8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x32U, 8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x2cU, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x2fU, true, 8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x32U, true, 8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
}
return true;
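CHECK_REGS_CONTAIN_REFS now takes the dex PC plus an abort flag because a compiler may legitimately have no native PC mapping at a return (a return is not a safepoint); UINTPTR_MAX acts as the "no mapping" sentinel and the check is simply skipped. A small standalone sketch of that sentinel pattern (toy lookup, invented names):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

const uintptr_t kNoNativePc = UINTPTR_MAX;

// Toy mapping: pretend only dex PC 0x18 has a native counterpart.
uintptr_t ToNativePc(uint32_t dex_pc, bool abort_if_not_found) {
  if (dex_pc == 0x18) {
    return 0x1000;
  }
  if (abort_if_not_found) {
    std::abort();
  }
  return kNoNativePc;
}

int main() {
  uintptr_t pc = ToNativePc(0x13, false);  // a return site: the mapping may be absent
  if (pc != kNoNativePc) {
    std::printf("check references at native pc %#lx\n", static_cast<unsigned long>(pc));
  } else {
    std::printf("no mapping at dex pc 0x13, check skipped\n");
  }
  return 0;
}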
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index a6d9b66..31371f6 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -14,14 +14,14 @@
* limitations under the License.
*/
+#include <jni.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/ucontext.h>
#include <unistd.h>
-#include "jni.h"
-
-#include <sys/ucontext.h>
+#include "base/macros.h"
static int signal_count;
static const int kMaxSignal = 2;
@@ -47,7 +47,8 @@
#endif
#endif
-static void signalhandler(int sig, siginfo_t* info, void* context) {
+static void signalhandler(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
printf("signal caught\n");
++signal_count;
if (signal_count > kMaxSignal) {
diff --git a/test/023-many-interfaces/build b/test/023-many-interfaces/build
index 4ea52d9..ad42a2d 100644
--- a/test/023-many-interfaces/build
+++ b/test/023-many-interfaces/build
@@ -18,7 +18,7 @@
set -e
# Write out a bunch of interface source files.
-gcc -o iface-gen iface-gen.c
+gcc -Wall -Werror -o iface-gen iface-gen.c
./iface-gen
mkdir classes
diff --git a/test/040-miranda/src/Main.java b/test/040-miranda/src/Main.java
index ff5eba0..65f4fb4 100644
--- a/test/040-miranda/src/Main.java
+++ b/test/040-miranda/src/Main.java
@@ -42,8 +42,8 @@
System.out.println("Test getting miranda method via reflection:");
try {
- Class mirandaClass = Class.forName("MirandaAbstract");
- Method mirandaMethod = mirandaClass.getDeclaredMethod("inInterface", (Class[]) null);
+ Class<?> mirandaClass = Class.forName("MirandaAbstract");
+ Method mirandaMethod = mirandaClass.getDeclaredMethod("inInterface");
System.out.println(" did not expect to find miranda method");
} catch (NoSuchMethodException nsme) {
System.out.println(" caught expected NoSuchMethodException");
diff --git a/test/040-miranda/src/MirandaAbstract.java b/test/040-miranda/src/MirandaAbstract.java
index 309ecca..c8cfa34 100644
--- a/test/040-miranda/src/MirandaAbstract.java
+++ b/test/040-miranda/src/MirandaAbstract.java
@@ -21,6 +21,8 @@
{
protected MirandaAbstract() { }
+ // These will be miranda methods, as the interfaces define them, but they are not
+ // implemented in this abstract class:
//public abstract boolean inInterface();
//public abstract int inInterface2();
diff --git a/test/040-miranda/src/MirandaClass.java b/test/040-miranda/src/MirandaClass.java
index 0d942f0..4160992 100644
--- a/test/040-miranda/src/MirandaClass.java
+++ b/test/040-miranda/src/MirandaClass.java
@@ -22,17 +22,14 @@
public MirandaClass() {}
public boolean inInterface() {
- //System.out.println(" MirandaClass inInterface");
return true;
}
public int inInterface2() {
- //System.out.println(" MirandaClass inInterface2");
return 27;
}
public boolean inAbstract() {
- //System.out.println(" MirandaClass inAbstract");
return false;
}
}
diff --git a/test/040-miranda/src/MirandaClass2.java b/test/040-miranda/src/MirandaClass2.java
index e9bdf2b..143eb37 100644
--- a/test/040-miranda/src/MirandaClass2.java
+++ b/test/040-miranda/src/MirandaClass2.java
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
class MirandaClass2 extends MirandaAbstract {
public boolean inInterface() {
return true;
diff --git a/test/051-thread/expected.txt b/test/051-thread/expected.txt
index 943d1df..54e34af 100644
--- a/test/051-thread/expected.txt
+++ b/test/051-thread/expected.txt
@@ -9,4 +9,6 @@
testSetName starting
testSetName running
testSetName finished
+testThreadPriorities starting
+testThreadPriorities finished
thread test done
diff --git a/test/051-thread/src/Main.java b/test/051-thread/src/Main.java
index 390685d..b81273e 100644
--- a/test/051-thread/src/Main.java
+++ b/test/051-thread/src/Main.java
@@ -20,12 +20,17 @@
* Test some basic thread stuff.
*/
public class Main {
+ static {
+ System.loadLibrary("arttest");
+ }
+
public static void main(String[] args) throws Exception {
System.out.println("thread test starting");
testThreadCapacity();
testThreadDaemons();
testSleepZero();
testSetName();
+ testThreadPriorities();
System.out.println("thread test done");
}
@@ -133,4 +138,53 @@
}
System.out.print("testSetName finished\n");
}
+
+ private static void testThreadPriorities() throws Exception {
+ System.out.print("testThreadPriorities starting\n");
+
+ PriorityStoringThread t1 = new PriorityStoringThread(false);
+ t1.setPriority(Thread.MAX_PRIORITY);
+ t1.start();
+ t1.join();
+ if (supportsThreadPriorities() && (t1.getNativePriority() != Thread.MAX_PRIORITY)) {
+ System.out.print("thread priority for t1 was " + t1.getNativePriority() +
+ " [expected Thread.MAX_PRIORITY]\n");
+ }
+
+ PriorityStoringThread t2 = new PriorityStoringThread(true);
+ t2.start();
+ t2.join();
+ if (supportsThreadPriorities() && (t2.getNativePriority() != Thread.MAX_PRIORITY)) {
+ System.out.print("thread priority for t2 was " + t2.getNativePriority() +
+ " [expected Thread.MAX_PRIORITY]\n");
+ }
+
+ System.out.print("testThreadPriorities finished\n");
+ }
+
+ private static native int getNativePriority();
+ private static native boolean supportsThreadPriorities();
+
+ static class PriorityStoringThread extends Thread {
+ private final boolean setPriority;
+ private volatile int nativePriority;
+
+ public PriorityStoringThread(boolean setPriority) {
+ this.setPriority = setPriority;
+ this.nativePriority = -1;
+ }
+
+ @Override
+ public void run() {
+ if (setPriority) {
+ setPriority(Thread.MAX_PRIORITY);
+ }
+
+ nativePriority = Main.getNativePriority();
+ }
+
+ public int getNativePriority() {
+ return nativePriority;
+ }
+ }
}
diff --git a/test/051-thread/thread_test.cc b/test/051-thread/thread_test.cc
new file mode 100644
index 0000000..2b8e675
--- /dev/null
+++ b/test/051-thread/thread_test.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "jni.h"
+#include "thread-inl.h"
+
+namespace art {
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getNativePriority(JNIEnv* env,
+ jclass clazz ATTRIBUTE_UNUSED) {
+ return ThreadForEnv(env)->GetNativePriority();
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportsThreadPriorities(
+ JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass clazz ATTRIBUTE_UNUSED) {
+#if defined(HAVE_ANDROID_OS)
+ return JNI_TRUE;
+#else
+ return JNI_FALSE;
+#endif
+}
+
+} // namespace art
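Note: the new getNativePriority/supportsThreadPriorities entry points above are bound through the standard JNI name mapping (Java_Main_getNativePriority resolves Main.getNativePriority once libarttest is loaded via System.loadLibrary). A minimal standalone sketch of that linkage pattern, using a hypothetical class and library name, illustrative only:

    // Sketch of the static-native linkage pattern used by the 051-thread test (illustrative only).
    // "examplehooks" is a hypothetical library name; the real test loads "arttest".
    public class NativePriorityExample {
        static {
            System.loadLibrary("examplehooks"); // expects libexamplehooks.so on java.library.path
        }

        // Implemented natively as Java_NativePriorityExample_currentNativePriority(JNIEnv*, jclass).
        private static native int currentNativePriority();

        public static void main(String[] args) {
            System.out.println("native priority: " + currentNativePriority());
        }
    }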
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index c43d1f7..78c92fc 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -1,3 +1,5 @@
+b17325447 passes
+b17630605 passes
b17411468 passes
b2296099 passes
b2302318 passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 9c772b9..285c360 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -30,6 +30,8 @@
}
public static void main(String args[]) throws Exception {
+ b17325447();
+ b17630605();
b17411468();
b2296099Test();
b2302318Test();
@@ -63,6 +65,43 @@
minDoubleWith3ConstsTest();
}
+ public static double b17325447_i1(int i1, double f) {
+ return f;
+ }
+
+ public static double b17325447_i2(int i1, int i2, double f) {
+ return f;
+ }
+
+ public static double b17325447_i3(int i1, int i2, int i3, double f) {
+ return f;
+ }
+
+ public static void b17325447() {
+ // b/17325447 - x86 handling of special identity method w/ double spanning reg/mem.
+ double d = 0.0;
+ d += b17325447_i1(123, 1.0);
+ d += b17325447_i2(123, 456, 2.0);
+ d += b17325447_i3(123, 456, 789, 3.0);
+ if (d == 6.0) {
+ System.out.println("b17325447 passes");
+ } else {
+ System.out.println("b17325447 fails: " + d);
+ }
+ }
+
+ public static void b17630605() {
+ // b/17630605 - failure to properly handle min long immediates.
+ long a1 = 40455547223404749L;
+ long a2 = Long.MIN_VALUE;
+ long answer = a1 + a2;
+ if (answer == -9182916489631371059L) {
+ System.out.println("b17630605 passes");
+ } else {
+ System.out.println("b17630605 fails: " + answer);
+ }
+ }
+
public static void b17411468() {
// b/17411468 - inline Math.round failure.
double d1 = 1.0;
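Note: the constant checked by b17630605 follows from exact long arithmetic (the sum fits in 64 bits, so no wrap-around is involved); per the comment in the hunk, the regression was in materializing Long.MIN_VALUE as an immediate. A standalone sketch of the same arithmetic, illustrative only:

    // Recomputes the b17630605 expectation with ordinary Java long arithmetic (illustrative only).
    public class MinLongImmediateCheck {
        public static void main(String[] args) {
            long a1 = 40455547223404749L;
            long a2 = Long.MIN_VALUE;   // -9223372036854775808
            long sum = a1 + a2;         // exactly -9182916489631371059, still within long range
            System.out.println(sum == -9182916489631371059L ? "matches expected" : "unexpected: " + sum);
        }
    }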
diff --git a/test/089-many-methods/check b/test/089-many-methods/check
new file mode 100755
index 0000000..65b7139
--- /dev/null
+++ b/test/089-many-methods/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip build error debug messages, as they are environment-specific.
+sed -e '/^Failed to build/d' -e '/^Non-canonical tmpdir/d' -e '/^Args:/d' -e '/^Max filename/d' -e '/^Max pathlength/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
\ No newline at end of file
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 1af4121..8fdeccc 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -32,7 +32,7 @@
62 (class java.lang.Long)
14 (class java.lang.Short)
[public java.lang.String(), java.lang.String(int,int,char[]), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int)]
-[private final char[] java.lang.String.value, private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final long java.lang.String.serialVersionUID, private static final char java.lang.String.REPLACEMENT_CHAR]
+[private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private final char[] java.lang.String.value, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final char java.lang.String.REPLACEMENT_CHAR, private static final long java.lang.String.serialVersionUID]
[void java.lang.String._getChars(int,int,char[],int), public char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public volatile int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native int java.lang.String.fastIndexOf(int,int), private char java.lang.String.foldCase(char), public static transient java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static transient java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
[]
[interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
index a5eedc6..16a71e4 100644
--- a/test/115-native-bridge/expected.txt
+++ b/test/115-native-bridge/expected.txt
@@ -1,3 +1,4 @@
+Code cache exists: './code_cache'.
Native bridge initialized.
Checking for getEnvValues.
Ready for native bridge tests.
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 442f99c..6bcc1f5 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -18,12 +18,14 @@
#include <algorithm>
#include <dlfcn.h>
+#include <jni.h>
#include <vector>
-#include "jni.h"
#include "stdio.h"
#include "unistd.h"
+#include "sys/stat.h"
+#include "base/macros.h"
#include "nativebridge/native_bridge.h"
struct NativeBridgeMethod {
@@ -208,7 +210,14 @@
// NativeBridgeCallbacks implementations
extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* art_cbs,
- const char* private_dir, const char* isa) {
+ const char* app_code_cache_dir,
+ const char* isa ATTRIBUTE_UNUSED) {
+ struct stat st;
+ if ((app_code_cache_dir != nullptr)
+ && (stat(app_code_cache_dir, &st) == 0)
+ && S_ISDIR(st.st_mode)) {
+ printf("Code cache exists: '%s'.\n", app_code_cache_dir);
+ }
if (art_cbs != nullptr) {
gNativeBridgeArtCallbacks = art_cbs;
printf("Native bridge initialized.\n");
@@ -241,7 +250,7 @@
}
extern "C" void* native_bridge_getTrampoline(void* handle, const char* name, const char* shorty,
- uint32_t len) {
+ uint32_t len ATTRIBUTE_UNUSED) {
printf("Getting trampoline for %s with shorty %s.\n", name, shorty);
// The name here is actually the JNI name, so we can directly do the lookup.
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
index e475cd6..32a9975 100644
--- a/test/115-native-bridge/run
+++ b/test/115-native-bridge/run
@@ -18,9 +18,9 @@
# Use libnativebridgetest as a native bridge, start NativeBridgeMain (Main is JniTest main file).
LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/')
-cp ${LIBPATH}/libnativebridgetest.so .
+ln -s ${LIBPATH}/libnativebridgetest.so .
touch libarttest.so
-cp ${LIBPATH}/libarttest.so libarttest2.so
+ln -s ${LIBPATH}/libarttest.so libarttest2.so
# pwd likely has /, so it's a pain to put that into a sed rule.
LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
diff --git a/test/116-nodex2oat/nodex2oat.cc b/test/116-nodex2oat/nodex2oat.cc
index 04cac45..564d58d 100644
--- a/test/116-nodex2oat/nodex2oat.cc
+++ b/test/116-nodex2oat/nodex2oat.cc
@@ -38,7 +38,7 @@
return NoDex2OatTest::hasOat(cls);
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass) {
return Runtime::Current()->IsDex2OatEnabled();
}
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
index a1293ae..5cc02d1 100644
--- a/test/117-nopatchoat/expected.txt
+++ b/test/117-nopatchoat/expected.txt
@@ -1,9 +1,9 @@
Run without dex2oat/patchoat
-dex2oat & patchoat are disabled, has oat is true, has executable oat is false.
+dex2oat & patchoat are disabled, has oat is true, has executable oat is expected.
This is a function call
Run with dexoat/patchoat
-dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
This is a function call
Run default
-dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
This is a function call
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index 5994653..da276f2 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -24,18 +24,41 @@
class NoPatchoatTest {
public:
- static bool hasExecutableOat(jclass cls) {
+ static const OatFile::OatDexFile* getOatDexFile(jclass cls) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
const DexFile& dex_file = klass->GetDexFile();
+
const OatFile::OatDexFile* oat_dex_file =
Runtime::Current()->GetClassLinker()->FindOpenedOatDexFileForDexFile(dex_file);
+
+ return oat_dex_file;
+ }
+
+ static bool hasExecutableOat(jclass cls) {
+ const OatFile::OatDexFile* oat_dex_file = getOatDexFile(cls);
+
return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
}
+
+ static bool isPic(jclass cls) {
+ const OatFile::OatDexFile* oat_dex_file = getOatDexFile(cls);
+
+ if (oat_dex_file == nullptr) {
+ return false;
+ }
+
+ const OatFile* oat_file = oat_dex_file->GetOatFile();
+ return oat_file->IsPic();
+ }
};
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasExecutableOat(JNIEnv*, jclass cls) {
return NoPatchoatTest::hasExecutableOat(cls);
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isPic(JNIEnv*, jclass cls) {
+ return NoPatchoatTest::isPic(cls);
+}
+
} // namespace art
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
index f3f91ce..7bc9dbb 100644
--- a/test/117-nopatchoat/src/Main.java
+++ b/test/117-nopatchoat/src/Main.java
@@ -16,9 +16,14 @@
public class Main {
public static void main(String[] args) {
+ boolean executable_correct = (isPic() ?
+ hasExecutableOat() == true :
+ hasExecutableOat() == isDex2OatEnabled());
+
System.out.println(
"dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
- ", has oat is " + hasOat() + ", has executable oat is " + hasExecutableOat() + ".");
+ ", has oat is " + hasOat() + ", has executable oat is " + (
+ executable_correct ? "expected" : "not expected") + ".");
if (!hasOat() && isDex2OatEnabled()) {
throw new Error("Application with dex2oat enabled runs without an oat file");
@@ -42,6 +47,8 @@
private native static boolean isDex2OatEnabled();
+ private native static boolean isPic();
+
private native static boolean hasOat();
private native static boolean hasExecutableOat();
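Note: the executable-oat expectation added to Main.java above reads as "a PIC oat file is expected to be executable unconditionally; otherwise executability should match whether dex2oat was enabled". A standalone restatement of that predicate, illustrative only (the three booleans stand in for the test's native queries):

    // Restates the 117-nopatchoat executable-oat expectation as a pure predicate (illustrative only).
    public class ExecutableOatExpectation {
        static boolean asExpected(boolean isPic, boolean hasExecutableOat, boolean dex2oatEnabled) {
            return isPic ? hasExecutableOat : (hasExecutableOat == dex2oatEnabled);
        }

        public static void main(String[] args) {
            System.out.println(asExpected(true, true, false));   // true: PIC oat stays executable
            System.out.println(asExpected(false, false, false)); // true: no dex2oat, no executable oat
            System.out.println(asExpected(false, true, false));  // false: executable oat without dex2oat
        }
    }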
diff --git a/test/118-noimage-dex2oat/noimage-dex2oat.cc b/test/118-noimage-dex2oat/noimage-dex2oat.cc
index 7340d9e..c49a13e 100644
--- a/test/118-noimage-dex2oat/noimage-dex2oat.cc
+++ b/test/118-noimage-dex2oat/noimage-dex2oat.cc
@@ -34,11 +34,11 @@
}
};
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv*, jclass) {
return Runtime::Current()->GetHeap()->HasImageSpace();
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv*, jclass) {
return Runtime::Current()->IsImageDex2OatEnabled();
}
diff --git a/test/125-gc-and-classloading/expected.txt b/test/125-gc-and-classloading/expected.txt
new file mode 100644
index 0000000..7ef22e9
--- /dev/null
+++ b/test/125-gc-and-classloading/expected.txt
@@ -0,0 +1 @@
+PASS
diff --git a/test/125-gc-and-classloading/info.txt b/test/125-gc-and-classloading/info.txt
new file mode 100644
index 0000000..bb6bf12
--- /dev/null
+++ b/test/125-gc-and-classloading/info.txt
@@ -0,0 +1 @@
+Tests class loading and GC running in parallel.
diff --git a/test/125-gc-and-classloading/src/Main.java b/test/125-gc-and-classloading/src/Main.java
new file mode 100644
index 0000000..61e123d
--- /dev/null
+++ b/test/125-gc-and-classloading/src/Main.java
@@ -0,0 +1,3072 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.CountDownLatch;
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ // Try to cause a class loading/linking while GC is running.
+ CountDownLatch cdl = new CountDownLatch(1);
+ GcThread gcThread = new GcThread(cdl);
+ ClassLoadingThread classLoadingThread = new ClassLoadingThread(cdl);
+ gcThread.start();
+ classLoadingThread.start();
+ gcThread.join();
+ classLoadingThread.join();
+ System.out.println("PASS");
+ }
+
+ static class GcThread extends Thread {
+ CountDownLatch cdl;
+
+ GcThread(CountDownLatch cdl) {
+ this.cdl = cdl;
+ }
+
+ public void run() {
+ for (int i = 0; i < 10; ++i) {
+ Runtime.getRuntime().gc();
+ if (i == 0) {
+ cdl.countDown();
+ }
+ }
+ }
+ }
+
+ static class ClassLoadingThread extends Thread {
+ CountDownLatch cdl;
+
+ ClassLoadingThread(CountDownLatch cdl) {
+ this.cdl = cdl;
+ }
+
+ public void run() {
+ try {
+ cdl.await();
+ Class c0 = Class.forName("Main$BigClass");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ // A class with lots of fields so that the class loading/linking takes a long time.
+ // It is an abstract class to exercise the non-embedded imt/vtable case.
+ static abstract class BigClass {
+ static Object sf1;
+ static Object sf2;
+ static Object sf3;
+ static Object sf4;
+ static Object sf5;
+ static Object sf6;
+ static Object sf7;
+ static Object sf8;
+ static Object sf9;
+ static Object sf10;
+ static Object sf11;
+ static Object sf12;
+ static Object sf13;
+ static Object sf14;
+ static Object sf15;
+ static Object sf16;
+ static Object sf17;
+ static Object sf18;
+ static Object sf19;
+ static Object sf20;
+ static Object sf21;
+ static Object sf22;
+ static Object sf23;
+ static Object sf24;
+ static Object sf25;
+ static Object sf26;
+ static Object sf27;
+ static Object sf28;
+ static Object sf29;
+ static Object sf30;
+ static Object sf31;
+ static Object sf32;
+ static Object sf33;
+ static Object sf34;
+ static Object sf35;
+ static Object sf36;
+ static Object sf37;
+ static Object sf38;
+ static Object sf39;
+ static Object sf40;
+ static Object sf41;
+ static Object sf42;
+ static Object sf43;
+ static Object sf44;
+ static Object sf45;
+ static Object sf46;
+ static Object sf47;
+ static Object sf48;
+ static Object sf49;
+ static Object sf50;
+ static Object sf51;
+ static Object sf52;
+ static Object sf53;
+ static Object sf54;
+ static Object sf55;
+ static Object sf56;
+ static Object sf57;
+ static Object sf58;
+ static Object sf59;
+ static Object sf60;
+ static Object sf61;
+ static Object sf62;
+ static Object sf63;
+ static Object sf64;
+ static Object sf65;
+ static Object sf66;
+ static Object sf67;
+ static Object sf68;
+ static Object sf69;
+ static Object sf70;
+ static Object sf71;
+ static Object sf72;
+ static Object sf73;
+ static Object sf74;
+ static Object sf75;
+ static Object sf76;
+ static Object sf77;
+ static Object sf78;
+ static Object sf79;
+ static Object sf80;
+ static Object sf81;
+ static Object sf82;
+ static Object sf83;
+ static Object sf84;
+ static Object sf85;
+ static Object sf86;
+ static Object sf87;
+ static Object sf88;
+ static Object sf89;
+ static Object sf90;
+ static Object sf91;
+ static Object sf92;
+ static Object sf93;
+ static Object sf94;
+ static Object sf95;
+ static Object sf96;
+ static Object sf97;
+ static Object sf98;
+ static Object sf99;
+ static Object sf100;
+ static Object sf101;
+ static Object sf102;
+ static Object sf103;
+ static Object sf104;
+ static Object sf105;
+ static Object sf106;
+ static Object sf107;
+ static Object sf108;
+ static Object sf109;
+ static Object sf110;
+ static Object sf111;
+ static Object sf112;
+ static Object sf113;
+ static Object sf114;
+ static Object sf115;
+ static Object sf116;
+ static Object sf117;
+ static Object sf118;
+ static Object sf119;
+ static Object sf120;
+ static Object sf121;
+ static Object sf122;
+ static Object sf123;
+ static Object sf124;
+ static Object sf125;
+ static Object sf126;
+ static Object sf127;
+ static Object sf128;
+ static Object sf129;
+ static Object sf130;
+ static Object sf131;
+ static Object sf132;
+ static Object sf133;
+ static Object sf134;
+ static Object sf135;
+ static Object sf136;
+ static Object sf137;
+ static Object sf138;
+ static Object sf139;
+ static Object sf140;
+ static Object sf141;
+ static Object sf142;
+ static Object sf143;
+ static Object sf144;
+ static Object sf145;
+ static Object sf146;
+ static Object sf147;
+ static Object sf148;
+ static Object sf149;
+ static Object sf150;
+ static Object sf151;
+ static Object sf152;
+ static Object sf153;
+ static Object sf154;
+ static Object sf155;
+ static Object sf156;
+ static Object sf157;
+ static Object sf158;
+ static Object sf159;
+ static Object sf160;
+ static Object sf161;
+ static Object sf162;
+ static Object sf163;
+ static Object sf164;
+ static Object sf165;
+ static Object sf166;
+ static Object sf167;
+ static Object sf168;
+ static Object sf169;
+ static Object sf170;
+ static Object sf171;
+ static Object sf172;
+ static Object sf173;
+ static Object sf174;
+ static Object sf175;
+ static Object sf176;
+ static Object sf177;
+ static Object sf178;
+ static Object sf179;
+ static Object sf180;
+ static Object sf181;
+ static Object sf182;
+ static Object sf183;
+ static Object sf184;
+ static Object sf185;
+ static Object sf186;
+ static Object sf187;
+ static Object sf188;
+ static Object sf189;
+ static Object sf190;
+ static Object sf191;
+ static Object sf192;
+ static Object sf193;
+ static Object sf194;
+ static Object sf195;
+ static Object sf196;
+ static Object sf197;
+ static Object sf198;
+ static Object sf199;
+ static Object sf200;
+ static Object sf201;
+ static Object sf202;
+ static Object sf203;
+ static Object sf204;
+ static Object sf205;
+ static Object sf206;
+ static Object sf207;
+ static Object sf208;
+ static Object sf209;
+ static Object sf210;
+ static Object sf211;
+ static Object sf212;
+ static Object sf213;
+ static Object sf214;
+ static Object sf215;
+ static Object sf216;
+ static Object sf217;
+ static Object sf218;
+ static Object sf219;
+ static Object sf220;
+ static Object sf221;
+ static Object sf222;
+ static Object sf223;
+ static Object sf224;
+ static Object sf225;
+ static Object sf226;
+ static Object sf227;
+ static Object sf228;
+ static Object sf229;
+ static Object sf230;
+ static Object sf231;
+ static Object sf232;
+ static Object sf233;
+ static Object sf234;
+ static Object sf235;
+ static Object sf236;
+ static Object sf237;
+ static Object sf238;
+ static Object sf239;
+ static Object sf240;
+ static Object sf241;
+ static Object sf242;
+ static Object sf243;
+ static Object sf244;
+ static Object sf245;
+ static Object sf246;
+ static Object sf247;
+ static Object sf248;
+ static Object sf249;
+ static Object sf250;
+ static Object sf251;
+ static Object sf252;
+ static Object sf253;
+ static Object sf254;
+ static Object sf255;
+ static Object sf256;
+ static Object sf257;
+ static Object sf258;
+ static Object sf259;
+ static Object sf260;
+ static Object sf261;
+ static Object sf262;
+ static Object sf263;
+ static Object sf264;
+ static Object sf265;
+ static Object sf266;
+ static Object sf267;
+ static Object sf268;
+ static Object sf269;
+ static Object sf270;
+ static Object sf271;
+ static Object sf272;
+ static Object sf273;
+ static Object sf274;
+ static Object sf275;
+ static Object sf276;
+ static Object sf277;
+ static Object sf278;
+ static Object sf279;
+ static Object sf280;
+ static Object sf281;
+ static Object sf282;
+ static Object sf283;
+ static Object sf284;
+ static Object sf285;
+ static Object sf286;
+ static Object sf287;
+ static Object sf288;
+ static Object sf289;
+ static Object sf290;
+ static Object sf291;
+ static Object sf292;
+ static Object sf293;
+ static Object sf294;
+ static Object sf295;
+ static Object sf296;
+ static Object sf297;
+ static Object sf298;
+ static Object sf299;
+ static Object sf300;
+ static Object sf301;
+ static Object sf302;
+ static Object sf303;
+ static Object sf304;
+ static Object sf305;
+ static Object sf306;
+ static Object sf307;
+ static Object sf308;
+ static Object sf309;
+ static Object sf310;
+ static Object sf311;
+ static Object sf312;
+ static Object sf313;
+ static Object sf314;
+ static Object sf315;
+ static Object sf316;
+ static Object sf317;
+ static Object sf318;
+ static Object sf319;
+ static Object sf320;
+ static Object sf321;
+ static Object sf322;
+ static Object sf323;
+ static Object sf324;
+ static Object sf325;
+ static Object sf326;
+ static Object sf327;
+ static Object sf328;
+ static Object sf329;
+ static Object sf330;
+ static Object sf331;
+ static Object sf332;
+ static Object sf333;
+ static Object sf334;
+ static Object sf335;
+ static Object sf336;
+ static Object sf337;
+ static Object sf338;
+ static Object sf339;
+ static Object sf340;
+ static Object sf341;
+ static Object sf342;
+ static Object sf343;
+ static Object sf344;
+ static Object sf345;
+ static Object sf346;
+ static Object sf347;
+ static Object sf348;
+ static Object sf349;
+ static Object sf350;
+ static Object sf351;
+ static Object sf352;
+ static Object sf353;
+ static Object sf354;
+ static Object sf355;
+ static Object sf356;
+ static Object sf357;
+ static Object sf358;
+ static Object sf359;
+ static Object sf360;
+ static Object sf361;
+ static Object sf362;
+ static Object sf363;
+ static Object sf364;
+ static Object sf365;
+ static Object sf366;
+ static Object sf367;
+ static Object sf368;
+ static Object sf369;
+ static Object sf370;
+ static Object sf371;
+ static Object sf372;
+ static Object sf373;
+ static Object sf374;
+ static Object sf375;
+ static Object sf376;
+ static Object sf377;
+ static Object sf378;
+ static Object sf379;
+ static Object sf380;
+ static Object sf381;
+ static Object sf382;
+ static Object sf383;
+ static Object sf384;
+ static Object sf385;
+ static Object sf386;
+ static Object sf387;
+ static Object sf388;
+ static Object sf389;
+ static Object sf390;
+ static Object sf391;
+ static Object sf392;
+ static Object sf393;
+ static Object sf394;
+ static Object sf395;
+ static Object sf396;
+ static Object sf397;
+ static Object sf398;
+ static Object sf399;
+ static Object sf400;
+ static Object sf401;
+ static Object sf402;
+ static Object sf403;
+ static Object sf404;
+ static Object sf405;
+ static Object sf406;
+ static Object sf407;
+ static Object sf408;
+ static Object sf409;
+ static Object sf410;
+ static Object sf411;
+ static Object sf412;
+ static Object sf413;
+ static Object sf414;
+ static Object sf415;
+ static Object sf416;
+ static Object sf417;
+ static Object sf418;
+ static Object sf419;
+ static Object sf420;
+ static Object sf421;
+ static Object sf422;
+ static Object sf423;
+ static Object sf424;
+ static Object sf425;
+ static Object sf426;
+ static Object sf427;
+ static Object sf428;
+ static Object sf429;
+ static Object sf430;
+ static Object sf431;
+ static Object sf432;
+ static Object sf433;
+ static Object sf434;
+ static Object sf435;
+ static Object sf436;
+ static Object sf437;
+ static Object sf438;
+ static Object sf439;
+ static Object sf440;
+ static Object sf441;
+ static Object sf442;
+ static Object sf443;
+ static Object sf444;
+ static Object sf445;
+ static Object sf446;
+ static Object sf447;
+ static Object sf448;
+ static Object sf449;
+ static Object sf450;
+ static Object sf451;
+ static Object sf452;
+ static Object sf453;
+ static Object sf454;
+ static Object sf455;
+ static Object sf456;
+ static Object sf457;
+ static Object sf458;
+ static Object sf459;
+ static Object sf460;
+ static Object sf461;
+ static Object sf462;
+ static Object sf463;
+ static Object sf464;
+ static Object sf465;
+ static Object sf466;
+ static Object sf467;
+ static Object sf468;
+ static Object sf469;
+ static Object sf470;
+ static Object sf471;
+ static Object sf472;
+ static Object sf473;
+ static Object sf474;
+ static Object sf475;
+ static Object sf476;
+ static Object sf477;
+ static Object sf478;
+ static Object sf479;
+ static Object sf480;
+ static Object sf481;
+ static Object sf482;
+ static Object sf483;
+ static Object sf484;
+ static Object sf485;
+ static Object sf486;
+ static Object sf487;
+ static Object sf488;
+ static Object sf489;
+ static Object sf490;
+ static Object sf491;
+ static Object sf492;
+ static Object sf493;
+ static Object sf494;
+ static Object sf495;
+ static Object sf496;
+ static Object sf497;
+ static Object sf498;
+ static Object sf499;
+ static Object sf500;
+ static Object sf501;
+ static Object sf502;
+ static Object sf503;
+ static Object sf504;
+ static Object sf505;
+ static Object sf506;
+ static Object sf507;
+ static Object sf508;
+ static Object sf509;
+ static Object sf510;
+ static Object sf511;
+ static Object sf512;
+ static Object sf513;
+ static Object sf514;
+ static Object sf515;
+ static Object sf516;
+ static Object sf517;
+ static Object sf518;
+ static Object sf519;
+ static Object sf520;
+ static Object sf521;
+ static Object sf522;
+ static Object sf523;
+ static Object sf524;
+ static Object sf525;
+ static Object sf526;
+ static Object sf527;
+ static Object sf528;
+ static Object sf529;
+ static Object sf530;
+ static Object sf531;
+ static Object sf532;
+ static Object sf533;
+ static Object sf534;
+ static Object sf535;
+ static Object sf536;
+ static Object sf537;
+ static Object sf538;
+ static Object sf539;
+ static Object sf540;
+ static Object sf541;
+ static Object sf542;
+ static Object sf543;
+ static Object sf544;
+ static Object sf545;
+ static Object sf546;
+ static Object sf547;
+ static Object sf548;
+ static Object sf549;
+ static Object sf550;
+ static Object sf551;
+ static Object sf552;
+ static Object sf553;
+ static Object sf554;
+ static Object sf555;
+ static Object sf556;
+ static Object sf557;
+ static Object sf558;
+ static Object sf559;
+ static Object sf560;
+ static Object sf561;
+ static Object sf562;
+ static Object sf563;
+ static Object sf564;
+ static Object sf565;
+ static Object sf566;
+ static Object sf567;
+ static Object sf568;
+ static Object sf569;
+ static Object sf570;
+ static Object sf571;
+ static Object sf572;
+ static Object sf573;
+ static Object sf574;
+ static Object sf575;
+ static Object sf576;
+ static Object sf577;
+ static Object sf578;
+ static Object sf579;
+ static Object sf580;
+ static Object sf581;
+ static Object sf582;
+ static Object sf583;
+ static Object sf584;
+ static Object sf585;
+ static Object sf586;
+ static Object sf587;
+ static Object sf588;
+ static Object sf589;
+ static Object sf590;
+ static Object sf591;
+ static Object sf592;
+ static Object sf593;
+ static Object sf594;
+ static Object sf595;
+ static Object sf596;
+ static Object sf597;
+ static Object sf598;
+ static Object sf599;
+ static Object sf600;
+ static Object sf601;
+ static Object sf602;
+ static Object sf603;
+ static Object sf604;
+ static Object sf605;
+ static Object sf606;
+ static Object sf607;
+ static Object sf608;
+ static Object sf609;
+ static Object sf610;
+ static Object sf611;
+ static Object sf612;
+ static Object sf613;
+ static Object sf614;
+ static Object sf615;
+ static Object sf616;
+ static Object sf617;
+ static Object sf618;
+ static Object sf619;
+ static Object sf620;
+ static Object sf621;
+ static Object sf622;
+ static Object sf623;
+ static Object sf624;
+ static Object sf625;
+ static Object sf626;
+ static Object sf627;
+ static Object sf628;
+ static Object sf629;
+ static Object sf630;
+ static Object sf631;
+ static Object sf632;
+ static Object sf633;
+ static Object sf634;
+ static Object sf635;
+ static Object sf636;
+ static Object sf637;
+ static Object sf638;
+ static Object sf639;
+ static Object sf640;
+ static Object sf641;
+ static Object sf642;
+ static Object sf643;
+ static Object sf644;
+ static Object sf645;
+ static Object sf646;
+ static Object sf647;
+ static Object sf648;
+ static Object sf649;
+ static Object sf650;
+ static Object sf651;
+ static Object sf652;
+ static Object sf653;
+ static Object sf654;
+ static Object sf655;
+ static Object sf656;
+ static Object sf657;
+ static Object sf658;
+ static Object sf659;
+ static Object sf660;
+ static Object sf661;
+ static Object sf662;
+ static Object sf663;
+ static Object sf664;
+ static Object sf665;
+ static Object sf666;
+ static Object sf667;
+ static Object sf668;
+ static Object sf669;
+ static Object sf670;
+ static Object sf671;
+ static Object sf672;
+ static Object sf673;
+ static Object sf674;
+ static Object sf675;
+ static Object sf676;
+ static Object sf677;
+ static Object sf678;
+ static Object sf679;
+ static Object sf680;
+ static Object sf681;
+ static Object sf682;
+ static Object sf683;
+ static Object sf684;
+ static Object sf685;
+ static Object sf686;
+ static Object sf687;
+ static Object sf688;
+ static Object sf689;
+ static Object sf690;
+ static Object sf691;
+ static Object sf692;
+ static Object sf693;
+ static Object sf694;
+ static Object sf695;
+ static Object sf696;
+ static Object sf697;
+ static Object sf698;
+ static Object sf699;
+ static Object sf700;
+ static Object sf701;
+ static Object sf702;
+ static Object sf703;
+ static Object sf704;
+ static Object sf705;
+ static Object sf706;
+ static Object sf707;
+ static Object sf708;
+ static Object sf709;
+ static Object sf710;
+ static Object sf711;
+ static Object sf712;
+ static Object sf713;
+ static Object sf714;
+ static Object sf715;
+ static Object sf716;
+ static Object sf717;
+ static Object sf718;
+ static Object sf719;
+ static Object sf720;
+ static Object sf721;
+ static Object sf722;
+ static Object sf723;
+ static Object sf724;
+ static Object sf725;
+ static Object sf726;
+ static Object sf727;
+ static Object sf728;
+ static Object sf729;
+ static Object sf730;
+ static Object sf731;
+ static Object sf732;
+ static Object sf733;
+ static Object sf734;
+ static Object sf735;
+ static Object sf736;
+ static Object sf737;
+ static Object sf738;
+ static Object sf739;
+ static Object sf740;
+ static Object sf741;
+ static Object sf742;
+ static Object sf743;
+ static Object sf744;
+ static Object sf745;
+ static Object sf746;
+ static Object sf747;
+ static Object sf748;
+ static Object sf749;
+ static Object sf750;
+ static Object sf751;
+ static Object sf752;
+ static Object sf753;
+ static Object sf754;
+ static Object sf755;
+ static Object sf756;
+ static Object sf757;
+ static Object sf758;
+ static Object sf759;
+ static Object sf760;
+ static Object sf761;
+ static Object sf762;
+ static Object sf763;
+ static Object sf764;
+ static Object sf765;
+ static Object sf766;
+ static Object sf767;
+ static Object sf768;
+ static Object sf769;
+ static Object sf770;
+ static Object sf771;
+ static Object sf772;
+ static Object sf773;
+ static Object sf774;
+ static Object sf775;
+ static Object sf776;
+ static Object sf777;
+ static Object sf778;
+ static Object sf779;
+ static Object sf780;
+ static Object sf781;
+ static Object sf782;
+ static Object sf783;
+ static Object sf784;
+ static Object sf785;
+ static Object sf786;
+ static Object sf787;
+ static Object sf788;
+ static Object sf789;
+ static Object sf790;
+ static Object sf791;
+ static Object sf792;
+ static Object sf793;
+ static Object sf794;
+ static Object sf795;
+ static Object sf796;
+ static Object sf797;
+ static Object sf798;
+ static Object sf799;
+ static Object sf800;
+ static Object sf801;
+ static Object sf802;
+ static Object sf803;
+ static Object sf804;
+ static Object sf805;
+ static Object sf806;
+ static Object sf807;
+ static Object sf808;
+ static Object sf809;
+ static Object sf810;
+ static Object sf811;
+ static Object sf812;
+ static Object sf813;
+ static Object sf814;
+ static Object sf815;
+ static Object sf816;
+ static Object sf817;
+ static Object sf818;
+ static Object sf819;
+ static Object sf820;
+ static Object sf821;
+ static Object sf822;
+ static Object sf823;
+ static Object sf824;
+ static Object sf825;
+ static Object sf826;
+ static Object sf827;
+ static Object sf828;
+ static Object sf829;
+ static Object sf830;
+ static Object sf831;
+ static Object sf832;
+ static Object sf833;
+ static Object sf834;
+ static Object sf835;
+ static Object sf836;
+ static Object sf837;
+ static Object sf838;
+ static Object sf839;
+ static Object sf840;
+ static Object sf841;
+ static Object sf842;
+ static Object sf843;
+ static Object sf844;
+ static Object sf845;
+ static Object sf846;
+ static Object sf847;
+ static Object sf848;
+ static Object sf849;
+ static Object sf850;
+ static Object sf851;
+ static Object sf852;
+ static Object sf853;
+ static Object sf854;
+ static Object sf855;
+ static Object sf856;
+ static Object sf857;
+ static Object sf858;
+ static Object sf859;
+ static Object sf860;
+ static Object sf861;
+ static Object sf862;
+ static Object sf863;
+ static Object sf864;
+ static Object sf865;
+ static Object sf866;
+ static Object sf867;
+ static Object sf868;
+ static Object sf869;
+ static Object sf870;
+ static Object sf871;
+ static Object sf872;
+ static Object sf873;
+ static Object sf874;
+ static Object sf875;
+ static Object sf876;
+ static Object sf877;
+ static Object sf878;
+ static Object sf879;
+ static Object sf880;
+ static Object sf881;
+ static Object sf882;
+ static Object sf883;
+ static Object sf884;
+ static Object sf885;
+ static Object sf886;
+ static Object sf887;
+ static Object sf888;
+ static Object sf889;
+ static Object sf890;
+ static Object sf891;
+ static Object sf892;
+ static Object sf893;
+ static Object sf894;
+ static Object sf895;
+ static Object sf896;
+ static Object sf897;
+ static Object sf898;
+ static Object sf899;
+ static Object sf900;
+ static Object sf901;
+ static Object sf902;
+ static Object sf903;
+ static Object sf904;
+ static Object sf905;
+ static Object sf906;
+ static Object sf907;
+ static Object sf908;
+ static Object sf909;
+ static Object sf910;
+ static Object sf911;
+ static Object sf912;
+ static Object sf913;
+ static Object sf914;
+ static Object sf915;
+ static Object sf916;
+ static Object sf917;
+ static Object sf918;
+ static Object sf919;
+ static Object sf920;
+ static Object sf921;
+ static Object sf922;
+ static Object sf923;
+ static Object sf924;
+ static Object sf925;
+ static Object sf926;
+ static Object sf927;
+ static Object sf928;
+ static Object sf929;
+ static Object sf930;
+ static Object sf931;
+ static Object sf932;
+ static Object sf933;
+ static Object sf934;
+ static Object sf935;
+ static Object sf936;
+ static Object sf937;
+ static Object sf938;
+ static Object sf939;
+ static Object sf940;
+ static Object sf941;
+ static Object sf942;
+ static Object sf943;
+ static Object sf944;
+ static Object sf945;
+ static Object sf946;
+ static Object sf947;
+ static Object sf948;
+ static Object sf949;
+ static Object sf950;
+ static Object sf951;
+ static Object sf952;
+ static Object sf953;
+ static Object sf954;
+ static Object sf955;
+ static Object sf956;
+ static Object sf957;
+ static Object sf958;
+ static Object sf959;
+ static Object sf960;
+ static Object sf961;
+ static Object sf962;
+ static Object sf963;
+ static Object sf964;
+ static Object sf965;
+ static Object sf966;
+ static Object sf967;
+ static Object sf968;
+ static Object sf969;
+ static Object sf970;
+ static Object sf971;
+ static Object sf972;
+ static Object sf973;
+ static Object sf974;
+ static Object sf975;
+ static Object sf976;
+ static Object sf977;
+ static Object sf978;
+ static Object sf979;
+ static Object sf980;
+ static Object sf981;
+ static Object sf982;
+ static Object sf983;
+ static Object sf984;
+ static Object sf985;
+ static Object sf986;
+ static Object sf987;
+ static Object sf988;
+ static Object sf989;
+ static Object sf990;
+ static Object sf991;
+ static Object sf992;
+ static Object sf993;
+ static Object sf994;
+ static Object sf995;
+ static Object sf996;
+ static Object sf997;
+ static Object sf998;
+ static Object sf999;
+ static Object sf1000;
+ static Object sf1001;
+ static Object sf1002;
+ static Object sf1003;
+ static Object sf1004;
+ static Object sf1005;
+ static Object sf1006;
+ static Object sf1007;
+ static Object sf1008;
+ static Object sf1009;
+ static Object sf1010;
+ static Object sf1011;
+ static Object sf1012;
+ static Object sf1013;
+ static Object sf1014;
+ static Object sf1015;
+ static Object sf1016;
+ static Object sf1017;
+ static Object sf1018;
+ static Object sf1019;
+ static Object sf1020;
+ static Object sf1021;
+ static Object sf1022;
+ static Object sf1023;
+ static Object sf1024;
+ static Object sf1025;
+ static Object sf1026;
+ static Object sf1027;
+ static Object sf1028;
+ static Object sf1029;
+ static Object sf1030;
+ static Object sf1031;
+ static Object sf1032;
+ static Object sf1033;
+ static Object sf1034;
+ static Object sf1035;
+ static Object sf1036;
+ static Object sf1037;
+ static Object sf1038;
+ static Object sf1039;
+ static Object sf1040;
+ static Object sf1041;
+ static Object sf1042;
+ static Object sf1043;
+ static Object sf1044;
+ static Object sf1045;
+ static Object sf1046;
+ static Object sf1047;
+ static Object sf1048;
+ static Object sf1049;
+ static Object sf1050;
+ static Object sf1051;
+ static Object sf1052;
+ static Object sf1053;
+ static Object sf1054;
+ static Object sf1055;
+ static Object sf1056;
+ static Object sf1057;
+ static Object sf1058;
+ static Object sf1059;
+ static Object sf1060;
+ static Object sf1061;
+ static Object sf1062;
+ static Object sf1063;
+ static Object sf1064;
+ static Object sf1065;
+ static Object sf1066;
+ static Object sf1067;
+ static Object sf1068;
+ static Object sf1069;
+ static Object sf1070;
+ static Object sf1071;
+ static Object sf1072;
+ static Object sf1073;
+ static Object sf1074;
+ static Object sf1075;
+ static Object sf1076;
+ static Object sf1077;
+ static Object sf1078;
+ static Object sf1079;
+ static Object sf1080;
+ static Object sf1081;
+ static Object sf1082;
+ static Object sf1083;
+ static Object sf1084;
+ static Object sf1085;
+ static Object sf1086;
+ static Object sf1087;
+ static Object sf1088;
+ static Object sf1089;
+ static Object sf1090;
+ static Object sf1091;
+ static Object sf1092;
+ static Object sf1093;
+ static Object sf1094;
+ static Object sf1095;
+ static Object sf1096;
+ static Object sf1097;
+ static Object sf1098;
+ static Object sf1099;
+ static Object sf1100;
+ static Object sf1101;
+ static Object sf1102;
+ static Object sf1103;
+ static Object sf1104;
+ static Object sf1105;
+ static Object sf1106;
+ static Object sf1107;
+ static Object sf1108;
+ static Object sf1109;
+ static Object sf1110;
+ static Object sf1111;
+ static Object sf1112;
+ static Object sf1113;
+ static Object sf1114;
+ static Object sf1115;
+ static Object sf1116;
+ static Object sf1117;
+ static Object sf1118;
+ static Object sf1119;
+ static Object sf1120;
+ static Object sf1121;
+ static Object sf1122;
+ static Object sf1123;
+ static Object sf1124;
+ static Object sf1125;
+ static Object sf1126;
+ static Object sf1127;
+ static Object sf1128;
+ static Object sf1129;
+ static Object sf1130;
+ static Object sf1131;
+ static Object sf1132;
+ static Object sf1133;
+ static Object sf1134;
+ static Object sf1135;
+ static Object sf1136;
+ static Object sf1137;
+ static Object sf1138;
+ static Object sf1139;
+ static Object sf1140;
+ static Object sf1141;
+ static Object sf1142;
+ static Object sf1143;
+ static Object sf1144;
+ static Object sf1145;
+ static Object sf1146;
+ static Object sf1147;
+ static Object sf1148;
+ static Object sf1149;
+ static Object sf1150;
+ static Object sf1151;
+ static Object sf1152;
+ static Object sf1153;
+ static Object sf1154;
+ static Object sf1155;
+ static Object sf1156;
+ static Object sf1157;
+ static Object sf1158;
+ static Object sf1159;
+ static Object sf1160;
+ static Object sf1161;
+ static Object sf1162;
+ static Object sf1163;
+ static Object sf1164;
+ static Object sf1165;
+ static Object sf1166;
+ static Object sf1167;
+ static Object sf1168;
+ static Object sf1169;
+ static Object sf1170;
+ static Object sf1171;
+ static Object sf1172;
+ static Object sf1173;
+ static Object sf1174;
+ static Object sf1175;
+ static Object sf1176;
+ static Object sf1177;
+ static Object sf1178;
+ static Object sf1179;
+ static Object sf1180;
+ static Object sf1181;
+ static Object sf1182;
+ static Object sf1183;
+ static Object sf1184;
+ static Object sf1185;
+ static Object sf1186;
+ static Object sf1187;
+ static Object sf1188;
+ static Object sf1189;
+ static Object sf1190;
+ static Object sf1191;
+ static Object sf1192;
+ static Object sf1193;
+ static Object sf1194;
+ static Object sf1195;
+ static Object sf1196;
+ static Object sf1197;
+ static Object sf1198;
+ static Object sf1199;
+ static Object sf1200;
+ static Object sf1201;
+ static Object sf1202;
+ static Object sf1203;
+ static Object sf1204;
+ static Object sf1205;
+ static Object sf1206;
+ static Object sf1207;
+ static Object sf1208;
+ static Object sf1209;
+ static Object sf1210;
+ static Object sf1211;
+ static Object sf1212;
+ static Object sf1213;
+ static Object sf1214;
+ static Object sf1215;
+ static Object sf1216;
+ static Object sf1217;
+ static Object sf1218;
+ static Object sf1219;
+ static Object sf1220;
+ static Object sf1221;
+ static Object sf1222;
+ static Object sf1223;
+ static Object sf1224;
+ static Object sf1225;
+ static Object sf1226;
+ static Object sf1227;
+ static Object sf1228;
+ static Object sf1229;
+ static Object sf1230;
+ static Object sf1231;
+ static Object sf1232;
+ static Object sf1233;
+ static Object sf1234;
+ static Object sf1235;
+ static Object sf1236;
+ static Object sf1237;
+ static Object sf1238;
+ static Object sf1239;
+ static Object sf1240;
+ static Object sf1241;
+ static Object sf1242;
+ static Object sf1243;
+ static Object sf1244;
+ static Object sf1245;
+ static Object sf1246;
+ static Object sf1247;
+ static Object sf1248;
+ static Object sf1249;
+ static Object sf1250;
+ static Object sf1251;
+ static Object sf1252;
+ static Object sf1253;
+ static Object sf1254;
+ static Object sf1255;
+ static Object sf1256;
+ static Object sf1257;
+ static Object sf1258;
+ static Object sf1259;
+ static Object sf1260;
+ static Object sf1261;
+ static Object sf1262;
+ static Object sf1263;
+ static Object sf1264;
+ static Object sf1265;
+ static Object sf1266;
+ static Object sf1267;
+ static Object sf1268;
+ static Object sf1269;
+ static Object sf1270;
+ static Object sf1271;
+ static Object sf1272;
+ static Object sf1273;
+ static Object sf1274;
+ static Object sf1275;
+ static Object sf1276;
+ static Object sf1277;
+ static Object sf1278;
+ static Object sf1279;
+ static Object sf1280;
+ static Object sf1281;
+ static Object sf1282;
+ static Object sf1283;
+ static Object sf1284;
+ static Object sf1285;
+ static Object sf1286;
+ static Object sf1287;
+ static Object sf1288;
+ static Object sf1289;
+ static Object sf1290;
+ static Object sf1291;
+ static Object sf1292;
+ static Object sf1293;
+ static Object sf1294;
+ static Object sf1295;
+ static Object sf1296;
+ static Object sf1297;
+ static Object sf1298;
+ static Object sf1299;
+ static Object sf1300;
+ static Object sf1301;
+ static Object sf1302;
+ static Object sf1303;
+ static Object sf1304;
+ static Object sf1305;
+ static Object sf1306;
+ static Object sf1307;
+ static Object sf1308;
+ static Object sf1309;
+ static Object sf1310;
+ static Object sf1311;
+ static Object sf1312;
+ static Object sf1313;
+ static Object sf1314;
+ static Object sf1315;
+ static Object sf1316;
+ static Object sf1317;
+ static Object sf1318;
+ static Object sf1319;
+ static Object sf1320;
+ static Object sf1321;
+ static Object sf1322;
+ static Object sf1323;
+ static Object sf1324;
+ static Object sf1325;
+ static Object sf1326;
+ static Object sf1327;
+ static Object sf1328;
+ static Object sf1329;
+ static Object sf1330;
+ static Object sf1331;
+ static Object sf1332;
+ static Object sf1333;
+ static Object sf1334;
+ static Object sf1335;
+ static Object sf1336;
+ static Object sf1337;
+ static Object sf1338;
+ static Object sf1339;
+ static Object sf1340;
+ static Object sf1341;
+ static Object sf1342;
+ static Object sf1343;
+ static Object sf1344;
+ static Object sf1345;
+ static Object sf1346;
+ static Object sf1347;
+ static Object sf1348;
+ static Object sf1349;
+ static Object sf1350;
+ static Object sf1351;
+ static Object sf1352;
+ static Object sf1353;
+ static Object sf1354;
+ static Object sf1355;
+ static Object sf1356;
+ static Object sf1357;
+ static Object sf1358;
+ static Object sf1359;
+ static Object sf1360;
+ static Object sf1361;
+ static Object sf1362;
+ static Object sf1363;
+ static Object sf1364;
+ static Object sf1365;
+ static Object sf1366;
+ static Object sf1367;
+ static Object sf1368;
+ static Object sf1369;
+ static Object sf1370;
+ static Object sf1371;
+ static Object sf1372;
+ static Object sf1373;
+ static Object sf1374;
+ static Object sf1375;
+ static Object sf1376;
+ static Object sf1377;
+ static Object sf1378;
+ static Object sf1379;
+ static Object sf1380;
+ static Object sf1381;
+ static Object sf1382;
+ static Object sf1383;
+ static Object sf1384;
+ static Object sf1385;
+ static Object sf1386;
+ static Object sf1387;
+ static Object sf1388;
+ static Object sf1389;
+ static Object sf1390;
+ static Object sf1391;
+ static Object sf1392;
+ static Object sf1393;
+ static Object sf1394;
+ static Object sf1395;
+ static Object sf1396;
+ static Object sf1397;
+ static Object sf1398;
+ static Object sf1399;
+ static Object sf1400;
+ static Object sf1401;
+ static Object sf1402;
+ static Object sf1403;
+ static Object sf1404;
+ static Object sf1405;
+ static Object sf1406;
+ static Object sf1407;
+ static Object sf1408;
+ static Object sf1409;
+ static Object sf1410;
+ static Object sf1411;
+ static Object sf1412;
+ static Object sf1413;
+ static Object sf1414;
+ static Object sf1415;
+ static Object sf1416;
+ static Object sf1417;
+ static Object sf1418;
+ static Object sf1419;
+ static Object sf1420;
+ static Object sf1421;
+ static Object sf1422;
+ static Object sf1423;
+ static Object sf1424;
+ static Object sf1425;
+ static Object sf1426;
+ static Object sf1427;
+ static Object sf1428;
+ static Object sf1429;
+ static Object sf1430;
+ static Object sf1431;
+ static Object sf1432;
+ static Object sf1433;
+ static Object sf1434;
+ static Object sf1435;
+ static Object sf1436;
+ static Object sf1437;
+ static Object sf1438;
+ static Object sf1439;
+ static Object sf1440;
+ static Object sf1441;
+ static Object sf1442;
+ static Object sf1443;
+ static Object sf1444;
+ static Object sf1445;
+ static Object sf1446;
+ static Object sf1447;
+ static Object sf1448;
+ static Object sf1449;
+ static Object sf1450;
+ static Object sf1451;
+ static Object sf1452;
+ static Object sf1453;
+ static Object sf1454;
+ static Object sf1455;
+ static Object sf1456;
+ static Object sf1457;
+ static Object sf1458;
+ static Object sf1459;
+ static Object sf1460;
+ static Object sf1461;
+ static Object sf1462;
+ static Object sf1463;
+ static Object sf1464;
+ static Object sf1465;
+ static Object sf1466;
+ static Object sf1467;
+ static Object sf1468;
+ static Object sf1469;
+ static Object sf1470;
+ static Object sf1471;
+ static Object sf1472;
+ static Object sf1473;
+ static Object sf1474;
+ static Object sf1475;
+ static Object sf1476;
+ static Object sf1477;
+ static Object sf1478;
+ static Object sf1479;
+ static Object sf1480;
+ static Object sf1481;
+ static Object sf1482;
+ static Object sf1483;
+ static Object sf1484;
+ static Object sf1485;
+ static Object sf1486;
+ static Object sf1487;
+ static Object sf1488;
+ static Object sf1489;
+ static Object sf1490;
+ static Object sf1491;
+ static Object sf1492;
+ static Object sf1493;
+ static Object sf1494;
+ static Object sf1495;
+ static Object sf1496;
+ static Object sf1497;
+ static Object sf1498;
+ static Object sf1499;
+ static Object sf1500;
+ static Object sf1501;
+ static Object sf1502;
+ static Object sf1503;
+ static Object sf1504;
+ static Object sf1505;
+ static Object sf1506;
+ static Object sf1507;
+ static Object sf1508;
+ static Object sf1509;
+ static Object sf1510;
+ static Object sf1511;
+ static Object sf1512;
+ static Object sf1513;
+ static Object sf1514;
+ static Object sf1515;
+ static Object sf1516;
+ static Object sf1517;
+ static Object sf1518;
+ static Object sf1519;
+ static Object sf1520;
+ static Object sf1521;
+ static Object sf1522;
+ static Object sf1523;
+ static Object sf1524;
+ static Object sf1525;
+ static Object sf1526;
+ static Object sf1527;
+ static Object sf1528;
+ static Object sf1529;
+ static Object sf1530;
+ static Object sf1531;
+ static Object sf1532;
+ static Object sf1533;
+ static Object sf1534;
+ static Object sf1535;
+ static Object sf1536;
+ static Object sf1537;
+ static Object sf1538;
+ static Object sf1539;
+ static Object sf1540;
+ static Object sf1541;
+ static Object sf1542;
+ static Object sf1543;
+ static Object sf1544;
+ static Object sf1545;
+ static Object sf1546;
+ static Object sf1547;
+ static Object sf1548;
+ static Object sf1549;
+ static Object sf1550;
+ static Object sf1551;
+ static Object sf1552;
+ static Object sf1553;
+ static Object sf1554;
+ static Object sf1555;
+ static Object sf1556;
+ static Object sf1557;
+ static Object sf1558;
+ static Object sf1559;
+ static Object sf1560;
+ static Object sf1561;
+ static Object sf1562;
+ static Object sf1563;
+ static Object sf1564;
+ static Object sf1565;
+ static Object sf1566;
+ static Object sf1567;
+ static Object sf1568;
+ static Object sf1569;
+ static Object sf1570;
+ static Object sf1571;
+ static Object sf1572;
+ static Object sf1573;
+ static Object sf1574;
+ static Object sf1575;
+ static Object sf1576;
+ static Object sf1577;
+ static Object sf1578;
+ static Object sf1579;
+ static Object sf1580;
+ static Object sf1581;
+ static Object sf1582;
+ static Object sf1583;
+ static Object sf1584;
+ static Object sf1585;
+ static Object sf1586;
+ static Object sf1587;
+ static Object sf1588;
+ static Object sf1589;
+ static Object sf1590;
+ static Object sf1591;
+ static Object sf1592;
+ static Object sf1593;
+ static Object sf1594;
+ static Object sf1595;
+ static Object sf1596;
+ static Object sf1597;
+ static Object sf1598;
+ static Object sf1599;
+ static Object sf1600;
+ static Object sf1601;
+ static Object sf1602;
+ static Object sf1603;
+ static Object sf1604;
+ static Object sf1605;
+ static Object sf1606;
+ static Object sf1607;
+ static Object sf1608;
+ static Object sf1609;
+ static Object sf1610;
+ static Object sf1611;
+ static Object sf1612;
+ static Object sf1613;
+ static Object sf1614;
+ static Object sf1615;
+ static Object sf1616;
+ static Object sf1617;
+ static Object sf1618;
+ static Object sf1619;
+ static Object sf1620;
+ static Object sf1621;
+ static Object sf1622;
+ static Object sf1623;
+ static Object sf1624;
+ static Object sf1625;
+ static Object sf1626;
+ static Object sf1627;
+ static Object sf1628;
+ static Object sf1629;
+ static Object sf1630;
+ static Object sf1631;
+ static Object sf1632;
+ static Object sf1633;
+ static Object sf1634;
+ static Object sf1635;
+ static Object sf1636;
+ static Object sf1637;
+ static Object sf1638;
+ static Object sf1639;
+ static Object sf1640;
+ static Object sf1641;
+ static Object sf1642;
+ static Object sf1643;
+ static Object sf1644;
+ static Object sf1645;
+ static Object sf1646;
+ static Object sf1647;
+ static Object sf1648;
+ static Object sf1649;
+ static Object sf1650;
+ static Object sf1651;
+ static Object sf1652;
+ static Object sf1653;
+ static Object sf1654;
+ static Object sf1655;
+ static Object sf1656;
+ static Object sf1657;
+ static Object sf1658;
+ static Object sf1659;
+ static Object sf1660;
+ static Object sf1661;
+ static Object sf1662;
+ static Object sf1663;
+ static Object sf1664;
+ static Object sf1665;
+ static Object sf1666;
+ static Object sf1667;
+ static Object sf1668;
+ static Object sf1669;
+ static Object sf1670;
+ static Object sf1671;
+ static Object sf1672;
+ static Object sf1673;
+ static Object sf1674;
+ static Object sf1675;
+ static Object sf1676;
+ static Object sf1677;
+ static Object sf1678;
+ static Object sf1679;
+ static Object sf1680;
+ static Object sf1681;
+ static Object sf1682;
+ static Object sf1683;
+ static Object sf1684;
+ static Object sf1685;
+ static Object sf1686;
+ static Object sf1687;
+ static Object sf1688;
+ static Object sf1689;
+ static Object sf1690;
+ static Object sf1691;
+ static Object sf1692;
+ static Object sf1693;
+ static Object sf1694;
+ static Object sf1695;
+ static Object sf1696;
+ static Object sf1697;
+ static Object sf1698;
+ static Object sf1699;
+ static Object sf1700;
+ static Object sf1701;
+ static Object sf1702;
+ static Object sf1703;
+ static Object sf1704;
+ static Object sf1705;
+ static Object sf1706;
+ static Object sf1707;
+ static Object sf1708;
+ static Object sf1709;
+ static Object sf1710;
+ static Object sf1711;
+ static Object sf1712;
+ static Object sf1713;
+ static Object sf1714;
+ static Object sf1715;
+ static Object sf1716;
+ static Object sf1717;
+ static Object sf1718;
+ static Object sf1719;
+ static Object sf1720;
+ static Object sf1721;
+ static Object sf1722;
+ static Object sf1723;
+ static Object sf1724;
+ static Object sf1725;
+ static Object sf1726;
+ static Object sf1727;
+ static Object sf1728;
+ static Object sf1729;
+ static Object sf1730;
+ static Object sf1731;
+ static Object sf1732;
+ static Object sf1733;
+ static Object sf1734;
+ static Object sf1735;
+ static Object sf1736;
+ static Object sf1737;
+ static Object sf1738;
+ static Object sf1739;
+ static Object sf1740;
+ static Object sf1741;
+ static Object sf1742;
+ static Object sf1743;
+ static Object sf1744;
+ static Object sf1745;
+ static Object sf1746;
+ static Object sf1747;
+ static Object sf1748;
+ static Object sf1749;
+ static Object sf1750;
+ static Object sf1751;
+ static Object sf1752;
+ static Object sf1753;
+ static Object sf1754;
+ static Object sf1755;
+ static Object sf1756;
+ static Object sf1757;
+ static Object sf1758;
+ static Object sf1759;
+ static Object sf1760;
+ static Object sf1761;
+ static Object sf1762;
+ static Object sf1763;
+ static Object sf1764;
+ static Object sf1765;
+ static Object sf1766;
+ static Object sf1767;
+ static Object sf1768;
+ static Object sf1769;
+ static Object sf1770;
+ static Object sf1771;
+ static Object sf1772;
+ static Object sf1773;
+ static Object sf1774;
+ static Object sf1775;
+ static Object sf1776;
+ static Object sf1777;
+ static Object sf1778;
+ static Object sf1779;
+ static Object sf1780;
+ static Object sf1781;
+ static Object sf1782;
+ static Object sf1783;
+ static Object sf1784;
+ static Object sf1785;
+ static Object sf1786;
+ static Object sf1787;
+ static Object sf1788;
+ static Object sf1789;
+ static Object sf1790;
+ static Object sf1791;
+ static Object sf1792;
+ static Object sf1793;
+ static Object sf1794;
+ static Object sf1795;
+ static Object sf1796;
+ static Object sf1797;
+ static Object sf1798;
+ static Object sf1799;
+ static Object sf1800;
+ static Object sf1801;
+ static Object sf1802;
+ static Object sf1803;
+ static Object sf1804;
+ static Object sf1805;
+ static Object sf1806;
+ static Object sf1807;
+ static Object sf1808;
+ static Object sf1809;
+ static Object sf1810;
+ static Object sf1811;
+ static Object sf1812;
+ static Object sf1813;
+ static Object sf1814;
+ static Object sf1815;
+ static Object sf1816;
+ static Object sf1817;
+ static Object sf1818;
+ static Object sf1819;
+ static Object sf1820;
+ static Object sf1821;
+ static Object sf1822;
+ static Object sf1823;
+ static Object sf1824;
+ static Object sf1825;
+ static Object sf1826;
+ static Object sf1827;
+ static Object sf1828;
+ static Object sf1829;
+ static Object sf1830;
+ static Object sf1831;
+ static Object sf1832;
+ static Object sf1833;
+ static Object sf1834;
+ static Object sf1835;
+ static Object sf1836;
+ static Object sf1837;
+ static Object sf1838;
+ static Object sf1839;
+ static Object sf1840;
+ static Object sf1841;
+ static Object sf1842;
+ static Object sf1843;
+ static Object sf1844;
+ static Object sf1845;
+ static Object sf1846;
+ static Object sf1847;
+ static Object sf1848;
+ static Object sf1849;
+ static Object sf1850;
+ static Object sf1851;
+ static Object sf1852;
+ static Object sf1853;
+ static Object sf1854;
+ static Object sf1855;
+ static Object sf1856;
+ static Object sf1857;
+ static Object sf1858;
+ static Object sf1859;
+ static Object sf1860;
+ static Object sf1861;
+ static Object sf1862;
+ static Object sf1863;
+ static Object sf1864;
+ static Object sf1865;
+ static Object sf1866;
+ static Object sf1867;
+ static Object sf1868;
+ static Object sf1869;
+ static Object sf1870;
+ static Object sf1871;
+ static Object sf1872;
+ static Object sf1873;
+ static Object sf1874;
+ static Object sf1875;
+ static Object sf1876;
+ static Object sf1877;
+ static Object sf1878;
+ static Object sf1879;
+ static Object sf1880;
+ static Object sf1881;
+ static Object sf1882;
+ static Object sf1883;
+ static Object sf1884;
+ static Object sf1885;
+ static Object sf1886;
+ static Object sf1887;
+ static Object sf1888;
+ static Object sf1889;
+ static Object sf1890;
+ static Object sf1891;
+ static Object sf1892;
+ static Object sf1893;
+ static Object sf1894;
+ static Object sf1895;
+ static Object sf1896;
+ static Object sf1897;
+ static Object sf1898;
+ static Object sf1899;
+ static Object sf1900;
+ static Object sf1901;
+ static Object sf1902;
+ static Object sf1903;
+ static Object sf1904;
+ static Object sf1905;
+ static Object sf1906;
+ static Object sf1907;
+ static Object sf1908;
+ static Object sf1909;
+ static Object sf1910;
+ static Object sf1911;
+ static Object sf1912;
+ static Object sf1913;
+ static Object sf1914;
+ static Object sf1915;
+ static Object sf1916;
+ static Object sf1917;
+ static Object sf1918;
+ static Object sf1919;
+ static Object sf1920;
+ static Object sf1921;
+ static Object sf1922;
+ static Object sf1923;
+ static Object sf1924;
+ static Object sf1925;
+ static Object sf1926;
+ static Object sf1927;
+ static Object sf1928;
+ static Object sf1929;
+ static Object sf1930;
+ static Object sf1931;
+ static Object sf1932;
+ static Object sf1933;
+ static Object sf1934;
+ static Object sf1935;
+ static Object sf1936;
+ static Object sf1937;
+ static Object sf1938;
+ static Object sf1939;
+ static Object sf1940;
+ static Object sf1941;
+ static Object sf1942;
+ static Object sf1943;
+ static Object sf1944;
+ static Object sf1945;
+ static Object sf1946;
+ static Object sf1947;
+ static Object sf1948;
+ static Object sf1949;
+ static Object sf1950;
+ static Object sf1951;
+ static Object sf1952;
+ static Object sf1953;
+ static Object sf1954;
+ static Object sf1955;
+ static Object sf1956;
+ static Object sf1957;
+ static Object sf1958;
+ static Object sf1959;
+ static Object sf1960;
+ static Object sf1961;
+ static Object sf1962;
+ static Object sf1963;
+ static Object sf1964;
+ static Object sf1965;
+ static Object sf1966;
+ static Object sf1967;
+ static Object sf1968;
+ static Object sf1969;
+ static Object sf1970;
+ static Object sf1971;
+ static Object sf1972;
+ static Object sf1973;
+ static Object sf1974;
+ static Object sf1975;
+ static Object sf1976;
+ static Object sf1977;
+ static Object sf1978;
+ static Object sf1979;
+ static Object sf1980;
+ static Object sf1981;
+ static Object sf1982;
+ static Object sf1983;
+ static Object sf1984;
+ static Object sf1985;
+ static Object sf1986;
+ static Object sf1987;
+ static Object sf1988;
+ static Object sf1989;
+ static Object sf1990;
+ static Object sf1991;
+ static Object sf1992;
+ static Object sf1993;
+ static Object sf1994;
+ static Object sf1995;
+ static Object sf1996;
+ static Object sf1997;
+ static Object sf1998;
+ static Object sf1999;
+ static Object sf2000;
+ static Object sf2001;
+ static Object sf2002;
+ static Object sf2003;
+ static Object sf2004;
+ static Object sf2005;
+ static Object sf2006;
+ static Object sf2007;
+ static Object sf2008;
+ static Object sf2009;
+ static Object sf2010;
+ static Object sf2011;
+ static Object sf2012;
+ static Object sf2013;
+ static Object sf2014;
+ static Object sf2015;
+ static Object sf2016;
+ static Object sf2017;
+ static Object sf2018;
+ static Object sf2019;
+ static Object sf2020;
+ static Object sf2021;
+ static Object sf2022;
+ static Object sf2023;
+ static Object sf2024;
+ static Object sf2025;
+ static Object sf2026;
+ static Object sf2027;
+ static Object sf2028;
+ static Object sf2029;
+ static Object sf2030;
+ static Object sf2031;
+ static Object sf2032;
+ static Object sf2033;
+ static Object sf2034;
+ static Object sf2035;
+ static Object sf2036;
+ static Object sf2037;
+ static Object sf2038;
+ static Object sf2039;
+ static Object sf2040;
+ static Object sf2041;
+ static Object sf2042;
+ static Object sf2043;
+ static Object sf2044;
+ static Object sf2045;
+ static Object sf2046;
+ static Object sf2047;
+ static Object sf2048;
+ static Object sf2049;
+ static Object sf2050;
+ static Object sf2051;
+ static Object sf2052;
+ static Object sf2053;
+ static Object sf2054;
+ static Object sf2055;
+ static Object sf2056;
+ static Object sf2057;
+ static Object sf2058;
+ static Object sf2059;
+ static Object sf2060;
+ static Object sf2061;
+ static Object sf2062;
+ static Object sf2063;
+ static Object sf2064;
+ static Object sf2065;
+ static Object sf2066;
+ static Object sf2067;
+ static Object sf2068;
+ static Object sf2069;
+ static Object sf2070;
+ static Object sf2071;
+ static Object sf2072;
+ static Object sf2073;
+ static Object sf2074;
+ static Object sf2075;
+ static Object sf2076;
+ static Object sf2077;
+ static Object sf2078;
+ static Object sf2079;
+ static Object sf2080;
+ static Object sf2081;
+ static Object sf2082;
+ static Object sf2083;
+ static Object sf2084;
+ static Object sf2085;
+ static Object sf2086;
+ static Object sf2087;
+ static Object sf2088;
+ static Object sf2089;
+ static Object sf2090;
+ static Object sf2091;
+ static Object sf2092;
+ static Object sf2093;
+ static Object sf2094;
+ static Object sf2095;
+ static Object sf2096;
+ static Object sf2097;
+ static Object sf2098;
+ static Object sf2099;
+ static Object sf2100;
+ static Object sf2101;
+ static Object sf2102;
+ static Object sf2103;
+ static Object sf2104;
+ static Object sf2105;
+ static Object sf2106;
+ static Object sf2107;
+ static Object sf2108;
+ static Object sf2109;
+ static Object sf2110;
+ static Object sf2111;
+ static Object sf2112;
+ static Object sf2113;
+ static Object sf2114;
+ static Object sf2115;
+ static Object sf2116;
+ static Object sf2117;
+ static Object sf2118;
+ static Object sf2119;
+ static Object sf2120;
+ static Object sf2121;
+ static Object sf2122;
+ static Object sf2123;
+ static Object sf2124;
+ static Object sf2125;
+ static Object sf2126;
+ static Object sf2127;
+ static Object sf2128;
+ static Object sf2129;
+ static Object sf2130;
+ static Object sf2131;
+ static Object sf2132;
+ static Object sf2133;
+ static Object sf2134;
+ static Object sf2135;
+ static Object sf2136;
+ static Object sf2137;
+ static Object sf2138;
+ static Object sf2139;
+ static Object sf2140;
+ static Object sf2141;
+ static Object sf2142;
+ static Object sf2143;
+ static Object sf2144;
+ static Object sf2145;
+ static Object sf2146;
+ static Object sf2147;
+ static Object sf2148;
+ static Object sf2149;
+ static Object sf2150;
+ static Object sf2151;
+ static Object sf2152;
+ static Object sf2153;
+ static Object sf2154;
+ static Object sf2155;
+ static Object sf2156;
+ static Object sf2157;
+ static Object sf2158;
+ static Object sf2159;
+ static Object sf2160;
+ static Object sf2161;
+ static Object sf2162;
+ static Object sf2163;
+ static Object sf2164;
+ static Object sf2165;
+ static Object sf2166;
+ static Object sf2167;
+ static Object sf2168;
+ static Object sf2169;
+ static Object sf2170;
+ static Object sf2171;
+ static Object sf2172;
+ static Object sf2173;
+ static Object sf2174;
+ static Object sf2175;
+ static Object sf2176;
+ static Object sf2177;
+ static Object sf2178;
+ static Object sf2179;
+ static Object sf2180;
+ static Object sf2181;
+ static Object sf2182;
+ static Object sf2183;
+ static Object sf2184;
+ static Object sf2185;
+ static Object sf2186;
+ static Object sf2187;
+ static Object sf2188;
+ static Object sf2189;
+ static Object sf2190;
+ static Object sf2191;
+ static Object sf2192;
+ static Object sf2193;
+ static Object sf2194;
+ static Object sf2195;
+ static Object sf2196;
+ static Object sf2197;
+ static Object sf2198;
+ static Object sf2199;
+ static Object sf2200;
+ static Object sf2201;
+ static Object sf2202;
+ static Object sf2203;
+ static Object sf2204;
+ static Object sf2205;
+ static Object sf2206;
+ static Object sf2207;
+ static Object sf2208;
+ static Object sf2209;
+ static Object sf2210;
+ static Object sf2211;
+ static Object sf2212;
+ static Object sf2213;
+ static Object sf2214;
+ static Object sf2215;
+ static Object sf2216;
+ static Object sf2217;
+ static Object sf2218;
+ static Object sf2219;
+ static Object sf2220;
+ static Object sf2221;
+ static Object sf2222;
+ static Object sf2223;
+ static Object sf2224;
+ static Object sf2225;
+ static Object sf2226;
+ static Object sf2227;
+ static Object sf2228;
+ static Object sf2229;
+ static Object sf2230;
+ static Object sf2231;
+ static Object sf2232;
+ static Object sf2233;
+ static Object sf2234;
+ static Object sf2235;
+ static Object sf2236;
+ static Object sf2237;
+ static Object sf2238;
+ static Object sf2239;
+ static Object sf2240;
+ static Object sf2241;
+ static Object sf2242;
+ static Object sf2243;
+ static Object sf2244;
+ static Object sf2245;
+ static Object sf2246;
+ static Object sf2247;
+ static Object sf2248;
+ static Object sf2249;
+ static Object sf2250;
+ static Object sf2251;
+ static Object sf2252;
+ static Object sf2253;
+ static Object sf2254;
+ static Object sf2255;
+ static Object sf2256;
+ static Object sf2257;
+ static Object sf2258;
+ static Object sf2259;
+ static Object sf2260;
+ static Object sf2261;
+ static Object sf2262;
+ static Object sf2263;
+ static Object sf2264;
+ static Object sf2265;
+ static Object sf2266;
+ static Object sf2267;
+ static Object sf2268;
+ static Object sf2269;
+ static Object sf2270;
+ static Object sf2271;
+ static Object sf2272;
+ static Object sf2273;
+ static Object sf2274;
+ static Object sf2275;
+ static Object sf2276;
+ static Object sf2277;
+ static Object sf2278;
+ static Object sf2279;
+ static Object sf2280;
+ static Object sf2281;
+ static Object sf2282;
+ static Object sf2283;
+ static Object sf2284;
+ static Object sf2285;
+ static Object sf2286;
+ static Object sf2287;
+ static Object sf2288;
+ static Object sf2289;
+ static Object sf2290;
+ static Object sf2291;
+ static Object sf2292;
+ static Object sf2293;
+ static Object sf2294;
+ static Object sf2295;
+ static Object sf2296;
+ static Object sf2297;
+ static Object sf2298;
+ static Object sf2299;
+ static Object sf2300;
+ static Object sf2301;
+ static Object sf2302;
+ static Object sf2303;
+ static Object sf2304;
+ static Object sf2305;
+ static Object sf2306;
+ static Object sf2307;
+ static Object sf2308;
+ static Object sf2309;
+ static Object sf2310;
+ static Object sf2311;
+ static Object sf2312;
+ static Object sf2313;
+ static Object sf2314;
+ static Object sf2315;
+ static Object sf2316;
+ static Object sf2317;
+ static Object sf2318;
+ static Object sf2319;
+ static Object sf2320;
+ static Object sf2321;
+ static Object sf2322;
+ static Object sf2323;
+ static Object sf2324;
+ static Object sf2325;
+ static Object sf2326;
+ static Object sf2327;
+ static Object sf2328;
+ static Object sf2329;
+ static Object sf2330;
+ static Object sf2331;
+ static Object sf2332;
+ static Object sf2333;
+ static Object sf2334;
+ static Object sf2335;
+ static Object sf2336;
+ static Object sf2337;
+ static Object sf2338;
+ static Object sf2339;
+ static Object sf2340;
+ static Object sf2341;
+ static Object sf2342;
+ static Object sf2343;
+ static Object sf2344;
+ static Object sf2345;
+ static Object sf2346;
+ static Object sf2347;
+ static Object sf2348;
+ static Object sf2349;
+ static Object sf2350;
+ static Object sf2351;
+ static Object sf2352;
+ static Object sf2353;
+ static Object sf2354;
+ static Object sf2355;
+ static Object sf2356;
+ static Object sf2357;
+ static Object sf2358;
+ static Object sf2359;
+ static Object sf2360;
+ static Object sf2361;
+ static Object sf2362;
+ static Object sf2363;
+ static Object sf2364;
+ static Object sf2365;
+ static Object sf2366;
+ static Object sf2367;
+ static Object sf2368;
+ static Object sf2369;
+ static Object sf2370;
+ static Object sf2371;
+ static Object sf2372;
+ static Object sf2373;
+ static Object sf2374;
+ static Object sf2375;
+ static Object sf2376;
+ static Object sf2377;
+ static Object sf2378;
+ static Object sf2379;
+ static Object sf2380;
+ static Object sf2381;
+ static Object sf2382;
+ static Object sf2383;
+ static Object sf2384;
+ static Object sf2385;
+ static Object sf2386;
+ static Object sf2387;
+ static Object sf2388;
+ static Object sf2389;
+ static Object sf2390;
+ static Object sf2391;
+ static Object sf2392;
+ static Object sf2393;
+ static Object sf2394;
+ static Object sf2395;
+ static Object sf2396;
+ static Object sf2397;
+ static Object sf2398;
+ static Object sf2399;
+ static Object sf2400;
+ static Object sf2401;
+ static Object sf2402;
+ static Object sf2403;
+ static Object sf2404;
+ static Object sf2405;
+ static Object sf2406;
+ static Object sf2407;
+ static Object sf2408;
+ static Object sf2409;
+ static Object sf2410;
+ static Object sf2411;
+ static Object sf2412;
+ static Object sf2413;
+ static Object sf2414;
+ static Object sf2415;
+ static Object sf2416;
+ static Object sf2417;
+ static Object sf2418;
+ static Object sf2419;
+ static Object sf2420;
+ static Object sf2421;
+ static Object sf2422;
+ static Object sf2423;
+ static Object sf2424;
+ static Object sf2425;
+ static Object sf2426;
+ static Object sf2427;
+ static Object sf2428;
+ static Object sf2429;
+ static Object sf2430;
+ static Object sf2431;
+ static Object sf2432;
+ static Object sf2433;
+ static Object sf2434;
+ static Object sf2435;
+ static Object sf2436;
+ static Object sf2437;
+ static Object sf2438;
+ static Object sf2439;
+ static Object sf2440;
+ static Object sf2441;
+ static Object sf2442;
+ static Object sf2443;
+ static Object sf2444;
+ static Object sf2445;
+ static Object sf2446;
+ static Object sf2447;
+ static Object sf2448;
+ static Object sf2449;
+ static Object sf2450;
+ static Object sf2451;
+ static Object sf2452;
+ static Object sf2453;
+ static Object sf2454;
+ static Object sf2455;
+ static Object sf2456;
+ static Object sf2457;
+ static Object sf2458;
+ static Object sf2459;
+ static Object sf2460;
+ static Object sf2461;
+ static Object sf2462;
+ static Object sf2463;
+ static Object sf2464;
+ static Object sf2465;
+ static Object sf2466;
+ static Object sf2467;
+ static Object sf2468;
+ static Object sf2469;
+ static Object sf2470;
+ static Object sf2471;
+ static Object sf2472;
+ static Object sf2473;
+ static Object sf2474;
+ static Object sf2475;
+ static Object sf2476;
+ static Object sf2477;
+ static Object sf2478;
+ static Object sf2479;
+ static Object sf2480;
+ static Object sf2481;
+ static Object sf2482;
+ static Object sf2483;
+ static Object sf2484;
+ static Object sf2485;
+ static Object sf2486;
+ static Object sf2487;
+ static Object sf2488;
+ static Object sf2489;
+ static Object sf2490;
+ static Object sf2491;
+ static Object sf2492;
+ static Object sf2493;
+ static Object sf2494;
+ static Object sf2495;
+ static Object sf2496;
+ static Object sf2497;
+ static Object sf2498;
+ static Object sf2499;
+ static Object sf2500;
+ static Object sf2501;
+ static Object sf2502;
+ static Object sf2503;
+ static Object sf2504;
+ static Object sf2505;
+ static Object sf2506;
+ static Object sf2507;
+ static Object sf2508;
+ static Object sf2509;
+ static Object sf2510;
+ static Object sf2511;
+ static Object sf2512;
+ static Object sf2513;
+ static Object sf2514;
+ static Object sf2515;
+ static Object sf2516;
+ static Object sf2517;
+ static Object sf2518;
+ static Object sf2519;
+ static Object sf2520;
+ static Object sf2521;
+ static Object sf2522;
+ static Object sf2523;
+ static Object sf2524;
+ static Object sf2525;
+ static Object sf2526;
+ static Object sf2527;
+ static Object sf2528;
+ static Object sf2529;
+ static Object sf2530;
+ static Object sf2531;
+ static Object sf2532;
+ static Object sf2533;
+ static Object sf2534;
+ static Object sf2535;
+ static Object sf2536;
+ static Object sf2537;
+ static Object sf2538;
+ static Object sf2539;
+ static Object sf2540;
+ static Object sf2541;
+ static Object sf2542;
+ static Object sf2543;
+ static Object sf2544;
+ static Object sf2545;
+ static Object sf2546;
+ static Object sf2547;
+ static Object sf2548;
+ static Object sf2549;
+ static Object sf2550;
+ static Object sf2551;
+ static Object sf2552;
+ static Object sf2553;
+ static Object sf2554;
+ static Object sf2555;
+ static Object sf2556;
+ static Object sf2557;
+ static Object sf2558;
+ static Object sf2559;
+ static Object sf2560;
+ static Object sf2561;
+ static Object sf2562;
+ static Object sf2563;
+ static Object sf2564;
+ static Object sf2565;
+ static Object sf2566;
+ static Object sf2567;
+ static Object sf2568;
+ static Object sf2569;
+ static Object sf2570;
+ static Object sf2571;
+ static Object sf2572;
+ static Object sf2573;
+ static Object sf2574;
+ static Object sf2575;
+ static Object sf2576;
+ static Object sf2577;
+ static Object sf2578;
+ static Object sf2579;
+ static Object sf2580;
+ static Object sf2581;
+ static Object sf2582;
+ static Object sf2583;
+ static Object sf2584;
+ static Object sf2585;
+ static Object sf2586;
+ static Object sf2587;
+ static Object sf2588;
+ static Object sf2589;
+ static Object sf2590;
+ static Object sf2591;
+ static Object sf2592;
+ static Object sf2593;
+ static Object sf2594;
+ static Object sf2595;
+ static Object sf2596;
+ static Object sf2597;
+ static Object sf2598;
+ static Object sf2599;
+ static Object sf2600;
+ static Object sf2601;
+ static Object sf2602;
+ static Object sf2603;
+ static Object sf2604;
+ static Object sf2605;
+ static Object sf2606;
+ static Object sf2607;
+ static Object sf2608;
+ static Object sf2609;
+ static Object sf2610;
+ static Object sf2611;
+ static Object sf2612;
+ static Object sf2613;
+ static Object sf2614;
+ static Object sf2615;
+ static Object sf2616;
+ static Object sf2617;
+ static Object sf2618;
+ static Object sf2619;
+ static Object sf2620;
+ static Object sf2621;
+ static Object sf2622;
+ static Object sf2623;
+ static Object sf2624;
+ static Object sf2625;
+ static Object sf2626;
+ static Object sf2627;
+ static Object sf2628;
+ static Object sf2629;
+ static Object sf2630;
+ static Object sf2631;
+ static Object sf2632;
+ static Object sf2633;
+ static Object sf2634;
+ static Object sf2635;
+ static Object sf2636;
+ static Object sf2637;
+ static Object sf2638;
+ static Object sf2639;
+ static Object sf2640;
+ static Object sf2641;
+ static Object sf2642;
+ static Object sf2643;
+ static Object sf2644;
+ static Object sf2645;
+ static Object sf2646;
+ static Object sf2647;
+ static Object sf2648;
+ static Object sf2649;
+ static Object sf2650;
+ static Object sf2651;
+ static Object sf2652;
+ static Object sf2653;
+ static Object sf2654;
+ static Object sf2655;
+ static Object sf2656;
+ static Object sf2657;
+ static Object sf2658;
+ static Object sf2659;
+ static Object sf2660;
+ static Object sf2661;
+ static Object sf2662;
+ static Object sf2663;
+ static Object sf2664;
+ static Object sf2665;
+ static Object sf2666;
+ static Object sf2667;
+ static Object sf2668;
+ static Object sf2669;
+ static Object sf2670;
+ static Object sf2671;
+ static Object sf2672;
+ static Object sf2673;
+ static Object sf2674;
+ static Object sf2675;
+ static Object sf2676;
+ static Object sf2677;
+ static Object sf2678;
+ static Object sf2679;
+ static Object sf2680;
+ static Object sf2681;
+ static Object sf2682;
+ static Object sf2683;
+ static Object sf2684;
+ static Object sf2685;
+ static Object sf2686;
+ static Object sf2687;
+ static Object sf2688;
+ static Object sf2689;
+ static Object sf2690;
+ static Object sf2691;
+ static Object sf2692;
+ static Object sf2693;
+ static Object sf2694;
+ static Object sf2695;
+ static Object sf2696;
+ static Object sf2697;
+ static Object sf2698;
+ static Object sf2699;
+ static Object sf2700;
+ static Object sf2701;
+ static Object sf2702;
+ static Object sf2703;
+ static Object sf2704;
+ static Object sf2705;
+ static Object sf2706;
+ static Object sf2707;
+ static Object sf2708;
+ static Object sf2709;
+ static Object sf2710;
+ static Object sf2711;
+ static Object sf2712;
+ static Object sf2713;
+ static Object sf2714;
+ static Object sf2715;
+ static Object sf2716;
+ static Object sf2717;
+ static Object sf2718;
+ static Object sf2719;
+ static Object sf2720;
+ static Object sf2721;
+ static Object sf2722;
+ static Object sf2723;
+ static Object sf2724;
+ static Object sf2725;
+ static Object sf2726;
+ static Object sf2727;
+ static Object sf2728;
+ static Object sf2729;
+ static Object sf2730;
+ static Object sf2731;
+ static Object sf2732;
+ static Object sf2733;
+ static Object sf2734;
+ static Object sf2735;
+ static Object sf2736;
+ static Object sf2737;
+ static Object sf2738;
+ static Object sf2739;
+ static Object sf2740;
+ static Object sf2741;
+ static Object sf2742;
+ static Object sf2743;
+ static Object sf2744;
+ static Object sf2745;
+ static Object sf2746;
+ static Object sf2747;
+ static Object sf2748;
+ static Object sf2749;
+ static Object sf2750;
+ static Object sf2751;
+ static Object sf2752;
+ static Object sf2753;
+ static Object sf2754;
+ static Object sf2755;
+ static Object sf2756;
+ static Object sf2757;
+ static Object sf2758;
+ static Object sf2759;
+ static Object sf2760;
+ static Object sf2761;
+ static Object sf2762;
+ static Object sf2763;
+ static Object sf2764;
+ static Object sf2765;
+ static Object sf2766;
+ static Object sf2767;
+ static Object sf2768;
+ static Object sf2769;
+ static Object sf2770;
+ static Object sf2771;
+ static Object sf2772;
+ static Object sf2773;
+ static Object sf2774;
+ static Object sf2775;
+ static Object sf2776;
+ static Object sf2777;
+ static Object sf2778;
+ static Object sf2779;
+ static Object sf2780;
+ static Object sf2781;
+ static Object sf2782;
+ static Object sf2783;
+ static Object sf2784;
+ static Object sf2785;
+ static Object sf2786;
+ static Object sf2787;
+ static Object sf2788;
+ static Object sf2789;
+ static Object sf2790;
+ static Object sf2791;
+ static Object sf2792;
+ static Object sf2793;
+ static Object sf2794;
+ static Object sf2795;
+ static Object sf2796;
+ static Object sf2797;
+ static Object sf2798;
+ static Object sf2799;
+ static Object sf2800;
+ static Object sf2801;
+ static Object sf2802;
+ static Object sf2803;
+ static Object sf2804;
+ static Object sf2805;
+ static Object sf2806;
+ static Object sf2807;
+ static Object sf2808;
+ static Object sf2809;
+ static Object sf2810;
+ static Object sf2811;
+ static Object sf2812;
+ static Object sf2813;
+ static Object sf2814;
+ static Object sf2815;
+ static Object sf2816;
+ static Object sf2817;
+ static Object sf2818;
+ static Object sf2819;
+ static Object sf2820;
+ static Object sf2821;
+ static Object sf2822;
+ static Object sf2823;
+ static Object sf2824;
+ static Object sf2825;
+ static Object sf2826;
+ static Object sf2827;
+ static Object sf2828;
+ static Object sf2829;
+ static Object sf2830;
+ static Object sf2831;
+ static Object sf2832;
+ static Object sf2833;
+ static Object sf2834;
+ static Object sf2835;
+ static Object sf2836;
+ static Object sf2837;
+ static Object sf2838;
+ static Object sf2839;
+ static Object sf2840;
+ static Object sf2841;
+ static Object sf2842;
+ static Object sf2843;
+ static Object sf2844;
+ static Object sf2845;
+ static Object sf2846;
+ static Object sf2847;
+ static Object sf2848;
+ static Object sf2849;
+ static Object sf2850;
+ static Object sf2851;
+ static Object sf2852;
+ static Object sf2853;
+ static Object sf2854;
+ static Object sf2855;
+ static Object sf2856;
+ static Object sf2857;
+ static Object sf2858;
+ static Object sf2859;
+ static Object sf2860;
+ static Object sf2861;
+ static Object sf2862;
+ static Object sf2863;
+ static Object sf2864;
+ static Object sf2865;
+ static Object sf2866;
+ static Object sf2867;
+ static Object sf2868;
+ static Object sf2869;
+ static Object sf2870;
+ static Object sf2871;
+ static Object sf2872;
+ static Object sf2873;
+ static Object sf2874;
+ static Object sf2875;
+ static Object sf2876;
+ static Object sf2877;
+ static Object sf2878;
+ static Object sf2879;
+ static Object sf2880;
+ static Object sf2881;
+ static Object sf2882;
+ static Object sf2883;
+ static Object sf2884;
+ static Object sf2885;
+ static Object sf2886;
+ static Object sf2887;
+ static Object sf2888;
+ static Object sf2889;
+ static Object sf2890;
+ static Object sf2891;
+ static Object sf2892;
+ static Object sf2893;
+ static Object sf2894;
+ static Object sf2895;
+ static Object sf2896;
+ static Object sf2897;
+ static Object sf2898;
+ static Object sf2899;
+ static Object sf2900;
+ static Object sf2901;
+ static Object sf2902;
+ static Object sf2903;
+ static Object sf2904;
+ static Object sf2905;
+ static Object sf2906;
+ static Object sf2907;
+ static Object sf2908;
+ static Object sf2909;
+ static Object sf2910;
+ static Object sf2911;
+ static Object sf2912;
+ static Object sf2913;
+ static Object sf2914;
+ static Object sf2915;
+ static Object sf2916;
+ static Object sf2917;
+ static Object sf2918;
+ static Object sf2919;
+ static Object sf2920;
+ static Object sf2921;
+ static Object sf2922;
+ static Object sf2923;
+ static Object sf2924;
+ static Object sf2925;
+ static Object sf2926;
+ static Object sf2927;
+ static Object sf2928;
+ static Object sf2929;
+ static Object sf2930;
+ static Object sf2931;
+ static Object sf2932;
+ static Object sf2933;
+ static Object sf2934;
+ static Object sf2935;
+ static Object sf2936;
+ static Object sf2937;
+ static Object sf2938;
+ static Object sf2939;
+ static Object sf2940;
+ static Object sf2941;
+ static Object sf2942;
+ static Object sf2943;
+ static Object sf2944;
+ static Object sf2945;
+ static Object sf2946;
+ static Object sf2947;
+ static Object sf2948;
+ static Object sf2949;
+ static Object sf2950;
+ static Object sf2951;
+ static Object sf2952;
+ static Object sf2953;
+ static Object sf2954;
+ static Object sf2955;
+ static Object sf2956;
+ static Object sf2957;
+ static Object sf2958;
+ static Object sf2959;
+ static Object sf2960;
+ static Object sf2961;
+ static Object sf2962;
+ static Object sf2963;
+ static Object sf2964;
+ static Object sf2965;
+ static Object sf2966;
+ static Object sf2967;
+ static Object sf2968;
+ static Object sf2969;
+ static Object sf2970;
+ static Object sf2971;
+ static Object sf2972;
+ static Object sf2973;
+ static Object sf2974;
+ static Object sf2975;
+ static Object sf2976;
+ static Object sf2977;
+ static Object sf2978;
+ static Object sf2979;
+ static Object sf2980;
+ static Object sf2981;
+ static Object sf2982;
+ static Object sf2983;
+ static Object sf2984;
+ static Object sf2985;
+ static Object sf2986;
+ static Object sf2987;
+ static Object sf2988;
+ static Object sf2989;
+ static Object sf2990;
+ static Object sf2991;
+ static Object sf2992;
+ static Object sf2993;
+ static Object sf2994;
+ static Object sf2995;
+ static Object sf2996;
+ static Object sf2997;
+ static Object sf2998;
+ static Object sf2999;
+ static Object sf3000;
+ }
+
+}
diff --git a/test/800-smali/build b/test/126-miranda-multidex/build
similarity index 62%
copy from test/800-smali/build
copy to test/126-miranda-multidex/build
index 1b5a4e3..4c30f3f 100644
--- a/test/800-smali/build
+++ b/test/126-miranda-multidex/build
@@ -17,16 +17,16 @@
# Stop if something fails.
set -e
-# Compile Java classes
mkdir classes
+
+# All except MirandaInterface
${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --output=java_classes.dex classes
+rm classes/MirandaInterface.class
+${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
-# Compile Smali classes
-${SMALI} -JXmx256m --output smali_classes.dex `find src -name '*.smali'`
+# Only MirandaInterface
+${JAVAC} -d classes `find src -name '*.java'`
+rm classes/Main.class classes/MirandaAbstract.class classes/MirandaClass*.class classes/MirandaInterface2*.class
+${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes
-# Combine files.
-${DXMERGER} classes.dex java_classes.dex smali_classes.dex
-
-# Zip up output.
-zip $TEST_NAME.jar classes.dex
+zip $TEST_NAME.jar classes.dex classes2.dex
diff --git a/test/126-miranda-multidex/expected.txt b/test/126-miranda-multidex/expected.txt
new file mode 100644
index 0000000..dbe3717
--- /dev/null
+++ b/test/126-miranda-multidex/expected.txt
@@ -0,0 +1,32 @@
+MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+MirandaAbstract / MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+true 27
+MirandaAbstract / MirandaClass2:
+ inInterface: true
+ inInterface2: 28
+ inAbstract: true
+true 28
+Test getting miranda method via reflection:
+ caught expected NoSuchMethodException
+MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+MirandaAbstract / MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+true 27
+MirandaAbstract / MirandaClass2:
+ inInterface: true
+ inInterface2: 28
+ inAbstract: true
+true 28
+Test getting miranda method via reflection:
+ caught expected NoSuchMethodException
diff --git a/test/126-miranda-multidex/info.txt b/test/126-miranda-multidex/info.txt
new file mode 100644
index 0000000..ac50e2e
--- /dev/null
+++ b/test/126-miranda-multidex/info.txt
@@ -0,0 +1,2 @@
+This test ensures that cross-dex-file Miranda methods are correctly resolved.
+See b/18193682 for details.
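A "miranda method" is an interface method that an abstract class inherits without implementing; the class linker fills in a synthetic abstract entry for it in the class's method table. The sketch below uses hypothetical names (not the test's own classes) to show the minimal shape; the interesting case, which the build script above arranges, is when the interface ends up in a different dex file than the abstract class.

    // Illustrative sketch only; imagine Iface landing in a second dex file.
    interface Iface {
        boolean fromIface();
    }

    abstract class Base implements Iface {
        // fromIface() is not implemented here, so it becomes a miranda method of Base.
    }

    class Impl extends Base {
        public boolean fromIface() { return true; }
    }

    class Demo {
        public static void main(String[] args) {
            Base b = new Impl();
            // Resolving fromIface() on Base must work even across dex files.
            System.out.println(b.fromIface());
        }
    }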
diff --git a/test/126-miranda-multidex/run b/test/126-miranda-multidex/run
new file mode 100755
index 0000000..23c9935
--- /dev/null
+++ b/test/126-miranda-multidex/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} $@
+
+# The problem was first exposed in a no-verify setting, as that changes the resolution path
+# taken. Make sure we also test in that environment.
+${RUN} --no-verify ${@}
diff --git a/test/126-miranda-multidex/src/Main.java b/test/126-miranda-multidex/src/Main.java
new file mode 100644
index 0000000..8624378
--- /dev/null
+++ b/test/126-miranda-multidex/src/Main.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+/**
+ * Miranda testing.
+ */
+public class Main {
+ public static void main(String[] args) {
+ MirandaClass mir = new MirandaClass();
+ System.out.println("MirandaClass:");
+ System.out.println(" inInterface: " + mir.inInterface());
+ System.out.println(" inInterface2: " + mir.inInterface2());
+ System.out.println(" inAbstract: " + mir.inAbstract());
+
+ /* try again through abstract class; results should be identical */
+ MirandaAbstract mira = mir;
+ System.out.println("MirandaAbstract / MirandaClass:");
+ System.out.println(" inInterface: " + mira.inInterface());
+ System.out.println(" inInterface2: " + mira.inInterface2());
+ System.out.println(" inAbstract: " + mira.inAbstract());
+ mira.callMiranda();
+
+ MirandaAbstract mira2 = new MirandaClass2();
+ System.out.println("MirandaAbstract / MirandaClass2:");
+ System.out.println(" inInterface: " + mira2.inInterface());
+ System.out.println(" inInterface2: " + mira2.inInterface2());
+ System.out.println(" inAbstract: " + mira2.inAbstract());
+ mira2.callMiranda();
+
+ System.out.println("Test getting miranda method via reflection:");
+ try {
+ Class<?> mirandaClass = Class.forName("MirandaAbstract");
+ Method mirandaMethod = mirandaClass.getDeclaredMethod("inInterface");
+ System.out.println(" did not expect to find miranda method");
+ } catch (NoSuchMethodException nsme) {
+ System.out.println(" caught expected NoSuchMethodException");
+ } catch (Exception e) {
+ System.out.println(" caught unexpected exception " + e);
+ }
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaAbstract.java b/test/126-miranda-multidex/src/MirandaAbstract.java
new file mode 100644
index 0000000..c09a61f
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaAbstract.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Miranda testing.
+ */
+public abstract class MirandaAbstract implements MirandaInterface, MirandaInterface2
+{
+ protected MirandaAbstract() { }
+
+ // These will be miranda methods, as the interfaces define them, but they are not
+ // implemented in this abstract class:
+ //public abstract boolean inInterface();
+ //public abstract int inInterface2();
+
+ public boolean inAbstract() {
+ return true;
+ }
+
+ public void callMiranda() {
+ System.out.println(inInterface() + " " + inInterface2());
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaClass.java b/test/126-miranda-multidex/src/MirandaClass.java
new file mode 100644
index 0000000..7bb37e7
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaClass.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Miranda testing.
+ */
+public class MirandaClass extends MirandaAbstract {
+
+ public MirandaClass() {}
+
+ public boolean inInterface() {
+ return true;
+ }
+
+ public int inInterface2() {
+ return 27;
+ }
+
+ public boolean inAbstract() {
+ return false;
+ }
+
+ // Better not hit any of these...
+ public void inInterfaceDummy1() {
+ System.out.println("inInterfaceDummy1");
+ }
+ public void inInterfaceDummy2() {
+ System.out.println("inInterfaceDummy2");
+ }
+ public void inInterfaceDummy3() {
+ System.out.println("inInterfaceDummy3");
+ }
+ public void inInterfaceDummy4() {
+ System.out.println("inInterfaceDummy4");
+ }
+ public void inInterfaceDummy5() {
+ System.out.println("inInterfaceDummy5");
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaClass2.java b/test/126-miranda-multidex/src/MirandaClass2.java
new file mode 100644
index 0000000..797ead2
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaClass2.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class MirandaClass2 extends MirandaAbstract {
+ public boolean inInterface() {
+ return true;
+ }
+
+ public int inInterface2() {
+ return 28;
+ }
+
+ // Better not hit any of these...
+ public void inInterfaceDummy1() {
+ System.out.println("inInterfaceDummy1");
+ }
+ public void inInterfaceDummy2() {
+ System.out.println("inInterfaceDummy2");
+ }
+ public void inInterfaceDummy3() {
+ System.out.println("inInterfaceDummy3");
+ }
+ public void inInterfaceDummy4() {
+ System.out.println("inInterfaceDummy4");
+ }
+ public void inInterfaceDummy5() {
+ System.out.println("inInterfaceDummy5");
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaInterface.java b/test/126-miranda-multidex/src/MirandaInterface.java
new file mode 100644
index 0000000..df12fcc
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaInterface.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Miranda testing.
+ */
+public interface MirandaInterface {
+
+ public boolean inInterface();
+
+ // A couple of dummy methods to fill the method table.
+ public void inInterfaceDummy1();
+ public void inInterfaceDummy2();
+ public void inInterfaceDummy3();
+ public void inInterfaceDummy4();
+ public void inInterfaceDummy5();
+
+}
diff --git a/runtime/log_severity.h b/test/126-miranda-multidex/src/MirandaInterface2.java
similarity index 63%
copy from runtime/log_severity.h
copy to test/126-miranda-multidex/src/MirandaInterface2.java
index 31682df..7c93fd0 100644
--- a/runtime/log_severity.h
+++ b/test/126-miranda-multidex/src/MirandaInterface2.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2006 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,13 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+/**
+ * Miranda testing.
+ */
+public interface MirandaInterface2 {
-typedef int LogSeverity;
+ public boolean inInterface();
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
+ public int inInterface2();
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+}
diff --git a/test/800-smali/build b/test/127-secondarydex/build
old mode 100644
new mode 100755
similarity index 65%
rename from test/800-smali/build
rename to test/127-secondarydex/build
index 1b5a4e3..712774f
--- a/test/800-smali/build
+++ b/test/127-secondarydex/build
@@ -17,16 +17,15 @@
# Stop if something fails.
set -e
-# Compile Java classes
mkdir classes
${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --output=java_classes.dex classes
-# Compile Smali classes
-${SMALI} -JXmx256m --output smali_classes.dex `find src -name '*.smali'`
+mkdir classes-ex
+mv classes/Super.class classes-ex
-# Combine files.
-${DXMERGER} classes.dex java_classes.dex smali_classes.dex
-
-# Zip up output.
-zip $TEST_NAME.jar classes.dex
+if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+ zip $TEST_NAME.jar classes.dex
+ ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+ zip ${TEST_NAME}-ex.jar classes.dex
+fi
diff --git a/test/127-secondarydex/expected.txt b/test/127-secondarydex/expected.txt
new file mode 100644
index 0000000..29a1411
--- /dev/null
+++ b/test/127-secondarydex/expected.txt
@@ -0,0 +1,3 @@
+testSlowPathDirectInvoke
+Test
+Got null pointer exception
diff --git a/test/127-secondarydex/info.txt b/test/127-secondarydex/info.txt
new file mode 100644
index 0000000..0479d1a
--- /dev/null
+++ b/test/127-secondarydex/info.txt
@@ -0,0 +1,3 @@
+Test features with a secondary dex file.
+
+- Regression test to ensure the slow path of a direct invoke performs the null check.
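A private method is compiled to an invoke-direct, which does not need to read anything through the receiver to locate its target, so the null check has to happen explicitly; when resolution takes the slow path (presumably because the superclass here lives in the secondary dex file and the call cannot be fully resolved ahead of time), that path must still perform it. A minimal sketch of the pattern, with hypothetical names rather than the test's own Super/Test classes:

    // Illustrative sketch only.
    class Peer {
        private void work() {
            System.out.println("work");
        }

        void call(Peer other) {
            other.work();  // invoke-direct; must throw NPE when other == null
        }

        public static void main(String[] args) {
            try {
                new Peer().call(null);
            } catch (NullPointerException expected) {
                System.out.println("NPE as required");
            }
        }
    }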
diff --git a/test/127-secondarydex/run b/test/127-secondarydex/run
new file mode 100755
index 0000000..d8c3c79
--- /dev/null
+++ b/test/127-secondarydex/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use secondary switch to add secondary dex file to class path.
+exec ${RUN} "${@}" --secondary
diff --git a/test/127-secondarydex/src/Main.java b/test/127-secondarydex/src/Main.java
new file mode 100644
index 0000000..c921c5b
--- /dev/null
+++ b/test/127-secondarydex/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+/**
+ * Secondary dex file test.
+ */
+public class Main {
+ public static void main(String[] args) {
+ testSlowPathDirectInvoke();
+ }
+
+ public static void testSlowPathDirectInvoke() {
+ System.out.println("testSlowPathDirectInvoke");
+ try {
+ Test t1 = new Test();
+ Test t2 = new Test();
+ Test t3 = null;
+ t1.test(t2);
+ t1.test(t3);
+ } catch (NullPointerException npe) {
+ System.out.println("Got null pointer exception");
+ } catch (Exception e) {
+ System.out.println("Got unexpected exception " + e);
+ }
+ }
+}
diff --git a/runtime/log_severity.h b/test/127-secondarydex/src/Super.java
similarity index 62%
copy from runtime/log_severity.h
copy to test/127-secondarydex/src/Super.java
index 31682df..7608d4a 100644
--- a/runtime/log_severity.h
+++ b/test/127-secondarydex/src/Super.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
-
-typedef int LogSeverity;
-
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
-
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+public class Super {
+ private void print() {
+ System.out.println("Super");
+ }
+}
diff --git a/runtime/log_severity.h b/test/127-secondarydex/src/Test.java
similarity index 62%
copy from runtime/log_severity.h
copy to test/127-secondarydex/src/Test.java
index 31682df..82cb901 100644
--- a/runtime/log_severity.h
+++ b/test/127-secondarydex/src/Test.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,12 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+public class Test extends Super {
+ public void test(Test t) {
+ t.print();
+ }
-typedef int LogSeverity;
-
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
-
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+ private void print() {
+ System.out.println("Test");
+ }
+}
diff --git a/test/128-reg-spilling-on-implicit-nullcheck/expected.txt b/test/128-reg-spilling-on-implicit-nullcheck/expected.txt
new file mode 100644
index 0000000..9bdf658
--- /dev/null
+++ b/test/128-reg-spilling-on-implicit-nullcheck/expected.txt
@@ -0,0 +1 @@
+t7q = 2
diff --git a/test/128-reg-spilling-on-implicit-nullcheck/info.txt b/test/128-reg-spilling-on-implicit-nullcheck/info.txt
new file mode 100644
index 0000000..18b2112
--- /dev/null
+++ b/test/128-reg-spilling-on-implicit-nullcheck/info.txt
@@ -0,0 +1 @@
+This is a compiler regression test for missing register spilling on an implicit null check.
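With an implicit null check the compiler emits no explicit comparison: the memory access itself faults on a null receiver and the runtime turns the fault into a NullPointerException. Any value still needed on the exception path must therefore be spilled before the faulting access. A minimal, self-contained sketch of that shape with hypothetical names (the test's own Main.java follows below):

    public class SpillSketch {
        static class Holder { int field; }

        public static void main(String[] args) {
            int live = 2;              // value the compiler may keep in a register
            Holder h = null;
            try {
                int unused = h.field;  // implicit null check: the load itself faults
            } catch (NullPointerException e) {
                // The exception path must still observe the correct value of 'live'.
                System.out.println("live = " + live);
            }
        }
    }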
diff --git a/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java b/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java
new file mode 100644
index 0000000..48276bf
--- /dev/null
+++ b/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) {
+ int t7q = 0;
+ long q = 1L;
+
+ try {
+ for (int i = 1; i < 8; i++) {
+ t7q = (--t7q);
+ TestClass f = null;
+ t7q = f.field;
+ }
+ }
+ catch (NullPointerException wpw) {
+ q++;
+ }
+ finally {
+ t7q += (int)(1 - ((q - q) - 2));
+ }
+
+ System.out.println("t7q = " + t7q);
+ }
+}
+
+class TestClass {
+ public int field;
+ public void meth() {field = 1;}
+}
diff --git a/test/303-verification-stress/build b/test/303-verification-stress/build
index c1935d2..789d38e 100644
--- a/test/303-verification-stress/build
+++ b/test/303-verification-stress/build
@@ -18,7 +18,7 @@
set -e
# Write out a bunch of source files.
-gcc -o classes-gen classes-gen.c
+gcc -Wall -Werror -o classes-gen classes-gen.c
./classes-gen
mkdir classes
diff --git a/test/303-verification-stress/classes-gen.c b/test/303-verification-stress/classes-gen.c
index be6cfa7..1f2fe3b 100644
--- a/test/303-verification-stress/classes-gen.c
+++ b/test/303-verification-stress/classes-gen.c
@@ -26,11 +26,11 @@
fprintf(fp, "public class Test%03d {\n", i);
fprintf(fp, " static String[] array = new String[%d];\n", array_size);
- fprintf(fp, " static {\n", array_size);
+ fprintf(fp, " static {\n");
for (k = 0; k < array_size; k++) {
fprintf(fp, " array[%d] = \"string_%04d\";\n", k, k);
}
- fprintf(fp, " }\n", array_size);
+ fprintf(fp, " }\n");
fprintf(fp, "}\n");
fclose(fp);
}
diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java
index 07c407b..7c3fd25 100644
--- a/test/401-optimizing-compiler/src/Main.java
+++ b/test/401-optimizing-compiler/src/Main.java
@@ -94,6 +94,14 @@
exception = e;
}
+ // Test that we do NPE checks on array length.
+ exception = null;
+ try {
+ $opt$ArrayLengthOfNull(null);
+ } catch (NullPointerException e) {
+ exception = e;
+ }
+
if (exception == null) {
throw new Error("Missing NullPointerException");
}
@@ -218,5 +226,9 @@
return 42;
}
+ public static int $opt$ArrayLengthOfNull(int[] array) {
+ return array.length;
+ }
+
Object o;
}
diff --git a/test/406-fields/src/Main.java b/test/406-fields/src/Main.java
index 3e94e42..768a784 100644
--- a/test/406-fields/src/Main.java
+++ b/test/406-fields/src/Main.java
@@ -30,6 +30,8 @@
assertEquals(0, fields.iI);
assertEquals(0, fields.iJ);
assertEquals(0, fields.iS);
+ assertEquals(0.0f, fields.iF);
+ assertEquals(0.0, fields.iD);
assertNull(fields.iObject);
long longValue = -1122198787987987987L;
@@ -40,6 +42,8 @@
fields.iJ = longValue;
fields.iS = 68;
fields.iObject = fields;
+ fields.iF = 2.3f;
+ fields.iD = 5.3;
assertEquals(true, fields.iZ);
assertEquals(-2, fields.iB);
@@ -48,6 +52,8 @@
assertEquals(longValue, fields.iJ);
assertEquals(68, fields.iS);
assertEquals(fields, fields.iObject);
+ assertEquals(2.3f, fields.iF);
+ assertEquals(5.3, fields.iD);
}
static class AllFields {
diff --git a/test/410-floats/src/Main.java b/test/410-floats/src/Main.java
index d8d6fac..2300457 100644
--- a/test/410-floats/src/Main.java
+++ b/test/410-floats/src/Main.java
@@ -17,9 +17,10 @@
public class Main {
public static void main(String[] args) {
assertEquals(4.2f, returnFloat());
- float[] a = new float[1];
+ float[] a = new float[2];
a[0] = 42.2f;
- assertEquals(42.2f, returnFloat(a));
+ a[1] = 3.2f;
+ assertEquals(45.4f, returnFloat(a));
assertEquals(4.4, returnDouble());
double[] b = new double[1];
@@ -36,6 +37,9 @@
assertEquals(3.1, invokeTakeADouble(3.1));
assertEquals(12.7, invokeTakeThreeDouble(3.1, 4.4, 5.2));
assertEquals(12.7f, invokeTakeThreeFloat(3.1f, 4.4f, 5.2f));
+
+ testArrayOperations(new float[2], 0, 1.2f, 3.4f);
+ testArrayOperations(new double[2], 0, 4.1, 7.6);
}
public static float invokeReturnFloat() {
@@ -51,7 +55,7 @@
}
public static float returnFloat(float[] a) {
- return a[0];
+ return a[0] + a[1];
}
public static double returnDouble() {
@@ -94,6 +98,34 @@
return takeThreeFloat(a, b, c);
}
+ // Test simple operations on a float array to ensure the register allocator works
+ // properly.
+ public static void testArrayOperations(float[] a, int index, float value1, float value2) {
+ a[0] = value1;
+ a[1] = value2;
+ assertEquals(value1 + value2, a[0] + a[1]);
+ a[0] = 0.0f;
+ a[1] = 0.0f;
+ assertEquals(0.0f, a[0] + a[1]);
+ a[index] = value1;
+ a[index + 1] = value2;
+ assertEquals(value1 + value2, a[0] + a[1]);
+ }
+
+ // Test simple operations on a double array to ensure the register allocator works
+ // properly.
+ public static void testArrayOperations(double[] a, int index, double value1, double value2) {
+ a[0] = value1;
+ a[1] = value2;
+ assertEquals(value1 + value2, a[0] + a[1]);
+ a[0] = 0.0;
+ a[1] = 0.0;
+ assertEquals(0.0, a[0] + a[1]);
+ a[index] = value1;
+ a[index + 1] = value2;
+ assertEquals(value1 + value2, a[0] + a[1]);
+ }
+
public static void assertEquals(float expected, float actual) {
if (expected != actual) {
throw new AssertionError("Expected " + expected + " got " + actual);
diff --git a/test/411-optimizing-arith/src/Main.java b/test/411-optimizing-arith/src/Main.java
index 74c47a6..3a5d7c0 100644
--- a/test/411-optimizing-arith/src/Main.java
+++ b/test/411-optimizing-arith/src/Main.java
@@ -16,7 +16,6 @@
// Note that $opt$ is a marker for the optimizing compiler to ensure
// it does compile the method.
-
public class Main {
public static void expectEquals(int expected, int result) {
@@ -31,28 +30,139 @@
}
}
+ public static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectApproxEquals(float a, float b, float maxDelta) {
+    boolean approxEquals = (a > b)
+        ? ((a - b) < maxDelta)
+        : ((b - a) < maxDelta);
+    if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: " + b + ", with delta: " + maxDelta);
+ }
+ }
+
+ public static void expectApproxEquals(double a, double b, double maxDelta) {
+    boolean approxEquals = (a > b)
+        ? ((a - b) < maxDelta)
+        : ((b - a) < maxDelta);
+    if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: " + b + ", with delta: " + maxDelta);
+ }
+ }
+
+ public static void expectNaN(float a) {
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
+ public static void expectNaN(double a) {
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
public static void main(String[] args) {
mul();
}
public static void mul() {
+ mulInt();
+ mulLong();
+ mulFloat();
+ mulDouble();
+ }
+
+ private static void mulInt() {
expectEquals(15, $opt$Mul(5, 3));
+ expectEquals(0, $opt$Mul(0, 0));
expectEquals(0, $opt$Mul(0, 3));
expectEquals(0, $opt$Mul(3, 0));
expectEquals(-3, $opt$Mul(1, -3));
expectEquals(36, $opt$Mul(-12, -3));
expectEquals(33, $opt$Mul(1, 3) * 11);
expectEquals(671088645, $opt$Mul(134217729, 5)); // (2^27 + 1) * 5
+ }
+ private static void mulLong() {
expectEquals(15L, $opt$Mul(5L, 3L));
+ expectEquals(0L, $opt$Mul(0L, 0L));
expectEquals(0L, $opt$Mul(0L, 3L));
expectEquals(0L, $opt$Mul(3L, 0L));
expectEquals(-3L, $opt$Mul(1L, -3L));
expectEquals(36L, $opt$Mul(-12L, -3L));
- expectEquals(33L, $opt$Mul(1L, 3L) * 11);
+ expectEquals(33L, $opt$Mul(1L, 3L) * 11L);
expectEquals(240518168583L, $opt$Mul(34359738369L, 7L)); // (2^35 + 1) * 7
}
+ private static void mulFloat() {
+ expectApproxEquals(15F, $opt$Mul(5F, 3F), 0.0001F);
+ expectApproxEquals(0F, $opt$Mul(0F, 0F), 0.0001F);
+ expectApproxEquals(0F, $opt$Mul(0F, 3F), 0.0001F);
+ expectApproxEquals(0F, $opt$Mul(3F, 0F), 0.0001F);
+ expectApproxEquals(-3F, $opt$Mul(1F, -3F), 0.0001F);
+ expectApproxEquals(36F, $opt$Mul(-12F, -3F), 0.0001F);
+ expectApproxEquals(33F, $opt$Mul(1F, 3F) * 11F, 0.0001F);
+ expectApproxEquals(0.02F, 0.1F * 0.2F, 0.0001F);
+ expectApproxEquals(-0.1F, -0.5F * 0.2F, 0.0001F);
+
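+    // Any product involving NaN, as well as 0 * infinity, must be NaN.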
+ expectNaN($opt$Mul(0F, Float.POSITIVE_INFINITY));
+ expectNaN($opt$Mul(0F, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Mul(Float.NaN, 11F));
+ expectNaN($opt$Mul(Float.NaN, -11F));
+ expectNaN($opt$Mul(Float.NaN, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Mul(Float.NaN, Float.POSITIVE_INFINITY));
+
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Mul(2F, 3.40282346638528860e+38F));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Mul(2F, Float.POSITIVE_INFINITY));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Mul(-2F, Float.POSITIVE_INFINITY));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Mul(-2F, 3.40282346638528860e+38F));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Mul(2F, Float.NEGATIVE_INFINITY));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Mul(-2F, Float.NEGATIVE_INFINITY));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Mul(Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Mul(Float.POSITIVE_INFINITY, Float.POSITIVE_INFINITY));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Mul(Float.NEGATIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ }
+
+ private static void mulDouble() {
+ expectApproxEquals(15D, $opt$Mul(5D, 3D), 0.0001D);
+ expectApproxEquals(0D, $opt$Mul(0D, 0D), 0.0001D);
+ expectApproxEquals(0D, $opt$Mul(0D, 3D), 0.0001D);
+ expectApproxEquals(0D, $opt$Mul(3D, 0D), 0.0001D);
+ expectApproxEquals(-3D, $opt$Mul(1D, -3D), 0.0001D);
+ expectApproxEquals(36D, $opt$Mul(-12D, -3D), 0.0001D);
+ expectApproxEquals(33D, $opt$Mul(1D, 3D) * 11D, 0.0001D);
+ expectApproxEquals(0.02D, 0.1D * 0.2D, 0.0001D);
+ expectApproxEquals(-0.1D, -0.5D * 0.2D, 0.0001D);
+
+ expectNaN($opt$Mul(0D, Double.POSITIVE_INFINITY));
+ expectNaN($opt$Mul(0D, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Mul(Double.NaN, 11D));
+ expectNaN($opt$Mul(Double.NaN, -11D));
+ expectNaN($opt$Mul(Double.NaN, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Mul(Double.NaN, Double.POSITIVE_INFINITY));
+
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Mul(2D, 1.79769313486231570e+308));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Mul(2D, Double.POSITIVE_INFINITY));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Mul(-2D, Double.POSITIVE_INFINITY));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Mul(-2D, 1.79769313486231570e+308));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Mul(2D, Double.NEGATIVE_INFINITY));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Mul(-2D, Double.NEGATIVE_INFINITY));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Mul(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Mul(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Mul(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ }
+
static int $opt$Mul(int a, int b) {
return a * b;
}
@@ -61,4 +171,11 @@
return a * b;
}
+ static float $opt$Mul(float a, float b) {
+ return a * b;
+ }
+
+ static double $opt$Mul(double a, double b) {
+ return a * b;
+ }
}
diff --git a/test/412-new-array/expected.txt b/test/412-new-array/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/412-new-array/expected.txt
diff --git a/test/412-new-array/info.txt b/test/412-new-array/info.txt
new file mode 100644
index 0000000..cb388b6
--- /dev/null
+++ b/test/412-new-array/info.txt
@@ -0,0 +1 @@
+Simple tests for new-array, filled-new-array and fill-array-data.
diff --git a/test/412-new-array/smali/fill_array_data.smali b/test/412-new-array/smali/fill_array_data.smali
new file mode 100644
index 0000000..34776db
--- /dev/null
+++ b/test/412-new-array/smali/fill_array_data.smali
@@ -0,0 +1,81 @@
+.class public LFillArrayData;
+
+.super Ljava/lang/Object;
+
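+# Each method takes an array in v0 (its single parameter register) and runs
+# fill-array-data on it, copying the literal .array-data payload into the
+# leading elements; a null or too-short array makes the instruction throw.
+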
+.method public static intArray([I)V
+ .registers 1
+
+ fill-array-data v0, :ArrayData
+ return-void
+
+:ArrayData
+ .array-data 4
+ 1 2 3 4 5
+ .end array-data
+
+.end method
+
+.method public static shortArray([S)V
+ .registers 1
+
+ fill-array-data v0, :ArrayData
+ return-void
+
+:ArrayData
+ .array-data 2
+ 1 2 3 4 5
+ .end array-data
+
+.end method
+
+.method public static charArray([C)V
+ .registers 1
+
+ fill-array-data v0, :ArrayData
+ return-void
+
+:ArrayData
+ .array-data 2
+ 1 2 3 4 5
+ .end array-data
+
+.end method
+
+.method public static byteArray([B)V
+ .registers 1
+
+ fill-array-data v0, :ArrayData
+ return-void
+
+:ArrayData
+ .array-data 1
+ 1 2 3 4 5
+ .end array-data
+
+.end method
+
+.method public static booleanArray([Z)V
+ .registers 1
+
+ fill-array-data v0, :ArrayData
+ return-void
+
+:ArrayData
+ .array-data 1
+ 0 1 1
+ .end array-data
+
+.end method
+
+.method public static longArray([J)V
+ .registers 1
+
+ fill-array-data v0, :ArrayData
+ return-void
+
+:ArrayData
+ .array-data 8
+ 1 2 3 4 5
+ .end array-data
+
+.end method
diff --git a/test/412-new-array/smali/filled_new_array.smali b/test/412-new-array/smali/filled_new_array.smali
new file mode 100644
index 0000000..ed8683a
--- /dev/null
+++ b/test/412-new-array/smali/filled_new_array.smali
@@ -0,0 +1,45 @@
+.class public LFilledNewArray;
+
+.super Ljava/lang/Object;
+
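+# filled-new-array builds a new array out of the listed registers; the result
+# has to be picked up with move-result-object before it can be used.
+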
+.method public static newInt(III)[I
+ .registers 4
+ filled-new-array {v1, v2, v3}, [I
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public static newRef(Ljava/lang/Object;Ljava/lang/Object;)[Ljava/lang/Object;
+ .registers 3
+ filled-new-array {v1, v2}, [Ljava/lang/Object;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public static newArray([I[I)[[I
+ .registers 3
+ filled-new-array {v1, v2}, [[I
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public static newIntRange(III)[I
+ .registers 4
+ filled-new-array/range {v1 .. v3}, [I
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public static newRefRange(Ljava/lang/Object;Ljava/lang/Object;)[Ljava/lang/Object;
+ .registers 3
+ filled-new-array/range {v1 .. v2}, [Ljava/lang/Object;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public static newArrayRange([I[I)[[I
+ .registers 3
+ filled-new-array/range {v1 .. v2}, [[I
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/412-new-array/smali/filled_new_array_verify_error.smali b/test/412-new-array/smali/filled_new_array_verify_error.smali
new file mode 100644
index 0000000..b1470ec
--- /dev/null
+++ b/test/412-new-array/smali/filled_new_array_verify_error.smali
@@ -0,0 +1,10 @@
+.class public LFilledNewArrayVerifyError;
+
+.super Ljava/lang/Object;
+
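+# This method is rejected by the verifier: the plain Object arguments in v1 and
+# v2 are not assignable to the declared component type Ljava/lang/Integer;.
+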
+.method public static newRef(Ljava/lang/Object;Ljava/lang/Object;)[Ljava/lang/Object;
+ .registers 3
+ filled-new-array {v1, v2}, [Ljava/lang/Integer;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/412-new-array/src/Main.java b/test/412-new-array/src/Main.java
new file mode 100644
index 0000000..168420c
--- /dev/null
+++ b/test/412-new-array/src/Main.java
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+
+public class Main extends TestCase {
+ public static void main(String[] args) throws Exception {
+ $opt$TestAllocations();
+ $opt$TestWithInitializations();
+ $opt$TestNegativeValueNewByteArray();
+ $opt$TestNegativeValueNewCharArray();
+ testSmaliFilledNewArray();
+ testSmaliFillArrayData();
+ testSmaliVerifyError();
+ }
+
+ static void $opt$TestAllocations() {
+ float[] a = new float[1];
+ assertEquals(1, a.length);
+
+ double[] b = new double[2];
+ assertEquals(2, b.length);
+
+ long[] c = new long[3];
+ assertEquals(3, c.length);
+
+ int[] d = new int[4];
+ assertEquals(4, d.length);
+
+ short[] e = new short[5];
+ assertEquals(5, e.length);
+
+ char[] f = new char[6];
+ assertEquals(6, f.length);
+
+ byte[] g = new byte[7];
+ assertEquals(7, g.length);
+
+ boolean[] h = new boolean[8];
+ assertEquals(8, h.length);
+
+ Object[] i = new Object[9];
+ assertEquals(9, i.length);
+ }
+
+ static void $opt$TestWithInitializations() {
+ float[] a = { 1.2f };
+ assertEquals(1, a.length);
+ assertEquals(1.2f, a[0]);
+
+ double[] b = { 4.3, 1.2 };
+ assertEquals(2, b.length);
+ assertEquals(4.3, b[0]);
+ assertEquals(1.2, b[1]);
+
+ long[] c = { 4L, 5L };
+ assertEquals(2, c.length);
+ assertEquals(4L, c[0]);
+ assertEquals(5L, c[1]);
+
+ int[] d = {1, 2, 3};
+ assertEquals(3, d.length);
+ assertEquals(1, d[0]);
+ assertEquals(2, d[1]);
+ assertEquals(3, d[2]);
+
+ short[] e = {4, 5, 6};
+ assertEquals(3, e.length);
+ assertEquals(4, e[0]);
+ assertEquals(5, e[1]);
+ assertEquals(6, e[2]);
+
+ char[] f = {'a', 'b'};
+ assertEquals(2, f.length);
+ assertEquals('a', f[0]);
+ assertEquals('b', f[1]);
+
+ byte[] g = {7, 8, 9};
+ assertEquals(3, g.length);
+ assertEquals(7, g[0]);
+ assertEquals(8, g[1]);
+ assertEquals(9, g[2]);
+
+ boolean[] h = {true, false};
+ assertEquals(2, h.length);
+ assertEquals(true, h[0]);
+ assertEquals(false, h[1]);
+
+ Object obj1 = new Object();
+ Object obj2 = new Object();
+ Object[] i = {obj1, obj2};
+ assertEquals(2, i.length);
+ assertEquals(obj1, i[0]);
+ assertEquals(obj2, i[1]);
+ }
+
+ static void $opt$TestNegativeValueNewByteArray() {
+    // Use an array initializer to hint at the use of filled-new-array.
+ byte[] a = { (byte)0xa0, (byte)0xa1, (byte)0xa2, (byte)0xa3,
+ (byte)0xa4, (byte)0xa5, (byte)0xa6, (byte)0xa7 };
+ for (int i = 0; i < a.length; i++) {
+ assertEquals((byte)0xa0 + i, a[i]);
+ }
+ }
+
+ static void $opt$TestNegativeValueNewCharArray() {
+    // Use an array initializer to hint at the use of filled-new-array.
+ char[] a = { (char)0xa000, (char)0xa001, (char)0xa002, (char)0xa003,
+ (char)0xa004, (char)0xa005, (char)0xa006, (char)0xa007 };
+ for (int i = 0; i < a.length; i++) {
+ assertEquals((char)0xa000 + i, a[i]);
+ }
+ }
+
+ public static void testSmaliFilledNewArray() throws Exception {
+ Class<?> c = Class.forName("FilledNewArray");
+
+ {
+ Method m = c.getMethod("newInt", Integer.TYPE, Integer.TYPE, Integer.TYPE);
+ Object[] args = {new Integer(1), new Integer(2), new Integer(3)};
+ int[] result = (int[])m.invoke(null, args);
+ assertEquals(3, result.length);
+ assertEquals(1, result[0]);
+ assertEquals(2, result[1]);
+ assertEquals(3, result[2]);
+ }
+
+ {
+ Method m = c.getMethod("newRef", Object.class, Object.class);
+ Object[] args = {new Integer(1), new Integer(2)};
+ Object[] result = (Object[])m.invoke(null, args);
+ assertEquals(2, result.length);
+ assertEquals(args[0], result[0]);
+ assertEquals(args[1], result[1]);
+ }
+
+ {
+ Method m = c.getMethod("newArray", int[].class, int[].class);
+ Object[] args = {new int[0], new int[1]};
+ Object[] result = (Object[])m.invoke(null, args);
+ assertEquals(2, result.length);
+ assertEquals(args[0], result[0]);
+ assertEquals(args[1], result[1]);
+ }
+
+ {
+ Method m = c.getMethod("newIntRange", Integer.TYPE, Integer.TYPE, Integer.TYPE);
+ Object[] args = {new Integer(1), new Integer(2), new Integer(3)};
+ int[] result = (int[])m.invoke(null, args);
+ assertEquals(3, result.length);
+ assertEquals(1, result[0]);
+ assertEquals(2, result[1]);
+ assertEquals(3, result[2]);
+ }
+
+ {
+ Method m = c.getMethod("newRefRange", Object.class, Object.class);
+ Object[] args = {new Integer(1), new Integer(2)};
+ Object[] result = (Object[])m.invoke(null, args);
+ assertEquals(2, result.length);
+ assertEquals(args[0], result[0]);
+ assertEquals(args[1], result[1]);
+ }
+
+ {
+ Method m = c.getMethod("newArrayRange", int[].class, int[].class);
+ Object[] args = {new int[0], new int[1]};
+ Object[] result = (Object[])m.invoke(null, args);
+ assertEquals(2, result.length);
+ assertEquals(args[0], result[0]);
+ assertEquals(args[1], result[1]);
+ }
+ }
+
+ public static void testSmaliVerifyError() throws Exception {
+ Error error = null;
+    // Check that the elements passed to filled-new-array are assignable
+    // to the array component type.
+ try {
+ Class.forName("FilledNewArrayVerifyError");
+ } catch (VerifyError e) {
+ error = e;
+ }
+ assertNotNull(error);
+ }
+
+ public static void testSmaliFillArrayData() throws Exception {
+ Class<?> c = Class.forName("FillArrayData");
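+    // Each helper copies its literal payload into the start of the array it is
+    // given; a null or too-short array must throw before anything is written.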
+ {
+ Method m = c.getMethod("intArray", int[].class);
+ int[] array = new int[7];
+ Object[] args = { array };
+ m.invoke(null, args);
+ assertEquals(7, array.length);
+ assertEquals(1, array[0]);
+ assertEquals(2, array[1]);
+ assertEquals(3, array[2]);
+ assertEquals(4, array[3]);
+ assertEquals(5, array[4]);
+ assertEquals(0, array[5]);
+ assertEquals(0, array[6]);
+
+ array = new int[2];
+ args[0] = array;
+ Throwable exception = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof IndexOutOfBoundsException);
+ }
+ assertNotNull(exception);
+ exception = null;
+ // Test that nothing has been written to the array.
+ assertEquals(0, array[0]);
+ assertEquals(0, array[1]);
+
+ args[0] = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof NullPointerException);
+ }
+ assertNotNull(exception);
+ }
+
+ {
+ Method m = c.getMethod("shortArray", short[].class);
+ short[] array = new short[7];
+ Object[] args = { array };
+ m.invoke(null, args);
+ assertEquals(7, array.length);
+ assertEquals(1, array[0]);
+ assertEquals(2, array[1]);
+ assertEquals(3, array[2]);
+ assertEquals(4, array[3]);
+ assertEquals(5, array[4]);
+ assertEquals(0, array[5]);
+ assertEquals(0, array[6]);
+
+ array = new short[2];
+ args[0] = array;
+ Throwable exception = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof IndexOutOfBoundsException);
+ }
+ assertNotNull(exception);
+ exception = null;
+ // Test that nothing has been written to the array.
+ assertEquals(0, array[0]);
+ assertEquals(0, array[1]);
+
+ args[0] = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof NullPointerException);
+ }
+ assertNotNull(exception);
+ }
+
+ {
+ Method m = c.getMethod("longArray", long[].class);
+ long[] array = new long[7];
+ Object[] args = { array };
+ m.invoke(null, args);
+ assertEquals(7, array.length);
+ assertEquals(1L, array[0]);
+ assertEquals(2L, array[1]);
+ assertEquals(3L, array[2]);
+ assertEquals(4L, array[3]);
+ assertEquals(5L, array[4]);
+ assertEquals(0L, array[5]);
+ assertEquals(0L, array[6]);
+
+ array = new long[2];
+ args[0] = array;
+ Throwable exception = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof IndexOutOfBoundsException);
+ }
+ assertNotNull(exception);
+ exception = null;
+ // Test that nothing has been written to the array.
+ assertEquals(0, array[0]);
+ assertEquals(0, array[1]);
+
+ args[0] = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof NullPointerException);
+ }
+ assertNotNull(exception);
+ }
+
+ {
+ Method m = c.getMethod("charArray", char[].class);
+ char[] array = new char[7];
+ Object[] args = { array };
+ m.invoke(null, args);
+ assertEquals(7, array.length);
+ assertEquals(1, array[0]);
+ assertEquals(2, array[1]);
+ assertEquals(3, array[2]);
+ assertEquals(4, array[3]);
+ assertEquals(5, array[4]);
+ assertEquals(0, array[5]);
+ assertEquals(0, array[6]);
+
+ array = new char[2];
+ args[0] = array;
+ Throwable exception = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof IndexOutOfBoundsException);
+ }
+ assertNotNull(exception);
+ exception = null;
+ // Test that nothing has been written to the array.
+ assertEquals(0, array[0]);
+ assertEquals(0, array[1]);
+
+ args[0] = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof NullPointerException);
+ }
+ assertNotNull(exception);
+ }
+
+ {
+ Method m = c.getMethod("byteArray", byte[].class);
+ byte[] array = new byte[7];
+ Object[] args = { array };
+ m.invoke(null, args);
+ assertEquals(7, array.length);
+ assertEquals(1, array[0]);
+ assertEquals(2, array[1]);
+ assertEquals(3, array[2]);
+ assertEquals(4, array[3]);
+ assertEquals(5, array[4]);
+ assertEquals(0, array[5]);
+ assertEquals(0, array[6]);
+
+ array = new byte[2];
+ args[0] = array;
+ Throwable exception = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof IndexOutOfBoundsException);
+ }
+ assertNotNull(exception);
+ exception = null;
+ // Test that nothing has been written to the array.
+ assertEquals(0, array[0]);
+ assertEquals(0, array[1]);
+
+ args[0] = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof NullPointerException);
+ }
+ assertNotNull(exception);
+ }
+
+ {
+ Method m = c.getMethod("booleanArray", boolean[].class);
+ boolean[] array = new boolean[5];
+ Object[] args = { array };
+ m.invoke(null, args);
+ assertEquals(5, array.length);
+ assertEquals(false, array[0]);
+ assertEquals(true, array[1]);
+ assertEquals(true, array[2]);
+ assertEquals(false, array[3]);
+ assertEquals(false, array[4]);
+
+ array = new boolean[2];
+ args[0] = array;
+ Throwable exception = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof IndexOutOfBoundsException);
+ }
+ assertNotNull(exception);
+ exception = null;
+ // Test that nothing has been written to the array.
+ assertEquals(false, array[0]);
+ assertEquals(false, array[1]);
+
+ args[0] = null;
+ try {
+ m.invoke(null, args);
+ } catch (InvocationTargetException e) {
+ exception = e.getCause();
+ assertTrue(exception instanceof NullPointerException);
+ }
+ assertNotNull(exception);
+ }
+ }
+}
diff --git a/test/412-new-array/src/TestCase.java b/test/412-new-array/src/TestCase.java
new file mode 100644
index 0000000..ef77f71
--- /dev/null
+++ b/test/412-new-array/src/TestCase.java
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Common superclass for test cases.
+ */
+
+import java.util.Arrays;
+
+public abstract class TestCase {
+ public static void assertSame(Object expected, Object value) {
+ if (expected != value) {
+ throw new AssertionError("Objects are not the same: expected " +
+ String.valueOf(expected) + ", got " + String.valueOf(value));
+ }
+ }
+
+ public static void assertNotSame(Object expected, Object value) {
+ if (expected == value) {
+ throw new AssertionError(
+ "Objects are the same: " + String.valueOf(expected));
+ }
+ }
+
+ public static void assertEquals(String message, int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertTrue(String message, boolean condition) {
+ if (!condition) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertTrue(boolean condition) {
+ assertTrue("Expected true", condition);
+ }
+
+ public static void assertFalse(String message, boolean condition) {
+ if (condition) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertFalse(boolean condition) {
+ assertFalse("Expected false", condition);
+ }
+
+ public static void assertEquals(Object expected, Object actual) {
+ if (!expected.equals(actual)) {
+ String msg = "Expected \"" + expected + "\" but got \"" + actual + "\"";
+ throw new AssertionError(msg);
+ }
+ }
+
+ public static void assertNotEquals(int expected, int actual) {
+ if (expected == actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertNotEquals(Object expected, Object actual) {
+ if (expected.equals(actual)) {
+ String msg = "Objects are the same: " + String.valueOf(expected);
+ throw new AssertionError(msg);
+ }
+ }
+
+ public static <T> void assertArrayEquals(T[] actual, T... expected) {
+ assertTrue(Arrays.equals(expected, actual));
+ }
+
+ public static void assertEquals(
+ String message, Object expected, Object actual) {
+ if (!expected.equals(actual)) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(
+ String message, long expected, long actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(long expected, long actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(
+ String message, boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(
+ String message, float expected, float actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(float expected, float actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(float expected, float actual,
+ float tolerance) {
+ if ((actual < expected - tolerance) || (expected + tolerance < actual)) {
+ throw new AssertionError("Expected " + expected + " got " + actual +
+ " tolerance " + tolerance);
+ }
+ }
+
+ public static void assertEquals(
+ String message, double expected, double actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(double expected, double actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(double expected, double actual,
+ double tolerance) {
+ if ((actual < expected - tolerance) || (expected + tolerance < actual)) {
+ throw new AssertionError("Expected " + expected + " got " + actual +
+ " tolerance " + tolerance);
+ }
+ }
+
+ public static void assertSame(
+ String message, Object expected, Object actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNull(String message, Object object) {
+ if (object != null) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNull(Object object) {
+ assertNull("Expected null", object);
+ }
+
+ public static void assertNotNull(String message, Object object) {
+ if (object == null) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNotNull(Object object) {
+ assertNotNull("Expected non-null", object);
+ }
+
+ public static void fail(String msg) {
+ throw new AssertionError(msg);
+ }
+}
diff --git a/test/413-regalloc-regression/expected.txt b/test/413-regalloc-regression/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/413-regalloc-regression/expected.txt
diff --git a/test/413-regalloc-regression/info.txt b/test/413-regalloc-regression/info.txt
new file mode 100644
index 0000000..c706c1d
--- /dev/null
+++ b/test/413-regalloc-regression/info.txt
@@ -0,0 +1,2 @@
+Regression test for the linear scan register allocator, which used to
+fail when compiling removeElementAt on x86.
diff --git a/test/413-regalloc-regression/src/Main.java b/test/413-regalloc-regression/src/Main.java
new file mode 100644
index 0000000..3e649f8
--- /dev/null
+++ b/test/413-regalloc-regression/src/Main.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ private Object[] data;
+ private int size;
+
+ public Main() {
+ data = new Object[4];
+ size = 0;
+ }
+
+ public void removeElementAt(int index) {
+ for (int i = index; i < size - 1; i++) {
+ data[i] = data[i + 1];
+ }
+ data[--size] = null;
+ }
+
+ public static void main(String[] args) {
+ Main main = new Main();
+ main.size++;
+ main.removeElementAt(0);
+ if (main.size != 0) {
+ throw new Error("Unexpected size");
+ }
+ }
+}
diff --git a/test/414-optimizing-arith-sub/expected.txt b/test/414-optimizing-arith-sub/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/414-optimizing-arith-sub/expected.txt
diff --git a/test/414-optimizing-arith-sub/info.txt b/test/414-optimizing-arith-sub/info.txt
new file mode 100644
index 0000000..1eaa148
--- /dev/null
+++ b/test/414-optimizing-arith-sub/info.txt
@@ -0,0 +1 @@
+Subtraction tests.
diff --git a/test/414-optimizing-arith-sub/src/Main.java b/test/414-optimizing-arith-sub/src/Main.java
new file mode 100644
index 0000000..30e8436
--- /dev/null
+++ b/test/414-optimizing-arith-sub/src/Main.java
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectApproxEquals(float a, float b) {
+ float maxDelta = 0.0001F;
+    boolean approxEquals = (a > b) ? ((a - b) < maxDelta) : ((b - a) < maxDelta);
+    if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: " + b + ", with delta: " + maxDelta + " " + (a - b));
+ }
+ }
+
+ public static void expectApproxEquals(double a, double b) {
+ double maxDelta = 0.00001D;
+    boolean approxEquals = (a > b) ? ((a - b) < maxDelta) : ((b - a) < maxDelta);
+    if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: " + b + ", with delta: " + maxDelta + " " + (a - b));
+ }
+ }
+
+ public static void expectNaN(float a) {
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
+ public static void expectNaN(double a) {
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
+ public static void main(String[] args) {
+ subInt();
+ subLong();
+ subFloat();
+ subDouble();
+ }
+
+ private static void subInt() {
+ expectEquals(2, $opt$Sub(5, 3));
+ expectEquals(0, $opt$Sub(0, 0));
+ expectEquals(-3, $opt$Sub(0, 3));
+ expectEquals(3, $opt$Sub(3, 0));
+ expectEquals(4, $opt$Sub(1, -3));
+ expectEquals(-9, $opt$Sub(-12, -3));
+ expectEquals(134217724, $opt$Sub(134217729, 5)); // (2^27 + 1) - 5
+ }
+
+ private static void subLong() {
+ expectEquals(2L, $opt$Sub(5L, 3L));
+ expectEquals(0L, $opt$Sub(0L, 0L));
+ expectEquals(-3L, $opt$Sub(0L, 3L));
+ expectEquals(3L, $opt$Sub(3L, 0L));
+ expectEquals(4L, $opt$Sub(1L, -3L));
+ expectEquals(-9L, $opt$Sub(-12L, -3L));
+ expectEquals(134217724L, $opt$Sub(134217729L, 5L)); // (2^27 + 1) - 5
+ expectEquals(34359738362L, $opt$Sub(34359738369L, 7L)); // (2^35 + 1) - 7
+ }
+
+ private static void subFloat() {
+ expectApproxEquals(2F, $opt$Sub(5F, 3F));
+ expectApproxEquals(0F, $opt$Sub(0F, 0F));
+ expectApproxEquals(-3F, $opt$Sub(0F, 3F));
+ expectApproxEquals(3F, $opt$Sub(3F, 0F));
+ expectApproxEquals(4F, $opt$Sub(1F, -3F));
+ expectApproxEquals(-9F, $opt$Sub(-12F, -3F));
+ expectApproxEquals(34359738362F, $opt$Sub(34359738369F, 7F)); // (2^35 + 1) - 7
+ expectApproxEquals(-0.1F, $opt$Sub(0.1F, 0.2F));
+ expectApproxEquals(0.2F, $opt$Sub(-0.5F, -0.7F));
+
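+    // Infinity minus an infinity of the same sign is NaN, and NaN propagates through subtraction.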
+ expectNaN($opt$Sub(Float.NEGATIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Float.POSITIVE_INFINITY, Float.POSITIVE_INFINITY));
+ expectNaN($opt$Sub(Float.NaN, 11F));
+ expectNaN($opt$Sub(Float.NaN, -11F));
+ expectNaN($opt$Sub(Float.NaN, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Float.NaN, Float.POSITIVE_INFINITY));
+
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Sub(-Float.MAX_VALUE, Float.MAX_VALUE));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Sub(2F, Float.POSITIVE_INFINITY));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Sub(Float.MAX_VALUE, -Float.MAX_VALUE));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Sub(2F, Float.NEGATIVE_INFINITY));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Sub(Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Sub(Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY));
+ }
+
+ private static void subDouble() {
+ expectApproxEquals(2D, $opt$Sub(5D, 3D));
+ expectApproxEquals(0D, $opt$Sub(0D, 0D));
+ expectApproxEquals(-3D, $opt$Sub(0D, 3D));
+ expectApproxEquals(3D, $opt$Sub(3D, 0D));
+ expectApproxEquals(4D, $opt$Sub(1D, -3D));
+ expectApproxEquals(-9D, $opt$Sub(-12D, -3D));
+ expectApproxEquals(134217724D, $opt$Sub(134217729D, 5D)); // (2^27 + 1) - 5
+ expectApproxEquals(34359738362D, $opt$Sub(34359738369D, 7D)); // (2^35 + 1) - 7
+ expectApproxEquals(-0.1D, $opt$Sub(0.1D, 0.2D));
+ expectApproxEquals(0.2D, $opt$Sub(-0.5D, -0.7D));
+
+ expectNaN($opt$Sub(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY));
+ expectNaN($opt$Sub(Double.NaN, 11D));
+ expectNaN($opt$Sub(Double.NaN, -11D));
+ expectNaN($opt$Sub(Double.NaN, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Double.NaN, Double.POSITIVE_INFINITY));
+
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Sub(-Double.MAX_VALUE, Double.MAX_VALUE));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Sub(2D, Double.POSITIVE_INFINITY));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Sub(Double.MAX_VALUE, -Double.MAX_VALUE));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Sub(2D, Double.NEGATIVE_INFINITY));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Sub(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Sub(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY));
+ }
+
+ static int $opt$Sub(int a, int b) {
+ return a - b;
+ }
+
+ static long $opt$Sub(long a, long b) {
+ return a - b;
+ }
+
+ static float $opt$Sub(float a, float b) {
+ return a - b;
+ }
+
+ static double $opt$Sub(double a, double b) {
+ return a - b;
+ }
+
+}
diff --git a/test/414-static-fields/expected.txt b/test/414-static-fields/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/414-static-fields/expected.txt
diff --git a/test/414-static-fields/info.txt b/test/414-static-fields/info.txt
new file mode 100644
index 0000000..1cdd3c7
--- /dev/null
+++ b/test/414-static-fields/info.txt
@@ -0,0 +1 @@
+Simple test for static field access.
diff --git a/test/414-static-fields/src/Main.java b/test/414-static-fields/src/Main.java
new file mode 100644
index 0000000..3403772
--- /dev/null
+++ b/test/414-static-fields/src/Main.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main extends TestCase {
+ public static void main(String[] args) {
+ $opt$TestThisClassStaticField();
+ $opt$TestOtherClassStaticField();
+ $opt$TestAddThisClassStaticField();
+ $opt$TestAddOtherClassStaticField();
+ $opt$TestOtherClassWithClinitStaticField();
+ $opt$TestAccess();
+ }
+
+ static int staticField = 42;
+
+ static int getInt() {
+ return 33;
+ }
+
+ static void $opt$TestThisClassStaticField() {
+ assertEquals(42, staticField);
+ }
+
+ static void $opt$TestOtherClassStaticField() {
+ assertEquals(41, Other.staticField);
+ }
+
+ static void $opt$TestAddThisClassStaticField() {
+ int a = getInt();
+ assertEquals(a + 42, a + staticField);
+ }
+
+ static void $opt$TestAddOtherClassStaticField() {
+ int a = getInt();
+ assertEquals(a + 41, a + Other.staticField);
+ }
+
+ static void $opt$TestOtherClassWithClinitStaticField() {
+ assertEquals(40, OtherWithClinit.staticField);
+ }
+
+ static void $opt$TestAccess() {
+ assertEquals(false, sZ);
+ assertEquals(0, sB);
+ assertEquals(0, sC);
+ assertEquals(0, sI);
+ assertEquals(0, sJ);
+ assertEquals(0, sS);
+ assertEquals(0.0f, sF);
+ assertEquals(0.0, sD);
+ assertNull(sObject);
+
+ long longValue = -1122198787987987987L;
+ Object o = new Object();
+ sZ = true;
+ sB = -2;
+ sC = 'c';
+ sI = 42;
+ sJ = longValue;
+ sS = 68;
+ sObject = o;
+ sF = 2.3f;
+ sD = 5.3;
+
+ assertEquals(true, sZ);
+ assertEquals(-2, sB);
+ assertEquals('c', sC);
+ assertEquals(42, sI);
+ assertEquals(longValue, sJ);
+ assertEquals(68, sS);
+ assertEquals(o, sObject);
+ assertEquals(2.3f, sF);
+ assertEquals(5.3, sD);
+ }
+
+ static boolean sZ;
+ static byte sB;
+ static char sC;
+ static double sD;
+ static float sF;
+ static int sI;
+ static long sJ;
+ static short sS;
+ static Object sObject;
+}
diff --git a/runtime/log_severity.h b/test/414-static-fields/src/Other.java
similarity index 62%
copy from runtime/log_severity.h
copy to test/414-static-fields/src/Other.java
index 31682df..aede930 100644
--- a/runtime/log_severity.h
+++ b/test/414-static-fields/src/Other.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,6 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
-
-typedef int LogSeverity;
-
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
-
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+public class Other {
+ static int staticField = 41;
+}
diff --git a/runtime/log_severity.h b/test/414-static-fields/src/OtherWithClinit.java
similarity index 62%
copy from runtime/log_severity.h
copy to test/414-static-fields/src/OtherWithClinit.java
index 31682df..eafbd2b 100644
--- a/runtime/log_severity.h
+++ b/test/414-static-fields/src/OtherWithClinit.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,12 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LOG_SEVERITY_H_
-#define ART_RUNTIME_LOG_SEVERITY_H_
+public class OtherWithClinit {
+ static int staticField = 39;
-typedef int LogSeverity;
-
-const int VERBOSE = 0, DEBUG = 1, INFO = 2, WARNING = 3, ERROR = 4, FATAL = 5;
-const int INTERNAL_FATAL = 6; // For Runtime::Abort.
-
-#endif // ART_RUNTIME_LOG_SEVERITY_H_
+ static {
+ staticField = 40;
+ }
+}
diff --git a/test/414-static-fields/src/TestCase.java b/test/414-static-fields/src/TestCase.java
new file mode 100644
index 0000000..ef77f71
--- /dev/null
+++ b/test/414-static-fields/src/TestCase.java
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Common superclass for test cases.
+ */
+
+import java.util.Arrays;
+
+public abstract class TestCase {
+ public static void assertSame(Object expected, Object value) {
+ if (expected != value) {
+ throw new AssertionError("Objects are not the same: expected " +
+ String.valueOf(expected) + ", got " + String.valueOf(value));
+ }
+ }
+
+ public static void assertNotSame(Object expected, Object value) {
+ if (expected == value) {
+ throw new AssertionError(
+ "Objects are the same: " + String.valueOf(expected));
+ }
+ }
+
+ public static void assertEquals(String message, int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertTrue(String message, boolean condition) {
+ if (!condition) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertTrue(boolean condition) {
+ assertTrue("Expected true", condition);
+ }
+
+ public static void assertFalse(String message, boolean condition) {
+ if (condition) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertFalse(boolean condition) {
+ assertFalse("Expected false", condition);
+ }
+
+ public static void assertEquals(Object expected, Object actual) {
+ if (!expected.equals(actual)) {
+ String msg = "Expected \"" + expected + "\" but got \"" + actual + "\"";
+ throw new AssertionError(msg);
+ }
+ }
+
+ public static void assertNotEquals(int expected, int actual) {
+ if (expected == actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertNotEquals(Object expected, Object actual) {
+ if (expected.equals(actual)) {
+ String msg = "Objects are the same: " + String.valueOf(expected);
+ throw new AssertionError(msg);
+ }
+ }
+
+ public static <T> void assertArrayEquals(T[] actual, T... expected) {
+ assertTrue(Arrays.equals(expected, actual));
+ }
+
+ public static void assertEquals(
+ String message, Object expected, Object actual) {
+ if (!expected.equals(actual)) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(
+ String message, long expected, long actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(long expected, long actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(
+ String message, boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(
+ String message, float expected, float actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(float expected, float actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(float expected, float actual,
+ float tolerance) {
+ if ((actual < expected - tolerance) || (expected + tolerance < actual)) {
+ throw new AssertionError("Expected " + expected + " got " + actual +
+ " tolerance " + tolerance);
+ }
+ }
+
+ public static void assertEquals(
+ String message, double expected, double actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(double expected, double actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(double expected, double actual,
+ double tolerance) {
+ if ((actual < expected - tolerance) || (expected + tolerance < actual)) {
+ throw new AssertionError("Expected " + expected + " got " + actual +
+ " tolerance " + tolerance);
+ }
+ }
+
+ public static void assertSame(
+ String message, Object expected, Object actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNull(String message, Object object) {
+ if (object != null) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNull(Object object) {
+ assertNull("Expected null", object);
+ }
+
+ public static void assertNotNull(String message, Object object) {
+ if (object == null) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNotNull(Object object) {
+ assertNotNull("Expected non-null", object);
+ }
+
+ public static void fail(String msg) {
+ throw new AssertionError(msg);
+ }
+}
diff --git a/test/415-optimizing-arith-neg/expected.txt b/test/415-optimizing-arith-neg/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/415-optimizing-arith-neg/expected.txt
diff --git a/test/415-optimizing-arith-neg/info.txt b/test/415-optimizing-arith-neg/info.txt
new file mode 100644
index 0000000..8494aad
--- /dev/null
+++ b/test/415-optimizing-arith-neg/info.txt
@@ -0,0 +1 @@
+Tests for arithmetic negation operations.
diff --git a/test/415-optimizing-arith-neg/src/Main.java b/test/415-optimizing-arith-neg/src/Main.java
new file mode 100644
index 0000000..d9f8bcf
--- /dev/null
+++ b/test/415-optimizing-arith-neg/src/Main.java
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void assertEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertIsNaN(float result) {
+ if (!Float.isNaN(result)) {
+ throw new Error("Expected NaN: " + result);
+ }
+ }
+
+ public static void assertIsNaN(double result) {
+ if (!Double.isNaN(result)) {
+ throw new Error("Expected NaN: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ negInt();
+ $opt$InplaceNegOneInt(1);
+
+ negLong();
+ $opt$InplaceNegOneLong(1L);
+
+ negFloat();
+ negDouble();
+ }
+
+ private static void negInt() {
+ assertEquals(-1, $opt$NegInt(1));
+ assertEquals(1, $opt$NegInt(-1));
+ assertEquals(0, $opt$NegInt(0));
+ assertEquals(51, $opt$NegInt(-51));
+ assertEquals(-51, $opt$NegInt(51));
+ assertEquals(2147483647, $opt$NegInt(-2147483647)); // -(2^31 - 1)
+ assertEquals(-2147483647, $opt$NegInt(2147483647)); // 2^31 - 1
+    // From the Java SE 7 Edition of the Java Language Specification:
+ // http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.15.4
+ //
+ // For integer values, negation is the same as subtraction from
+ // zero. The Java programming language uses two's-complement
+ // representation for integers, and the range of two's-complement
+ // values is not symmetric, so negation of the maximum negative
+ // int or long results in that same maximum negative number.
+ // Overflow occurs in this case, but no exception is thrown.
+    // For all integer values x, -x equals (~x)+1.
+ assertEquals(-2147483648, $opt$NegInt(-2147483648)); // -(2^31)
+ }
+
+ private static void $opt$InplaceNegOneInt(int a) {
+ a = -a;
+ assertEquals(-1, a);
+ }
+
+ private static void negLong() {
+ assertEquals(-1L, $opt$NegLong(1L));
+ assertEquals(1L, $opt$NegLong(-1L));
+ assertEquals(0L, $opt$NegLong(0L));
+ assertEquals(51L, $opt$NegLong(-51L));
+ assertEquals(-51L, $opt$NegLong(51L));
+
+ assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // -(2^31 - 1)
+ assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // (2^31 - 1)
+ assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // -(2^31)
+ assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // 2^31
+
+ assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // -(2^63 - 1)
+ assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // 2^63 - 1
+ // See remark regarding the negation of the maximum negative
+ // (long) value in negInt().
+ assertEquals(-9223372036854775808L, $opt$NegLong(-9223372036854775808L)); // -(2^63)
+ }
+
+ private static void $opt$InplaceNegOneLong(long a) {
+ a = -a;
+ assertEquals(-1L, a);
+ }
+
+ private static void negFloat() {
+ assertEquals(-1F, $opt$NegFloat(1F));
+ assertEquals(1F, $opt$NegFloat(-1F));
+ assertEquals(0F, $opt$NegFloat(0F));
+ assertEquals(51F, $opt$NegFloat(-51F));
+ assertEquals(-51F, $opt$NegFloat(51F));
+
+ assertEquals(-0.1F, $opt$NegFloat(0.1F));
+ assertEquals(0.1F, $opt$NegFloat(-0.1F));
+ assertEquals(343597.38362F, $opt$NegFloat(-343597.38362F));
+ assertEquals(-343597.38362F, $opt$NegFloat(343597.38362F));
+
+ assertEquals(-Float.MIN_NORMAL, $opt$NegFloat(Float.MIN_NORMAL));
+ assertEquals(Float.MIN_NORMAL, $opt$NegFloat(-Float.MIN_NORMAL));
+ assertEquals(-Float.MIN_VALUE, $opt$NegFloat(Float.MIN_VALUE));
+ assertEquals(Float.MIN_VALUE, $opt$NegFloat(-Float.MIN_VALUE));
+ assertEquals(-Float.MAX_VALUE, $opt$NegFloat(Float.MAX_VALUE));
+ assertEquals(Float.MAX_VALUE, $opt$NegFloat(-Float.MAX_VALUE));
+
+ assertEquals(Float.NEGATIVE_INFINITY, $opt$NegFloat(Float.POSITIVE_INFINITY));
+ assertEquals(Float.POSITIVE_INFINITY, $opt$NegFloat(Float.NEGATIVE_INFINITY));
+ assertIsNaN($opt$NegFloat(Float.NaN));
+ }
+
+ private static void negDouble() {
+ assertEquals(-1D, $opt$NegDouble(1D));
+ assertEquals(1D, $opt$NegDouble(-1D));
+ assertEquals(0D, $opt$NegDouble(0D));
+ assertEquals(51D, $opt$NegDouble(-51D));
+ assertEquals(-51D, $opt$NegDouble(51D));
+
+ assertEquals(-0.1D, $opt$NegDouble(0.1D));
+ assertEquals(0.1D, $opt$NegDouble(-0.1D));
+ assertEquals(343597.38362D, $opt$NegDouble(-343597.38362D));
+ assertEquals(-343597.38362D, $opt$NegDouble(343597.38362D));
+
+ assertEquals(-Double.MIN_NORMAL, $opt$NegDouble(Double.MIN_NORMAL));
+ assertEquals(Double.MIN_NORMAL, $opt$NegDouble(-Double.MIN_NORMAL));
+ assertEquals(-Double.MIN_VALUE, $opt$NegDouble(Double.MIN_VALUE));
+ assertEquals(Double.MIN_VALUE, $opt$NegDouble(-Double.MIN_VALUE));
+ assertEquals(-Double.MAX_VALUE, $opt$NegDouble(Double.MAX_VALUE));
+ assertEquals(Double.MAX_VALUE, $opt$NegDouble(-Double.MAX_VALUE));
+
+ assertEquals(Double.NEGATIVE_INFINITY, $opt$NegDouble(Double.POSITIVE_INFINITY));
+ assertEquals(Double.POSITIVE_INFINITY, $opt$NegDouble(Double.NEGATIVE_INFINITY));
+ assertIsNaN($opt$NegDouble(Double.NaN));
+ }
+
+  static int $opt$NegInt(int a) {
+    return -a;
+  }
+
+  static long $opt$NegLong(long a) {
+    return -a;
+  }
+
+  static float $opt$NegFloat(float a) {
+    return -a;
+  }
+
+  static double $opt$NegDouble(double a) {
+    return -a;
+  }
+}
diff --git a/test/416-optimizing-arith-not/expected.txt b/test/416-optimizing-arith-not/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/416-optimizing-arith-not/expected.txt
diff --git a/test/416-optimizing-arith-not/info.txt b/test/416-optimizing-arith-not/info.txt
new file mode 100644
index 0000000..e9418fd
--- /dev/null
+++ b/test/416-optimizing-arith-not/info.txt
@@ -0,0 +1 @@
+Tests for bitwise not operations.
diff --git a/test/416-optimizing-arith-not/smali/not.smali b/test/416-optimizing-arith-not/smali/not.smali
new file mode 100644
index 0000000..6dea3b6
--- /dev/null
+++ b/test/416-optimizing-arith-not/smali/not.smali
@@ -0,0 +1,15 @@
+.class public LTestNot;
+
+.super Ljava/lang/Object;
+
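+# not-int and not-long compute the bitwise complement of their argument, which
+# in two's complement arithmetic equals (-a - 1).
+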
+.method public static $opt$NotInt(I)I
+ .registers 2
+ not-int v0, v1
+ return v0
+.end method
+
+.method public static $opt$NotLong(J)J
+ .registers 4
+ not-long v0, v2
+ return-wide v0
+.end method
diff --git a/test/416-optimizing-arith-not/src/Main.java b/test/416-optimizing-arith-not/src/Main.java
new file mode 100644
index 0000000..44c7d3c
--- /dev/null
+++ b/test/416-optimizing-arith-not/src/Main.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ notInt();
+ notLong();
+ }
+
+ private static void notInt() throws Exception {
+ expectEquals(1, smaliNotInt(-2));
+ expectEquals(0, smaliNotInt(-1));
+ expectEquals(-1, smaliNotInt(0));
+ expectEquals(-2, smaliNotInt(1));
+ expectEquals(2147483647, smaliNotInt(-2147483648)); // -(2^31)
+ expectEquals(2147483646, smaliNotInt(-2147483647)); // -(2^31 - 1)
+ expectEquals(-2147483647, smaliNotInt(2147483646)); // 2^31 - 2
+ expectEquals(-2147483648, smaliNotInt(2147483647)); // 2^31 - 1
+ }
+
+ private static void notLong() throws Exception {
+ expectEquals(1L, smaliNotLong(-2L));
+ expectEquals(0L, smaliNotLong(-1L));
+ expectEquals(-1L, smaliNotLong(0L));
+ expectEquals(-2L, smaliNotLong(1L));
+ expectEquals(2147483647L, smaliNotLong(-2147483648L)); // -(2^31)
+ expectEquals(2147483646L, smaliNotLong(-2147483647L)); // -(2^31 - 1)
+ expectEquals(-2147483647L, smaliNotLong(2147483646L)); // 2^31 - 2
+ expectEquals(-2147483648L, smaliNotLong(2147483647L)); // 2^31 - 1
+ expectEquals(9223372036854775807L, smaliNotLong(-9223372036854775808L)); // -(2^63)
+ expectEquals(9223372036854775806L, smaliNotLong(-9223372036854775807L)); // -(2^63 - 1)
+ expectEquals(-9223372036854775807L, smaliNotLong(9223372036854775806L)); // 2^63 - 2
+ expectEquals(-9223372036854775808L, smaliNotLong(9223372036854775807L)); // 2^63 - 1
+ }
+
+ // Wrappers around methods located in file not.smali.
+
+ private static int smaliNotInt(int a) throws Exception {
+ Class<?> c = Class.forName("TestNot");
+ Method m = c.getMethod("$opt$NotInt", int.class);
+ int result = (Integer)m.invoke(null, a);
+ return result;
+ }
+
+ private static long smaliNotLong(long a) throws Exception {
+ Class<?> c = Class.forName("TestNot");
+ Method m = c.getMethod("$opt$NotLong", long.class);
+ long result = (Long)m.invoke(null, a);
+ return result;
+ }
+}
diff --git a/test/417-optimizing-arith-div/expected.txt b/test/417-optimizing-arith-div/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/417-optimizing-arith-div/expected.txt
diff --git a/test/417-optimizing-arith-div/info.txt b/test/417-optimizing-arith-div/info.txt
new file mode 100644
index 0000000..1374b0f
--- /dev/null
+++ b/test/417-optimizing-arith-div/info.txt
@@ -0,0 +1 @@
+Tests for the division operation.
diff --git a/test/417-optimizing-arith-div/src/Main.java b/test/417-optimizing-arith-div/src/Main.java
new file mode 100644
index 0000000..909ceb4
--- /dev/null
+++ b/test/417-optimizing-arith-div/src/Main.java
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectApproxEquals(float a, float b) {
+ float maxDelta = 0.00001F;
+ boolean approxEquals = (a > b) ? ((a - b) < maxDelta) : ((b - a) < maxDelta);
+ if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: " + b
+ + ", with delta: " + maxDelta + " " + (a - b));
+ }
+ }
+
+ public static void expectApproxEquals(double a, double b) {
+ double maxDelta = 0.00001D;
+ boolean approxEquals = (a > b) ? ((a - b) < maxDelta) : ((b - a) < maxDelta);
+ if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: "
+ + b + ", with delta: " + maxDelta + " " + (a - b));
+ }
+ }
+
+ public static void expectNaN(float a) {
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
+ public static void expectNaN(double a) {
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
+ public static void expectDivisionByZero(int value) {
+ try {
+ $opt$Div(value, 0);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$DivZero(value);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
+
+ public static void expectDivisionByZero(long value) {
+ try {
+ $opt$Div(value, 0L);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$DivZero(value);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
+
+ public static void main(String[] args) {
+ div();
+ }
+
+ public static void div() {
+ divInt();
+ divLong();
+ divFloat();
+ divDouble();
+ }
+
+ private static void divInt() {
+ expectEquals(2, $opt$DivConst(6));
+ expectEquals(2, $opt$Div(6, 3));
+ expectEquals(6, $opt$Div(6, 1));
+ expectEquals(-2, $opt$Div(6, -3));
+ expectEquals(1, $opt$Div(4, 3));
+ expectEquals(-1, $opt$Div(4, -3));
+ expectEquals(5, $opt$Div(23, 4));
+ expectEquals(-5, $opt$Div(-23, 4));
+
+ expectEquals(-Integer.MAX_VALUE, $opt$Div(Integer.MAX_VALUE, -1));
+ expectEquals(Integer.MIN_VALUE, $opt$Div(Integer.MIN_VALUE, -1)); // overflow
+ expectEquals(-1073741824, $opt$Div(Integer.MIN_VALUE, 2));
+
+ expectEquals(0, $opt$Div(0, Integer.MAX_VALUE));
+ expectEquals(0, $opt$Div(0, Integer.MIN_VALUE));
+
+ expectDivisionByZero(0);
+ expectDivisionByZero(1);
+ expectDivisionByZero(Integer.MAX_VALUE);
+ expectDivisionByZero(Integer.MIN_VALUE);
+ }
+
+ private static void divLong() {
+ expectEquals(2L, $opt$DivConst(6L));
+ expectEquals(2L, $opt$Div(6L, 3L));
+ expectEquals(6L, $opt$Div(6L, 1L));
+ expectEquals(-2L, $opt$Div(6L, -3L));
+ expectEquals(1L, $opt$Div(4L, 3L));
+ expectEquals(-1L, $opt$Div(4L, -3L));
+ expectEquals(5L, $opt$Div(23L, 4L));
+ expectEquals(-5L, $opt$Div(-23L, 4L));
+
+ expectEquals(-Integer.MAX_VALUE, $opt$Div(Integer.MAX_VALUE, -1L));
+ expectEquals(2147483648L, $opt$Div(Integer.MIN_VALUE, -1L));
+ expectEquals(-1073741824L, $opt$Div(Integer.MIN_VALUE, 2L));
+
+ expectEquals(-Long.MAX_VALUE, $opt$Div(Long.MAX_VALUE, -1L));
+ expectEquals(Long.MIN_VALUE, $opt$Div(Long.MIN_VALUE, -1L)); // overflow
+
+ expectEquals(11111111111111L, $opt$Div(33333333333333L, 3L));
+ expectEquals(3L, $opt$Div(33333333333333L, 11111111111111L));
+
+ expectEquals(0L, $opt$Div(0L, Long.MAX_VALUE));
+ expectEquals(0L, $opt$Div(0L, Long.MIN_VALUE));
+
+ expectDivisionByZero(0L);
+ expectDivisionByZero(1L);
+ expectDivisionByZero(Long.MAX_VALUE);
+ expectDivisionByZero(Long.MIN_VALUE);
+ }
+
+ private static void divFloat() {
+ expectApproxEquals(1.6666666F, $opt$Div(5F, 3F));
+ expectApproxEquals(0F, $opt$Div(0F, 3F));
+ expectApproxEquals(-0.3333333F, $opt$Div(1F, -3F));
+ expectApproxEquals(4F, $opt$Div(-12F, -3F));
+ expectApproxEquals(0.5, $opt$Div(0.1F, 0.2F));
+ expectApproxEquals(-2.5F, $opt$Div(-0.5F, 0.2F));
+
+ expectEquals(0F, $opt$Div(0F, Float.POSITIVE_INFINITY));
+ expectEquals(0F, $opt$Div(11F, Float.POSITIVE_INFINITY));
+ expectEquals(0F, $opt$Div(0F, Float.NEGATIVE_INFINITY));
+ expectEquals(0F, $opt$Div(11F, Float.NEGATIVE_INFINITY));
+
+ expectNaN($opt$Div(0F, 0F));
+ expectNaN($opt$Div(Float.NaN, 11F));
+ expectNaN($opt$Div(-11F, Float.NaN));
+ expectNaN($opt$Div(Float.NEGATIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Div(Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY));
+ expectNaN($opt$Div(Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Div(Float.POSITIVE_INFINITY, Float.POSITIVE_INFINITY));
+ expectNaN($opt$Div(Float.NaN, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Div(Float.POSITIVE_INFINITY, Float.NaN));
+
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Div(3F, 0F));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Div(-3F, 0F));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Div(Float.MAX_VALUE, Float.MIN_VALUE));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Div(-Float.MAX_VALUE, Float.MIN_VALUE));
+ }
+
+ private static void divDouble() {
+ expectApproxEquals(1.6666666D, $opt$Div(5D, 3D));
+ expectApproxEquals(0D, $opt$Div(0D, 3D));
+ expectApproxEquals(-0.3333333D, $opt$Div(1D, -3D));
+ expectApproxEquals(4D, $opt$Div(-12D, -3D));
+ expectApproxEquals(0.5, $opt$Div(0.1D, 0.2D));
+ expectApproxEquals(-2.5D, $opt$Div(-0.5D, 0.2D));
+
+ expectEquals(0D, $opt$Div(0D, Double.POSITIVE_INFINITY));
+ expectEquals(0D, $opt$Div(11D, Double.POSITIVE_INFINITY));
+ expectEquals(0D, $opt$Div(0D, Double.NEGATIVE_INFINITY));
+ expectEquals(0D, $opt$Div(11D, Double.NEGATIVE_INFINITY));
+
+ expectNaN($opt$Div(0D, 0D));
+ expectNaN($opt$Div(Double.NaN, 11D));
+ expectNaN($opt$Div(-11D, Double.NaN));
+ expectNaN($opt$Div(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Div(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY));
+ expectNaN($opt$Div(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Div(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY));
+ expectNaN($opt$Div(Double.NaN, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Div(Double.POSITIVE_INFINITY, Double.NaN));
+
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Div(3D, 0D));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Div(-3D, 0D));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Div(Double.MAX_VALUE, Double.MIN_VALUE));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Div(-Double.MAX_VALUE, Double.MIN_VALUE));
+ }
+
+ static int $opt$Div(int a, int b) {
+ return a / b;
+ }
+
+ static int $opt$DivZero(int a) {
+ return a / 0;
+ }
+
+ // Division by literals != 0 should not generate checks.
+ static int $opt$DivConst(int a) {
+ return a / 3;
+ }
+
+ static long $opt$DivConst(long a) {
+ return a / 3L;
+ }
+
+ static long $opt$Div(long a, long b) {
+ return a / b;
+ }
+
+ static long $opt$DivZero(long a) {
+ return a / 0L;
+ }
+
+ static float $opt$Div(float a, float b) {
+ return a / b;
+ }
+
+ static double $opt$Div(double a, double b) {
+ return a / b;
+ }
+}
diff --git a/test/418-const-string/expected.txt b/test/418-const-string/expected.txt
new file mode 100644
index 0000000..8254f87
--- /dev/null
+++ b/test/418-const-string/expected.txt
@@ -0,0 +1,2 @@
+Hello World
+Hello World
diff --git a/test/418-const-string/info.txt b/test/418-const-string/info.txt
new file mode 100644
index 0000000..b7a468f
--- /dev/null
+++ b/test/418-const-string/info.txt
@@ -0,0 +1 @@
+Small test case for testing CONST_STRING.
diff --git a/test/418-const-string/src/Main.java b/test/418-const-string/src/Main.java
new file mode 100644
index 0000000..7c1ffec
--- /dev/null
+++ b/test/418-const-string/src/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ // First call: may go in slow path.
+ System.out.println($opt$ReturnHelloWorld());
+ // Second call: no slow path.
+ System.out.println($opt$ReturnHelloWorld());
+ }
+
+ public static String $opt$ReturnHelloWorld() {
+ return "Hello World";
+ }
+}
diff --git a/test/419-long-parameter/expected.txt b/test/419-long-parameter/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/419-long-parameter/expected.txt
diff --git a/test/419-long-parameter/info.txt b/test/419-long-parameter/info.txt
new file mode 100644
index 0000000..5eac977
--- /dev/null
+++ b/test/419-long-parameter/info.txt
@@ -0,0 +1,3 @@
+Regression test for a long parameter passed both on the stack and in a register
+on 32-bit architectures. The move to the hard-float ABI means that the register
+index does not necessarily match the stack index anymore.
diff --git a/test/419-long-parameter/src/Main.java b/test/419-long-parameter/src/Main.java
new file mode 100644
index 0000000..808b7f6
--- /dev/null
+++ b/test/419-long-parameter/src/Main.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ if ($opt$TestCallee(1.0, 2.0, 1L, 2L) != 1L) {
+ throw new Error("Unexpected result");
+ }
+ if ($opt$TestCaller() != 1L) {
+ throw new Error("Unexpected result");
+ }
+ }
+
+ public static long $opt$TestCallee(double a, double b, long c, long d) {
+ return d - c;
+ }
+
+ public static long $opt$TestCaller() {
+ return $opt$TestCallee(1.0, 2.0, 1L, 2L);
+ }
+}
diff --git a/test/420-const-class/expected.txt b/test/420-const-class/expected.txt
new file mode 100644
index 0000000..3213026
--- /dev/null
+++ b/test/420-const-class/expected.txt
@@ -0,0 +1,16 @@
+class Main
+class Main
+class Main$Other
+class Main$Other
+class java.lang.System
+class java.lang.System
+Hello from OtherWithClinit
+42
+class Main$OtherWithClinit
+42
+class Main$OtherWithClinit
+class Main$OtherWithClinit2
+Hello from OtherWithClinit2
+43
+class Main$OtherWithClinit2
+43
diff --git a/test/420-const-class/info.txt b/test/420-const-class/info.txt
new file mode 100644
index 0000000..81cbac7
--- /dev/null
+++ b/test/420-const-class/info.txt
@@ -0,0 +1 @@
+Test for the CONST_CLASS opcode.
diff --git a/test/420-const-class/src/Main.java b/test/420-const-class/src/Main.java
new file mode 100644
index 0000000..44a7436
--- /dev/null
+++ b/test/420-const-class/src/Main.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ static class Other {
+ }
+
+ static class OtherWithClinit {
+ static int a;
+ static {
+ System.out.println("Hello from OtherWithClinit");
+ a = 42;
+ }
+ }
+
+ static class OtherWithClinit2 {
+ static int a;
+ static {
+ System.out.println("Hello from OtherWithClinit2");
+ a = 43;
+ }
+ }
+
+ public static void main(String[] args) {
+ // Call methods twice in case they have a slow path.
+
+ System.out.println($opt$LoadThisClass());
+ System.out.println($opt$LoadThisClass());
+
+ System.out.println($opt$LoadOtherClass());
+ System.out.println($opt$LoadOtherClass());
+
+ System.out.println($opt$LoadSystemClass());
+ System.out.println($opt$LoadSystemClass());
+
+ $opt$ClinitCheckAndLoad();
+ $opt$ClinitCheckAndLoad();
+
+ $opt$LoadAndClinitCheck();
+ $opt$LoadAndClinitCheck();
+ }
+
+ public static Class $opt$LoadThisClass() {
+ return Main.class;
+ }
+
+ public static Class $opt$LoadOtherClass() {
+ return Other.class;
+ }
+
+ public static Class $opt$LoadSystemClass() {
+ return System.class;
+ }
+
+ public static void $opt$ClinitCheckAndLoad() {
+ System.out.println(OtherWithClinit.a);
+ System.out.println(OtherWithClinit.class);
+ }
+
+ public static void $opt$LoadAndClinitCheck() {
+ System.out.println(OtherWithClinit2.class);
+ System.out.println(OtherWithClinit2.a);
+ }
+}
diff --git a/test/421-exceptions/expected.txt b/test/421-exceptions/expected.txt
new file mode 100644
index 0000000..94db350
--- /dev/null
+++ b/test/421-exceptions/expected.txt
@@ -0,0 +1,20 @@
+1
+3
+4
+1
+4
+1
+4
+1
+4
+Caught class java.lang.RuntimeException
+1
+2
+4
+1
+4
+1
+4
+1
+4
+Caught class java.lang.NullPointerException
diff --git a/test/421-exceptions/info.txt b/test/421-exceptions/info.txt
new file mode 100644
index 0000000..bdec67e
--- /dev/null
+++ b/test/421-exceptions/info.txt
@@ -0,0 +1 @@
+Simple test for try/catch/throw.
diff --git a/test/421-exceptions/src/Main.java b/test/421-exceptions/src/Main.java
new file mode 100644
index 0000000..6bf2377
--- /dev/null
+++ b/test/421-exceptions/src/Main.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void $opt$bar() {
+ try {
+ $opt$foo(1);
+ } catch (NullPointerException e) {
+ $opt$foo(2);
+ } catch (RuntimeException e) {
+ $opt$foo(3);
+ } finally {
+ $opt$foo(4);
+ }
+ }
+
+ static int barState;
+ static int fooState;
+
+ public static void main(String[] args) {
+ fooState = 0;
+ $opt$runTest();
+ fooState = 1;
+ $opt$runTest();
+ }
+
+ public static void $opt$runTest() {
+ barState = 1;
+ $opt$bar();
+ barState = 2;
+ $opt$bar();
+ barState = 3;
+ $opt$bar();
+ barState = 4;
+ try {
+ $opt$bar();
+ } catch (RuntimeException e) {
+ System.out.println("Caught " + e.getClass());
+ }
+ }
+
+ public static void $opt$foo(int value) {
+ System.out.println(value);
+ if (value == barState) {
+ if (fooState == 0) {
+ throw new RuntimeException();
+ } else {
+ throw new NullPointerException();
+ }
+ }
+ }
+}
diff --git a/test/421-large-frame/expected.txt b/test/421-large-frame/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/421-large-frame/expected.txt
diff --git a/test/421-large-frame/info.txt b/test/421-large-frame/info.txt
new file mode 100644
index 0000000..d71e7ee
--- /dev/null
+++ b/test/421-large-frame/info.txt
@@ -0,0 +1 @@
+Tests for large stack frames.
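The Main.java added below forces a very large stack frame by declaring a thousand long locals and then chaining additions over them; the body of $opt$LargeFrame() is entirely mechanical. A small generator along the following lines would reproduce it (an illustrative sketch only, not part of the patch; the class name GenLargeFrame is made up):

public class GenLargeFrame {
  public static void main(String[] args) {
    StringBuilder sb = new StringBuilder();
    // Declarations: long l0 = 0L; ... long l999 = 999L;
    for (int i = 0; i < 1000; i++) {
      sb.append("    long l").append(i).append(" = ").append(i).append("L;\n");
    }
    // Chained sums: l1 += l0; ... l999 += l998; so l999 = 0 + 1 + ... + 999 = 499500.
    for (int i = 1; i < 1000; i++) {
      sb.append("    l").append(i).append(" += l").append(i - 1).append(";\n");
    }
    sb.append("    return l999;\n");
    System.out.print(sb);
  }
}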
diff --git a/test/421-large-frame/src/Main.java b/test/421-large-frame/src/Main.java
new file mode 100644
index 0000000..01b89ba
--- /dev/null
+++ b/test/421-large-frame/src/Main.java
@@ -0,0 +1,2034 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void assertEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Sum[i = 0..999](i) = 999 * 1000 / 2 = 499500L.
+ assertEquals(499500L, $opt$LargeFrame());
+ }
+
+ static long $opt$LargeFrame() {
+ long l0 = 0L;
+ long l1 = 1L;
+ long l2 = 2L;
+ long l3 = 3L;
+ long l4 = 4L;
+ long l5 = 5L;
+ long l6 = 6L;
+ long l7 = 7L;
+ long l8 = 8L;
+ long l9 = 9L;
+ long l10 = 10L;
+ long l11 = 11L;
+ long l12 = 12L;
+ long l13 = 13L;
+ long l14 = 14L;
+ long l15 = 15L;
+ long l16 = 16L;
+ long l17 = 17L;
+ long l18 = 18L;
+ long l19 = 19L;
+ long l20 = 20L;
+ long l21 = 21L;
+ long l22 = 22L;
+ long l23 = 23L;
+ long l24 = 24L;
+ long l25 = 25L;
+ long l26 = 26L;
+ long l27 = 27L;
+ long l28 = 28L;
+ long l29 = 29L;
+ long l30 = 30L;
+ long l31 = 31L;
+ long l32 = 32L;
+ long l33 = 33L;
+ long l34 = 34L;
+ long l35 = 35L;
+ long l36 = 36L;
+ long l37 = 37L;
+ long l38 = 38L;
+ long l39 = 39L;
+ long l40 = 40L;
+ long l41 = 41L;
+ long l42 = 42L;
+ long l43 = 43L;
+ long l44 = 44L;
+ long l45 = 45L;
+ long l46 = 46L;
+ long l47 = 47L;
+ long l48 = 48L;
+ long l49 = 49L;
+ long l50 = 50L;
+ long l51 = 51L;
+ long l52 = 52L;
+ long l53 = 53L;
+ long l54 = 54L;
+ long l55 = 55L;
+ long l56 = 56L;
+ long l57 = 57L;
+ long l58 = 58L;
+ long l59 = 59L;
+ long l60 = 60L;
+ long l61 = 61L;
+ long l62 = 62L;
+ long l63 = 63L;
+ long l64 = 64L;
+ long l65 = 65L;
+ long l66 = 66L;
+ long l67 = 67L;
+ long l68 = 68L;
+ long l69 = 69L;
+ long l70 = 70L;
+ long l71 = 71L;
+ long l72 = 72L;
+ long l73 = 73L;
+ long l74 = 74L;
+ long l75 = 75L;
+ long l76 = 76L;
+ long l77 = 77L;
+ long l78 = 78L;
+ long l79 = 79L;
+ long l80 = 80L;
+ long l81 = 81L;
+ long l82 = 82L;
+ long l83 = 83L;
+ long l84 = 84L;
+ long l85 = 85L;
+ long l86 = 86L;
+ long l87 = 87L;
+ long l88 = 88L;
+ long l89 = 89L;
+ long l90 = 90L;
+ long l91 = 91L;
+ long l92 = 92L;
+ long l93 = 93L;
+ long l94 = 94L;
+ long l95 = 95L;
+ long l96 = 96L;
+ long l97 = 97L;
+ long l98 = 98L;
+ long l99 = 99L;
+ long l100 = 100L;
+ long l101 = 101L;
+ long l102 = 102L;
+ long l103 = 103L;
+ long l104 = 104L;
+ long l105 = 105L;
+ long l106 = 106L;
+ long l107 = 107L;
+ long l108 = 108L;
+ long l109 = 109L;
+ long l110 = 110L;
+ long l111 = 111L;
+ long l112 = 112L;
+ long l113 = 113L;
+ long l114 = 114L;
+ long l115 = 115L;
+ long l116 = 116L;
+ long l117 = 117L;
+ long l118 = 118L;
+ long l119 = 119L;
+ long l120 = 120L;
+ long l121 = 121L;
+ long l122 = 122L;
+ long l123 = 123L;
+ long l124 = 124L;
+ long l125 = 125L;
+ long l126 = 126L;
+ long l127 = 127L;
+ long l128 = 128L;
+ long l129 = 129L;
+ long l130 = 130L;
+ long l131 = 131L;
+ long l132 = 132L;
+ long l133 = 133L;
+ long l134 = 134L;
+ long l135 = 135L;
+ long l136 = 136L;
+ long l137 = 137L;
+ long l138 = 138L;
+ long l139 = 139L;
+ long l140 = 140L;
+ long l141 = 141L;
+ long l142 = 142L;
+ long l143 = 143L;
+ long l144 = 144L;
+ long l145 = 145L;
+ long l146 = 146L;
+ long l147 = 147L;
+ long l148 = 148L;
+ long l149 = 149L;
+ long l150 = 150L;
+ long l151 = 151L;
+ long l152 = 152L;
+ long l153 = 153L;
+ long l154 = 154L;
+ long l155 = 155L;
+ long l156 = 156L;
+ long l157 = 157L;
+ long l158 = 158L;
+ long l159 = 159L;
+ long l160 = 160L;
+ long l161 = 161L;
+ long l162 = 162L;
+ long l163 = 163L;
+ long l164 = 164L;
+ long l165 = 165L;
+ long l166 = 166L;
+ long l167 = 167L;
+ long l168 = 168L;
+ long l169 = 169L;
+ long l170 = 170L;
+ long l171 = 171L;
+ long l172 = 172L;
+ long l173 = 173L;
+ long l174 = 174L;
+ long l175 = 175L;
+ long l176 = 176L;
+ long l177 = 177L;
+ long l178 = 178L;
+ long l179 = 179L;
+ long l180 = 180L;
+ long l181 = 181L;
+ long l182 = 182L;
+ long l183 = 183L;
+ long l184 = 184L;
+ long l185 = 185L;
+ long l186 = 186L;
+ long l187 = 187L;
+ long l188 = 188L;
+ long l189 = 189L;
+ long l190 = 190L;
+ long l191 = 191L;
+ long l192 = 192L;
+ long l193 = 193L;
+ long l194 = 194L;
+ long l195 = 195L;
+ long l196 = 196L;
+ long l197 = 197L;
+ long l198 = 198L;
+ long l199 = 199L;
+ long l200 = 200L;
+ long l201 = 201L;
+ long l202 = 202L;
+ long l203 = 203L;
+ long l204 = 204L;
+ long l205 = 205L;
+ long l206 = 206L;
+ long l207 = 207L;
+ long l208 = 208L;
+ long l209 = 209L;
+ long l210 = 210L;
+ long l211 = 211L;
+ long l212 = 212L;
+ long l213 = 213L;
+ long l214 = 214L;
+ long l215 = 215L;
+ long l216 = 216L;
+ long l217 = 217L;
+ long l218 = 218L;
+ long l219 = 219L;
+ long l220 = 220L;
+ long l221 = 221L;
+ long l222 = 222L;
+ long l223 = 223L;
+ long l224 = 224L;
+ long l225 = 225L;
+ long l226 = 226L;
+ long l227 = 227L;
+ long l228 = 228L;
+ long l229 = 229L;
+ long l230 = 230L;
+ long l231 = 231L;
+ long l232 = 232L;
+ long l233 = 233L;
+ long l234 = 234L;
+ long l235 = 235L;
+ long l236 = 236L;
+ long l237 = 237L;
+ long l238 = 238L;
+ long l239 = 239L;
+ long l240 = 240L;
+ long l241 = 241L;
+ long l242 = 242L;
+ long l243 = 243L;
+ long l244 = 244L;
+ long l245 = 245L;
+ long l246 = 246L;
+ long l247 = 247L;
+ long l248 = 248L;
+ long l249 = 249L;
+ long l250 = 250L;
+ long l251 = 251L;
+ long l252 = 252L;
+ long l253 = 253L;
+ long l254 = 254L;
+ long l255 = 255L;
+ long l256 = 256L;
+ long l257 = 257L;
+ long l258 = 258L;
+ long l259 = 259L;
+ long l260 = 260L;
+ long l261 = 261L;
+ long l262 = 262L;
+ long l263 = 263L;
+ long l264 = 264L;
+ long l265 = 265L;
+ long l266 = 266L;
+ long l267 = 267L;
+ long l268 = 268L;
+ long l269 = 269L;
+ long l270 = 270L;
+ long l271 = 271L;
+ long l272 = 272L;
+ long l273 = 273L;
+ long l274 = 274L;
+ long l275 = 275L;
+ long l276 = 276L;
+ long l277 = 277L;
+ long l278 = 278L;
+ long l279 = 279L;
+ long l280 = 280L;
+ long l281 = 281L;
+ long l282 = 282L;
+ long l283 = 283L;
+ long l284 = 284L;
+ long l285 = 285L;
+ long l286 = 286L;
+ long l287 = 287L;
+ long l288 = 288L;
+ long l289 = 289L;
+ long l290 = 290L;
+ long l291 = 291L;
+ long l292 = 292L;
+ long l293 = 293L;
+ long l294 = 294L;
+ long l295 = 295L;
+ long l296 = 296L;
+ long l297 = 297L;
+ long l298 = 298L;
+ long l299 = 299L;
+ long l300 = 300L;
+ long l301 = 301L;
+ long l302 = 302L;
+ long l303 = 303L;
+ long l304 = 304L;
+ long l305 = 305L;
+ long l306 = 306L;
+ long l307 = 307L;
+ long l308 = 308L;
+ long l309 = 309L;
+ long l310 = 310L;
+ long l311 = 311L;
+ long l312 = 312L;
+ long l313 = 313L;
+ long l314 = 314L;
+ long l315 = 315L;
+ long l316 = 316L;
+ long l317 = 317L;
+ long l318 = 318L;
+ long l319 = 319L;
+ long l320 = 320L;
+ long l321 = 321L;
+ long l322 = 322L;
+ long l323 = 323L;
+ long l324 = 324L;
+ long l325 = 325L;
+ long l326 = 326L;
+ long l327 = 327L;
+ long l328 = 328L;
+ long l329 = 329L;
+ long l330 = 330L;
+ long l331 = 331L;
+ long l332 = 332L;
+ long l333 = 333L;
+ long l334 = 334L;
+ long l335 = 335L;
+ long l336 = 336L;
+ long l337 = 337L;
+ long l338 = 338L;
+ long l339 = 339L;
+ long l340 = 340L;
+ long l341 = 341L;
+ long l342 = 342L;
+ long l343 = 343L;
+ long l344 = 344L;
+ long l345 = 345L;
+ long l346 = 346L;
+ long l347 = 347L;
+ long l348 = 348L;
+ long l349 = 349L;
+ long l350 = 350L;
+ long l351 = 351L;
+ long l352 = 352L;
+ long l353 = 353L;
+ long l354 = 354L;
+ long l355 = 355L;
+ long l356 = 356L;
+ long l357 = 357L;
+ long l358 = 358L;
+ long l359 = 359L;
+ long l360 = 360L;
+ long l361 = 361L;
+ long l362 = 362L;
+ long l363 = 363L;
+ long l364 = 364L;
+ long l365 = 365L;
+ long l366 = 366L;
+ long l367 = 367L;
+ long l368 = 368L;
+ long l369 = 369L;
+ long l370 = 370L;
+ long l371 = 371L;
+ long l372 = 372L;
+ long l373 = 373L;
+ long l374 = 374L;
+ long l375 = 375L;
+ long l376 = 376L;
+ long l377 = 377L;
+ long l378 = 378L;
+ long l379 = 379L;
+ long l380 = 380L;
+ long l381 = 381L;
+ long l382 = 382L;
+ long l383 = 383L;
+ long l384 = 384L;
+ long l385 = 385L;
+ long l386 = 386L;
+ long l387 = 387L;
+ long l388 = 388L;
+ long l389 = 389L;
+ long l390 = 390L;
+ long l391 = 391L;
+ long l392 = 392L;
+ long l393 = 393L;
+ long l394 = 394L;
+ long l395 = 395L;
+ long l396 = 396L;
+ long l397 = 397L;
+ long l398 = 398L;
+ long l399 = 399L;
+ long l400 = 400L;
+ long l401 = 401L;
+ long l402 = 402L;
+ long l403 = 403L;
+ long l404 = 404L;
+ long l405 = 405L;
+ long l406 = 406L;
+ long l407 = 407L;
+ long l408 = 408L;
+ long l409 = 409L;
+ long l410 = 410L;
+ long l411 = 411L;
+ long l412 = 412L;
+ long l413 = 413L;
+ long l414 = 414L;
+ long l415 = 415L;
+ long l416 = 416L;
+ long l417 = 417L;
+ long l418 = 418L;
+ long l419 = 419L;
+ long l420 = 420L;
+ long l421 = 421L;
+ long l422 = 422L;
+ long l423 = 423L;
+ long l424 = 424L;
+ long l425 = 425L;
+ long l426 = 426L;
+ long l427 = 427L;
+ long l428 = 428L;
+ long l429 = 429L;
+ long l430 = 430L;
+ long l431 = 431L;
+ long l432 = 432L;
+ long l433 = 433L;
+ long l434 = 434L;
+ long l435 = 435L;
+ long l436 = 436L;
+ long l437 = 437L;
+ long l438 = 438L;
+ long l439 = 439L;
+ long l440 = 440L;
+ long l441 = 441L;
+ long l442 = 442L;
+ long l443 = 443L;
+ long l444 = 444L;
+ long l445 = 445L;
+ long l446 = 446L;
+ long l447 = 447L;
+ long l448 = 448L;
+ long l449 = 449L;
+ long l450 = 450L;
+ long l451 = 451L;
+ long l452 = 452L;
+ long l453 = 453L;
+ long l454 = 454L;
+ long l455 = 455L;
+ long l456 = 456L;
+ long l457 = 457L;
+ long l458 = 458L;
+ long l459 = 459L;
+ long l460 = 460L;
+ long l461 = 461L;
+ long l462 = 462L;
+ long l463 = 463L;
+ long l464 = 464L;
+ long l465 = 465L;
+ long l466 = 466L;
+ long l467 = 467L;
+ long l468 = 468L;
+ long l469 = 469L;
+ long l470 = 470L;
+ long l471 = 471L;
+ long l472 = 472L;
+ long l473 = 473L;
+ long l474 = 474L;
+ long l475 = 475L;
+ long l476 = 476L;
+ long l477 = 477L;
+ long l478 = 478L;
+ long l479 = 479L;
+ long l480 = 480L;
+ long l481 = 481L;
+ long l482 = 482L;
+ long l483 = 483L;
+ long l484 = 484L;
+ long l485 = 485L;
+ long l486 = 486L;
+ long l487 = 487L;
+ long l488 = 488L;
+ long l489 = 489L;
+ long l490 = 490L;
+ long l491 = 491L;
+ long l492 = 492L;
+ long l493 = 493L;
+ long l494 = 494L;
+ long l495 = 495L;
+ long l496 = 496L;
+ long l497 = 497L;
+ long l498 = 498L;
+ long l499 = 499L;
+ long l500 = 500L;
+ long l501 = 501L;
+ long l502 = 502L;
+ long l503 = 503L;
+ long l504 = 504L;
+ long l505 = 505L;
+ long l506 = 506L;
+ long l507 = 507L;
+ long l508 = 508L;
+ long l509 = 509L;
+ long l510 = 510L;
+ long l511 = 511L;
+ long l512 = 512L;
+ long l513 = 513L;
+ long l514 = 514L;
+ long l515 = 515L;
+ long l516 = 516L;
+ long l517 = 517L;
+ long l518 = 518L;
+ long l519 = 519L;
+ long l520 = 520L;
+ long l521 = 521L;
+ long l522 = 522L;
+ long l523 = 523L;
+ long l524 = 524L;
+ long l525 = 525L;
+ long l526 = 526L;
+ long l527 = 527L;
+ long l528 = 528L;
+ long l529 = 529L;
+ long l530 = 530L;
+ long l531 = 531L;
+ long l532 = 532L;
+ long l533 = 533L;
+ long l534 = 534L;
+ long l535 = 535L;
+ long l536 = 536L;
+ long l537 = 537L;
+ long l538 = 538L;
+ long l539 = 539L;
+ long l540 = 540L;
+ long l541 = 541L;
+ long l542 = 542L;
+ long l543 = 543L;
+ long l544 = 544L;
+ long l545 = 545L;
+ long l546 = 546L;
+ long l547 = 547L;
+ long l548 = 548L;
+ long l549 = 549L;
+ long l550 = 550L;
+ long l551 = 551L;
+ long l552 = 552L;
+ long l553 = 553L;
+ long l554 = 554L;
+ long l555 = 555L;
+ long l556 = 556L;
+ long l557 = 557L;
+ long l558 = 558L;
+ long l559 = 559L;
+ long l560 = 560L;
+ long l561 = 561L;
+ long l562 = 562L;
+ long l563 = 563L;
+ long l564 = 564L;
+ long l565 = 565L;
+ long l566 = 566L;
+ long l567 = 567L;
+ long l568 = 568L;
+ long l569 = 569L;
+ long l570 = 570L;
+ long l571 = 571L;
+ long l572 = 572L;
+ long l573 = 573L;
+ long l574 = 574L;
+ long l575 = 575L;
+ long l576 = 576L;
+ long l577 = 577L;
+ long l578 = 578L;
+ long l579 = 579L;
+ long l580 = 580L;
+ long l581 = 581L;
+ long l582 = 582L;
+ long l583 = 583L;
+ long l584 = 584L;
+ long l585 = 585L;
+ long l586 = 586L;
+ long l587 = 587L;
+ long l588 = 588L;
+ long l589 = 589L;
+ long l590 = 590L;
+ long l591 = 591L;
+ long l592 = 592L;
+ long l593 = 593L;
+ long l594 = 594L;
+ long l595 = 595L;
+ long l596 = 596L;
+ long l597 = 597L;
+ long l598 = 598L;
+ long l599 = 599L;
+ long l600 = 600L;
+ long l601 = 601L;
+ long l602 = 602L;
+ long l603 = 603L;
+ long l604 = 604L;
+ long l605 = 605L;
+ long l606 = 606L;
+ long l607 = 607L;
+ long l608 = 608L;
+ long l609 = 609L;
+ long l610 = 610L;
+ long l611 = 611L;
+ long l612 = 612L;
+ long l613 = 613L;
+ long l614 = 614L;
+ long l615 = 615L;
+ long l616 = 616L;
+ long l617 = 617L;
+ long l618 = 618L;
+ long l619 = 619L;
+ long l620 = 620L;
+ long l621 = 621L;
+ long l622 = 622L;
+ long l623 = 623L;
+ long l624 = 624L;
+ long l625 = 625L;
+ long l626 = 626L;
+ long l627 = 627L;
+ long l628 = 628L;
+ long l629 = 629L;
+ long l630 = 630L;
+ long l631 = 631L;
+ long l632 = 632L;
+ long l633 = 633L;
+ long l634 = 634L;
+ long l635 = 635L;
+ long l636 = 636L;
+ long l637 = 637L;
+ long l638 = 638L;
+ long l639 = 639L;
+ long l640 = 640L;
+ long l641 = 641L;
+ long l642 = 642L;
+ long l643 = 643L;
+ long l644 = 644L;
+ long l645 = 645L;
+ long l646 = 646L;
+ long l647 = 647L;
+ long l648 = 648L;
+ long l649 = 649L;
+ long l650 = 650L;
+ long l651 = 651L;
+ long l652 = 652L;
+ long l653 = 653L;
+ long l654 = 654L;
+ long l655 = 655L;
+ long l656 = 656L;
+ long l657 = 657L;
+ long l658 = 658L;
+ long l659 = 659L;
+ long l660 = 660L;
+ long l661 = 661L;
+ long l662 = 662L;
+ long l663 = 663L;
+ long l664 = 664L;
+ long l665 = 665L;
+ long l666 = 666L;
+ long l667 = 667L;
+ long l668 = 668L;
+ long l669 = 669L;
+ long l670 = 670L;
+ long l671 = 671L;
+ long l672 = 672L;
+ long l673 = 673L;
+ long l674 = 674L;
+ long l675 = 675L;
+ long l676 = 676L;
+ long l677 = 677L;
+ long l678 = 678L;
+ long l679 = 679L;
+ long l680 = 680L;
+ long l681 = 681L;
+ long l682 = 682L;
+ long l683 = 683L;
+ long l684 = 684L;
+ long l685 = 685L;
+ long l686 = 686L;
+ long l687 = 687L;
+ long l688 = 688L;
+ long l689 = 689L;
+ long l690 = 690L;
+ long l691 = 691L;
+ long l692 = 692L;
+ long l693 = 693L;
+ long l694 = 694L;
+ long l695 = 695L;
+ long l696 = 696L;
+ long l697 = 697L;
+ long l698 = 698L;
+ long l699 = 699L;
+ long l700 = 700L;
+ long l701 = 701L;
+ long l702 = 702L;
+ long l703 = 703L;
+ long l704 = 704L;
+ long l705 = 705L;
+ long l706 = 706L;
+ long l707 = 707L;
+ long l708 = 708L;
+ long l709 = 709L;
+ long l710 = 710L;
+ long l711 = 711L;
+ long l712 = 712L;
+ long l713 = 713L;
+ long l714 = 714L;
+ long l715 = 715L;
+ long l716 = 716L;
+ long l717 = 717L;
+ long l718 = 718L;
+ long l719 = 719L;
+ long l720 = 720L;
+ long l721 = 721L;
+ long l722 = 722L;
+ long l723 = 723L;
+ long l724 = 724L;
+ long l725 = 725L;
+ long l726 = 726L;
+ long l727 = 727L;
+ long l728 = 728L;
+ long l729 = 729L;
+ long l730 = 730L;
+ long l731 = 731L;
+ long l732 = 732L;
+ long l733 = 733L;
+ long l734 = 734L;
+ long l735 = 735L;
+ long l736 = 736L;
+ long l737 = 737L;
+ long l738 = 738L;
+ long l739 = 739L;
+ long l740 = 740L;
+ long l741 = 741L;
+ long l742 = 742L;
+ long l743 = 743L;
+ long l744 = 744L;
+ long l745 = 745L;
+ long l746 = 746L;
+ long l747 = 747L;
+ long l748 = 748L;
+ long l749 = 749L;
+ long l750 = 750L;
+ long l751 = 751L;
+ long l752 = 752L;
+ long l753 = 753L;
+ long l754 = 754L;
+ long l755 = 755L;
+ long l756 = 756L;
+ long l757 = 757L;
+ long l758 = 758L;
+ long l759 = 759L;
+ long l760 = 760L;
+ long l761 = 761L;
+ long l762 = 762L;
+ long l763 = 763L;
+ long l764 = 764L;
+ long l765 = 765L;
+ long l766 = 766L;
+ long l767 = 767L;
+ long l768 = 768L;
+ long l769 = 769L;
+ long l770 = 770L;
+ long l771 = 771L;
+ long l772 = 772L;
+ long l773 = 773L;
+ long l774 = 774L;
+ long l775 = 775L;
+ long l776 = 776L;
+ long l777 = 777L;
+ long l778 = 778L;
+ long l779 = 779L;
+ long l780 = 780L;
+ long l781 = 781L;
+ long l782 = 782L;
+ long l783 = 783L;
+ long l784 = 784L;
+ long l785 = 785L;
+ long l786 = 786L;
+ long l787 = 787L;
+ long l788 = 788L;
+ long l789 = 789L;
+ long l790 = 790L;
+ long l791 = 791L;
+ long l792 = 792L;
+ long l793 = 793L;
+ long l794 = 794L;
+ long l795 = 795L;
+ long l796 = 796L;
+ long l797 = 797L;
+ long l798 = 798L;
+ long l799 = 799L;
+ long l800 = 800L;
+ long l801 = 801L;
+ long l802 = 802L;
+ long l803 = 803L;
+ long l804 = 804L;
+ long l805 = 805L;
+ long l806 = 806L;
+ long l807 = 807L;
+ long l808 = 808L;
+ long l809 = 809L;
+ long l810 = 810L;
+ long l811 = 811L;
+ long l812 = 812L;
+ long l813 = 813L;
+ long l814 = 814L;
+ long l815 = 815L;
+ long l816 = 816L;
+ long l817 = 817L;
+ long l818 = 818L;
+ long l819 = 819L;
+ long l820 = 820L;
+ long l821 = 821L;
+ long l822 = 822L;
+ long l823 = 823L;
+ long l824 = 824L;
+ long l825 = 825L;
+ long l826 = 826L;
+ long l827 = 827L;
+ long l828 = 828L;
+ long l829 = 829L;
+ long l830 = 830L;
+ long l831 = 831L;
+ long l832 = 832L;
+ long l833 = 833L;
+ long l834 = 834L;
+ long l835 = 835L;
+ long l836 = 836L;
+ long l837 = 837L;
+ long l838 = 838L;
+ long l839 = 839L;
+ long l840 = 840L;
+ long l841 = 841L;
+ long l842 = 842L;
+ long l843 = 843L;
+ long l844 = 844L;
+ long l845 = 845L;
+ long l846 = 846L;
+ long l847 = 847L;
+ long l848 = 848L;
+ long l849 = 849L;
+ long l850 = 850L;
+ long l851 = 851L;
+ long l852 = 852L;
+ long l853 = 853L;
+ long l854 = 854L;
+ long l855 = 855L;
+ long l856 = 856L;
+ long l857 = 857L;
+ long l858 = 858L;
+ long l859 = 859L;
+ long l860 = 860L;
+ long l861 = 861L;
+ long l862 = 862L;
+ long l863 = 863L;
+ long l864 = 864L;
+ long l865 = 865L;
+ long l866 = 866L;
+ long l867 = 867L;
+ long l868 = 868L;
+ long l869 = 869L;
+ long l870 = 870L;
+ long l871 = 871L;
+ long l872 = 872L;
+ long l873 = 873L;
+ long l874 = 874L;
+ long l875 = 875L;
+ long l876 = 876L;
+ long l877 = 877L;
+ long l878 = 878L;
+ long l879 = 879L;
+ long l880 = 880L;
+ long l881 = 881L;
+ long l882 = 882L;
+ long l883 = 883L;
+ long l884 = 884L;
+ long l885 = 885L;
+ long l886 = 886L;
+ long l887 = 887L;
+ long l888 = 888L;
+ long l889 = 889L;
+ long l890 = 890L;
+ long l891 = 891L;
+ long l892 = 892L;
+ long l893 = 893L;
+ long l894 = 894L;
+ long l895 = 895L;
+ long l896 = 896L;
+ long l897 = 897L;
+ long l898 = 898L;
+ long l899 = 899L;
+ long l900 = 900L;
+ long l901 = 901L;
+ long l902 = 902L;
+ long l903 = 903L;
+ long l904 = 904L;
+ long l905 = 905L;
+ long l906 = 906L;
+ long l907 = 907L;
+ long l908 = 908L;
+ long l909 = 909L;
+ long l910 = 910L;
+ long l911 = 911L;
+ long l912 = 912L;
+ long l913 = 913L;
+ long l914 = 914L;
+ long l915 = 915L;
+ long l916 = 916L;
+ long l917 = 917L;
+ long l918 = 918L;
+ long l919 = 919L;
+ long l920 = 920L;
+ long l921 = 921L;
+ long l922 = 922L;
+ long l923 = 923L;
+ long l924 = 924L;
+ long l925 = 925L;
+ long l926 = 926L;
+ long l927 = 927L;
+ long l928 = 928L;
+ long l929 = 929L;
+ long l930 = 930L;
+ long l931 = 931L;
+ long l932 = 932L;
+ long l933 = 933L;
+ long l934 = 934L;
+ long l935 = 935L;
+ long l936 = 936L;
+ long l937 = 937L;
+ long l938 = 938L;
+ long l939 = 939L;
+ long l940 = 940L;
+ long l941 = 941L;
+ long l942 = 942L;
+ long l943 = 943L;
+ long l944 = 944L;
+ long l945 = 945L;
+ long l946 = 946L;
+ long l947 = 947L;
+ long l948 = 948L;
+ long l949 = 949L;
+ long l950 = 950L;
+ long l951 = 951L;
+ long l952 = 952L;
+ long l953 = 953L;
+ long l954 = 954L;
+ long l955 = 955L;
+ long l956 = 956L;
+ long l957 = 957L;
+ long l958 = 958L;
+ long l959 = 959L;
+ long l960 = 960L;
+ long l961 = 961L;
+ long l962 = 962L;
+ long l963 = 963L;
+ long l964 = 964L;
+ long l965 = 965L;
+ long l966 = 966L;
+ long l967 = 967L;
+ long l968 = 968L;
+ long l969 = 969L;
+ long l970 = 970L;
+ long l971 = 971L;
+ long l972 = 972L;
+ long l973 = 973L;
+ long l974 = 974L;
+ long l975 = 975L;
+ long l976 = 976L;
+ long l977 = 977L;
+ long l978 = 978L;
+ long l979 = 979L;
+ long l980 = 980L;
+ long l981 = 981L;
+ long l982 = 982L;
+ long l983 = 983L;
+ long l984 = 984L;
+ long l985 = 985L;
+ long l986 = 986L;
+ long l987 = 987L;
+ long l988 = 988L;
+ long l989 = 989L;
+ long l990 = 990L;
+ long l991 = 991L;
+ long l992 = 992L;
+ long l993 = 993L;
+ long l994 = 994L;
+ long l995 = 995L;
+ long l996 = 996L;
+ long l997 = 997L;
+ long l998 = 998L;
+ long l999 = 999L;
+ l1 += l0;
+ l2 += l1;
+ l3 += l2;
+ l4 += l3;
+ l5 += l4;
+ l6 += l5;
+ l7 += l6;
+ l8 += l7;
+ l9 += l8;
+ l10 += l9;
+ l11 += l10;
+ l12 += l11;
+ l13 += l12;
+ l14 += l13;
+ l15 += l14;
+ l16 += l15;
+ l17 += l16;
+ l18 += l17;
+ l19 += l18;
+ l20 += l19;
+ l21 += l20;
+ l22 += l21;
+ l23 += l22;
+ l24 += l23;
+ l25 += l24;
+ l26 += l25;
+ l27 += l26;
+ l28 += l27;
+ l29 += l28;
+ l30 += l29;
+ l31 += l30;
+ l32 += l31;
+ l33 += l32;
+ l34 += l33;
+ l35 += l34;
+ l36 += l35;
+ l37 += l36;
+ l38 += l37;
+ l39 += l38;
+ l40 += l39;
+ l41 += l40;
+ l42 += l41;
+ l43 += l42;
+ l44 += l43;
+ l45 += l44;
+ l46 += l45;
+ l47 += l46;
+ l48 += l47;
+ l49 += l48;
+ l50 += l49;
+ l51 += l50;
+ l52 += l51;
+ l53 += l52;
+ l54 += l53;
+ l55 += l54;
+ l56 += l55;
+ l57 += l56;
+ l58 += l57;
+ l59 += l58;
+ l60 += l59;
+ l61 += l60;
+ l62 += l61;
+ l63 += l62;
+ l64 += l63;
+ l65 += l64;
+ l66 += l65;
+ l67 += l66;
+ l68 += l67;
+ l69 += l68;
+ l70 += l69;
+ l71 += l70;
+ l72 += l71;
+ l73 += l72;
+ l74 += l73;
+ l75 += l74;
+ l76 += l75;
+ l77 += l76;
+ l78 += l77;
+ l79 += l78;
+ l80 += l79;
+ l81 += l80;
+ l82 += l81;
+ l83 += l82;
+ l84 += l83;
+ l85 += l84;
+ l86 += l85;
+ l87 += l86;
+ l88 += l87;
+ l89 += l88;
+ l90 += l89;
+ l91 += l90;
+ l92 += l91;
+ l93 += l92;
+ l94 += l93;
+ l95 += l94;
+ l96 += l95;
+ l97 += l96;
+ l98 += l97;
+ l99 += l98;
+ l100 += l99;
+ l101 += l100;
+ l102 += l101;
+ l103 += l102;
+ l104 += l103;
+ l105 += l104;
+ l106 += l105;
+ l107 += l106;
+ l108 += l107;
+ l109 += l108;
+ l110 += l109;
+ l111 += l110;
+ l112 += l111;
+ l113 += l112;
+ l114 += l113;
+ l115 += l114;
+ l116 += l115;
+ l117 += l116;
+ l118 += l117;
+ l119 += l118;
+ l120 += l119;
+ l121 += l120;
+ l122 += l121;
+ l123 += l122;
+ l124 += l123;
+ l125 += l124;
+ l126 += l125;
+ l127 += l126;
+ l128 += l127;
+ l129 += l128;
+ l130 += l129;
+ l131 += l130;
+ l132 += l131;
+ l133 += l132;
+ l134 += l133;
+ l135 += l134;
+ l136 += l135;
+ l137 += l136;
+ l138 += l137;
+ l139 += l138;
+ l140 += l139;
+ l141 += l140;
+ l142 += l141;
+ l143 += l142;
+ l144 += l143;
+ l145 += l144;
+ l146 += l145;
+ l147 += l146;
+ l148 += l147;
+ l149 += l148;
+ l150 += l149;
+ l151 += l150;
+ l152 += l151;
+ l153 += l152;
+ l154 += l153;
+ l155 += l154;
+ l156 += l155;
+ l157 += l156;
+ l158 += l157;
+ l159 += l158;
+ l160 += l159;
+ l161 += l160;
+ l162 += l161;
+ l163 += l162;
+ l164 += l163;
+ l165 += l164;
+ l166 += l165;
+ l167 += l166;
+ l168 += l167;
+ l169 += l168;
+ l170 += l169;
+ l171 += l170;
+ l172 += l171;
+ l173 += l172;
+ l174 += l173;
+ l175 += l174;
+ l176 += l175;
+ l177 += l176;
+ l178 += l177;
+ l179 += l178;
+ l180 += l179;
+ l181 += l180;
+ l182 += l181;
+ l183 += l182;
+ l184 += l183;
+ l185 += l184;
+ l186 += l185;
+ l187 += l186;
+ l188 += l187;
+ l189 += l188;
+ l190 += l189;
+ l191 += l190;
+ l192 += l191;
+ l193 += l192;
+ l194 += l193;
+ l195 += l194;
+ l196 += l195;
+ l197 += l196;
+ l198 += l197;
+ l199 += l198;
+ l200 += l199;
+ l201 += l200;
+ l202 += l201;
+ l203 += l202;
+ l204 += l203;
+ l205 += l204;
+ l206 += l205;
+ l207 += l206;
+ l208 += l207;
+ l209 += l208;
+ l210 += l209;
+ l211 += l210;
+ l212 += l211;
+ l213 += l212;
+ l214 += l213;
+ l215 += l214;
+ l216 += l215;
+ l217 += l216;
+ l218 += l217;
+ l219 += l218;
+ l220 += l219;
+ l221 += l220;
+ l222 += l221;
+ l223 += l222;
+ l224 += l223;
+ l225 += l224;
+ l226 += l225;
+ l227 += l226;
+ l228 += l227;
+ l229 += l228;
+ l230 += l229;
+ l231 += l230;
+ l232 += l231;
+ l233 += l232;
+ l234 += l233;
+ l235 += l234;
+ l236 += l235;
+ l237 += l236;
+ l238 += l237;
+ l239 += l238;
+ l240 += l239;
+ l241 += l240;
+ l242 += l241;
+ l243 += l242;
+ l244 += l243;
+ l245 += l244;
+ l246 += l245;
+ l247 += l246;
+ l248 += l247;
+ l249 += l248;
+ l250 += l249;
+ l251 += l250;
+ l252 += l251;
+ l253 += l252;
+ l254 += l253;
+ l255 += l254;
+ l256 += l255;
+ l257 += l256;
+ l258 += l257;
+ l259 += l258;
+ l260 += l259;
+ l261 += l260;
+ l262 += l261;
+ l263 += l262;
+ l264 += l263;
+ l265 += l264;
+ l266 += l265;
+ l267 += l266;
+ l268 += l267;
+ l269 += l268;
+ l270 += l269;
+ l271 += l270;
+ l272 += l271;
+ l273 += l272;
+ l274 += l273;
+ l275 += l274;
+ l276 += l275;
+ l277 += l276;
+ l278 += l277;
+ l279 += l278;
+ l280 += l279;
+ l281 += l280;
+ l282 += l281;
+ l283 += l282;
+ l284 += l283;
+ l285 += l284;
+ l286 += l285;
+ l287 += l286;
+ l288 += l287;
+ l289 += l288;
+ l290 += l289;
+ l291 += l290;
+ l292 += l291;
+ l293 += l292;
+ l294 += l293;
+ l295 += l294;
+ l296 += l295;
+ l297 += l296;
+ l298 += l297;
+ l299 += l298;
+ l300 += l299;
+ l301 += l300;
+ l302 += l301;
+ l303 += l302;
+ l304 += l303;
+ l305 += l304;
+ l306 += l305;
+ l307 += l306;
+ l308 += l307;
+ l309 += l308;
+ l310 += l309;
+ l311 += l310;
+ l312 += l311;
+ l313 += l312;
+ l314 += l313;
+ l315 += l314;
+ l316 += l315;
+ l317 += l316;
+ l318 += l317;
+ l319 += l318;
+ l320 += l319;
+ l321 += l320;
+ l322 += l321;
+ l323 += l322;
+ l324 += l323;
+ l325 += l324;
+ l326 += l325;
+ l327 += l326;
+ l328 += l327;
+ l329 += l328;
+ l330 += l329;
+ l331 += l330;
+ l332 += l331;
+ l333 += l332;
+ l334 += l333;
+ l335 += l334;
+ l336 += l335;
+ l337 += l336;
+ l338 += l337;
+ l339 += l338;
+ l340 += l339;
+ l341 += l340;
+ l342 += l341;
+ l343 += l342;
+ l344 += l343;
+ l345 += l344;
+ l346 += l345;
+ l347 += l346;
+ l348 += l347;
+ l349 += l348;
+ l350 += l349;
+ l351 += l350;
+ l352 += l351;
+ l353 += l352;
+ l354 += l353;
+ l355 += l354;
+ l356 += l355;
+ l357 += l356;
+ l358 += l357;
+ l359 += l358;
+ l360 += l359;
+ l361 += l360;
+ l362 += l361;
+ l363 += l362;
+ l364 += l363;
+ l365 += l364;
+ l366 += l365;
+ l367 += l366;
+ l368 += l367;
+ l369 += l368;
+ l370 += l369;
+ l371 += l370;
+ l372 += l371;
+ l373 += l372;
+ l374 += l373;
+ l375 += l374;
+ l376 += l375;
+ l377 += l376;
+ l378 += l377;
+ l379 += l378;
+ l380 += l379;
+ l381 += l380;
+ l382 += l381;
+ l383 += l382;
+ l384 += l383;
+ l385 += l384;
+ l386 += l385;
+ l387 += l386;
+ l388 += l387;
+ l389 += l388;
+ l390 += l389;
+ l391 += l390;
+ l392 += l391;
+ l393 += l392;
+ l394 += l393;
+ l395 += l394;
+ l396 += l395;
+ l397 += l396;
+ l398 += l397;
+ l399 += l398;
+ l400 += l399;
+ l401 += l400;
+ l402 += l401;
+ l403 += l402;
+ l404 += l403;
+ l405 += l404;
+ l406 += l405;
+ l407 += l406;
+ l408 += l407;
+ l409 += l408;
+ l410 += l409;
+ l411 += l410;
+ l412 += l411;
+ l413 += l412;
+ l414 += l413;
+ l415 += l414;
+ l416 += l415;
+ l417 += l416;
+ l418 += l417;
+ l419 += l418;
+ l420 += l419;
+ l421 += l420;
+ l422 += l421;
+ l423 += l422;
+ l424 += l423;
+ l425 += l424;
+ l426 += l425;
+ l427 += l426;
+ l428 += l427;
+ l429 += l428;
+ l430 += l429;
+ l431 += l430;
+ l432 += l431;
+ l433 += l432;
+ l434 += l433;
+ l435 += l434;
+ l436 += l435;
+ l437 += l436;
+ l438 += l437;
+ l439 += l438;
+ l440 += l439;
+ l441 += l440;
+ l442 += l441;
+ l443 += l442;
+ l444 += l443;
+ l445 += l444;
+ l446 += l445;
+ l447 += l446;
+ l448 += l447;
+ l449 += l448;
+ l450 += l449;
+ l451 += l450;
+ l452 += l451;
+ l453 += l452;
+ l454 += l453;
+ l455 += l454;
+ l456 += l455;
+ l457 += l456;
+ l458 += l457;
+ l459 += l458;
+ l460 += l459;
+ l461 += l460;
+ l462 += l461;
+ l463 += l462;
+ l464 += l463;
+ l465 += l464;
+ l466 += l465;
+ l467 += l466;
+ l468 += l467;
+ l469 += l468;
+ l470 += l469;
+ l471 += l470;
+ l472 += l471;
+ l473 += l472;
+ l474 += l473;
+ l475 += l474;
+ l476 += l475;
+ l477 += l476;
+ l478 += l477;
+ l479 += l478;
+ l480 += l479;
+ l481 += l480;
+ l482 += l481;
+ l483 += l482;
+ l484 += l483;
+ l485 += l484;
+ l486 += l485;
+ l487 += l486;
+ l488 += l487;
+ l489 += l488;
+ l490 += l489;
+ l491 += l490;
+ l492 += l491;
+ l493 += l492;
+ l494 += l493;
+ l495 += l494;
+ l496 += l495;
+ l497 += l496;
+ l498 += l497;
+ l499 += l498;
+ l500 += l499;
+ l501 += l500;
+ l502 += l501;
+ l503 += l502;
+ l504 += l503;
+ l505 += l504;
+ l506 += l505;
+ l507 += l506;
+ l508 += l507;
+ l509 += l508;
+ l510 += l509;
+ l511 += l510;
+ l512 += l511;
+ l513 += l512;
+ l514 += l513;
+ l515 += l514;
+ l516 += l515;
+ l517 += l516;
+ l518 += l517;
+ l519 += l518;
+ l520 += l519;
+ l521 += l520;
+ l522 += l521;
+ l523 += l522;
+ l524 += l523;
+ l525 += l524;
+ l526 += l525;
+ l527 += l526;
+ l528 += l527;
+ l529 += l528;
+ l530 += l529;
+ l531 += l530;
+ l532 += l531;
+ l533 += l532;
+ l534 += l533;
+ l535 += l534;
+ l536 += l535;
+ l537 += l536;
+ l538 += l537;
+ l539 += l538;
+ l540 += l539;
+ l541 += l540;
+ l542 += l541;
+ l543 += l542;
+ l544 += l543;
+ l545 += l544;
+ l546 += l545;
+ l547 += l546;
+ l548 += l547;
+ l549 += l548;
+ l550 += l549;
+ l551 += l550;
+ l552 += l551;
+ l553 += l552;
+ l554 += l553;
+ l555 += l554;
+ l556 += l555;
+ l557 += l556;
+ l558 += l557;
+ l559 += l558;
+ l560 += l559;
+ l561 += l560;
+ l562 += l561;
+ l563 += l562;
+ l564 += l563;
+ l565 += l564;
+ l566 += l565;
+ l567 += l566;
+ l568 += l567;
+ l569 += l568;
+ l570 += l569;
+ l571 += l570;
+ l572 += l571;
+ l573 += l572;
+ l574 += l573;
+ l575 += l574;
+ l576 += l575;
+ l577 += l576;
+ l578 += l577;
+ l579 += l578;
+ l580 += l579;
+ l581 += l580;
+ l582 += l581;
+ l583 += l582;
+ l584 += l583;
+ l585 += l584;
+ l586 += l585;
+ l587 += l586;
+ l588 += l587;
+ l589 += l588;
+ l590 += l589;
+ l591 += l590;
+ l592 += l591;
+ l593 += l592;
+ l594 += l593;
+ l595 += l594;
+ l596 += l595;
+ l597 += l596;
+ l598 += l597;
+ l599 += l598;
+ l600 += l599;
+ l601 += l600;
+ l602 += l601;
+ l603 += l602;
+ l604 += l603;
+ l605 += l604;
+ l606 += l605;
+ l607 += l606;
+ l608 += l607;
+ l609 += l608;
+ l610 += l609;
+ l611 += l610;
+ l612 += l611;
+ l613 += l612;
+ l614 += l613;
+ l615 += l614;
+ l616 += l615;
+ l617 += l616;
+ l618 += l617;
+ l619 += l618;
+ l620 += l619;
+ l621 += l620;
+ l622 += l621;
+ l623 += l622;
+ l624 += l623;
+ l625 += l624;
+ l626 += l625;
+ l627 += l626;
+ l628 += l627;
+ l629 += l628;
+ l630 += l629;
+ l631 += l630;
+ l632 += l631;
+ l633 += l632;
+ l634 += l633;
+ l635 += l634;
+ l636 += l635;
+ l637 += l636;
+ l638 += l637;
+ l639 += l638;
+ l640 += l639;
+ l641 += l640;
+ l642 += l641;
+ l643 += l642;
+ l644 += l643;
+ l645 += l644;
+ l646 += l645;
+ l647 += l646;
+ l648 += l647;
+ l649 += l648;
+ l650 += l649;
+ l651 += l650;
+ l652 += l651;
+ l653 += l652;
+ l654 += l653;
+ l655 += l654;
+ l656 += l655;
+ l657 += l656;
+ l658 += l657;
+ l659 += l658;
+ l660 += l659;
+ l661 += l660;
+ l662 += l661;
+ l663 += l662;
+ l664 += l663;
+ l665 += l664;
+ l666 += l665;
+ l667 += l666;
+ l668 += l667;
+ l669 += l668;
+ l670 += l669;
+ l671 += l670;
+ l672 += l671;
+ l673 += l672;
+ l674 += l673;
+ l675 += l674;
+ l676 += l675;
+ l677 += l676;
+ l678 += l677;
+ l679 += l678;
+ l680 += l679;
+ l681 += l680;
+ l682 += l681;
+ l683 += l682;
+ l684 += l683;
+ l685 += l684;
+ l686 += l685;
+ l687 += l686;
+ l688 += l687;
+ l689 += l688;
+ l690 += l689;
+ l691 += l690;
+ l692 += l691;
+ l693 += l692;
+ l694 += l693;
+ l695 += l694;
+ l696 += l695;
+ l697 += l696;
+ l698 += l697;
+ l699 += l698;
+ l700 += l699;
+ l701 += l700;
+ l702 += l701;
+ l703 += l702;
+ l704 += l703;
+ l705 += l704;
+ l706 += l705;
+ l707 += l706;
+ l708 += l707;
+ l709 += l708;
+ l710 += l709;
+ l711 += l710;
+ l712 += l711;
+ l713 += l712;
+ l714 += l713;
+ l715 += l714;
+ l716 += l715;
+ l717 += l716;
+ l718 += l717;
+ l719 += l718;
+ l720 += l719;
+ l721 += l720;
+ l722 += l721;
+ l723 += l722;
+ l724 += l723;
+ l725 += l724;
+ l726 += l725;
+ l727 += l726;
+ l728 += l727;
+ l729 += l728;
+ l730 += l729;
+ l731 += l730;
+ l732 += l731;
+ l733 += l732;
+ l734 += l733;
+ l735 += l734;
+ l736 += l735;
+ l737 += l736;
+ l738 += l737;
+ l739 += l738;
+ l740 += l739;
+ l741 += l740;
+ l742 += l741;
+ l743 += l742;
+ l744 += l743;
+ l745 += l744;
+ l746 += l745;
+ l747 += l746;
+ l748 += l747;
+ l749 += l748;
+ l750 += l749;
+ l751 += l750;
+ l752 += l751;
+ l753 += l752;
+ l754 += l753;
+ l755 += l754;
+ l756 += l755;
+ l757 += l756;
+ l758 += l757;
+ l759 += l758;
+ l760 += l759;
+ l761 += l760;
+ l762 += l761;
+ l763 += l762;
+ l764 += l763;
+ l765 += l764;
+ l766 += l765;
+ l767 += l766;
+ l768 += l767;
+ l769 += l768;
+ l770 += l769;
+ l771 += l770;
+ l772 += l771;
+ l773 += l772;
+ l774 += l773;
+ l775 += l774;
+ l776 += l775;
+ l777 += l776;
+ l778 += l777;
+ l779 += l778;
+ l780 += l779;
+ l781 += l780;
+ l782 += l781;
+ l783 += l782;
+ l784 += l783;
+ l785 += l784;
+ l786 += l785;
+ l787 += l786;
+ l788 += l787;
+ l789 += l788;
+ l790 += l789;
+ l791 += l790;
+ l792 += l791;
+ l793 += l792;
+ l794 += l793;
+ l795 += l794;
+ l796 += l795;
+ l797 += l796;
+ l798 += l797;
+ l799 += l798;
+ l800 += l799;
+ l801 += l800;
+ l802 += l801;
+ l803 += l802;
+ l804 += l803;
+ l805 += l804;
+ l806 += l805;
+ l807 += l806;
+ l808 += l807;
+ l809 += l808;
+ l810 += l809;
+ l811 += l810;
+ l812 += l811;
+ l813 += l812;
+ l814 += l813;
+ l815 += l814;
+ l816 += l815;
+ l817 += l816;
+ l818 += l817;
+ l819 += l818;
+ l820 += l819;
+ l821 += l820;
+ l822 += l821;
+ l823 += l822;
+ l824 += l823;
+ l825 += l824;
+ l826 += l825;
+ l827 += l826;
+ l828 += l827;
+ l829 += l828;
+ l830 += l829;
+ l831 += l830;
+ l832 += l831;
+ l833 += l832;
+ l834 += l833;
+ l835 += l834;
+ l836 += l835;
+ l837 += l836;
+ l838 += l837;
+ l839 += l838;
+ l840 += l839;
+ l841 += l840;
+ l842 += l841;
+ l843 += l842;
+ l844 += l843;
+ l845 += l844;
+ l846 += l845;
+ l847 += l846;
+ l848 += l847;
+ l849 += l848;
+ l850 += l849;
+ l851 += l850;
+ l852 += l851;
+ l853 += l852;
+ l854 += l853;
+ l855 += l854;
+ l856 += l855;
+ l857 += l856;
+ l858 += l857;
+ l859 += l858;
+ l860 += l859;
+ l861 += l860;
+ l862 += l861;
+ l863 += l862;
+ l864 += l863;
+ l865 += l864;
+ l866 += l865;
+ l867 += l866;
+ l868 += l867;
+ l869 += l868;
+ l870 += l869;
+ l871 += l870;
+ l872 += l871;
+ l873 += l872;
+ l874 += l873;
+ l875 += l874;
+ l876 += l875;
+ l877 += l876;
+ l878 += l877;
+ l879 += l878;
+ l880 += l879;
+ l881 += l880;
+ l882 += l881;
+ l883 += l882;
+ l884 += l883;
+ l885 += l884;
+ l886 += l885;
+ l887 += l886;
+ l888 += l887;
+ l889 += l888;
+ l890 += l889;
+ l891 += l890;
+ l892 += l891;
+ l893 += l892;
+ l894 += l893;
+ l895 += l894;
+ l896 += l895;
+ l897 += l896;
+ l898 += l897;
+ l899 += l898;
+ l900 += l899;
+ l901 += l900;
+ l902 += l901;
+ l903 += l902;
+ l904 += l903;
+ l905 += l904;
+ l906 += l905;
+ l907 += l906;
+ l908 += l907;
+ l909 += l908;
+ l910 += l909;
+ l911 += l910;
+ l912 += l911;
+ l913 += l912;
+ l914 += l913;
+ l915 += l914;
+ l916 += l915;
+ l917 += l916;
+ l918 += l917;
+ l919 += l918;
+ l920 += l919;
+ l921 += l920;
+ l922 += l921;
+ l923 += l922;
+ l924 += l923;
+ l925 += l924;
+ l926 += l925;
+ l927 += l926;
+ l928 += l927;
+ l929 += l928;
+ l930 += l929;
+ l931 += l930;
+ l932 += l931;
+ l933 += l932;
+ l934 += l933;
+ l935 += l934;
+ l936 += l935;
+ l937 += l936;
+ l938 += l937;
+ l939 += l938;
+ l940 += l939;
+ l941 += l940;
+ l942 += l941;
+ l943 += l942;
+ l944 += l943;
+ l945 += l944;
+ l946 += l945;
+ l947 += l946;
+ l948 += l947;
+ l949 += l948;
+ l950 += l949;
+ l951 += l950;
+ l952 += l951;
+ l953 += l952;
+ l954 += l953;
+ l955 += l954;
+ l956 += l955;
+ l957 += l956;
+ l958 += l957;
+ l959 += l958;
+ l960 += l959;
+ l961 += l960;
+ l962 += l961;
+ l963 += l962;
+ l964 += l963;
+ l965 += l964;
+ l966 += l965;
+ l967 += l966;
+ l968 += l967;
+ l969 += l968;
+ l970 += l969;
+ l971 += l970;
+ l972 += l971;
+ l973 += l972;
+ l974 += l973;
+ l975 += l974;
+ l976 += l975;
+ l977 += l976;
+ l978 += l977;
+ l979 += l978;
+ l980 += l979;
+ l981 += l980;
+ l982 += l981;
+ l983 += l982;
+ l984 += l983;
+ l985 += l984;
+ l986 += l985;
+ l987 += l986;
+ l988 += l987;
+ l989 += l988;
+ l990 += l989;
+ l991 += l990;
+ l992 += l991;
+ l993 += l992;
+ l994 += l993;
+ l995 += l994;
+ l996 += l995;
+ l997 += l996;
+ l998 += l997;
+ l999 += l998;
+ return l999;
+ }
+}
diff --git a/test/422-instanceof/expected.txt b/test/422-instanceof/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/422-instanceof/expected.txt
diff --git a/test/422-instanceof/info.txt b/test/422-instanceof/info.txt
new file mode 100644
index 0000000..b2f7ff1
--- /dev/null
+++ b/test/422-instanceof/info.txt
@@ -0,0 +1 @@
+Tests for the instanceof bytecode.
diff --git a/test/422-instanceof/src/Main.java b/test/422-instanceof/src/Main.java
new file mode 100644
index 0000000..307c987
--- /dev/null
+++ b/test/422-instanceof/src/Main.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static Object a;
+
+ public static void assertTrue(boolean value) {
+ if (!value) {
+ throw new Error("Wrong result");
+ }
+ }
+
+ public static void assertFalse(boolean value) {
+ if (value) {
+ throw new Error("Wrong result");
+ }
+ }
+
+ public static boolean $opt$InstanceOfMain() {
+ return a instanceof Main;
+ }
+
+ public static boolean $opt$InstanceOfFinalClass() {
+ return a instanceof FinalClass;
+ }
+
+ public static void main(String[] args) {
+ $opt$TestMain();
+ $opt$TestFinalClass();
+ }
+
+ public static void $opt$TestMain() {
+ a = new Main();
+ assertTrue($opt$InstanceOfMain());
+ a = null;
+ assertFalse($opt$InstanceOfMain());
+ a = new MainChild();
+ assertTrue($opt$InstanceOfMain());
+ a = new Object();
+ assertFalse($opt$InstanceOfMain());
+ }
+
+ public static void $opt$TestFinalClass() {
+ a = new FinalClass();
+ assertTrue($opt$InstanceOfFinalClass());
+ a = null;
+ assertFalse($opt$InstanceOfFinalClass());
+ a = new Main();
+ assertFalse($opt$InstanceOfFinalClass());
+ a = new Object();
+ assertFalse($opt$InstanceOfFinalClass());
+ }
+
+ static class MainChild extends Main {}
+
+ static final class FinalClass {}
+}
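Editorial sketch (not part of the patch): the test above leans on the fact that FinalClass has no subclasses, so an instanceof check against it only ever needs a null check plus one exact class comparison. A minimal standalone illustration of that equivalence, with made-up class and helper names:

public class InstanceOfSketch {
  static final class FinalClass {}

  // For a final class, `obj instanceof FinalClass` reduces to a null check
  // plus a single exact-class comparison; no class-hierarchy walk is needed.
  static boolean isFinalClassInstance(Object obj) {
    return obj != null && obj.getClass() == FinalClass.class;
  }

  public static void main(String[] args) {
    System.out.println(isFinalClassInstance(new FinalClass())); // true
    System.out.println(isFinalClassInstance(null));             // false
    System.out.println(isFinalClassInstance(new Object()));     // false
  }
}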
diff --git a/test/422-type-conversion/expected.txt b/test/422-type-conversion/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/422-type-conversion/expected.txt
diff --git a/test/422-type-conversion/info.txt b/test/422-type-conversion/info.txt
new file mode 100644
index 0000000..e734f32
--- /dev/null
+++ b/test/422-type-conversion/info.txt
@@ -0,0 +1 @@
+Tests for type conversions.
diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java
new file mode 100644
index 0000000..37bc777
--- /dev/null
+++ b/test/422-type-conversion/src/Main.java
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void assertByteEquals(byte expected, byte result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertShortEquals(short expected, short result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertCharEquals(char expected, char result) {
+ if (expected != result) {
+ // Values are cast to int to display numeric values instead of
+ // (UTF-16 encoded) characters.
+ throw new Error("Expected: " + (int)expected + ", found: " + (int)result);
+ }
+ }
+
+ public static void assertFloatEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertDoubleEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+
+ public static void main(String[] args) {
+ // Generate, compile and check int-to-long Dex instructions.
+ byteToLong();
+ shortToLong();
+ intToLong();
+ charToLong();
+
+ // Generate, compile and check int-to-float Dex instructions.
+ byteToFloat();
+ shortToFloat();
+ intToFloat();
+ charToFloat();
+
+ // Generate, compile and check int-to-double Dex instructions.
+ byteToDouble();
+ shortToDouble();
+ intToDouble();
+ charToDouble();
+
+ // Generate, compile and check long-to-int Dex instructions.
+ longToInt();
+
+ // Generate, compile and check int-to-byte Dex instructions.
+ shortToByte();
+ intToByte();
+ charToByte();
+
+ // Generate, compile and check int-to-short Dex instructions.
+ byteToShort();
+ intToShort();
+ charToShort();
+
+ // Generate, compile and check int-to-char Dex instructions.
+ byteToChar();
+ shortToChar();
+ intToChar();
+ }
+
+ private static void byteToLong() {
+ assertLongEquals(1L, $opt$ByteToLong((byte)1));
+ assertLongEquals(0L, $opt$ByteToLong((byte)0));
+ assertLongEquals(-1L, $opt$ByteToLong((byte)-1));
+ assertLongEquals(51L, $opt$ByteToLong((byte)51));
+ assertLongEquals(-51L, $opt$ByteToLong((byte)-51));
+ assertLongEquals(127L, $opt$ByteToLong((byte)127)); // 2^7 - 1
+ assertLongEquals(-127L, $opt$ByteToLong((byte)-127)); // -(2^7 - 1)
+ assertLongEquals(-128L, $opt$ByteToLong((byte)-128)); // -(2^7)
+ }
+
+ private static void shortToLong() {
+ assertLongEquals(1L, $opt$ShortToLong((short)1));
+ assertLongEquals(0L, $opt$ShortToLong((short)0));
+ assertLongEquals(-1L, $opt$ShortToLong((short)-1));
+ assertLongEquals(51L, $opt$ShortToLong((short)51));
+ assertLongEquals(-51L, $opt$ShortToLong((short)-51));
+ assertLongEquals(32767L, $opt$ShortToLong((short)32767)); // 2^15 - 1
+ assertLongEquals(-32767L, $opt$ShortToLong((short)-32767)); // -(2^15 - 1)
+ assertLongEquals(-32768L, $opt$ShortToLong((short)-32768)); // -(2^15)
+ }
+
+ private static void intToLong() {
+ assertLongEquals(1L, $opt$IntToLong(1));
+ assertLongEquals(0L, $opt$IntToLong(0));
+ assertLongEquals(-1L, $opt$IntToLong(-1));
+ assertLongEquals(51L, $opt$IntToLong(51));
+ assertLongEquals(-51L, $opt$IntToLong(-51));
+ assertLongEquals(2147483647L, $opt$IntToLong(2147483647)); // 2^31 - 1
+ assertLongEquals(-2147483647L, $opt$IntToLong(-2147483647)); // -(2^31 - 1)
+ assertLongEquals(-2147483648L, $opt$IntToLong(-2147483648)); // -(2^31)
+ }
+
+ private static void charToLong() {
+ assertLongEquals(1L, $opt$CharToLong((char)1));
+ assertLongEquals(0L, $opt$CharToLong((char)0));
+ assertLongEquals(51L, $opt$CharToLong((char)51));
+ assertLongEquals(32767L, $opt$CharToLong((char)32767)); // 2^15 - 1
+ assertLongEquals(65535L, $opt$CharToLong((char)65535)); // 2^16 - 1
+ assertLongEquals(65535L, $opt$CharToLong((char)-1));
+ assertLongEquals(65485L, $opt$CharToLong((char)-51));
+ assertLongEquals(32769L, $opt$CharToLong((char)-32767)); // -(2^15 - 1)
+ assertLongEquals(32768L, $opt$CharToLong((char)-32768)); // -(2^15)
+ }
+
+ private static void byteToFloat() {
+ assertFloatEquals(1F, $opt$ByteToFloat((byte)1));
+ assertFloatEquals(0F, $opt$ByteToFloat((byte)0));
+ assertFloatEquals(-1F, $opt$ByteToFloat((byte)-1));
+ assertFloatEquals(51F, $opt$ByteToFloat((byte)51));
+ assertFloatEquals(-51F, $opt$ByteToFloat((byte)-51));
+ assertFloatEquals(127F, $opt$ByteToFloat((byte)127)); // 2^7 - 1
+ assertFloatEquals(-127F, $opt$ByteToFloat((byte)-127)); // -(2^7 - 1)
+ assertFloatEquals(-128F, $opt$ByteToFloat((byte)-128)); // -(2^7)
+ }
+
+ private static void shortToFloat() {
+ assertFloatEquals(1F, $opt$ShortToFloat((short)1));
+ assertFloatEquals(0F, $opt$ShortToFloat((short)0));
+ assertFloatEquals(-1F, $opt$ShortToFloat((short)-1));
+ assertFloatEquals(51F, $opt$ShortToFloat((short)51));
+ assertFloatEquals(-51F, $opt$ShortToFloat((short)-51));
+ assertFloatEquals(32767F, $opt$ShortToFloat((short)32767)); // 2^15 - 1
+ assertFloatEquals(-32767F, $opt$ShortToFloat((short)-32767)); // -(2^15 - 1)
+ assertFloatEquals(-32768F, $opt$ShortToFloat((short)-32768)); // -(2^15)
+ }
+
+ private static void intToFloat() {
+ assertFloatEquals(1F, $opt$IntToFloat(1));
+ assertFloatEquals(0F, $opt$IntToFloat(0));
+ assertFloatEquals(-1F, $opt$IntToFloat(-1));
+ assertFloatEquals(51F, $opt$IntToFloat(51));
+ assertFloatEquals(-51F, $opt$IntToFloat(-51));
+ assertFloatEquals(16777215F, $opt$IntToFloat(16777215)); // 2^24 - 1
+ assertFloatEquals(-16777215F, $opt$IntToFloat(-16777215)); // -(2^24 - 1)
+ assertFloatEquals(16777216F, $opt$IntToFloat(16777216)); // 2^24
+ assertFloatEquals(-16777216F, $opt$IntToFloat(-16777216)); // -(2^24)
+ assertFloatEquals(2147483647F, $opt$IntToFloat(2147483647)); // 2^31 - 1
+ assertFloatEquals(-2147483648F, $opt$IntToFloat(-2147483648)); // -(2^31)
+ }
+
+ private static void charToFloat() {
+ assertFloatEquals(1F, $opt$CharToFloat((char)1));
+ assertFloatEquals(0F, $opt$CharToFloat((char)0));
+ assertFloatEquals(51F, $opt$CharToFloat((char)51));
+ assertFloatEquals(32767F, $opt$CharToFloat((char)32767)); // 2^15 - 1
+ assertFloatEquals(65535F, $opt$CharToFloat((char)65535)); // 2^16 - 1
+ assertFloatEquals(65535F, $opt$CharToFloat((char)-1));
+ assertFloatEquals(65485F, $opt$CharToFloat((char)-51));
+ assertFloatEquals(32769F, $opt$CharToFloat((char)-32767)); // -(2^15 - 1)
+ assertFloatEquals(32768F, $opt$CharToFloat((char)-32768)); // -(2^15)
+ }
+
+ private static void byteToDouble() {
+ assertDoubleEquals(1D, $opt$ByteToDouble((byte)1));
+ assertDoubleEquals(0D, $opt$ByteToDouble((byte)0));
+ assertDoubleEquals(-1D, $opt$ByteToDouble((byte)-1));
+ assertDoubleEquals(51D, $opt$ByteToDouble((byte)51));
+ assertDoubleEquals(-51D, $opt$ByteToDouble((byte)-51));
+ assertDoubleEquals(127D, $opt$ByteToDouble((byte)127)); // 2^7 - 1
+ assertDoubleEquals(-127D, $opt$ByteToDouble((byte)-127)); // -(2^7 - 1)
+ assertDoubleEquals(-128D, $opt$ByteToDouble((byte)-128)); // -(2^7)
+ }
+
+ private static void shortToDouble() {
+ assertDoubleEquals(1D, $opt$ShortToDouble((short)1));
+ assertDoubleEquals(0D, $opt$ShortToDouble((short)0));
+ assertDoubleEquals(-1D, $opt$ShortToDouble((short)-1));
+ assertDoubleEquals(51D, $opt$ShortToDouble((short)51));
+ assertDoubleEquals(-51D, $opt$ShortToDouble((short)-51));
+ assertDoubleEquals(32767D, $opt$ShortToDouble((short)32767)); // 2^15 - 1
+ assertDoubleEquals(-32767D, $opt$ShortToDouble((short)-32767)); // -(2^15 - 1)
+ assertDoubleEquals(-32768D, $opt$ShortToDouble((short)-32768)); // -(2^15)
+ }
+
+ private static void intToDouble() {
+ assertDoubleEquals(1D, $opt$IntToDouble(1));
+ assertDoubleEquals(0D, $opt$IntToDouble(0));
+ assertDoubleEquals(-1D, $opt$IntToDouble(-1));
+ assertDoubleEquals(51D, $opt$IntToDouble(51));
+ assertDoubleEquals(-51D, $opt$IntToDouble(-51));
+ assertDoubleEquals(16777216D, $opt$IntToDouble(16777216)); // 2^24
+ assertDoubleEquals(-16777216D, $opt$IntToDouble(-16777216)); // -(2^24)
+ assertDoubleEquals(2147483647D, $opt$IntToDouble(2147483647)); // 2^31 - 1
+ assertDoubleEquals(-2147483648D, $opt$IntToDouble(-2147483648)); // -(2^31)
+ }
+
+ private static void charToDouble() {
+ assertDoubleEquals(1D, $opt$CharToDouble((char)1));
+ assertDoubleEquals(0D, $opt$CharToDouble((char)0));
+ assertDoubleEquals(51D, $opt$CharToDouble((char)51));
+ assertDoubleEquals(32767D, $opt$CharToDouble((char)32767)); // 2^15 - 1
+ assertDoubleEquals(65535D, $opt$CharToDouble((char)65535)); // 2^16 - 1
+ assertDoubleEquals(65535D, $opt$CharToDouble((char)-1));
+ assertDoubleEquals(65485D, $opt$CharToDouble((char)-51));
+ assertDoubleEquals(32769D, $opt$CharToDouble((char)-32767)); // -(2^15 - 1)
+ assertDoubleEquals(32768D, $opt$CharToDouble((char)-32768)); // -(2^15)
+ }
+
+ private static void longToInt() {
+ assertIntEquals(1, $opt$LongToInt(1L));
+ assertIntEquals(0, $opt$LongToInt(0L));
+ assertIntEquals(-1, $opt$LongToInt(-1L));
+ assertIntEquals(51, $opt$LongToInt(51L));
+ assertIntEquals(-51, $opt$LongToInt(-51L));
+ assertIntEquals(2147483647, $opt$LongToInt(2147483647L)); // 2^31 - 1
+ assertIntEquals(-2147483647, $opt$LongToInt(-2147483647L)); // -(2^31 - 1)
+ assertIntEquals(-2147483648, $opt$LongToInt(-2147483648L)); // -(2^31)
+ assertIntEquals(-2147483648, $opt$LongToInt(2147483648L)); // 2^31
+ assertIntEquals(2147483647, $opt$LongToInt(-2147483649L)); // -(2^31 + 1)
+ assertIntEquals(-1, $opt$LongToInt(9223372036854775807L)); // 2^63 - 1
+ assertIntEquals(1, $opt$LongToInt(-9223372036854775807L)); // -(2^63 - 1)
+ assertIntEquals(0, $opt$LongToInt(-9223372036854775808L)); // -(2^63)
+
+ assertIntEquals(42, $opt$LongLiteralToInt());
+
+ // Ensure long-to-int conversions truncate values as expected.
+ assertLongEquals(1L, $opt$IntToLong($opt$LongToInt(4294967297L))); // 2^32 + 1
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(4294967296L))); // 2^32
+ assertLongEquals(-1L, $opt$IntToLong($opt$LongToInt(4294967295L))); // 2^32 - 1
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(0L)));
+ assertLongEquals(1L, $opt$IntToLong($opt$LongToInt(-4294967295L))); // -(2^32 - 1)
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(-4294967296L))); // -(2^32)
+ assertLongEquals(-1L, $opt$IntToLong($opt$LongToInt(-4294967297L))); // -(2^32 + 1)
+ }
+
+ private static void shortToByte() {
+ assertByteEquals((byte)1, $opt$ShortToByte((short)1));
+ assertByteEquals((byte)0, $opt$ShortToByte((short)0));
+ assertByteEquals((byte)-1, $opt$ShortToByte((short)-1));
+ assertByteEquals((byte)51, $opt$ShortToByte((short)51));
+ assertByteEquals((byte)-51, $opt$ShortToByte((short)-51));
+ assertByteEquals((byte)127, $opt$ShortToByte((short)127)); // 2^7 - 1
+ assertByteEquals((byte)-127, $opt$ShortToByte((short)-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$ShortToByte((short)-128)); // -(2^7)
+ assertByteEquals((byte)-128, $opt$ShortToByte((short)128)); // 2^7
+ assertByteEquals((byte)127, $opt$ShortToByte((short)-129)); // -(2^7 + 1)
+ assertByteEquals((byte)-1, $opt$ShortToByte((short)32767)); // 2^15 - 1
+ assertByteEquals((byte)0, $opt$ShortToByte((short)-32768)); // -(2^15)
+ }
+
+ private static void intToByte() {
+ assertByteEquals((byte)1, $opt$IntToByte(1));
+ assertByteEquals((byte)0, $opt$IntToByte(0));
+ assertByteEquals((byte)-1, $opt$IntToByte(-1));
+ assertByteEquals((byte)51, $opt$IntToByte(51));
+ assertByteEquals((byte)-51, $opt$IntToByte(-51));
+ assertByteEquals((byte)127, $opt$IntToByte(127)); // 2^7 - 1
+ assertByteEquals((byte)-127, $opt$IntToByte(-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$IntToByte(-128)); // -(2^7)
+ assertByteEquals((byte)-128, $opt$IntToByte(128)); // 2^7
+ assertByteEquals((byte)127, $opt$IntToByte(-129)); // -(2^7 + 1)
+ assertByteEquals((byte)-1, $opt$IntToByte(2147483647)); // 2^31 - 1
+ assertByteEquals((byte)0, $opt$IntToByte(-2147483648)); // -(2^31)
+ }
+
+ private static void charToByte() {
+ assertByteEquals((byte)1, $opt$CharToByte((char)1));
+ assertByteEquals((byte)0, $opt$CharToByte((char)0));
+ assertByteEquals((byte)51, $opt$CharToByte((char)51));
+ assertByteEquals((byte)127, $opt$CharToByte((char)127)); // 2^7 - 1
+ assertByteEquals((byte)-128, $opt$CharToByte((char)128)); // 2^7
+ assertByteEquals((byte)-1, $opt$CharToByte((char)32767)); // 2^15 - 1
+ assertByteEquals((byte)-1, $opt$CharToByte((char)65535)); // 2^16 - 1
+ assertByteEquals((byte)-1, $opt$CharToByte((char)-1));
+ assertByteEquals((byte)-51, $opt$CharToByte((char)-51));
+ assertByteEquals((byte)-127, $opt$CharToByte((char)-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$CharToByte((char)-128)); // -(2^7)
+ assertByteEquals((byte)127, $opt$CharToByte((char)-129)); // -(2^7 + 1)
+ }
+
+ private static void byteToShort() {
+ assertShortEquals((short)1, $opt$ByteToShort((byte)1));
+ assertShortEquals((short)0, $opt$ByteToShort((byte)0));
+ assertShortEquals((short)-1, $opt$ByteToShort((byte)-1));
+ assertShortEquals((short)51, $opt$ByteToShort((byte)51));
+ assertShortEquals((short)-51, $opt$ByteToShort((byte)-51));
+ assertShortEquals((short)127, $opt$ByteToShort((byte)127)); // 2^7 - 1
+ assertShortEquals((short)-127, $opt$ByteToShort((byte)-127)); // -(2^7 - 1)
+ assertShortEquals((short)-128, $opt$ByteToShort((byte)-128)); // -(2^7)
+ }
+
+ private static void intToShort() {
+ assertShortEquals((short)1, $opt$IntToShort(1));
+ assertShortEquals((short)0, $opt$IntToShort(0));
+ assertShortEquals((short)-1, $opt$IntToShort(-1));
+ assertShortEquals((short)51, $opt$IntToShort(51));
+ assertShortEquals((short)-51, $opt$IntToShort(-51));
+ assertShortEquals((short)32767, $opt$IntToShort(32767)); // 2^15 - 1
+ assertShortEquals((short)-32767, $opt$IntToShort(-32767)); // -(2^15 - 1)
+ assertShortEquals((short)-32768, $opt$IntToShort(-32768)); // -(2^15)
+ assertShortEquals((short)-32768, $opt$IntToShort(32768)); // 2^15
+ assertShortEquals((short)32767, $opt$IntToShort(-32769)); // -(2^15 + 1)
+ assertShortEquals((short)-1, $opt$IntToShort(2147483647)); // 2^31 - 1
+ assertShortEquals((short)0, $opt$IntToShort(-2147483648)); // -(2^31)
+ }
+
+ private static void charToShort() {
+ assertShortEquals((short)1, $opt$CharToShort((char)1));
+ assertShortEquals((short)0, $opt$CharToShort((char)0));
+ assertShortEquals((short)51, $opt$CharToShort((char)51));
+ assertShortEquals((short)32767, $opt$CharToShort((char)32767)); // 2^15 - 1
+ assertShortEquals((short)-32768, $opt$CharToShort((char)32768)); // 2^15
+ assertShortEquals((short)-32767, $opt$CharToShort((char)32769)); // 2^15 + 1
+ assertShortEquals((short)-1, $opt$CharToShort((char)65535)); // 2^16 - 1
+ assertShortEquals((short)-1, $opt$CharToShort((char)-1));
+ assertShortEquals((short)-51, $opt$CharToShort((char)-51));
+ assertShortEquals((short)-32767, $opt$CharToShort((char)-32767)); // -(2^15 - 1)
+ assertShortEquals((short)-32768, $opt$CharToShort((char)-32768)); // -(2^15)
+ assertShortEquals((short)32767, $opt$CharToShort((char)-32769)); // -(2^15 + 1)
+ }
+
+ private static void byteToChar() {
+ assertCharEquals((char)1, $opt$ByteToChar((byte)1));
+ assertCharEquals((char)0, $opt$ByteToChar((byte)0));
+ assertCharEquals((char)65535, $opt$ByteToChar((byte)-1));
+ assertCharEquals((char)51, $opt$ByteToChar((byte)51));
+ assertCharEquals((char)65485, $opt$ByteToChar((byte)-51));
+ assertCharEquals((char)127, $opt$ByteToChar((byte)127)); // 2^7 - 1
+ assertCharEquals((char)65409, $opt$ByteToChar((byte)-127)); // -(2^7 - 1)
+ assertCharEquals((char)65408, $opt$ByteToChar((byte)-128)); // -(2^7)
+ }
+
+ private static void shortToChar() {
+ assertCharEquals((char)1, $opt$ShortToChar((short)1));
+ assertCharEquals((char)0, $opt$ShortToChar((short)0));
+ assertCharEquals((char)65535, $opt$ShortToChar((short)-1));
+ assertCharEquals((char)51, $opt$ShortToChar((short)51));
+ assertCharEquals((char)65485, $opt$ShortToChar((short)-51));
+ assertCharEquals((char)32767, $opt$ShortToChar((short)32767)); // 2^15 - 1
+ assertCharEquals((char)32769, $opt$ShortToChar((short)-32767)); // -(2^15 - 1)
+ assertCharEquals((char)32768, $opt$ShortToChar((short)-32768)); // -(2^15)
+ }
+
+ private static void intToChar() {
+ assertCharEquals((char)1, $opt$IntToChar(1));
+ assertCharEquals((char)0, $opt$IntToChar(0));
+ assertCharEquals((char)65535, $opt$IntToChar(-1));
+ assertCharEquals((char)51, $opt$IntToChar(51));
+ assertCharEquals((char)65485, $opt$IntToChar(-51));
+ assertCharEquals((char)32767, $opt$IntToChar(32767)); // 2^15 - 1
+ assertCharEquals((char)32769, $opt$IntToChar(-32767)); // -(2^15 - 1)
+ assertCharEquals((char)32768, $opt$IntToChar(32768)); // 2^15
+ assertCharEquals((char)32768, $opt$IntToChar(-32768)); // -(2^15)
+ assertCharEquals((char)65535, $opt$IntToChar(65535)); // 2^16 - 1
+ assertCharEquals((char)1, $opt$IntToChar(-65535)); // -(2^16 - 1)
+ assertCharEquals((char)0, $opt$IntToChar(65536)); // 2^16
+ assertCharEquals((char)0, $opt$IntToChar(-65536)); // -(2^16)
+ assertCharEquals((char)65535, $opt$IntToChar(2147483647)); // 2^31 - 1
+ assertCharEquals((char)0, $opt$IntToChar(-2147483648)); // -(2^31)
+ }
+
+
+ // These methods produce int-to-long Dex instructions.
+ static long $opt$ByteToLong(byte a) { return a; }
+ static long $opt$ShortToLong(short a) { return a; }
+ static long $opt$IntToLong(int a) { return a; }
+ static long $opt$CharToLong(char a) { return a; }
+
+ // These methods produce int-to-float Dex instructions.
+ static float $opt$ByteToFloat(byte a) { return a; }
+ static float $opt$ShortToFloat(short a) { return a; }
+ static float $opt$IntToFloat(int a) { return a; }
+ static float $opt$CharToFloat(char a) { return a; }
+
+ // These methods produce int-to-double Dex instructions.
+ static double $opt$ByteToDouble(byte a) { return a; }
+ static double $opt$ShortToDouble(short a) { return a; }
+ static double $opt$IntToDouble(int a) { return a; }
+ static double $opt$CharToDouble(char a) { return a; }
+
+ // These methods produce long-to-int Dex instructions.
+ static int $opt$LongToInt(long a){ return (int)a; }
+ static int $opt$LongLiteralToInt(){ return (int)42L; }
+
+ // These methods produce int-to-byte Dex instructions.
+ static byte $opt$ShortToByte(short a){ return (byte)a; }
+ static byte $opt$IntToByte(int a){ return (byte)a; }
+ static byte $opt$CharToByte(char a){ return (byte)a; }
+
+ // These methods produce int-to-short Dex instructions.
+ static short $opt$ByteToShort(byte a){ return (short)a; }
+ static short $opt$IntToShort(int a){ return (short)a; }
+ static short $opt$CharToShort(char a){ return (short)a; }
+
+ // These methods produce int-to-char Dex instructions.
+ static char $opt$ByteToChar(byte a){ return (char)a; }
+ static char $opt$ShortToChar(short a){ return (char)a; }
+ static char $opt$IntToChar(int a){ return (char)a; }
+}
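Editorial sketch (not part of the patch): every narrowing result checked above follows the same rule, keep the low-order bits of the value and then sign-extend (byte, short) or zero-extend (char). A small self-contained check spelling out that arithmetic for three of the cases the test uses:

public class ConversionSketch {
  static void check(boolean condition) {
    if (!condition) throw new Error("Wrong result");
  }

  public static void main(String[] args) {
    // long-to-int keeps only the low 32 bits, so 2^32 + 1 becomes 1.
    long l = 4294967297L;
    check((int) l == (int) (l & 0xFFFFFFFFL) && (int) l == 1);

    // int-to-char keeps the low 16 bits and zero-extends, so -1 becomes 65535.
    int i = -1;
    check((char) i == (char) (i & 0xFFFF) && (char) i == 65535);

    // int-to-byte keeps the low 8 bits and sign-extends, so 128 becomes -128.
    int j = 128;
    check((byte) j == (byte) (j & 0xFF) && (byte) j == -128);
  }
}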
diff --git a/test/423-invoke-interface/expected.txt b/test/423-invoke-interface/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/423-invoke-interface/expected.txt
diff --git a/test/423-invoke-interface/info.txt b/test/423-invoke-interface/info.txt
new file mode 100644
index 0000000..a496ffe
--- /dev/null
+++ b/test/423-invoke-interface/info.txt
@@ -0,0 +1,2 @@
+Invoke-interface test with (hopefully) enough interface methods to trigger
+a conflict in our IMT (interface method table).
diff --git a/test/423-invoke-interface/src/Main.java b/test/423-invoke-interface/src/Main.java
new file mode 100644
index 0000000..fae857a
--- /dev/null
+++ b/test/423-invoke-interface/src/Main.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ static interface Itf {
+ public int return1();
+ public int return2();
+ public int return3();
+ public int return4();
+ public int return5();
+ public int return6();
+ public int return7();
+ public int return8();
+ public int return9();
+ public int return10();
+ public int return11();
+ public int return12();
+ public int return13();
+ public int return14();
+ public int return15();
+ public int return16();
+ public int return17();
+ public int return18();
+ public int return19();
+ public int return20();
+ }
+
+ static class ItfImpl1 implements Itf {
+ public int return1() { return 1; }
+ public int return2() { return 2; }
+ public int return3() { return 3; }
+ public int return4() { return 4; }
+ public int return5() { return 5; }
+ public int return6() { return 6; }
+ public int return7() { return 7; }
+ public int return8() { return 8; }
+ public int return9() { return 9; }
+ public int return10() { return 10; }
+ public int return11() { return 11; }
+ public int return12() { return 12; }
+ public int return13() { return 13; }
+ public int return14() { return 14; }
+ public int return15() { return 15; }
+ public int return16() { return 16; }
+ public int return17() { return 17; }
+ public int return18() { return 18; }
+ public int return19() { return 19; }
+ public int return20() { return 20; }
+ }
+
+ static class ItfImpl2 implements Itf {
+ public int return1() { return -1; }
+ public int return2() { return -2; }
+ public int return3() { return -3; }
+ public int return4() { return -4; }
+ public int return5() { return -5; }
+ public int return6() { return -6; }
+ public int return7() { return -7; }
+ public int return8() { return -8; }
+ public int return9() { return -9; }
+ public int return10() { return -10; }
+ public int return11() { return -11; }
+ public int return12() { return -12; }
+ public int return13() { return -13; }
+ public int return14() { return -14; }
+ public int return15() { return -15; }
+ public int return16() { return -16; }
+ public int return17() { return -17; }
+ public int return18() { return -18; }
+ public int return19() { return -19; }
+ public int return20() { return -20; }
+ }
+
+ public static void main(String[] args) {
+ $opt$InvokeInterface(new ItfImpl1(), 1);
+ $opt$InvokeInterface(new ItfImpl2(), -1);
+ }
+
+ public static void assertEquals(int expected, int value) {
+ if (expected != value) {
+ throw new Error("Expected " + expected + ", got " + value);
+ }
+ }
+
+ public static void $opt$InvokeInterface(Itf object, int factor) {
+ assertEquals(factor * 1, object.return1());
+ assertEquals(factor * 2, object.return2());
+ assertEquals(factor * 3, object.return3());
+ assertEquals(factor * 4, object.return4());
+ assertEquals(factor * 5, object.return5());
+ assertEquals(factor * 6, object.return6());
+ assertEquals(factor * 7, object.return7());
+ assertEquals(factor * 8, object.return8());
+ assertEquals(factor * 9, object.return9());
+ assertEquals(factor * 10, object.return10());
+ assertEquals(factor * 11, object.return11());
+ assertEquals(factor * 12, object.return12());
+ assertEquals(factor * 13, object.return13());
+ assertEquals(factor * 14, object.return14());
+ assertEquals(factor * 15, object.return15());
+ assertEquals(factor * 16, object.return16());
+ assertEquals(factor * 17, object.return17());
+ assertEquals(factor * 18, object.return18());
+ assertEquals(factor * 19, object.return19());
+ assertEquals(factor * 20, object.return20());
+ }
+}
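Editorial sketch (not part of the patch): per the info.txt above, the point of the twenty interface methods is to overflow the fixed-size interface method table (IMT) so that at least one slot holds more than one method and the conflict path is exercised. The sketch below only illustrates the hashing idea; the table size and slot function are invented for the example and are not ART's actual implementation:

import java.util.HashMap;
import java.util.Map;

public class ImtConflictSketch {
  // Illustrative table size only; the real IMT size is an ART implementation detail.
  static final int IMT_SIZE = 64;

  // Illustrative slot function: distinct methods can map to the same slot.
  static int imtSlot(String methodSignature) {
    return Math.floorMod(methodSignature.hashCode(), IMT_SIZE);
  }

  public static void main(String[] args) {
    Map<Integer, Integer> slotCounts = new HashMap<>();
    for (int i = 1; i <= 20; i++) {
      slotCounts.merge(imtSlot("Itf.return" + i + "()I"), 1, Integer::sum);
    }
    // Any slot with more than one method is a conflict and needs a slower lookup.
    long conflicts = slotCounts.values().stream().filter(c -> c > 1).count();
    System.out.println("Conflicting slots: " + conflicts);
  }
}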
diff --git a/test/424-checkcast/expected.txt b/test/424-checkcast/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/424-checkcast/expected.txt
diff --git a/test/424-checkcast/info.txt b/test/424-checkcast/info.txt
new file mode 100644
index 0000000..b50b082
--- /dev/null
+++ b/test/424-checkcast/info.txt
@@ -0,0 +1 @@
+Simple tests for the checkcast opcode.
diff --git a/test/424-checkcast/src/Main.java b/test/424-checkcast/src/Main.java
new file mode 100644
index 0000000..791b166
--- /dev/null
+++ b/test/424-checkcast/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static Object a;
+
+ public static Object $opt$CheckCastMain() {
+ return (Main)a;
+ }
+
+ public static Object $opt$CheckCastFinalClass() {
+ return (FinalClass)a;
+ }
+
+ public static void main(String[] args) {
+ $opt$TestMain();
+ $opt$TestFinalClass();
+ }
+
+ public static void $opt$TestMain() {
+ a = new Main();
+ $opt$CheckCastMain();
+
+ a = null;
+ $opt$CheckCastMain();
+
+ a = new MainChild();
+ $opt$CheckCastMain();
+
+ a = new Object();
+ try {
+ $opt$CheckCastMain();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ public static void $opt$TestFinalClass() {
+ a = new FinalClass();
+ $opt$CheckCastFinalClass();
+
+ a = null;
+ $opt$CheckCastFinalClass();
+
+ a = new Main();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+
+ a = new Object();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ static class MainChild extends Main {}
+
+ static final class FinalClass {}
+}
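Editorial sketch (not part of the patch): unlike instanceof, a checkcast accepts null and throws ClassCastException on any other mismatch, which is exactly what $opt$TestMain and $opt$TestFinalClass assert above. A small standalone illustration of those semantics, with made-up names:

public class CheckCastSketch {
  static final class FinalClass {}

  // checkcast semantics: null always passes; any non-null value must be an
  // instance of the target type, otherwise ClassCastException is thrown.
  static FinalClass castToFinalClass(Object obj) {
    return (FinalClass) obj;
  }

  public static void main(String[] args) {
    castToFinalClass(null);              // allowed
    castToFinalClass(new FinalClass());  // allowed
    try {
      castToFinalClass(new Object());    // must throw
      throw new Error("Should have gotten a ClassCastException");
    } catch (ClassCastException expected) {}
  }
}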
diff --git a/test/425-invoke-super/expected.txt b/test/425-invoke-super/expected.txt
new file mode 100644
index 0000000..f7f6ae4
--- /dev/null
+++ b/test/425-invoke-super/expected.txt
@@ -0,0 +1 @@
+Test started
diff --git a/test/425-invoke-super/info.txt b/test/425-invoke-super/info.txt
new file mode 100644
index 0000000..ad99030
--- /dev/null
+++ b/test/425-invoke-super/info.txt
@@ -0,0 +1 @@
+Tests the invoke-super opcode.
diff --git a/test/425-invoke-super/smali/invokesuper.smali b/test/425-invoke-super/smali/invokesuper.smali
new file mode 100644
index 0000000..ab13091
--- /dev/null
+++ b/test/425-invoke-super/smali/invokesuper.smali
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.class public LInvokeSuper;
+.super LSuperClass;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LSuperClass;-><init>()V
+ return-void
+.end method
+
+
+.method public run()I
+.registers 2
+ # Do an invoke super on a non-super class to force slow path.
+ invoke-super {v1}, LInvokeSuper;->returnInt()I
+ move-result v0
+ return v0
+.end method
+
+
+.method public returnInt()I
+.registers 2
+ const v0, 777
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/subclass.smali b/test/425-invoke-super/smali/subclass.smali
new file mode 100644
index 0000000..54e3474
--- /dev/null
+++ b/test/425-invoke-super/smali/subclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSubClass;
+.super LInvokeSuper;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LInvokeSuper;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 0
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/superclass.smali b/test/425-invoke-super/smali/superclass.smali
new file mode 100644
index 0000000..b366aa7
--- /dev/null
+++ b/test/425-invoke-super/smali/superclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSuperClass;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 42
+ return v0
+.end method
diff --git a/test/425-invoke-super/src/Main.java b/test/425-invoke-super/src/Main.java
new file mode 100644
index 0000000..f3166fd
--- /dev/null
+++ b/test/425-invoke-super/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ static class A {
+ public int foo() { return 1; }
+ }
+
+ static class B extends A {
+ public int $opt$bar() { return super.foo(); }
+ }
+
+ static class C extends B {
+ public int foo() { return 42; }
+ }
+
+ static class D extends C {
+ }
+
+ static void assertEquals(int expected, int value) {
+ if (expected != value) {
+ throw new Error("Expected " + expected + ", got " + value);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ // Workaround for b/18051191.
+ System.out.println("Test started");
+ assertEquals(1, new B().$opt$bar());
+ assertEquals(1, new C().$opt$bar());
+ assertEquals(1, new D().$opt$bar());
+
+ Class<?> c = Class.forName("InvokeSuper");
+ Method m = c.getMethod("run");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+
+ c = Class.forName("SubClass");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+ }
+}
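Editorial sketch (not part of the patch): the Java half of this test checks that super.foo() in B always binds to A.foo() regardless of the receiver's dynamic type, while the smali half forces the runtime resolution path. A minimal illustration of the virtual-vs-super dispatch difference the assertions rely on:

public class InvokeSuperSketch {
  static class A { int foo() { return 1; } }

  static class B extends A {
    // invoke-super binds statically to A.foo(), whatever the runtime type is.
    int viaSuper() { return super.foo(); }
    // invoke-virtual dispatches on the runtime type of `this`.
    int viaVirtual() { return foo(); }
  }

  static class C extends B { int foo() { return 42; } }

  public static void main(String[] args) {
    C c = new C();
    System.out.println(c.viaSuper());   // 1  (A.foo)
    System.out.println(c.viaVirtual()); // 42 (C.foo)
  }
}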
diff --git a/test/426-monitor/expected.txt b/test/426-monitor/expected.txt
new file mode 100644
index 0000000..2ffeff4
--- /dev/null
+++ b/test/426-monitor/expected.txt
@@ -0,0 +1,5 @@
+In static method
+In instance method
+In synchronized block
+In second instance method
+In second static method
diff --git a/test/426-monitor/info.txt b/test/426-monitor/info.txt
new file mode 100644
index 0000000..1b093ea
--- /dev/null
+++ b/test/426-monitor/info.txt
@@ -0,0 +1 @@
+Simple tests for monitorenter/monitorexit.
diff --git a/test/426-monitor/src/Main.java b/test/426-monitor/src/Main.java
new file mode 100644
index 0000000..a073a95
--- /dev/null
+++ b/test/426-monitor/src/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ $opt$StaticSynchronizedMethod();
+ new Main().$opt$InstanceSynchronizedMethod();
+ $opt$SynchronizedBlock();
+ new Main().$opt$DoubleInstanceSynchronized();
+ $opt$DoubleStaticSynchronized();
+ }
+
+ public static synchronized void $opt$StaticSynchronizedMethod() {
+ System.out.println("In static method");
+ }
+
+ public synchronized void $opt$InstanceSynchronizedMethod() {
+ System.out.println("In instance method");
+ }
+
+ public static void $opt$SynchronizedBlock() {
+ Object o = new Object();
+ synchronized(o) {
+ System.out.println("In synchronized block");
+ }
+ }
+
+ public synchronized void $opt$DoubleInstanceSynchronized() {
+ synchronized (this) {
+ System.out.println("In second instance method");
+ }
+ }
+
+ public synchronized static void $opt$DoubleStaticSynchronized() {
+ synchronized (Main.class) {
+ System.out.println("In second static method");
+ }
+ }
+}
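Editorial sketch (not part of the patch): synchronized methods and synchronized blocks both end up as monitorenter/monitorexit pairs; the test above simply covers the method, block, and nested (recursive lock) shapes separately. A rough illustration of the desugaring, using Thread.holdsLock to make the acquired monitor visible:

public class MonitorSketch {
  // A synchronized instance method ...
  public synchronized void inMethod() {
    System.out.println("lock held in method: " + Thread.holdsLock(this));
  }

  // ... behaves like an explicit block on `this`, which is what the
  // monitorenter/monitorexit instructions implement.
  public void desugared() {
    synchronized (this) {
      System.out.println("lock held in block: " + Thread.holdsLock(this));
    }
  }

  public static void main(String[] args) {
    MonitorSketch m = new MonitorSketch();
    m.inMethod();
    m.desugared();
  }
}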
diff --git a/test/427-bitwise/expected.txt b/test/427-bitwise/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/427-bitwise/expected.txt
diff --git a/test/427-bitwise/info.txt b/test/427-bitwise/info.txt
new file mode 100644
index 0000000..4762847
--- /dev/null
+++ b/test/427-bitwise/info.txt
@@ -0,0 +1 @@
+Tests for the and/or/xor opcodes.
diff --git a/test/427-bitwise/src/Main.java b/test/427-bitwise/src/Main.java
new file mode 100644
index 0000000..e984066
--- /dev/null
+++ b/test/427-bitwise/src/Main.java
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ andInt();
+ andLong();
+
+ orInt();
+ orLong();
+
+ xorInt();
+ xorLong();
+ }
+
+ private static void andInt() {
+ expectEquals(1, $opt$And(5, 3));
+ expectEquals(0, $opt$And(0, 0));
+ expectEquals(0, $opt$And(0, 3));
+ expectEquals(0, $opt$And(3, 0));
+ expectEquals(1, $opt$And(1, -3));
+ expectEquals(-12, $opt$And(-12, -3));
+
+ expectEquals(1, $opt$AndLit8(1));
+ expectEquals(0, $opt$AndLit8(0));
+ expectEquals(0, $opt$AndLit8(0));
+ expectEquals(3, $opt$AndLit8(3));
+ expectEquals(4, $opt$AndLit8(-12));
+
+ expectEquals(0, $opt$AndLit16(1));
+ expectEquals(0, $opt$AndLit16(0));
+ expectEquals(0, $opt$AndLit16(0));
+ expectEquals(0, $opt$AndLit16(3));
+ expectEquals(65280, $opt$AndLit16(-12));
+ }
+
+ private static void andLong() {
+ expectEquals(1L, $opt$And(5L, 3L));
+ expectEquals(0L, $opt$And(0L, 0L));
+ expectEquals(0L, $opt$And(0L, 3L));
+ expectEquals(0L, $opt$And(3L, 0L));
+ expectEquals(1L, $opt$And(1L, -3L));
+ expectEquals(-12L, $opt$And(-12L, -3L));
+
+ expectEquals(1L, $opt$AndLit8(1L));
+ expectEquals(0L, $opt$AndLit8(0L));
+ expectEquals(0L, $opt$AndLit8(0L));
+ expectEquals(3L, $opt$AndLit8(3L));
+ expectEquals(4L, $opt$AndLit8(-12L));
+
+ expectEquals(0L, $opt$AndLit16(1L));
+ expectEquals(0L, $opt$AndLit16(0L));
+ expectEquals(0L, $opt$AndLit16(0L));
+ expectEquals(0L, $opt$AndLit16(3L));
+ expectEquals(65280L, $opt$AndLit16(-12L));
+ }
+
+ static int $opt$And(int a, int b) {
+ return a & b;
+ }
+
+ static int $opt$AndLit8(int a) {
+ return a & 0xF;
+ }
+
+ static int $opt$AndLit16(int a) {
+ return a & 0xFF00;
+ }
+
+ static long $opt$And(long a, long b) {
+ return a & b;
+ }
+
+ static long $opt$AndLit8(long a) {
+ return a & 0xF;
+ }
+
+ static long $opt$AndLit16(long a) {
+ return a & 0xFF00;
+ }
+
+ private static void orInt() {
+ expectEquals(7, $opt$Or(5, 3));
+ expectEquals(0, $opt$Or(0, 0));
+ expectEquals(3, $opt$Or(0, 3));
+ expectEquals(3, $opt$Or(3, 0));
+ expectEquals(-3, $opt$Or(1, -3));
+ expectEquals(-3, $opt$Or(-12, -3));
+
+ expectEquals(15, $opt$OrLit8(1));
+ expectEquals(15, $opt$OrLit8(0));
+ expectEquals(15, $opt$OrLit8(3));
+ expectEquals(-1, $opt$OrLit8(-12));
+
+ expectEquals(0xFF01, $opt$OrLit16(1));
+ expectEquals(0xFF00, $opt$OrLit16(0));
+ expectEquals(0xFF03, $opt$OrLit16(3));
+ expectEquals(-12, $opt$OrLit16(-12));
+ }
+
+ private static void orLong() {
+ expectEquals(7L, $opt$Or(5L, 3L));
+ expectEquals(0L, $opt$Or(0L, 0L));
+ expectEquals(3L, $opt$Or(0L, 3L));
+ expectEquals(3L, $opt$Or(3L, 0L));
+ expectEquals(-3L, $opt$Or(1L, -3L));
+ expectEquals(-3L, $opt$Or(-12L, -3L));
+
+ expectEquals(15L, $opt$OrLit8(1L));
+ expectEquals(15L, $opt$OrLit8(0L));
+ expectEquals(15L, $opt$OrLit8(3L));
+ expectEquals(-1L, $opt$OrLit8(-12L));
+
+ expectEquals(0xFF01L, $opt$OrLit16(1L));
+ expectEquals(0xFF00L, $opt$OrLit16(0L));
+ expectEquals(0xFF03L, $opt$OrLit16(3L));
+ expectEquals(-12L, $opt$OrLit16(-12L));
+ }
+
+ static int $opt$Or(int a, int b) {
+ return a | b;
+ }
+
+ static int $opt$OrLit8(int a) {
+ return a | 0xF;
+ }
+
+ static int $opt$OrLit16(int a) {
+ return a | 0xFF00;
+ }
+
+ static long $opt$Or(long a, long b) {
+ return a | b;
+ }
+
+ static long $opt$OrLit8(long a) {
+ return a | 0xF;
+ }
+
+ static long $opt$OrLit16(long a) {
+ return a | 0xFF00;
+ }
+
+ private static void xorInt() {
+ expectEquals(6, $opt$Xor(5, 3));
+ expectEquals(0, $opt$Xor(0, 0));
+ expectEquals(3, $opt$Xor(0, 3));
+ expectEquals(3, $opt$Xor(3, 0));
+ expectEquals(-4, $opt$Xor(1, -3));
+ expectEquals(9, $opt$Xor(-12, -3));
+
+ expectEquals(14, $opt$XorLit8(1));
+ expectEquals(15, $opt$XorLit8(0));
+ expectEquals(12, $opt$XorLit8(3));
+ expectEquals(-5, $opt$XorLit8(-12));
+
+ expectEquals(0xFF01, $opt$XorLit16(1));
+ expectEquals(0xFF00, $opt$XorLit16(0));
+ expectEquals(0xFF03, $opt$XorLit16(3));
+ expectEquals(-0xFF0c, $opt$XorLit16(-12));
+ }
+
+ private static void xorLong() {
+ expectEquals(6L, $opt$Xor(5L, 3L));
+ expectEquals(0L, $opt$Xor(0L, 0L));
+ expectEquals(3L, $opt$Xor(0L, 3L));
+ expectEquals(3L, $opt$Xor(3L, 0L));
+ expectEquals(-4L, $opt$Xor(1L, -3L));
+ expectEquals(9L, $opt$Xor(-12L, -3L));
+
+ expectEquals(14L, $opt$XorLit8(1L));
+ expectEquals(15L, $opt$XorLit8(0L));
+ expectEquals(12L, $opt$XorLit8(3L));
+ expectEquals(-5L, $opt$XorLit8(-12L));
+
+ expectEquals(0xFF01L, $opt$XorLit16(1L));
+ expectEquals(0xFF00L, $opt$XorLit16(0L));
+ expectEquals(0xFF03L, $opt$XorLit16(3L));
+ expectEquals(-0xFF0cL, $opt$XorLit16(-12L));
+ }
+
+ static int $opt$Xor(int a, int b) {
+ return a ^ b;
+ }
+
+ static int $opt$XorLit8(int a) {
+ return a ^ 0xF;
+ }
+
+ static int $opt$XorLit16(int a) {
+ return a ^ 0xFF00;
+ }
+
+ static long $opt$Xor(long a, long b) {
+ return a ^ b;
+ }
+
+ static long $opt$XorLit8(long a) {
+ return a ^ 0xF;
+ }
+
+ static long $opt$XorLit16(long a) {
+ return a ^ 0xFF00;
+ }
+}
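Editorial sketch (not part of the patch): the Lit8/Lit16 helpers exist so that dx emits the and/or/xor-int/lit8 and /lit16 instruction forms (8- and 16-bit immediates) rather than the two-register forms. Two of the less obvious expected values above, worked out on the two's-complement bit patterns in a standalone check:

public class BitwiseSketch {
  public static void main(String[] args) {
    // -12 is 0xFFFFFFF4, so masking with 0xFF00 keeps bits 8..15:
    // 0x0000FF00 == 65280, matching $opt$AndLit16(-12).
    System.out.println((-12 & 0xFF00) == 65280);

    // XOR with 0xFF00 flips bits 8..15: 0xFFFFFFF4 ^ 0x0000FF00 == 0xFFFF00F4,
    // which is -0xFF0C, matching $opt$XorLit16(-12).
    System.out.println((-12 ^ 0xFF00) == -0xFF0C);
  }
}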
diff --git a/test/427-bounds/expected.txt b/test/427-bounds/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/427-bounds/expected.txt
diff --git a/test/427-bounds/info.txt b/test/427-bounds/info.txt
new file mode 100644
index 0000000..8b8b957
--- /dev/null
+++ b/test/427-bounds/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler that used to incorrectly pass
+index and/or length to the pThrowArrayBounds entrypoint.
diff --git a/test/427-bounds/src/Main.java b/test/427-bounds/src/Main.java
new file mode 100644
index 0000000..a2d84d2
--- /dev/null
+++ b/test/427-bounds/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ Exception exception = null;
+ try {
+ $opt$Throw(new int[1]);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ exception = e;
+ }
+
+ String exceptionMessage = exception.getMessage();
+
+ // Note that it's ART specific to emit the length.
+ if (exceptionMessage.contains("length")) {
+ if (!exceptionMessage.contains("length=1")) {
+ throw new Error("Wrong length in exception message");
+ }
+ }
+
+ // Note that it's ART specific to emit the index.
+ if (exceptionMessage.contains("index")) {
+ if (!exceptionMessage.contains("index=2")) {
+ throw new Error("Wrong index in exception message");
+ }
+ }
+ }
+
+ static void $opt$Throw(int[] array) {
+ // We fetch the length first, to ensure it is in EAX (on x86).
+ // The pThrowArrayBounds entrypoint expects the index in EAX and the
+ // length in ECX, and the optimizing compiler used to write to EAX
+ // before putting the length in ECX.
+ int length = array.length;
+ array[2] = 42;
+ }
+}
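Editorial sketch (not part of the patch): the regression only shows up in the values carried into the ArrayIndexOutOfBoundsException, which is why the test inspects the message rather than just catching the exception. A standalone illustration of what it checks; the exact message format is ART-specific, so this only prints whatever the runtime provides:

public class BoundsMessageSketch {
  public static void main(String[] args) {
    int[] array = new int[1];
    try {
      array[2] = 42;
      throw new Error("Expected ArrayIndexOutOfBoundsException");
    } catch (ArrayIndexOutOfBoundsException e) {
      // On ART the message includes both values (roughly "length=1; index=2");
      // other runtimes may report less.
      System.out.println(e.getMessage());
    }
  }
}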
diff --git a/test/428-optimizing-arith-rem/expected.txt b/test/428-optimizing-arith-rem/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/428-optimizing-arith-rem/expected.txt
diff --git a/test/428-optimizing-arith-rem/info.txt b/test/428-optimizing-arith-rem/info.txt
new file mode 100644
index 0000000..3e37ffe
--- /dev/null
+++ b/test/428-optimizing-arith-rem/info.txt
@@ -0,0 +1 @@
+Tests for modulo (rem) operation.
diff --git a/test/428-optimizing-arith-rem/src/Main.java b/test/428-optimizing-arith-rem/src/Main.java
new file mode 100644
index 0000000..46bd3c6
--- /dev/null
+++ b/test/428-optimizing-arith-rem/src/Main.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectDivisionByZero(int value) {
+ try {
+ $opt$Rem(value, 0);
+ throw new Error("Expected RuntimeException when modulo by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$RemZero(value);
+ throw new Error("Expected RuntimeException when modulo by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
+
+ public static void expectDivisionByZero(long value) {
+ try {
+ $opt$Rem(value, 0L);
+ throw new Error("Expected RuntimeException when modulo by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$RemZero(value);
+ throw new Error("Expected RuntimeException when modulo by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
+
+ public static void main(String[] args) {
+ rem();
+ }
+
+ public static void rem() {
+ remInt();
+ remLong();
+ }
+
+ private static void remInt() {
+ expectEquals(2, $opt$RemConst(6));
+ expectEquals(2, $opt$Rem(6, 4));
+ expectEquals(2, $opt$Rem(6, -4));
+ expectEquals(0, $opt$Rem(6, 3));
+ expectEquals(0, $opt$Rem(6, -3));
+ expectEquals(0, $opt$Rem(6, 1));
+ expectEquals(0, $opt$Rem(6, -1));
+ expectEquals(-1, $opt$Rem(-7, 3));
+ expectEquals(-1, $opt$Rem(-7, -3));
+ expectEquals(0, $opt$Rem(6, 6));
+ expectEquals(0, $opt$Rem(-6, -6));
+ expectEquals(7, $opt$Rem(7, 9));
+ expectEquals(7, $opt$Rem(7, -9));
+ expectEquals(-7, $opt$Rem(-7, 9));
+ expectEquals(-7, $opt$Rem(-7, -9));
+
+ expectEquals(0, $opt$Rem(Integer.MAX_VALUE, 1));
+ expectEquals(0, $opt$Rem(Integer.MAX_VALUE, -1));
+ expectEquals(0, $opt$Rem(Integer.MIN_VALUE, 1));
+ expectEquals(0, $opt$Rem(Integer.MIN_VALUE, -1)); // no overflow
+ expectEquals(-1, $opt$Rem(Integer.MIN_VALUE, Integer.MAX_VALUE));
+ expectEquals(Integer.MAX_VALUE, $opt$Rem(Integer.MAX_VALUE, Integer.MIN_VALUE));
+
+ expectEquals(0, $opt$Rem(0, 7));
+ expectEquals(0, $opt$Rem(0, Integer.MAX_VALUE));
+ expectEquals(0, $opt$Rem(0, Integer.MIN_VALUE));
+
+ expectDivisionByZero(0);
+ expectDivisionByZero(1);
+ expectDivisionByZero(5);
+ expectDivisionByZero(Integer.MAX_VALUE);
+ expectDivisionByZero(Integer.MIN_VALUE);
+ }
+
+ private static void remLong() {
+ expectEquals(2L, $opt$RemConst(6L));
+ expectEquals(2L, $opt$Rem(6L, 4L));
+ expectEquals(2L, $opt$Rem(6L, -4L));
+ expectEquals(0L, $opt$Rem(6L, 3L));
+ expectEquals(0L, $opt$Rem(6L, -3L));
+ expectEquals(0L, $opt$Rem(6L, 1L));
+ expectEquals(0L, $opt$Rem(6L, -1L));
+ expectEquals(-1L, $opt$Rem(-7L, 3L));
+ expectEquals(-1L, $opt$Rem(-7L, -3L));
+ expectEquals(0L, $opt$Rem(6L, 6L));
+ expectEquals(0L, $opt$Rem(-6L, -6L));
+ expectEquals(7L, $opt$Rem(7L, 9L));
+ expectEquals(7L, $opt$Rem(7L, -9L));
+ expectEquals(-7L, $opt$Rem(-7L, 9L));
+ expectEquals(-7L, $opt$Rem(-7L, -9L));
+
+ expectEquals(0L, $opt$Rem(Integer.MAX_VALUE, 1L));
+ expectEquals(0L, $opt$Rem(Integer.MAX_VALUE, -1L));
+ expectEquals(0L, $opt$Rem(Integer.MIN_VALUE, 1L));
+ expectEquals(0L, $opt$Rem(Integer.MIN_VALUE, -1L)); // no overflow
+ expectEquals(-1L, $opt$Rem(Integer.MIN_VALUE, Integer.MAX_VALUE));
+ expectEquals(Integer.MAX_VALUE, $opt$Rem(Integer.MAX_VALUE, Integer.MIN_VALUE));
+
+ expectEquals(0L, $opt$Rem(0L, 7L));
+ expectEquals(0L, $opt$Rem(0L, Integer.MAX_VALUE));
+ expectEquals(0L, $opt$Rem(0L, Integer.MIN_VALUE));
+
+ expectDivisionByZero(0L);
+ expectDivisionByZero(1L);
+ expectDivisionByZero(5L);
+ expectDivisionByZero(Integer.MAX_VALUE);
+ expectDivisionByZero(Integer.MIN_VALUE);
+ }
+
+ static int $opt$Rem(int a, int b) {
+ return a % b;
+ }
+
+ static int $opt$RemZero(int a) {
+ return a % 0;
+ }
+
+ // Modulo by a non-zero literal should not generate a divide-by-zero check.
+ static int $opt$RemConst(int a) {
+ return a % 4;
+ }
+
+ static long $opt$RemConst(long a) {
+ return a % 4L;
+ }
+
+ static long $opt$Rem(long a, long b) {
+ return a % b;
+ }
+
+ static long $opt$RemZero(long a) {
+ return a % 0L;
+ }
+}
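Editorial sketch (not part of the patch): two properties drive most of the expected values above, the remainder takes the sign of the dividend, and MIN_VALUE % -1 is defined as 0, so it cannot overflow the way MIN_VALUE / -1 does. A short standalone check of both:

public class RemSketch {
  public static void main(String[] args) {
    // The remainder takes the sign of the dividend, not the divisor.
    System.out.println(-7 % 3);   // -1
    System.out.println(7 % -3);   //  1

    // MIN_VALUE % -1 is 0 (no overflow possible), whereas MIN_VALUE / -1
    // silently wraps back to MIN_VALUE.
    System.out.println(Integer.MIN_VALUE % -1);  // 0
    System.out.println(Integer.MIN_VALUE / -1);  // -2147483648
  }
}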
diff --git a/test/429-ssa-builder/expected.txt b/test/429-ssa-builder/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/429-ssa-builder/expected.txt
diff --git a/test/429-ssa-builder/info.txt b/test/429-ssa-builder/info.txt
new file mode 100644
index 0000000..509d00f
--- /dev/null
+++ b/test/429-ssa-builder/info.txt
@@ -0,0 +1,3 @@
+Regression test for the type propagation phase of the optimizing
+compiler, which used to crash when dealing with phi floating-point
+equivalents.
diff --git a/test/429-ssa-builder/src/Main.java b/test/429-ssa-builder/src/Main.java
new file mode 100644
index 0000000..32fcef0
--- /dev/null
+++ b/test/429-ssa-builder/src/Main.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ if (new Main().$opt$TestFloatPhi() != 33.0f) {
+ throw new Error("Unexpected result");
+ }
+ }
+
+ public float $opt$TestFloatPhi() {
+ float a = floatField;
+ float b = 42.0f;
+ if (test1) {
+ // The phi for `a` will be found to be of type float.
+ a = otherFloatField;
+ // The phi for `b` will be found to be of type int (constants in DEX).
+ b = 33.0f;
+ }
+ // Use a different condition to keep dx from being too clever.
+ if (test2) {
+ // Type propagation now realizes that `b` must be of type float. So
+ // it requests a float equivalent for `b`. Because the phi for `a` is
+ // next to the phi for `b` in the phi list, the compiler used to crash,
+ // assuming that a float phi following a phi *must* be for the same DEX
+ // register.
+ a = b;
+ }
+ return a;
+ }
+
+ float floatField = 4.2f;
+ float otherFloatField = 42.2f;
+ boolean test1 = true;
+ boolean test2 = true;
+}
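Editorial sketch (not part of the patch): the comments in the test hinge on the fact that a DEX constant such as 33.0f is stored as untyped 32-bit raw bits; the type only becomes known when a use demands it, which is what forces the SSA builder to create a float "equivalent" for the phi. A small, purely illustrative look at that raw-bits view:

public class FloatConstantSketch {
  public static void main(String[] args) {
    // 33.0f is held in DEX as an untyped 32-bit constant; these are its bits.
    int rawBits = Float.floatToRawIntBits(33.0f);
    System.out.println(Integer.toHexString(rawBits));   // 42040000
    // Reinterpreting the same bits as a float recovers the value.
    System.out.println(Float.intBitsToFloat(rawBits));  // 33.0
  }
}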
diff --git a/test/430-live-register-slow-path/expected.txt b/test/430-live-register-slow-path/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/430-live-register-slow-path/expected.txt
diff --git a/test/430-live-register-slow-path/info.txt b/test/430-live-register-slow-path/info.txt
new file mode 100644
index 0000000..6f2af28
--- /dev/null
+++ b/test/430-live-register-slow-path/info.txt
@@ -0,0 +1,2 @@
+Regression test for the linear scan register allocator. It used
+to miscompute the number of live registers at a safepoint.
diff --git a/test/430-live-register-slow-path/src/Main.java b/test/430-live-register-slow-path/src/Main.java
new file mode 100644
index 0000000..b84e647
--- /dev/null
+++ b/test/430-live-register-slow-path/src/Main.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ $opt$TestSlowPath();
+ }
+
+ public static void $opt$TestSlowPath() {
+ Object[] o = bar();
+ assertEquals(0, o.length);
+ // The slow path of the instanceof check requires the live register
+ // holding `o` to be saved before going into the runtime. The linear
+ // scan register allocator used to miscompute the number of
+ // live registers at a safepoint, so the place at which the register
+ // was saved was wrong.
+ doCall(o instanceof Interface[], o);
+ }
+
+ public static void assertEquals(int a, int b) {}
+ public static boolean doCall(boolean val, Object o) { return val; }
+
+ static Object[] bar() { return new Object[0]; }
+
+ static interface Interface {}
+}
diff --git a/test/703-floating-point-div/expected.txt b/test/703-floating-point-div/expected.txt
new file mode 100644
index 0000000..76f5a5a
--- /dev/null
+++ b/test/703-floating-point-div/expected.txt
@@ -0,0 +1 @@
+Done!
diff --git a/test/703-floating-point-div/info.txt b/test/703-floating-point-div/info.txt
new file mode 100644
index 0000000..418b831
--- /dev/null
+++ b/test/703-floating-point-div/info.txt
@@ -0,0 +1 @@
+Simple tests to check floating point division.
diff --git a/test/703-floating-point-div/src/Main.java b/test/703-floating-point-div/src/Main.java
new file mode 100644
index 0000000..9990a54
--- /dev/null
+++ b/test/703-floating-point-div/src/Main.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ static double dPi = Math.PI;
+ static float fPi = (float)Math.PI;
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void divDoubleTest() {
+ double d1 = 0x1.0p1023;
+ double d2 = -2.0;
+ double d3 = 0.0;
+ double d4 = Double.MIN_NORMAL;
+ double d5 = Double.POSITIVE_INFINITY;
+ double d6 = Double.NEGATIVE_INFINITY;
+ double d7 = -0.0;
+ double d8 = Double.MAX_VALUE;
+ double d9 = Double.MIN_VALUE;
+ double d0 = Double.NaN;
+
+ expectEquals(Double.doubleToRawLongBits(dPi/d1), 0x1921fb54442d18L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d2), 0xbff921fb54442d18L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d3), 0x7ff0000000000000L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d4), 0x7fe921fb54442d18L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d5), 0x0L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d6), 0x8000000000000000L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d7), 0xfff0000000000000L);
+
+ expectEquals(Double.doubleToRawLongBits(dPi/d8), 0xc90fdaa22168cL);
+ expectEquals(Double.doubleToRawLongBits(dPi/d9), 0x7ff0000000000000L);
+ expectEquals(Double.doubleToRawLongBits(dPi/d0), 0x7ff8000000000000L);
+ }
+
+ public static void divFloatTest() {
+ float f1 = 0x1.0p127f;
+ float f2 = -2.0f;
+ float f3 = 0.0f;
+ float f4 = Float.MIN_NORMAL;
+ float f5 = Float.POSITIVE_INFINITY;
+ float f6 = Float.NEGATIVE_INFINITY;
+ float f7 = -0.0f;
+ float f8 = Float.MAX_VALUE;
+ float f9 = Float.MIN_VALUE;
+ float f0 = Float.NaN;
+
+ expectEquals(Float.floatToRawIntBits(fPi/f1), 0xc90fdb);
+ expectEquals(Float.floatToRawIntBits(fPi/f2), 0xbfc90fdb);
+ expectEquals(Float.floatToRawIntBits(fPi/f3), 0x7f800000);
+ expectEquals(Float.floatToRawIntBits(fPi/f4), 0x7f490fdb);
+ expectEquals(Float.floatToRawIntBits(fPi/f5), 0x0);
+ expectEquals(Float.floatToRawIntBits(fPi/f6), 0x80000000);
+ expectEquals(Float.floatToRawIntBits(fPi/f7), 0xff800000);
+
+ expectEquals(Float.floatToRawIntBits(fPi/f8), 0x6487ee);
+ expectEquals(Float.floatToRawIntBits(fPi/f9), 0x7f800000);
+ expectEquals(Float.floatToRawIntBits(fPi/f0), 0x7fc00000);
+ }
+
+ public static void main(String[] args) {
+ divDoubleTest();
+ divFloatTest();
+ System.out.println("Done!");
+ }
+
+}
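The expected values above are raw IEEE-754 bit patterns. A minimal sketch for deriving or cross-checking them on a reference JVM (the helper class PrintDivBits is hypothetical and not part of the test):

    public class PrintDivBits {
        // Illustrative helper only: prints the raw IEEE-754 bit patterns of a few
        // of the divisions exercised by divDoubleTest()/divFloatTest(), so the
        // hexadecimal constants in the test can be checked against a reference JVM.
        public static void main(String[] args) {
            double dPi = Math.PI;
            float fPi = (float) Math.PI;
            // pi / +infinity underflows to +0.0, whose raw bits are 0x0.
            System.out.println(Long.toHexString(Double.doubleToRawLongBits(dPi / Double.POSITIVE_INFINITY)));
            // pi / 0.0 is +infinity: 0x7ff0000000000000.
            System.out.println(Long.toHexString(Double.doubleToRawLongBits(dPi / 0.0)));
            // pi / NaN is NaN; the test expects the canonical quiet NaN 0x7ff8000000000000.
            System.out.println(Long.toHexString(Double.doubleToRawLongBits(dPi / Double.NaN)));
            // Float variant: (float) pi / -2.0f gives -pi/2, i.e. 0xbfc90fdb.
            System.out.println(Integer.toHexString(Float.floatToRawIntBits(fPi / -2.0f)));
        }
    }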
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 468e7a6..7674a8a 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -1,2 +1,9 @@
b/17790197
+b/17978759
+FloatBadArgReg
+negLong
+sameFieldNames
+b/18380491
+invoke-super abstract
+BadCaseInOpRegRegReg
Done!
diff --git a/test/800-smali/info.txt b/test/800-smali/info.txt
index cfcc230..3022962 100644
--- a/test/800-smali/info.txt
+++ b/test/800-smali/info.txt
@@ -1,4 +1,4 @@
Smali-based tests.
-Will compile and run all the smali files in src/ and run the test cases mentioned in src/Main.java.
+Will compile and run all the smali files in smali/ and run the test cases mentioned in src/Main.java.
Obviously needs to run under Dalvik or ART.
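As described above, each smali file under smali/ is paired with an entry in the testCases list built by src/Main.java (see the Main.java hunk later in this change). Registering a new case is a one-line addition; a hedged sketch, where MyNewTest is a hypothetical class defined in smali/MyNewTest.smali with a static getInt()I method:

    // Arguments: test name (also listed in expected.txt), class name, method name,
    // invocation arguments, expected exception (or null), expected return value.
    testCases.add(new TestCase("MyNewTest", "MyNewTest", "getInt", null, null, 42));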
diff --git a/test/800-smali/smali/BadCaseInOpRegRegReg.smali b/test/800-smali/smali/BadCaseInOpRegRegReg.smali
new file mode 100644
index 0000000..2683790
--- /dev/null
+++ b/test/800-smali/smali/BadCaseInOpRegRegReg.smali
@@ -0,0 +1,13 @@
+.class public LBadCaseInOpRegRegReg;
+
+.super Ljava/lang/Object;
+
+.method public static getInt()I
+ .registers 2
+ const/4 v0, 0x0
+ const/4 v1, 0x1
+ add-int/2addr v0, v1
+ add-int/lit8 v1, v0, 0x1
+ mul-int v0, v1, v0
+ return v0
+.end method
diff --git a/test/800-smali/smali/FloatBadArgReg.smali b/test/800-smali/smali/FloatBadArgReg.smali
new file mode 100644
index 0000000..719ba09
--- /dev/null
+++ b/test/800-smali/smali/FloatBadArgReg.smali
@@ -0,0 +1,16 @@
+.class public LFloatBadArgReg;
+
+.super Ljava/lang/Object;
+
+.method public static getInt(I)I
+ .registers 2
+ const/4 v0, 0x0
+ if-ne v0, v0, :after
+ float-to-int v0, v0
+ :exit
+ add-int/2addr v0, v1
+ return v0
+ :after
+ move v1, v0
+ goto :exit
+.end method
diff --git a/test/800-smali/src/b_17790197.smali b/test/800-smali/smali/b_17790197.smali
similarity index 100%
rename from test/800-smali/src/b_17790197.smali
rename to test/800-smali/smali/b_17790197.smali
diff --git a/test/800-smali/smali/b_17978759.smali b/test/800-smali/smali/b_17978759.smali
new file mode 100644
index 0000000..07bcae5
--- /dev/null
+++ b/test/800-smali/smali/b_17978759.smali
@@ -0,0 +1,28 @@
+.class public LB17978759;
+.super Ljava/lang/Object;
+
+ .method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+ .end method
+
+ .method public test()V
+ .registers 2
+
+ move-object v0, p0
+ # v0 and p0 alias
+ monitor-enter p0
+ # monitor-enter on p0
+ monitor-exit v0
+ # monitor-exit on v0; however, the verifier doesn't track the aliasing, so this
+ # only produces a warning. The verifier will still think p0 is locked.
+
+ move-object v0, p0
+ # v0 will now appear locked.
+ monitor-enter v0
+ # Attempt to lock v0 twice is a verifier failure.
+ monitor-exit v0
+
+ return-void
+ .end method
diff --git a/test/800-smali/smali/b_18380491AbstractBase.smali b/test/800-smali/smali/b_18380491AbstractBase.smali
new file mode 100644
index 0000000..7aa1b1a
--- /dev/null
+++ b/test/800-smali/smali/b_18380491AbstractBase.smali
@@ -0,0 +1,12 @@
+.class public LB18380491ActractBase;
+
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public abstract foo(I)I
+.end method
diff --git a/test/800-smali/smali/b_18380491ConcreteClass.smali b/test/800-smali/smali/b_18380491ConcreteClass.smali
new file mode 100644
index 0000000..db5ef3b
--- /dev/null
+++ b/test/800-smali/smali/b_18380491ConcreteClass.smali
@@ -0,0 +1,19 @@
+.class public LB18380491ConcreteClass;
+
+.super LB18380491ActractBase;
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, LB18380491ActractBase;-><init>()V
+ return-void
+.end method
+
+.method public foo(I)I
+ .locals 1
+ if-eqz p1, :invoke_super_abstract
+ return p1
+ :invoke_super_abstract
+ invoke-super {p0, p1}, LB18380491ActractBase;->foo(I)I
+ move-result v0
+ return v0
+.end method
diff --git a/test/800-smali/smali/negLong.smali b/test/800-smali/smali/negLong.smali
new file mode 100755
index 0000000..29d416e
--- /dev/null
+++ b/test/800-smali/smali/negLong.smali
@@ -0,0 +1,186 @@
+.class public LnegLong;
+.super Ljava/lang/Object;
+.source "negLong.java"
+# static fields
+.field public static final N:I = 0x64
+.field public static i:I
+# direct methods
+.method static constructor <clinit>()V
+ .registers 1
+ .prologue
+ .line 5
+ const/16 v0, 0x44da
+ sput v0, LnegLong;->i:I
+ return-void
+.end method
+.method public constructor <init>()V
+ .registers 1
+ .prologue
+ .line 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+.method public static checkSum1([S)J
+ .registers 7
+ .prologue
+ .line 14
+ array-length v3, p0
+ .line 15
+ const-wide/16 v0, 0x0
+ .line 16
+ const/4 v2, 0x0
+ :goto_4
+ if-ge v2, v3, :cond_d
+ .line 17
+ aget-short v4, p0, v2
+ int-to-long v4, v4
+ add-long/2addr v0, v4
+ .line 16
+ add-int/lit8 v2, v2, 0x1
+ goto :goto_4
+ .line 18
+ :cond_d
+ return-wide v0
+.end method
+.method public static init1([SS)V
+ .registers 4
+ .prologue
+ .line 8
+ array-length v1, p0
+ .line 9
+ const/4 v0, 0x0
+ :goto_2
+ if-ge v0, v1, :cond_9
+ .line 10
+ aput-short p1, p0, v0
+ .line 9
+ add-int/lit8 v0, v0, 0x1
+ goto :goto_2
+ .line 11
+ :cond_9
+ return-void
+.end method
+.method public static main([Ljava/lang/String;)V
+ .registers 6
+ .prologue
+ .line 50
+ invoke-static {}, LnegLong;->negLong()J
+ move-result-wide v0
+ .line 51
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ new-instance v3, Ljava/lang/StringBuilder;
+ invoke-direct {v3}, Ljava/lang/StringBuilder;-><init>()V
+ const-string v4, "nbp ztw p = "
+ invoke-virtual {v3, v4}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ move-result-object v3
+ invoke-virtual {v3, v0, v1}, Ljava/lang/StringBuilder;->append(J)Ljava/lang/StringBuilder;
+ move-result-object v0
+ invoke-virtual {v0}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v2, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ .line 52
+ return-void
+.end method
+.method public static negLong()J
+ .registers 17
+ .prologue
+ .line 23
+ const-wide v1, -0x4c4a1f4aa9b1db83L
+ .line 24
+ const v7, -0x3f727efa
+ .line 25
+ const/16 v4, -0x284b
+ const v3, 0xdc01
+ .line 26
+ const/16 v0, 0x64
+ new-array v8, v0, [S
+ .line 28
+ const/16 v0, 0x1c60
+ invoke-static {v8, v0}, LnegLong;->init1([SS)V
+ .line 29
+ const/4 v0, 0x2
+ move v6, v0
+ :goto_18
+ const/16 v0, 0x56
+ if-ge v6, v0, :cond_64
+ .line 30
+ const/4 v0, 0x1
+ move v5, v0
+ move v0, v3
+ move-wide v15, v1
+ move-wide v2, v15
+ :goto_21
+ if-ge v5, v6, :cond_5d
+ .line 31
+ int-to-float v0, v4
+ neg-float v1, v7
+ add-float/2addr v0, v1
+ float-to-int v1, v0
+ .line 32
+ const/4 v0, 0x1
+ move v4, v1
+ move-wide v15, v2
+ move-wide v1, v15
+ .line 33
+ :goto_2b
+ add-int/lit8 v3, v0, 0x1
+ const/16 v0, 0x1b
+ if-ge v3, v0, :cond_3a
+ .line 35
+ int-to-long v9, v5
+ mul-long v0, v9, v1
+ neg-long v1, v0
+ .line 38
+ sget v0, LnegLong;->i:I
+ move v4, v0
+ move v0, v3
+ goto :goto_2b
+ .line 40
+ :cond_3a
+ aget-short v0, v8, v6
+ int-to-double v9, v0
+ long-to-double v11, v1
+ const-wide v13, 0x403f9851eb851eb8L
+ sub-double/2addr v11, v13
+ add-double/2addr v9, v11
+ double-to-int v0, v9
+ int-to-short v0, v0
+ aput-short v0, v8, v6
+ .line 41
+ const/4 v0, 0x2
+ :goto_4a
+ const/16 v9, 0x43
+ if-ge v0, v9, :cond_56
+ .line 42
+ neg-long v9, v1
+ const-wide/16 v11, 0x1
+ or-long/2addr v9, v11
+ add-long/2addr v1, v9
+ .line 41
+ add-int/lit8 v0, v0, 0x1
+ goto :goto_4a
+ .line 30
+ :cond_56
+ add-int/lit8 v0, v5, 0x1
+ move v5, v0
+ move v0, v3
+ move-wide v15, v1
+ move-wide v2, v15
+ goto :goto_21
+ .line 29
+ :cond_5d
+ add-int/lit8 v1, v6, 0x1
+ move v6, v1
+ move-wide v15, v2
+ move-wide v1, v15
+ move v3, v0
+ goto :goto_18
+ .line 45
+ :cond_64
+ invoke-static {v8}, LnegLong;->checkSum1([S)J
+ move-result-wide v0
+ int-to-long v2, v3
+ add-long/2addr v0, v2
+ .line 46
+ return-wide v0
+.end method
diff --git a/test/800-smali/smali/sameFieldNames.smali b/test/800-smali/smali/sameFieldNames.smali
new file mode 100644
index 0000000..107161b
--- /dev/null
+++ b/test/800-smali/smali/sameFieldNames.smali
@@ -0,0 +1,64 @@
+.class public LsameFieldNames;
+.super Ljava/lang/Object;
+
+# Test multiple fields with the same name and different types.
+# (This is invalid in the Java language but valid in bytecode.)
+.field static public a:D
+.field static public a:S
+.field static public a:J
+.field static public a:F
+.field static public a:Z
+.field static public a:I
+.field static public a:B
+.field static public a:C
+.field static public a:Ljava/lang/Integer;
+.field static public a:Ljava/lang/Long;
+.field static public a:Ljava/lang/Float;
+.field static public a:Ljava/lang/Double;
+.field static public a:Ljava/lang/Boolean;
+.field static public a:Ljava/lang/Void;
+.field static public a:Ljava/lang/Short;
+.field static public a:Ljava/lang/Char;
+.field static public a:Ljava/lang/Byte;
+
+# Add some more fields to stress test the sorting for offset assignment.
+.field static public b:C
+.field static public c:J
+.field static public d:C
+.field static public e:B
+.field static public f:C
+.field static public g:J
+.field static public h:C
+.field static public i:J
+.field static public j:I
+.field static public k:J
+.field static public l:J
+.field static public m:I
+.field static public n:J
+.field static public o:I
+.field static public p:Ljava/lang/Integer;
+.field static public q:I
+.field static public r:J
+.field static public s:I
+.field static public t:Ljava/lang/Integer;
+.field static public u:I
+.field static public v:J
+.field static public w:I
+.field static public x:Ljava/lang/Integer;
+.field static public y:I
+.field static public z:Ljava/lang/Integer;
+
+.method public static getInt()I
+ .locals 2
+ const/4 v0, 2
+ sput v0, LsameFieldNames;->a:I
+ sget-object v1, LsameFieldNames;->a:Ljava/lang/Integer;
+ const/4 v1, 0
+ if-nez v1, :fail
+ const/4 v0, 7
+ :ret
+ return v0
+ :fail
+ const/4 v0, 0
+ goto :ret
+.end method
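Declaring several fields named `a` with different types is impossible in Java source, so the class above can only exist as smali/dex. A small reflection sketch (illustrative only; it assumes the dex containing sameFieldNames is already on the classpath, as the 800-smali harness arranges) shows that the runtime nevertheless keeps the fields apart:

    import java.lang.reflect.Field;

    public class DumpSameFieldNames {
        public static void main(String[] args) throws Exception {
            Class<?> c = Class.forName("sameFieldNames");
            // Print the declared type of every field called "a"; dex allows the
            // duplicate names, and each Field object resolves to a distinct type.
            for (Field f : c.getDeclaredFields()) {
                if (f.getName().equals("a")) {
                    System.out.println("a : " + f.getType().getName());
                }
            }
        }
    }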
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 0ef3a9d..8d318c3 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -14,7 +14,9 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
import java.util.LinkedList;
import java.util.List;
@@ -49,6 +51,16 @@
testCases = new LinkedList<TestCase>();
testCases.add(new TestCase("b/17790197", "B17790197", "getInt", null, null, 100));
+ testCases.add(new TestCase("b/17978759", "B17978759", "test", null, new VerifyError(), null));
+ testCases.add(new TestCase("FloatBadArgReg", "FloatBadArgReg", "getInt",
+ new Object[]{100}, null, 100));
+ testCases.add(new TestCase("negLong", "negLong", "negLong", null, null, 122142L));
+ testCases.add(new TestCase("sameFieldNames", "sameFieldNames", "getInt", null, null, 7));
+ testCases.add(new TestCase("b/18380491", "B18380491ConcreteClass", "foo",
+ new Object[]{42}, null, 42));
+ testCases.add(new TestCase("invoke-super abstract", "B18380491ConcreteClass", "foo",
+ new Object[]{0}, new AbstractMethodError(), null));
+ testCases.add(new TestCase("BadCaseInOpRegRegReg", "BadCaseInOpRegRegReg", "getInt", null, null, 2));
}
public void runTests() {
@@ -63,47 +75,62 @@
}
private void runTest(TestCase tc) throws Exception {
- Class<?> c = Class.forName(tc.testClass);
-
- Method[] methods = c.getDeclaredMethods();
-
- // For simplicity we assume that test methods are not overloaded. So searching by name
- // will give us the method we need to run.
- Method method = null;
- for (Method m : methods) {
- if (m.getName().equals(tc.testMethodName)) {
- method = m;
- break;
- }
- }
-
- if (method == null) {
- throw new IllegalArgumentException("Could not find test method " + tc.testMethodName +
- " in class " + tc.testClass + " for test " + tc.testName);
- }
-
Exception errorReturn = null;
try {
- Object retValue = method.invoke(null, tc.values);
- if (tc.expectedException != null) {
- errorReturn = new IllegalStateException("Expected an exception in test " +
- tc.testName);
+ Class<?> c = Class.forName(tc.testClass);
+
+ Method[] methods = c.getDeclaredMethods();
+
+ // For simplicity we assume that test methods are not overloaded. So searching by name
+ // will give us the method we need to run.
+ Method method = null;
+ for (Method m : methods) {
+ if (m.getName().equals(tc.testMethodName)) {
+ method = m;
+ break;
+ }
}
- if (tc.expectedReturn == null && retValue != null) {
- errorReturn = new IllegalStateException("Expected a null result in test " +
- tc.testName);
- } else if (tc.expectedReturn != null &&
- (retValue == null || !tc.expectedReturn.equals(retValue))) {
- errorReturn = new IllegalStateException("Expected return " + tc.expectedReturn +
- ", but got " + retValue);
+
+ if (method == null) {
+ errorReturn = new IllegalArgumentException("Could not find test method " +
+ tc.testMethodName + " in class " +
+ tc.testClass + " for test " +
+ tc.testName);
+ } else {
+ Object retValue;
+ if (Modifier.isStatic(method.getModifiers())) {
+ retValue = method.invoke(null, tc.values);
+ } else {
+ retValue = method.invoke(method.getDeclaringClass().newInstance(), tc.values);
+ }
+ if (tc.expectedException != null) {
+ errorReturn = new IllegalStateException("Expected an exception in test " +
+ tc.testName);
+ }
+ if (tc.expectedReturn == null && retValue != null) {
+ errorReturn = new IllegalStateException("Expected a null result in test " +
+ tc.testName);
+ } else if (tc.expectedReturn != null &&
+ (retValue == null || !tc.expectedReturn.equals(retValue))) {
+ errorReturn = new IllegalStateException("Expected return " +
+ tc.expectedReturn +
+ ", but got " + retValue);
+ } else {
+ // Expected result, do nothing.
+ }
}
- } catch (Exception exc) {
+ } catch (Throwable exc) {
if (tc.expectedException == null) {
errorReturn = new IllegalStateException("Did not expect exception", exc);
+ } else if (exc instanceof InvocationTargetException && exc.getCause() != null &&
+ exc.getCause().getClass().equals(tc.expectedException.getClass())) {
+ // Expected exception is wrapped in InvocationTargetException.
} else if (!tc.expectedException.getClass().equals(exc.getClass())) {
errorReturn = new IllegalStateException("Expected " +
- tc.expectedException.getClass().getName() +
- ", but got " + exc.getClass(), exc);
+ tc.expectedException.getClass().getName() +
+ ", but got " + exc.getClass(), exc);
+ } else {
+ // Expected exception, do nothing.
}
} finally {
if (errorReturn != null) {
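The unwrapping branch added to runTest() relies on standard reflection behavior: anything thrown by the invoked method, including an Error such as AbstractMethodError, reaches the caller wrapped in InvocationTargetException, and the original throwable is recovered with getCause(). A minimal standalone illustration (the WrapDemo class and its method are made up for this sketch):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class WrapDemo {
        // Stand-in for a test method that fails, like the "invoke-super abstract" case.
        public static int alwaysThrows() {
            throw new AbstractMethodError("demo");
        }

        public static void main(String[] args) throws Exception {
            Method m = WrapDemo.class.getDeclaredMethod("alwaysThrows");
            try {
                m.invoke(null);
            } catch (InvocationTargetException e) {
                // The real failure is the cause; this is what runTest() compares
                // against TestCase.expectedException.
                System.out.println("caught: " + e.getCause().getClass().getName());
            }
        }
    }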
diff --git a/test/801-VoidCheckCast/classes.dex b/test/801-VoidCheckCast/classes.dex
new file mode 100644
index 0000000..e6f0f02
--- /dev/null
+++ b/test/801-VoidCheckCast/classes.dex
Binary files differ
diff --git a/test/801-VoidCheckCast/expected.txt b/test/801-VoidCheckCast/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/801-VoidCheckCast/expected.txt
diff --git a/test/801-VoidCheckCast/info.txt b/test/801-VoidCheckCast/info.txt
new file mode 100644
index 0000000..422f740
--- /dev/null
+++ b/test/801-VoidCheckCast/info.txt
@@ -0,0 +1,4 @@
+A test that is only available as a DEX binary.
+
+This tests that an attempt to use check-cast with the void type doesn't
+cause the compiler to crash.
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index fd95038..c9c0475 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -24,6 +24,7 @@
004-ReferenceMap/stack_walk_refmap_jni.cc \
004-StackWalk/stack_walk_jni.cc \
004-UnsafeTest/unsafe_test.cc \
+ 051-thread/thread_test.cc \
116-nodex2oat/nodex2oat.cc \
117-nopatchoat/nopatchoat.cc \
118-noimage-dex2oat/noimage-dex2oat.cc
@@ -58,8 +59,7 @@
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
$(call set-target-local-cflags-vars,debug)
- LOCAL_SHARED_LIBRARIES += libdl libcutils
- LOCAL_STATIC_LIBRARIES := libgtest
+ LOCAL_SHARED_LIBRARIES += libdl
LOCAL_MULTILIB := both
LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_64)
@@ -68,11 +68,7 @@
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
- LOCAL_STATIC_LIBRARIES := libcutils
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
- ifeq ($(HOST_OS),linux)
- LOCAL_LDLIBS += -lrt
- endif
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
diff --git a/test/Android.libnativebridgetest.mk b/test/Android.libnativebridgetest.mk
index 5e2493c..1b20e69 100644
--- a/test/Android.libnativebridgetest.mk
+++ b/test/Android.libnativebridgetest.mk
@@ -51,7 +51,7 @@
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
$(call set-target-local-cflags-vars,debug)
- LOCAL_SHARED_LIBRARIES += libdl libcutils
+ LOCAL_SHARED_LIBRARIES += libdl
LOCAL_STATIC_LIBRARIES := libgtest
LOCAL_MULTILIB := both
LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32)
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e066a38..29da2f6 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -66,13 +66,11 @@
########################################################################
# General rules to build and run a run-test.
-# Test rule names or of the form:
-# test-art-{1: host or target}-run-test-{2: prebuild no-prebuild no-dex2oat}-
-# {3: interpreter default optimizing}-{4: relocate no-relocate relocate-no-patchoat}-
-# {5: trace or no-trace}-{6: gcstress gcverify cms}-{7: forcecopy checkjni jni}-
-# {8: no-image or image}-{9: test name}{10: 32 or 64}
TARGET_TYPES := host target
-PREBUILD_TYPES := prebuild
+PREBUILD_TYPES :=
+ifeq ($(ART_TEST_RUN_TEST_PREBUILD),true)
+ PREBUILD_TYPES += prebuild
+endif
ifeq ($(ART_TEST_RUN_TEST_NO_PREBUILD),true)
PREBUILD_TYPES += no-prebuild
endif
@@ -115,28 +113,48 @@
ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true)
IMAGE_TYPES += no-image
endif
-ADDRESS_SIZES_TARGET := $(ART_PHONY_TEST_TARGET_SUFFIX) $(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
-ADDRESS_SIZES_HOST := $(ART_PHONY_TEST_HOST_SUFFIX) $(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+ifeq ($(ART_TEST_PIC_IMAGE),true)
+ IMAGE_TYPES += picimage
+endif
+PICTEST_TYPES := nopictest
+ifeq ($(ART_TEST_PIC_TEST),true)
+ PICTEST_TYPES += pictest
+endif
+RUN_TYPES :=
+ifeq ($(ART_TEST_RUN_TEST_DEBUG),true)
+ RUN_TYPES += debug
+endif
+ifeq ($(ART_TEST_RUN_TEST_NDEBUG),true)
+ RUN_TYPES += ndebug
+endif
+ADDRESS_SIZES_TARGET := $(ART_PHONY_TEST_TARGET_SUFFIX)
+ADDRESS_SIZES_HOST := $(ART_PHONY_TEST_HOST_SUFFIX)
+ifeq ($(ART_TEST_RUN_TEST_2ND_ARCH),true)
+ ADDRESS_SIZES_TARGET += $(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ ADDRESS_SIZES_HOST += $(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+endif
ALL_ADDRESS_SIZES := 64 32
# List all run test names with number arguments agreeing with the comment above.
define all-run-test-names
$(foreach target, $(1), \
- $(foreach prebuild, $(2), \
- $(foreach compiler, $(3), \
- $(foreach relocate, $(4), \
- $(foreach trace, $(5), \
- $(foreach gc, $(6), \
- $(foreach jni, $(7), \
- $(foreach image, $(8), \
- $(foreach test, $(9), \
- $(foreach address_size, $(10), \
- test-art-$(target)-run-test-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(test)$(address_size) \
- ))))))))))
+ $(foreach run-type, $(2), \
+ $(foreach prebuild, $(3), \
+ $(foreach compiler, $(4), \
+ $(foreach relocate, $(5), \
+ $(foreach trace, $(6), \
+ $(foreach gc, $(7), \
+ $(foreach jni, $(8), \
+ $(foreach image, $(9), \
+ $(foreach pictest, $(10), \
+ $(foreach test, $(11), \
+ $(foreach address_size, $(12), \
+ test-art-$(target)-run-test-$(run-type)-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(pictest)-$(test)$(address_size) \
+ ))))))))))))
endef # all-run-test-names
# To generate a full list of tests:
-# $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
+# $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
# $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
# $(TEST_ART_RUN_TESTS), $(ALL_ADDRESS_SIZES))
@@ -152,30 +170,21 @@
# disable timing sensitive tests on "dist" builds.
ifdef dist_goal
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
-TEST_ART_BROKEN_RUN_TESTS := \
- 004-ThreadStress
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(TEST_ART_BROKEN_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_RUN_TESTS :=
-
# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
TEST_ART_BROKEN_PREBUILD_RUN_TESTS := \
116-nodex2oat
ifneq (,$(filter prebuild,$(PREBUILD_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),prebuild, \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),prebuild, \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
@@ -184,9 +193,9 @@
117-nopatchoat
ifneq (,$(filter no-prebuild,$(PREBUILD_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),no-prebuild, \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-prebuild, \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_NO_PREBUILD_TESTS :=
@@ -197,9 +206,9 @@
117-nopatchoat
ifneq (,$(filter no-relocate,$(RELOCATE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), no-relocate,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
@@ -210,16 +219,16 @@
114-ParallelGC
ifneq (,$(filter gcstress,$(GC_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),gcstress,$(JNI_TYPES), \
- $(IMAGE_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(PREBUILD_TYPES),$(COMPILER_TYPES), \
- $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),115-native-bridge, \
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
+ $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),115-native-bridge, \
$(ALL_ADDRESS_SIZES))
# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
@@ -232,26 +241,118 @@
119-noimage-patchoat
ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),no-dex2oat, \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-dex2oat, \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
- $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(PICTEST_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
ifneq (,$(filter no-image,$(IMAGE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
- $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(PICTEST_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
ifneq (,$(filter relocate-no-patchoat,$(RELOCATE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), relocate-no-patchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
+# The following tests use libarttest.so, which is linked against libartd.so, so they will
+# not work when libart.so is the one loaded.
+# TODO: Find a way to run these tests in ndebug mode.
+TEST_ART_BROKEN_NDEBUG_TESTS := \
+ 004-JniTest \
+ 004-ReferenceMap \
+ 004-SignalTest \
+ 004-StackWalk \
+ 004-UnsafeTest \
+ 051-thread \
+ 115-native-bridge \
+ 116-nodex2oat \
+ 117-nopatchoat \
+ 118-noimage-dex2oat \
+ 119-noimage-patchoat \
+
+ifneq (,$(filter ndebug,$(RUN_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
+ $(PICTEST_TYPES),$(TEST_ART_BROKEN_NDEBUG_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_NDEBUG_TESTS :=
+
+# Known broken tests for the default compiler (Quick).
+TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
+
+ifneq (,$(filter default,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_DEFAULT_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
+
+# Known broken tests for the arm64 optimizing compiler backend.
+TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
+ 003-omnibus-opcodes \
+ 004-NativeAllocations \
+ 004-ReferenceMap \
+ 005-annotations \
+ 009-instanceof \
+ 010-instance \
+ 012-math \
+ 023-many-interfaces \
+ 037-inherit \
+ 044-proxy \
+ 045-reflect-array \
+ 046-reflect \
+ 047-returns \
+ 062-character-encodings \
+ 063-process-manager \
+ 068-classloader \
+ 069-field-type \
+ 071-dexfile \
+ 083-compiler-regressions \
+ 106-exceptions2 \
+ 107-int-math2 \
+ 114-ParallelGC \
+ 201-built-in-exception-detail-messages \
+ 407-arrays \
+ 412-new-array \
+ 422-instanceof \
+ 422-type-conversion \
+ 424-checkcast \
+ 427-bounds \
+ 428-optimizing-arith-rem \
+ 430-live-register-slow-path \
+ 701-easy-div-rem \
+ 800-smali \
+
+ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
+endif
+
+TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
+
+# Known broken tests for the optimizing compiler.
+TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS := \
+ 099-vmdebug \ # b/18098594
+
+ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
+
+
# Clear variables ahead of appending to them when defining tests.
$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
$(foreach target, $(TARGET_TYPES), \
@@ -281,9 +382,13 @@
$(foreach target, $(TARGET_TYPES), \
$(foreach address_size, $(ALL_ADDRESS_SIZES), \
$(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(address_size))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+ $(foreach run_type, $(RUN_TYPES), \
+ $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run_type))_RULES :=)))
-# We need dex2oat and dalvikvm on the target as well as the core image.
-TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(2ND_TARGET_CORE_IMG_OUT)
+# We need dex2oat and dalvikvm on the target as well as the core images (all images, since we
+# sync only once).
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
# Also need libarttest.
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
@@ -297,27 +402,26 @@
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libnativebridgetest.so
endif
-# All tests require the host executables and the core images.
+# All tests require the host executables. The tests also depend on the core images, but on
+# a specific version that depends on the compiler.
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
- $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(HOST_CORE_IMG_OUT)
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
- $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(2ND_HOST_CORE_IMG_OUT)
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
endif
# Create a rule to build and run a test following the form:
-# test-art-{1: host or target}-run-test-{2: prebuild no-prebuild no-dex2oat}-
-# {3: interpreter default optimizing}-{4: relocate no-relocate relocate-no-patchoat}-
-# {5: trace or no-trace}-{6: gcstress gcverify cms}-{7: forcecopy checkjni jni}-
-# {8: no-image image}-{9: test name}{10: 32 or 64}
+# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
+# {4: interpreter default optimizing}-{5: relocate no-relocate relocate-no-patchoat}-
+# {6: trace or no-trace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
+# {9: no-image image picimage}-{10: pictest nopictest}-{11: test name}{12: 32 or 64}
define define-test-art-run-test
run_test_options :=
prereq_rule :=
@@ -340,121 +444,166 @@
$$(error found $(1) expected $(TARGET_TYPES))
endif
endif
- ifeq ($(2),prebuild)
+ ifeq ($(2),debug)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEBUG_RULES
+ else
+ ifeq ($(2),ndebug)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELEASE_RULES
+ run_test_options += -O
+ else
+ $$(error found $(2) expected $(RUN_TYPES))
+ endif
+ endif
+ ifeq ($(3),prebuild)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PREBUILD_RULES
run_test_options += --prebuild
else
- ifeq ($(2),no-prebuild)
+ ifeq ($(3),no-prebuild)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_PREBUILD_RULES
run_test_options += --no-prebuild
else
- ifeq ($(2),no-dex2oat)
+ ifeq ($(3),no-dex2oat)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_DEX2OAT_RULES
run_test_options += --no-prebuild --no-dex2oat
else
- $$(error found $(2) expected $(PREBUILD_TYPES))
+ $$(error found $(3) expected $(PREBUILD_TYPES))
endif
endif
endif
- ifeq ($(3),optimizing)
+ ifeq ($(4),optimizing)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_OPTIMIZING_RULES
- run_test_options += -Xcompiler-option --compiler-backend=Optimizing
+ run_test_options += --optimizing
else
- ifeq ($(3),interpreter)
+ ifeq ($(4),interpreter)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES
run_test_options += --interpreter
else
- ifeq ($(3),default)
+ ifeq ($(4),default)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES
else
- $$(error found $(3) expected $(COMPILER_TYPES))
+ $$(error found $(4) expected $(COMPILER_TYPES))
endif
endif
endif
- ifeq ($(4),relocate)
+
+ ifeq ($(5),relocate)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_RULES
run_test_options += --relocate
else
- ifeq ($(4),no-relocate)
+ ifeq ($(5),no-relocate)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_RELOCATE_RULES
run_test_options += --no-relocate
else
- ifeq ($(4),relocate-no-patchoat)
+ ifeq ($(5),relocate-no-patchoat)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_NO_PATCHOAT_RULES
run_test_options += --relocate --no-patchoat
else
- $$(error found $(4) expected $(RELOCATE_TYPES))
+ $$(error found $(5) expected $(RELOCATE_TYPES))
endif
endif
endif
- ifeq ($(5),trace)
+ ifeq ($(6),trace)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_TRACE_RULES
run_test_options += --trace
else
- ifeq ($(5),no-trace)
+ ifeq ($(6),no-trace)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_TRACE_RULES
else
- $$(error found $(5) expected $(TRACE_TYPES))
+ $$(error found $(6) expected $(TRACE_TYPES))
endif
endif
- ifeq ($(6),gcverify)
+ ifeq ($(7),gcverify)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_GCVERIFY_RULES
run_test_options += --gcverify
else
- ifeq ($(6),gcstress)
+ ifeq ($(7),gcstress)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_GCSTRESS_RULES
run_test_options += --gcstress
else
- ifeq ($(6),cms)
+ ifeq ($(7),cms)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_CMS_RULES
else
- $$(error found $(6) expected $(GC_TYPES))
+ $$(error found $(7) expected $(GC_TYPES))
endif
endif
endif
- ifeq ($(7),forcecopy)
+ ifeq ($(8),forcecopy)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_FORCECOPY_RULES
run_test_options += --runtime-option -Xjniopts:forcecopy
ifneq ($$(ART_TEST_JNI_FORCECOPY),true)
skip_test := true
endif
else
- ifeq ($(7),checkjni)
+ ifeq ($(8),checkjni)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_CHECKJNI_RULES
run_test_options += --runtime-option -Xcheck:jni
else
- ifeq ($(7),jni)
+ ifeq ($(8),jni)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_JNI_RULES
else
- $$(error found $(7) expected $(JNI_TYPES))
+ $$(error found $(8) expected $(JNI_TYPES))
endif
endif
endif
- ifeq ($(8),no-image)
+ ifeq ($(9),no-image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
run_test_options += --no-image
- else
- ifeq ($(8),image)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
+ # Add the core dependency. This is required for pre-building.
+ ifeq ($(1),host)
+ prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
else
- $$(error found $(8) expected $(IMAGE_TYPES))
+ prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+ endif
+ else
+ ifeq ($(9),image)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
+ # Add the core dependency.
+ ifeq ($(1),host)
+ prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+ else
+ prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+ endif
+ else
+ ifeq ($(9),picimage)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
+ run_test_options += --pic-image
+ ifeq ($(1),host)
+ prereq_rule += $(HOST_CORE_IMAGE_$(4)_pic_$(12))
+ else
+ prereq_rule += $(TARGET_CORE_IMAGE_$(4)_pic_$(12))
+ endif
+ else
+ $$(error found $(9) expected $(IMAGE_TYPES))
+ endif
endif
endif
- # $(9) is the test name
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(9))_RULES
- ifeq ($(10),64)
+ ifeq ($(10),pictest)
+ run_test_options += --pic-test
+ else
+ ifeq ($(10),nopictest)
+ # Nothing to be done.
+ else
+ $$(error found $(10) expected $(PICTEST_TYPES))
+ endif
+ endif
+ # $(11) is the test name
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(11))_RULES
+ ifeq ($(12),64)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_64_RULES
run_test_options += --64
else
- ifeq ($(10),32)
+ ifeq ($(12),32)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_32_RULES
else
- $$(error found $(10) expected $(ALL_ADDRESS_SIZES))
+ $$(error found $(12) expected $(ALL_ADDRESS_SIZES))
endif
endif
- run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)$(10)
+ run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)$(12)
run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
$$(run_test_options)
+ ifneq ($(ART_TEST_ANDROID_ROOT),)
+ run_test_options := --android-root $(ART_TEST_ANDROID_ROOT) $$(run_test_options)
+ endif
$$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
.PHONY: $$(run_test_rule_name)
$$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/dexmerger $$(prereq_rule)
@@ -462,7 +611,7 @@
DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
- art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(9) \
+ art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(11) \
&& $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
$$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
@@ -480,16 +629,18 @@
$(foreach target, $(TARGET_TYPES), \
$(foreach test, $(TEST_ART_RUN_TESTS), \
- $(foreach address_size, $(ADDRESS_SIZES_$(call name-to-var,$(target))), \
- $(foreach prebuild, $(PREBUILD_TYPES), \
- $(foreach compiler, $(COMPILER_TYPES), \
- $(foreach relocate, $(RELOCATE_TYPES), \
- $(foreach trace, $(TRACE_TYPES), \
- $(foreach gc, $(GC_TYPES), \
- $(foreach jni, $(JNI_TYPES), \
- $(foreach image, $(IMAGE_TYPES), \
- $(eval $(call define-test-art-run-test,$(target),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(test),$(address_size))) \
- ))))))))))
+ $(foreach run_type, $(RUN_TYPES), \
+ $(foreach address_size, $(ADDRESS_SIZES_$(call name-to-var,$(target))), \
+ $(foreach prebuild, $(PREBUILD_TYPES), \
+ $(foreach compiler, $(COMPILER_TYPES), \
+ $(foreach relocate, $(RELOCATE_TYPES), \
+ $(foreach trace, $(TRACE_TYPES), \
+ $(foreach gc, $(GC_TYPES), \
+ $(foreach jni, $(JNI_TYPES), \
+ $(foreach image, $(IMAGE_TYPES), \
+ $(foreach pictest, $(PICTEST_TYPES), \
+ $(eval $(call define-test-art-run-test,$(target),$(run_type),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(pictest),$(test),$(address_size))) \
+ ))))))))))))
define-test-art-run-test :=
# Define a phony rule whose purpose is to test its prerequisites.
@@ -509,6 +660,9 @@
$(foreach prebuild, $(PREBUILD_TYPES), $(eval \
$(call define-test-art-run-test-group,test-art-$(target)-run-test-$(prebuild),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(prebuild))_RULES)))))
$(foreach target, $(TARGET_TYPES), \
+ $(foreach run-type, $(RUN_TYPES), $(eval \
+ $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(run-type),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run-type))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
$(foreach compiler, $(COMPILER_TYPES), $(eval \
$(call define-test-art-run-test-group,test-art-$(target)-run-test-$(compiler),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(compiler))_RULES)))))
$(foreach target, $(TARGET_TYPES), \
@@ -562,6 +716,9 @@
$(foreach target, $(TARGET_TYPES), \
$(foreach address_size, $(ALL_ADDRESS_SIZES), \
$(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(address_size))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+ $(foreach run_type, $(RUN_TYPES), \
+ $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(run_type))_RULES :=)))
define-test-art-run-test-group :=
TARGET_TYPES :=
PREBUILD_TYPES :=
@@ -574,6 +731,7 @@
ADDRESS_SIZES_TARGET :=
ADDRESS_SIZES_HOST :=
ALL_ADDRESS_SIZES :=
+RUN_TYPES :=
include $(LOCAL_PATH)/Android.libarttest.mk
include art/test/Android.libnativebridgetest.mk
diff --git a/test/etc/default-build b/test/etc/default-build
index faafc1f..6731ad3 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -17,19 +17,29 @@
# Stop if something fails.
set -e
+if [ -e classes.dex ]; then
+ zip $TEST_NAME.jar classes.dex
+ exit 0
+fi
+
mkdir classes
${JAVAC} -d classes `find src -name '*.java'`
-if [ -r src2 ]; then
+if [ -d src2 ]; then
${JAVAC} -d classes `find src2 -name '*.java'`
fi
if [ ${NEED_DEX} = "true" ]; then
${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
- zip $TEST_NAME.jar classes.dex
fi
-if [ -r src-ex ]; then
+if [ -d smali ]; then
+ # Compile Smali classes
+ ${SMALI} -JXmx256m --output smali_classes.dex `find smali -name '*.smali'`
+ ${DXMERGER} classes.dex classes.dex smali_classes.dex
+fi
+
+if [ -d src-ex ]; then
mkdir classes-ex
${JAVAC} -d classes-ex -cp classes `find src-ex -name '*.java'`
if [ ${NEED_DEX} = "true" ]; then
@@ -43,3 +53,7 @@
mv classes-1.dex classes.dex
fi
fi
+
+if [ ${NEED_DEX} = "true" ]; then
+ zip $TEST_NAME.jar classes.dex
+fi
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 82d47d7..d2cd8ab 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -8,9 +8,11 @@
fi
}
+ANDROID_ROOT="/system"
ARCHITECTURES_32="(arm|x86|mips|none)"
ARCHITECTURES_64="(arm64|x86_64|none)"
ARCHITECTURES_PATTERN="${ARCHITECTURES_32}"
+BOOT_IMAGE=""
COMPILE_FLAGS=""
DALVIKVM="dalvikvm32"
DEBUGGER="n"
@@ -26,16 +28,21 @@
INTERPRETER="n"
INVOKE_WITH=""
ISA=x86
+LIBRARY_DIRECTORY="lib"
+MAIN=""
OPTIMIZE="y"
PATCHOAT=""
PREBUILD="y"
QUIET="n"
RELOCATE="y"
+SECONDARY_DEX=""
+TIME_OUT="y"
+TIME_OUT_VALUE=5m
USE_GDB="n"
USE_JVM="n"
VERIFY="y"
ZYGOTE=""
-MAIN=""
+DEX_VERIFY=""
while true; do
if [ "x$1" = "x--quiet" ]; then
@@ -62,8 +69,7 @@
shift
elif [ "x$1" = "x--boot" ]; then
shift
- DALVIKVM_BOOT_OPT="$1"
- DEX2OAT_BOOT_OPT="--boot-image=${1#-Ximage:}"
+ BOOT_IMAGE="$1"
shift
elif [ "x$1" = "x--no-dex2oat" ]; then
DEX2OAT="-Xcompiler:${FALSE_BIN}"
@@ -82,6 +88,7 @@
shift
elif [ "x$1" = "x--host" ]; then
HOST="y"
+ ANDROID_ROOT="$ANDROID_HOST_OUT"
shift
elif [ "x$1" = "x--no-prebuild" ]; then
PREBUILD="n"
@@ -89,12 +96,17 @@
elif [ "x$1" = "x--no-image" ]; then
HAVE_IMAGE="n"
shift
+ elif [ "x$1" = "x--secondary" ]; then
+ SECONDARY_DEX=":$DEX_LOCATION/$TEST_NAME-ex.jar"
+ shift
elif [ "x$1" = "x--debug" ]; then
DEBUGGER="y"
+ TIME_OUT="n"
shift
elif [ "x$1" = "x--gdb" ]; then
USE_GDB="y"
DEV_MODE="y"
+ TIME_OUT="n"
shift
elif [ "x$1" = "x--zygote" ]; then
ZYGOTE="-Xzygote"
@@ -127,6 +139,10 @@
elif [ "x$1" = "x--no-optimize" ]; then
OPTIMIZE="n"
shift
+ elif [ "x$1" = "x--android-root" ]; then
+ shift
+ ANDROID_ROOT="$1"
+ shift
elif [ "x$1" = "x--" ]; then
shift
break
@@ -134,8 +150,13 @@
ISA="x86_64"
GDB_SERVER="gdbserver64"
DALVIKVM="dalvikvm64"
+ LIBRARY_DIRECTORY="lib64"
ARCHITECTURES_PATTERN="${ARCHITECTURES_64}"
shift
+ elif [ "x$1" = "x--pic-test" ]; then
+ FLAGS="${FLAGS} -Xcompiler-option --compile-pic"
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compile-pic"
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
@@ -164,7 +185,6 @@
fi
if [ "$VERIFY" = "y" ]; then
- DEX_VERIFY=""
JVM_VERIFY_ARG="-Xverify:all"
msg "Performing verification"
else
@@ -197,7 +217,9 @@
if [ "$HAVE_IMAGE" = "n" ]; then
- BOOT_OPT="-Ximage:/system/non-existant/core.art"
+ DALVIKVM_BOOT_OPT="-Ximage:/system/non-existant/core.art"
+else
+ DALVIKVM_BOOT_OPT="-Ximage:${BOOT_IMAGE}"
fi
@@ -220,7 +242,12 @@
if [ "$INTERPRETER" = "y" ]; then
INT_OPTS="-Xint"
- COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
+ if [ "$VERIFY" = "y" ] ; then
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
+ else
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
+ DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
+ fi
fi
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
@@ -254,17 +281,18 @@
mkdir_cmdline="mkdir -p ${DEX_LOCATION}/dalvik-cache/$ISA"
if [ "$PREBUILD" = "y" ]; then
- dex2oat_cmdline="$INVOKE_WITH dex2oatd \
+ dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/dex2oatd \
$COMPILE_FLAGS \
- $DEX2OAT_BOOT_OPT \
+ --boot-image=${BOOT_IMAGE} \
--dex-file=$DEX_LOCATION/$TEST_NAME.jar \
--oat-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g") \
--instruction-set=$ISA"
fi
-dalvikvm_cmdline="$INVOKE_WITH $GDB $DALVIKVM \
+dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
$GDB_ARGS \
$FLAGS \
+ $DEX_VERIFY \
-XXlib:$LIB \
$PATCHOAT \
$DEX2OAT \
@@ -273,7 +301,7 @@
$INT_OPTS \
$DEBUGGER_OPTS \
$DALVIKVM_BOOT_OPT \
- -cp $DEX_LOCATION/$TEST_NAME.jar $MAIN"
+ -cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN"
if [ "$HOST" = "n" ]; then
@@ -291,12 +319,22 @@
adb push $TEST_NAME-ex.jar $DEX_LOCATION >/dev/null 2>&1
fi
+ LD_LIBRARY_PATH=
+ if [ "$ANDROID_ROOT" != "/system" ]; then
+ # The current default installation is a 64-bit dalvikvm with a 32-bit dex2oat,
+ # so we can only use LD_LIBRARY_PATH when testing on a local
+ # installation.
+ LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBRARY_DIRECTORY
+ fi
+
# Create a script with the command. The command can get longer than the longest
# allowed adb command and there is no way to get the exit status from an adb shell
# command.
cmdline="cd $DEX_LOCATION && \
export ANDROID_DATA=$DEX_LOCATION && \
export DEX_LOCATION=$DEX_LOCATION && \
+ export ANDROID_ROOT=$ANDROID_ROOT && \
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
$mkdir_cmdline && \
$dex2oat_cmdline && \
$dalvikvm_cmdline"
@@ -325,7 +363,7 @@
export ANDROID_LOG_TAGS='*:s'
fi
export ANDROID_DATA="$DEX_LOCATION"
- export ANDROID_ROOT="${ANDROID_HOST_OUT}"
+ export ANDROID_ROOT="${ANDROID_ROOT}"
export LD_LIBRARY_PATH="${ANDROID_ROOT}/lib"
export DYLD_LIBRARY_PATH="${ANDROID_ROOT}/lib"
export PATH="$PATH:${ANDROID_ROOT}/bin"
diff --git a/test/run-test b/test/run-test
index 62c701f..843714b 100755
--- a/test/run-test
+++ b/test/run-test
@@ -17,6 +17,7 @@
# Set up prog to be the path of this script, including following symlinks,
# and set up progdir to be the fully-qualified pathname of its directory.
prog="$0"
+args="$@"
while [ -h "${prog}" ]; do
newProg=`/bin/ls -ld "${prog}"`
newProg=`expr "${newProg}" : ".* -> \(.*\)$"`
@@ -94,6 +95,9 @@
have_dex2oat="yes"
have_patchoat="yes"
have_image="yes"
+image_suffix=""
+pic_image_suffix=""
+android_root="/system"
while true; do
if [ "x$1" = "x--host" ]; then
@@ -124,6 +128,12 @@
elif [ "x$1" = "x--no-image" ]; then
have_image="no"
shift
+ elif [ "x$1" = "x--pic-image" ]; then
+ pic_image_suffix="-pic"
+ shift
+ elif [ "x$1" = "x--pic-test" ]; then
+ run_args="${run_args} --pic-test"
+ shift
elif [ "x$1" = "x--relocate" ]; then
relocate="yes"
shift
@@ -173,6 +183,11 @@
shift
elif [ "x$1" = "x--interpreter" ]; then
run_args="${run_args} --interpreter"
+ image_suffix="-interpreter"
+ shift
+ elif [ "x$1" = "x--optimizing" ]; then
+ run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"
+ image_suffix="-optimizing"
shift
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
@@ -209,6 +224,16 @@
break
fi
shift
+ elif [ "x$1" = "x--android-root" ]; then
+ shift
+ if [ "x$1" = "x" ]; then
+ echo "$0 missing argument to --android-root" 1>&2
+ usage="yes"
+ break
+ fi
+ android_root="$1"
+ run_args="${run_args} --android-root $1"
+ shift
elif [ "x$1" = "x--update" ]; then
update_mode="yes"
shift
@@ -307,12 +332,12 @@
if [ -z "$ANDROID_HOST_OUT" ]; then
export ANDROID_HOST_OUT=$ANDROID_BUILD_TOP/out/host/linux-x86
fi
- run_args="${run_args} --boot -Ximage:${ANDROID_HOST_OUT}/framework/core.art"
+ run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}${pic_image_suffix}.art"
run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}"
else
guess_arch_name
run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}"
- run_args="${run_args} --boot -Ximage:/data/art-test/core.art"
+ run_args="${run_args} --boot /data/art-test/core${image_suffix}${pic_image_suffix}.art"
fi
if [ "$relocate" = "yes" ]; then
run_args="${run_args} --relocate"
@@ -330,7 +355,7 @@
framework="${ANDROID_HOST_OUT}/framework"
bpath_suffix="-hostdex"
else
- framework="/system/framework"
+ framework="${android_root}/framework"
bpath_suffix=""
fi
# TODO If the target was compiled WITH_DEXPREOPT=true then these tests will
@@ -383,39 +408,41 @@
echo ' Omitting the test name or specifying "-" will use the' \
"current directory."
echo " Runtime Options:"
- echo " -O Run non-debug rather than debug build (off by default)."
- echo " -Xcompiler-option Pass an option to the compiler."
- echo " --runtime-option Pass an option to the runtime."
- echo " --debug Wait for a debugger to attach."
- echo " --gdb Run under gdb; incompatible with some tests."
- echo " --build-only Build test files only (off by default)."
- echo " --interpreter Enable interpreter only mode (off by default)."
- echo " --no-verify Turn off verification (on by default)."
- echo " --no-optimize Turn off optimization (on by default)."
- echo " --no-precise Turn off precise GC (on by default)."
- echo " --zygote Spawn the process from the Zygote." \
+ echo " -O Run non-debug rather than debug build (off by default)."
+ echo " -Xcompiler-option Pass an option to the compiler."
+ echo " --runtime-option Pass an option to the runtime."
+ echo " --debug Wait for a debugger to attach."
+ echo " --gdb Run under gdb; incompatible with some tests."
+ echo " --build-only Build test files only (off by default)."
+ echo " --interpreter Enable interpreter only mode (off by default)."
+ echo " --optimizing Enable optimizing compiler (off by default)."
+ echo " --no-verify Turn off verification (on by default)."
+ echo " --no-optimize Turn off optimization (on by default)."
+ echo " --no-precise Turn off precise GC (on by default)."
+ echo " --zygote Spawn the process from the Zygote." \
"If used, then the"
- echo " other runtime options are ignored."
- echo " --no-dex2oat Run as though dex2oat was failing."
- echo " --no-patchoat Run as though patchoat was failing."
- echo " --prebuild Run dex2oat on the files before starting test. (default)"
- echo " --no-prebuild Do not run dex2oat on the files before starting"
- echo " the test."
- echo " --relocate Force the use of relocating in the test, making"
- echo " the image and oat files be relocated to a random"
- echo " address before running. (default)"
- echo " --no-relocate Force the use of no relocating in the test"
- echo " --host Use the host-mode virtual machine."
- echo " --invoke-with Pass --invoke-with option to runtime."
- echo " --dalvik Use Dalvik (off by default)."
- echo " --jvm Use a host-local RI virtual machine."
- echo " --output-path [path] Location where to store the build" \
+ echo " other runtime options are ignored."
+ echo " --no-dex2oat Run as though dex2oat was failing."
+ echo " --no-patchoat Run as though patchoat was failing."
+ echo " --prebuild Run dex2oat on the files before starting test. (default)"
+ echo " --no-prebuild Do not run dex2oat on the files before starting"
+ echo " the test."
+ echo " --relocate Force the use of relocating in the test, making"
+ echo " the image and oat files be relocated to a random"
+ echo " address before running. (default)"
+ echo " --no-relocate Force the use of no relocating in the test"
+ echo " --host Use the host-mode virtual machine."
+ echo " --invoke-with Pass --invoke-with option to runtime."
+ echo " --dalvik Use Dalvik (off by default)."
+ echo " --jvm Use a host-local RI virtual machine."
+ echo " --output-path [path] Location where to store the build" \
"files."
- echo " --64 Run the test in 64-bit mode"
- echo " --trace Run with method tracing"
- echo " --gcstress Run with gc stress testing"
- echo " --gcverify Run with gc verification"
- echo " --always-clean Delete the test files even if the test fails."
+ echo " --64 Run the test in 64-bit mode"
+ echo " --trace Run with method tracing"
+ echo " --gcstress Run with gc stress testing"
+ echo " --gcverify Run with gc verification"
+ echo " --always-clean Delete the test files even if the test fails."
+ echo " --android-root [path] The path on target for the android root. (/system by default)."
) 1>&2
exit 1
fi
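A minimal usage sketch (not part of the patch) for the new --android-root flag documented in the help text above; the test name and alternate root path are illustrative only:

    # Run one test against an android root other than /system; run-test
    # substitutes the path into the target framework location (the
    # ${android_root}/framework change above).
    ./run-test --android-root /system_alt --relocate 001-HelloWorld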
@@ -468,15 +495,14 @@
file_size_limit=5120
elif echo "$test_dir" | grep 083; then
file_size_limit=5120
-elif echo "$test_dir" | grep 115; then
-# Native bridge test copies libarttest.so into its directory, which needs 2MB already.
- file_size_limit=5120
fi
if ! ulimit -S "$file_size_limit"; then
echo "ulimit file size setting failed"
fi
good="no"
+good_build="yes"
+good_run="yes"
if [ "$dev_mode" = "yes" ]; then
"./${build}" 2>&1
build_exit="$?"
@@ -524,17 +550,32 @@
if [ "$build_exit" = '0' ]; then
echo "${test_dir}: running..." 1>&2
"./${run}" $run_args "$@" >"$output" 2>&1
+ run_exit="$?"
+ if [ "$run_exit" != "0" ]; then
+ echo "run exit status: $run_exit" 1>&2
+ good_run="no"
+ else
+ good_run="yes"
+ fi
else
+ good_build="no"
cp "$build_output" "$output"
- echo "Failed to build in tmpdir=${tmp_dir} from oldwd=${oldwd} and cwd=`pwd`"
- echo "Non-canonical tmpdir was ${noncanonical_tmp_dir}"
- echo "build exit status: $build_exit" >>"$output"
+ echo "Failed to build in tmpdir=${tmp_dir} from oldwd=${oldwd} and cwd=`pwd`" >> "$output"
+ echo "Non-canonical tmpdir was ${noncanonical_tmp_dir}" >> "$output"
+ echo "Args: ${args}" >> "$output"
+ echo "build exit status: $build_exit" >> "$output"
+ max_name_length=$(getconf NAME_MAX ${tmp_dir})
+ echo "Max filename (NAME_MAX): ${max_name_length}" >> "$output"
+ max_path_length=$(getconf PATH_MAX ${tmp_dir})
+ echo "Max pathlength (PATH_MAX): ${max_path_length}" >> "$output"
fi
./$check_cmd "$expected" "$output"
if [ "$?" = "0" ]; then
- # output == expected
- good="yes"
- echo "${test_dir}: succeeded!" 1>&2
+ if [ "$good_build" = "no" -o "$good_run" = "yes" ]; then
+ # output == expected
+ good="yes"
+ echo "${test_dir}: succeeded!" 1>&2
+ fi
fi
fi
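Condensed sketch of how the new pass/fail logic composes, assuming the same harness and variable names as the patch: a test now counts as succeeded only when the output matches and either the run exited 0 or the failure already happened at build time (an expected build failure is still judged on output alone).

    "./${run}" $run_args "$@" >"$output" 2>&1
    run_exit="$?"
    if [ "$run_exit" = "0" ]; then good_run="yes"; else good_run="no"; fi
    ./$check_cmd "$expected" "$output"
    if [ "$?" = "0" ] && { [ "$good_build" = "no" ] || [ "$good_run" = "yes" ]; }; then
        good="yes"   # output == expected, and the run (or expected build failure) was clean
    fi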
diff --git a/tools/art b/tools/art
index 0103071..af96b47 100644
--- a/tools/art
+++ b/tools/art
@@ -53,6 +53,12 @@
elif [ "$1" = "--64" ]; then
DALVIKVM=dalvikvm64
shift
+ elif [ "$1" = "--perf" ]; then
+ PERF="record"
+ shift
+ elif [ "$1" = "--perf-report" ]; then
+ PERF="report"
+ shift
elif expr "$1" : "--" >/dev/null 2>&1; then
echo "unknown option: $1" 1>&2
exit 1
@@ -68,6 +74,11 @@
LIBDIR=$(find_libdir)
LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBDIR
+
+if [ z"$PERF" != z ]; then
+ invoke_with="perf record -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
+fi
+
mkdir -p $ANDROID_DATA/dalvik-cache/{arm,arm64,x86,x86_64}
ANDROID_DATA=$ANDROID_DATA \
ANDROID_ROOT=$ANDROID_ROOT \
@@ -75,7 +86,18 @@
$invoke_with $ANDROID_ROOT/bin/$DALVIKVM $lib \
-XXlib:$LIBART \
-Ximage:$ANDROID_ROOT/framework/core.art \
- "$@"
+ -Xcompiler-option --include-debug-symbols \
+ "$@"
+
EXIT_STATUS=$?
-rm -rf $ANDROID_DATA
+
+if [ z"$PERF" != z ]; then
+ if [ z"$PERF" = zreport ]; then
+ perf report -i $ANDROID_DATA/perf.data
+ fi
+ echo "Perf data saved in: $ANDROID_DATA/perf.data"
+else
+ rm -rf $ANDROID_DATA
+fi
+
exit $EXIT_STATUS
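Illustrative invocations (not part of the patch) of the new perf plumbing in tools/art; the classpath and main class below are placeholders:

    art --perf -cp hello.jar HelloWorld          # wraps the run in 'perf record -e cycles:u'
    art --perf-report -cp hello.jar HelloWorld   # records as above, then runs 'perf report'

With either flag the script keeps $ANDROID_DATA (and the perf.data written into it) instead of deleting it on exit, as the modified cleanup branch shows.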
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index f666ad1..2b57222 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -39,6 +39,7 @@
def ProcessFile(filename):
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
in_enum = False
+ is_enum_private = False
is_enum_class = False
line_number = 0
@@ -57,15 +58,16 @@
# Except when it's private
if m.group(3) is not None:
- continue
-
- is_enum_class = m.group(1) is not None
- enum_name = m.group(2)
- if len(enclosing_classes) > 0:
- enum_name = '::'.join(enclosing_classes) + '::' + enum_name
- _ENUMS[enum_name] = []
- _NAMESPACES[enum_name] = '::'.join(namespaces)
- _ENUM_CLASSES[enum_name] = is_enum_class
+ is_enum_private = True
+ else:
+ is_enum_private = False
+ is_enum_class = m.group(1) is not None
+ enum_name = m.group(2)
+ if len(enclosing_classes) > 0:
+ enum_name = '::'.join(enclosing_classes) + '::' + enum_name
+ _ENUMS[enum_name] = []
+ _NAMESPACES[enum_name] = '::'.join(namespaces)
+ _ENUM_CLASSES[enum_name] = is_enum_class
in_enum = True
continue
@@ -80,11 +82,11 @@
continue
# Is this the start or end of an enclosing class or struct?
- m = re.compile(r'^(?:class|struct)(?: MANAGED)? (\S+).* \{').search(raw_line)
+ m = re.compile(r'^\s*(?:class|struct)(?: MANAGED)?(?: PACKED\([0-9]\))? (\S+).* \{').search(raw_line)
if m:
enclosing_classes.append(m.group(1))
continue
- m = re.compile(r'^\};').search(raw_line)
+ m = re.compile(r'^\s*\}( .*)?;').search(raw_line)
if m:
enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
continue
@@ -99,6 +101,9 @@
in_enum = False
continue
+ if is_enum_private:
+ continue
+
# The only useful thing in comments is the <<alternate text>> syntax for
# overriding the default enum value names. Pull that out...
enum_text = None
@@ -146,6 +151,7 @@
# There shouldn't be anything left.
if len(rest):
+ sys.stderr.write('%s\n' % (rest))
Confused(filename, line_number, raw_line)
if len(enclosing_classes) > 0: