Merge "ART: Turn on dex2oat watchdog on target"
diff --git a/Android.mk b/Android.mk
index 7a95dfe..15e8308 100644
--- a/Android.mk
+++ b/Android.mk
@@ -67,8 +67,13 @@
rm -f $(2ND_TARGET_OUT_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/javalib.odex
rm -f $(2ND_TARGET_OUT_INTERMEDIATES)/APPS/*_intermediates/*.odex
endif
+ifneq ($(TMPDIR),)
+ rm -rf $(TMPDIR)/$(USER)/test-*/dalvik-cache/*
+ rm -rf $(TMPDIR)/android-data/dalvik-cache/*
+else
rm -rf /tmp/$(USER)/test-*/dalvik-cache/*
rm -rf /tmp/android-data/dalvik-cache/*
+endif
.PHONY: clean-oat-target
clean-oat-target:
@@ -127,7 +132,6 @@
include $(art_path)/build/Android.common_test.mk
include $(art_path)/build/Android.gtest.mk
-include $(art_path)/test/Android.oat.mk
include $(art_path)/test/Android.run-test.mk
# Sync test files to the target, depends upon all things that must be pushed to the target.
@@ -169,65 +173,59 @@
# "mm test-art-host" to build and run all host tests.
.PHONY: test-art-host
-test-art-host: test-art-host-gtest test-art-host-oat test-art-host-run-test test-art-host-vixl
+test-art-host: test-art-host-gtest test-art-host-run-test test-art-host-vixl
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# All host tests that run solely with the default compiler.
.PHONY: test-art-host-default
-test-art-host-default: test-art-host-oat-default test-art-host-run-test-default
+test-art-host-default: test-art-host-run-test-default
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# All host tests that run solely with the optimizing compiler.
.PHONY: test-art-host-optimizing
-test-art-host-optimizing: test-art-host-oat-optimizing test-art-host-run-test-optimizing
+test-art-host-optimizing: test-art-host-run-test-optimizing
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# All host tests that run solely on the interpreter.
.PHONY: test-art-host-interpreter
-test-art-host-interpreter: test-art-host-oat-interpreter test-art-host-run-test-interpreter
+test-art-host-interpreter: test-art-host-run-test-interpreter
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# Primary host architecture variants:
.PHONY: test-art-host$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-oat$(ART_PHONY_TEST_HOST_SUFFIX) test-art-host-run-test$(ART_PHONY_TEST_HOST_SUFFIX)
+ test-art-host-run-test$(ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX)
-test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-oat-default$(ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-run-test-default$(ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX)
-test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-oat-optimizing$(ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-run-test-optimizing$(ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX)
-test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-oat-interpreter$(ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# Secondary host architecture variants:
ifneq ($(HOST_PREFER_32_BIT),true)
.PHONY: test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(2ND_ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-oat$(2ND_ART_PHONY_TEST_HOST_SUFFIX) test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+ test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
-test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-oat-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-run-test-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
-test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-oat-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-run-test-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
-test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-oat-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX) \
- test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
@@ -236,65 +234,59 @@
# "mm test-art-target" to build and run all target tests.
.PHONY: test-art-target
-test-art-target: test-art-target-gtest test-art-target-oat test-art-target-run-test
+test-art-target: test-art-target-gtest test-art-target-run-test
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# All target tests that run solely with the default compiler.
.PHONY: test-art-target-default
-test-art-target-default: test-art-target-oat-default test-art-target-run-test-default
+test-art-target-default: test-art-target-run-test-default
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# All target tests that run solely with the optimizing compiler.
.PHONY: test-art-target-optimizing
-test-art-target-optimizing: test-art-target-oat-optimizing test-art-target-run-test-optimizing
+test-art-target-optimizing: test-art-target-run-test-optimizing
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# All target tests that run solely on the interpreter.
.PHONY: test-art-target-interpreter
-test-art-target-interpreter: test-art-target-oat-interpreter test-art-target-run-test-interpreter
+test-art-target-interpreter: test-art-target-run-test-interpreter
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# Primary target architecture variants:
.PHONY: test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-oat$(ART_PHONY_TEST_TARGET_SUFFIX) test-art-target-run-test$(ART_PHONY_TEST_TARGET_SUFFIX)
+ test-art-target-run-test$(ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX)
-test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-oat-default$(ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-run-test-default$(ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX)
-test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-oat-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-run-test-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
-test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-oat-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
# Secondary target architecture variants:
ifdef TARGET_2ND_ARCH
.PHONY: test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-oat$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
-test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-oat-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-run-test-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
-test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-oat-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-run-test-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
.PHONY: test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
-test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-oat-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) \
- test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
@@ -329,7 +321,8 @@
--dex-location=/$(1) --oat-file=$$@ \
--instruction-set=$(DEX2OAT_TARGET_ARCH) \
--instruction-set-features=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
- --android-root=$(PRODUCT_OUT)/system
+ --android-root=$(PRODUCT_OUT)/system --include-patch-information \
+ --runtime-arg -Xnorelocate
endif
@@ -402,8 +395,8 @@
adb root && sleep 3
adb shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-flags ""
- adb shell setprop dalvik.vm.image-dex2oat-flags ""
+ adb shell setprop dalvik.vm.dex2oat-filter ""
+ adb shell setprop dalvik.vm.image-dex2oat-filter ""
adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
adb shell start
@@ -412,8 +405,8 @@
adb root && sleep 3
adb shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-flags ""
- adb shell setprop dalvik.vm.image-dex2oat-flags ""
+ adb shell setprop dalvik.vm.dex2oat-filter ""
+ adb shell setprop dalvik.vm.image-dex2oat-filter ""
adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
adb shell start
@@ -422,8 +415,8 @@
adb root && sleep 3
adb shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
- adb shell setprop dalvik.vm.image-dex2oat-flags ""
+ adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+ adb shell setprop dalvik.vm.image-dex2oat-filter ""
adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
adb shell start
@@ -432,8 +425,8 @@
adb root && sleep 3
adb shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
- adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=interpret-only"
+ adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+ adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
adb shell start
@@ -442,8 +435,8 @@
adb root && sleep 3
adb shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
- adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=interpret-only"
+ adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+ adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
adb shell start
@@ -452,8 +445,8 @@
adb root && sleep 3
adb shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=verify-none"
- adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=verify-none"
+ adb shell setprop dalvik.vm.dex2oat-filter "verify-none"
+ adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
adb shell start
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index c39bc5d..d55f310 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -183,8 +183,16 @@
art_non_debug_cflags := \
-O3
+art_host_non_debug_cflags := \
+ $(art_non_debug_cflags)
+
+art_target_non_debug_cflags := \
+ $(art_non_debug_cflags)
+
ifeq ($(HOST_OS),linux)
- art_non_debug_cflags += -Wframe-larger-than=1728
+ # Larger frame-size for host clang builds today
+ art_host_non_debug_cflags += -Wframe-larger-than=2600
+ art_target_non_debug_cflags += -Wframe-larger-than=1728
endif
# FIXME: upstream LLVM has a vectorizer bug that needs to be fixed
@@ -207,6 +215,25 @@
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
ART_TARGET_CFLAGS := $(art_cflags) -DART_TARGET -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS)
+
+ifndef LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA
+ LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA=-0x1000000
+endif
+ifndef LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA
+ LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA=0x1000000
+endif
+ART_HOST_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA)
+ART_HOST_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA)
+
+ifndef LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA
+ LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA=-0x1000000
+endif
+ifndef LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA
+ LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA=0x1000000
+endif
+ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA)
+ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
+
ART_TARGET_LDFLAGS :=
ifeq ($(TARGET_CPU_SMP),true)
ART_TARGET_CFLAGS += -DANDROID_SMP=1
@@ -220,14 +247,6 @@
endif
endif
ART_TARGET_CFLAGS += $(ART_DEFAULT_GC_TYPE_CFLAGS)
-ifdef PGO_GEN
- ART_TARGET_CFLAGS += -fprofile-generate=$(PGO_GEN)
- ART_TARGET_LDFLAGS += -fprofile-generate=$(PGO_GEN)
-endif
-ifdef PGO_USE
- ART_TARGET_CFLAGS += -fprofile-use=$(PGO_USE)
- ART_TARGET_CFLAGS += -fprofile-correction -Wno-error
-endif
# DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES is set in ../build/core/dex_preopt.mk based on
# the TARGET_CPU_VARIANT
@@ -266,8 +285,8 @@
endif
endif
-ART_HOST_NON_DEBUG_CFLAGS := $(art_non_debug_cflags)
-ART_TARGET_NON_DEBUG_CFLAGS := $(art_non_debug_cflags)
+ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
+ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags)
# TODO: move -fkeep-inline-functions to art_debug_cflags when target gcc > 4.4 (and -lsupc++)
ART_HOST_DEBUG_CFLAGS := $(art_debug_cflags) -fkeep-inline-functions
@@ -329,5 +348,8 @@
ART_DEFAULT_GC_TYPE :=
ART_DEFAULT_GC_TYPE_CFLAGS :=
art_cflags :=
+art_target_non_debug_cflags :=
+art_host_non_debug_cflags :=
+art_non_debug_cflags :=
endif # ANDROID_COMMON_BUILD_MK
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 8c0d9f2..10695b6 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -23,7 +23,11 @@
ART_TARGET_DALVIK_CACHE_DIR := /data/dalvik-cache
# Directory used for gtests on device.
-ART_TARGET_NATIVETEST_DIR := /data/nativetest/art
+# $(TARGET_OUT_DATA_NATIVE_TESTS) evaluates, on the host, to the target's nativetest output
+# directory, so we can strip everything but the last path component to find out whether it is
+# "nativetest" or "nativetest64".
+ART_TARGET_NATIVETEST_DIR := /data/$(notdir $(TARGET_OUT_DATA_NATIVE_TESTS))/art
+
ART_TARGET_NATIVETEST_OUT := $(TARGET_OUT_DATA_NATIVE_TESTS)/art
# Directory used for oat tests on device.
@@ -31,7 +35,11 @@
ART_TARGET_TEST_OUT := $(TARGET_OUT_DATA)/art-test
# Directory used for temporary test files on the host.
+ifneq ($(TMPDIR),)
+ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID)
+else
ART_HOST_TEST_DIR := /tmp/test-art-$(shell echo $$PPID)
+endif
# Core.oat location on the device.
TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$(DEX2OAT_TARGET_ARCH)/core.oat
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index ed07129..52d1ee3 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -19,18 +19,58 @@
include art/build/Android.common_path.mk
+# We need to set a define for the nativetest dir so that common_runtime_test will know the right
+# path. (The problem is a 32-bit test on a 64-bit device, whose tests still live in nativetest64.)
+ART_TARGET_CFLAGS += -DART_TARGET_NATIVETEST_DIR=${ART_TARGET_NATIVETEST_DIR}
+
# List of known broken tests that we won't attempt to execute. The test name must be the full
# rule name such as test-art-host-oat-optimizing-HelloWorld64.
ART_TEST_KNOWN_BROKEN := \
- test-art-host-oat-optimizing-SignalTest64 \
- test-art-host-oat-optimizing-SignalTest32
+ test-art-target-run-test-gcstress-optimizing-prebuild-004-SignalTest32 \
+ test-art-target-run-test-gcstress-optimizing-norelocate-004-SignalTest32 \
+ test-art-target-run-test-gcstress-default-prebuild-004-SignalTest32 \
+ test-art-target-run-test-gcstress-default-norelocate-004-SignalTest32 \
+ test-art-target-run-test-gcstress-optimizing-relocate-004-SignalTest32 \
+ test-art-target-run-test-gcstress-default-relocate-004-SignalTest32 \
+ test-art-target-run-test-gcstress-optimizing-no-prebuild-004-SignalTest32 \
+ test-art-target-run-test-gcstress-default-no-prebuild-004-SignalTest32
# List of known failing tests that when executed won't cause test execution to not finish.
# The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64.
ART_TEST_KNOWN_FAILING :=
# Keep going after encountering a test failure?
-ART_TEST_KEEP_GOING ?= false
+ART_TEST_KEEP_GOING ?= true
+
+# Do you want all tests, even those that are time consuming?
+ART_TEST_FULL ?= true
+
+# Do you want optimizing compiler tests run?
+ART_TEST_OPTIMIZING ?= $(ART_TEST_FULL)
+
+# Do you want tracing tests run?
+ART_TEST_TRACE ?= $(ART_TEST_FULL)
+
+# Do you want tests with GC verification enabled run?
+ART_TEST_GC_VERIFY ?= $(ART_TEST_FULL)
+
+# Do you want tests with the GC stress mode enabled run?
+ART_TEST_GC_STRESS ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with relocation enabled?
+ART_TEST_RUN_TEST_RELOCATE ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with relocation disabled?
+ART_TEST_RUN_TEST_NO_RELOCATE ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with prebuild disabled?
+ART_TEST_RUN_TEST_NO_PREBUILD ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with prebuild enabled?
+ART_TEST_RUN_TEST_PREBUILD ?= true
+
+# Do you want failed tests to have their artifacts cleaned up?
+ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true
# Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
define ART_TEST_FAILED
@@ -38,7 +78,7 @@
(mkdir -p $(ART_HOST_TEST_DIR)/failed/ && touch $(ART_HOST_TEST_DIR)/failed/$(1) && \
echo $(ART_TEST_KNOWN_FAILING) | grep -q $(1) \
&& (echo -e "$(1) \e[91mKNOWN FAILURE\e[0m") \
- || (echo -e "$(1) \e[91mFAILED\e[0m")))
+ || (echo -e "$(1) \e[91mFAILED\e[0m" >&2 )))
endef
# Define the command run on test success. $(1) is the name of the test. Executed by the shell.
@@ -64,7 +104,7 @@
&& (echo -e "\e[93mSKIPPED TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/skipped/) \
|| (echo -e "\e[92mNO TESTS SKIPPED\e[0m")) && \
([ -d $(ART_HOST_TEST_DIR)/failed/ ] \
- && (echo -e "\e[91mFAILING TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/failed/) \
+ && (echo -e "\e[91mFAILING TESTS\e[0m" >&2 && ls -1 $(ART_HOST_TEST_DIR)/failed/ >&2) \
|| (echo -e "\e[92mNO TESTS FAILED\e[0m")) \
&& ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] && rm -r $(ART_HOST_TEST_DIR) \
|| (rm -r $(ART_HOST_TEST_DIR) && false)))))
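
Note: the -DART_TARGET_NATIVETEST_DIR define added in Android.common_test.mk above is passed
unquoted, so the C++ side has to stringify it before it can be used as a path. A minimal
illustrative sketch, not part of this patch (the STRINGIFY helpers, the constant name, and the
fallback value are assumptions for illustration):

    // Turn the unquoted -DART_TARGET_NATIVETEST_DIR=<path> define into a C string constant.
    #define ART_STRINGIFY_ARG(x) #x
    #define ART_STRINGIFY(x) ART_STRINGIFY_ARG(x)

    #ifdef ART_TARGET_NATIVETEST_DIR
    static const char* const kTargetNativeTestDir = ART_STRINGIFY(ART_TARGET_NATIVETEST_DIR);
    #else
    static const char* const kTargetNativeTestDir = "/data/nativetest/art";  // Assumed fallback.
    #endif
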
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index a340588..dd9f414 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,6 +61,9 @@
# The elf writer test has dependencies on core.oat.
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_OAT_OUT) $(2ND_TARGET_CORE_OAT_OUT)
+ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_JARS)
+ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_JARS)
+ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
@@ -91,6 +94,7 @@
runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc \
runtime/entrypoints_order_test.cc \
runtime/exception_test.cc \
+ runtime/gc/accounting/card_table_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
runtime/gc/heap_test.cc \
runtime/gc/space/dlmalloc_space_base_test.cc \
@@ -113,6 +117,7 @@
runtime/monitor_pool_test.cc \
runtime/monitor_test.cc \
runtime/parsed_options_test.cc \
+ runtime/proxy_test.cc \
runtime/reference_table_test.cc \
runtime/thread_pool_test.cc \
runtime/transaction_test.cc \
@@ -123,10 +128,10 @@
COMPILER_GTEST_COMMON_SRC_FILES := \
runtime/jni_internal_test.cc \
- runtime/proxy_test.cc \
runtime/reflection_test.cc \
compiler/dex/global_value_numbering_test.cc \
compiler/dex/local_value_numbering_test.cc \
+ compiler/dex/mir_graph_test.cc \
compiler/dex/mir_optimization_test.cc \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
@@ -145,6 +150,7 @@
compiler/optimizing/pretty_printer_test.cc \
compiler/optimizing/register_allocator_test.cc \
compiler/optimizing/ssa_test.cc \
+ compiler/optimizing/stack_map_test.cc \
compiler/output_stream_test.cc \
compiler/utils/arena_allocator_test.cc \
compiler/utils/dedupe_set_test.cc \
@@ -171,6 +177,7 @@
COMPILER_GTEST_HOST_SRC_FILES := \
$(COMPILER_GTEST_COMMON_SRC_FILES) \
+ compiler/utils/assembler_thumb_test.cc \
compiler/utils/x86/assembler_x86_test.cc \
compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -273,7 +280,7 @@
.PHONY: $$(gtest_rule)
$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \
+ $(hide) ($$(call ART_TEST_SKIP,$$@) && LD_PRELOAD=libsigchain$$(ART_HOST_SHLIB_EXTENSION) $$< && $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@)
ART_TEST_HOST_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 61a2cde..10936a4 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -48,11 +48,6 @@
$(eval $(call create-core-oat-host-rules,2ND_))
endif
-IMPLICIT_CHECKS_arm := null,stack
-IMPLICIT_CHECKS_arm64 := none
-IMPLICIT_CHECKS_x86 := none
-IMPLICIT_CHECKS_x86_64 := none
-IMPLICIT_CHECKS_mips := none
define create-core-oat-target-rules
$$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENCY)
@echo "target dex2oat: $$@ ($$?)"
@@ -63,7 +58,6 @@
--oat-location=$$($(1)TARGET_CORE_OAT) --image=$$($(1)TARGET_CORE_IMG_OUT) \
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(1)TARGET_ARCH) \
--instruction-set-features=$$($(1)TARGET_INSTRUCTION_SET_FEATURES) \
- --implicit-checks=$(IMPLICIT_CHECKS_$($(1)TARGET_ARCH)) \
--android-root=$$(PRODUCT_OUT)/system --include-patch-information
# This "renaming" eases declaration in art/Android.mk
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 02dad2a..5c5163d 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -95,8 +95,8 @@
optimizing/register_allocator.cc \
optimizing/ssa_builder.cc \
optimizing/ssa_liveness_analysis.cc \
- optimizing/ssa_type_propagation.cc \
optimizing/ssa_phi_elimination.cc \
+ optimizing/ssa_type_propagation.cc \
trampolines/trampoline_compiler.cc \
utils/arena_allocator.cc \
utils/arena_bit_vector.cc \
@@ -107,6 +107,7 @@
utils/arm64/assembler_arm64.cc \
utils/arm64/managed_register_arm64.cc \
utils/assembler.cc \
+ utils/dwarf_cfi.cc \
utils/mips/assembler_mips.cc \
utils/mips/managed_register_mips.cc \
utils/x86/assembler_x86.cc \
@@ -118,6 +119,7 @@
compilers.cc \
compiler.cc \
elf_fixup.cc \
+ elf_patcher.cc \
elf_stripper.cc \
elf_writer.cc \
elf_writer_quick.cc \
@@ -186,6 +188,7 @@
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart-compiler
LOCAL_SHARED_LIBRARIES += libart
+ LOCAL_FDO_SUPPORT := true
else # debug
LOCAL_MODULE := libartd-compiler
LOCAL_SHARED_LIBRARIES += libartd
@@ -280,10 +283,10 @@
endef
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-libart-compiler,host,ndebug))
endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-libart-compiler,host,debug))
endif
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f098a34..f2a8d84 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -165,14 +165,15 @@
const std::vector<uint8_t>& code,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask)
+ const uint32_t fp_spill_mask,
+ const std::vector<uint8_t>* cfi_info)
: CompiledCode(driver, instruction_set, code),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())),
- cfi_info_(nullptr) {
+ cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
}
// Constructs a CompiledMethod for the Portable compiler.
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index b8cd851..c98d06a 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -120,7 +120,8 @@
const std::vector<uint8_t>& quick_code,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask);
+ const uint32_t fp_spill_mask,
+ const std::vector<uint8_t>* cfi_info);
// Constructs a CompiledMethod for the Portable compiler.
CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set, const std::string& code,
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
index bac1f12..5cf846f 100644
--- a/compiler/compilers.cc
+++ b/compiler/compilers.cc
@@ -38,9 +38,6 @@
uint32_t access_flags, uint32_t method_idx,
const art::DexFile& dex_file);
-// Hack for CFI CIE initialization
-extern std::vector<uint8_t>* X86CFIInitialization();
-
void QuickCompiler::Init() const {
ArtInitQuickCompilerContext(GetCompilerDriver());
}
@@ -126,17 +123,6 @@
return mir_to_lir;
}
-std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
- const CompilerDriver& driver) const {
- if (driver.GetInstructionSet() == kX86) {
- return X86CFIInitialization();
- }
- if (driver.GetInstructionSet() == kX86_64) {
- return X86CFIInitialization();
- }
- return nullptr;
-}
-
CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
diff --git a/compiler/compilers.h b/compiler/compilers.h
index 2c231e1..151bf6f 100644
--- a/compiler/compilers.h
+++ b/compiler/compilers.h
@@ -56,17 +56,6 @@
void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE {}
- /*
- * @brief Generate and return Dwarf CFI initialization, if supported by the
- * backend.
- * @param driver CompilerDriver for this compile.
- * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
- * information.
- * @note This is used for backtrace information in generated code.
- */
- std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
- OVERRIDE;
-
private:
DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
};
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index d1d5ad9..7395324 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -172,7 +172,7 @@
class ClassInitCheckElimination : public PassME {
public:
ClassInitCheckElimination()
- : PassME("ClInitCheckElimination", kRepeatingTopologicalSortTraversal) {
+ : PassME("ClInitCheckElimination", kLoopRepeatingTopologicalSortTraversal) {
}
bool Gate(const PassDataHolder* data) const {
@@ -207,17 +207,17 @@
class GlobalValueNumberingPass : public PassME {
public:
GlobalValueNumberingPass()
- : PassME("GVN", kRepeatingTopologicalSortTraversal, "4_post_gvn_cfg") {
+ : PassME("GVN", kLoopRepeatingTopologicalSortTraversal, "4_post_gvn_cfg") {
}
- bool Gate(const PassDataHolder* data) const {
+ bool Gate(const PassDataHolder* data) const OVERRIDE {
DCHECK(data != nullptr);
CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
DCHECK(cUnit != nullptr);
return cUnit->mir_graph->ApplyGlobalValueNumberingGate();
}
- bool Worker(const PassDataHolder* data) const {
+ bool Worker(const PassDataHolder* data) const OVERRIDE {
DCHECK(data != nullptr);
const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
CompilationUnit* cUnit = pass_me_data_holder->c_unit;
@@ -227,7 +227,7 @@
return cUnit->mir_graph->ApplyGlobalValueNumbering(bb);
}
- void End(PassDataHolder* data) const {
+ void End(PassDataHolder* data) const OVERRIDE {
DCHECK(data != nullptr);
CompilationUnit* cUnit = down_cast<PassMEDataHolder*>(data)->c_unit;
DCHECK(cUnit != nullptr);
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 69adb35..dcc67c3 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -425,10 +425,6 @@
std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
-enum ThrowKind {
- kThrowNoSuchMethod,
-};
-
enum DividePattern {
DivideNone,
Divide3,
@@ -471,8 +467,13 @@
kIsQuinOp,
kIsSextupleOp,
kIsIT,
+ kIsMoveOp,
kMemLoad,
kMemStore,
+ kMemVolatile,
+ kMemScaledx0,
+ kMemScaledx2,
+ kMemScaledx4,
kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
kRegDef0,
kRegDef1,
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index f8b9c1a..d1abf7f 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -121,6 +121,56 @@
return res;
}
+inline BasicBlock* LoopRepeatingTopologicalSortIterator::Next(bool had_change) {
+ if (idx_ != 0) {
+ // Mark last processed block visited.
+ BasicBlock* bb = mir_graph_->GetBasicBlock(block_id_list_->Get(idx_ - 1));
+ bb->visited = true;
+ if (had_change) {
+ // If we had a change we need to revisit the children.
+ ChildBlockIterator iter(bb, mir_graph_);
+ for (BasicBlock* child_bb = iter.Next(); child_bb != nullptr; child_bb = iter.Next()) {
+ child_bb->visited = false;
+ }
+ }
+ }
+
+ while (true) {
+ // Pop loops we have left and check if we need to recalculate one of them.
+ // NOTE: We need to do this even if idx_ == end_idx_.
+ while (loop_head_stack_->Size() != 0u &&
+ loop_ends_->Get(loop_head_stack_->Peek().first) == idx_) {
+ auto top = loop_head_stack_->Peek();
+ uint16_t loop_head_idx = top.first;
+ bool recalculated = top.second;
+ loop_head_stack_->Pop();
+ BasicBlock* loop_head = mir_graph_->GetBasicBlock(block_id_list_->Get(loop_head_idx));
+ DCHECK(loop_head != nullptr);
+ if (!recalculated || !loop_head->visited) {
+ loop_head_stack_->Insert(std::make_pair(loop_head_idx, true)); // Recalculating this loop.
+ idx_ = loop_head_idx + 1;
+ return loop_head;
+ }
+ }
+
+ if (idx_ == end_idx_) {
+ return nullptr;
+ }
+
+ // Get next block and return it if unvisited.
+ BasicBlockId idx = idx_;
+ idx_ += 1;
+ BasicBlock* bb = mir_graph_->GetBasicBlock(block_id_list_->Get(idx));
+ DCHECK(bb != nullptr);
+ if (!bb->visited) {
+ if (loop_ends_->Get(idx) != 0u) {
+ loop_head_stack_->Insert(std::make_pair(idx, false)); // Not recalculating.
+ }
+ return bb;
+ }
+ }
+}
+
} // namespace art
#endif // ART_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 66c524f..06d6832 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -388,6 +388,52 @@
}
};
+ /**
+ * @class LoopRepeatingTopologicalSortIterator
+ * @brief Used to perform a Topological Sort Iteration of a MIRGraph, repeating loops as needed.
+ * @details The iterator uses the visited flags to keep track of the blocks that need
+ * recalculation and keeps a stack of loop heads in the MIRGraph. At the end of the loop
+ * it returns to the loop head if it needs to be recalculated. Due to the use of
+ * the visited flags and the loop head stack in the MIRGraph, it's not possible to use
+ * two iterators at the same time or modify this data during iteration (though inspection
+ * of this data is allowed and sometimes even expected).
+ *
+ * NOTE: This iterator is not suitable for passes that need to propagate changes to
+ * predecessors, such as type inference.
+ */
+ class LoopRepeatingTopologicalSortIterator : public DataflowIterator {
+ public:
+ /**
+ * @brief The constructor, using all of the reachable blocks of the MIRGraph.
+ * @param mir_graph The MIRGraph considered.
+ */
+ explicit LoopRepeatingTopologicalSortIterator(MIRGraph* mir_graph)
+ : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder()->Size()),
+ loop_ends_(mir_graph->GetTopologicalSortOrderLoopEnds()),
+ loop_head_stack_(mir_graph_->GetTopologicalSortOrderLoopHeadStack()) {
+ // Extra setup for LoopRepeatingTopologicalSortIterator.
+ idx_ = start_idx_;
+ block_id_list_ = mir_graph->GetTopologicalSortOrder();
+ // Clear visited flags and check that the loop head stack is empty.
+ mir_graph->ClearAllVisitedFlags();
+ DCHECK_EQ(loop_head_stack_->Size(), 0u);
+ }
+
+ ~LoopRepeatingTopologicalSortIterator() {
+ DCHECK_EQ(loop_head_stack_->Size(), 0u);
+ }
+
+ /**
+ * @brief Get the next BasicBlock depending on iteration order.
+ * @param had_change did the user of the iteration change the previous BasicBlock.
+ * @return the next BasicBlock following the iteration order, 0 if finished.
+ */
+ virtual BasicBlock* Next(bool had_change = false) OVERRIDE;
+
+ private:
+ const GrowableArray<BasicBlockId>* const loop_ends_;
+ GrowableArray<std::pair<uint16_t, bool>>* const loop_head_stack_;
+ };
} // namespace art
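
The doc comment above gives the iteration contract; the following is a hedged usage sketch of how
a GVN-style pass could drive LoopRepeatingTopologicalSortIterator. The CompilationUnit* cu, the
ScopedArenaAllocator* allocator, and the per-MIR processing are assumed context for illustration,
not part of this patch:

    // Repeat loop bodies until their LVNs converge, as tracked by the had_change flag.
    GlobalValueNumbering gvn(cu, allocator);
    LoopRepeatingTopologicalSortIterator iter(cu->mir_graph.get());
    bool change = false;
    for (BasicBlock* bb = iter.Next(change); bb != nullptr; bb = iter.Next(change)) {
      LocalValueNumbering* lvn = gvn.PrepareBasicBlock(bb);
      if (lvn != nullptr) {
        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
          lvn->GetValueNumber(mir);  // Assign value names for this block's instructions.
        }
      }
      change = (lvn != nullptr) && gvn.FinishBasicBlock(bb);
    }
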
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index d097500..4f8c1d4 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <cstdint>
+
#include "compiler.h"
#include "compiler_internals.h"
#include "driver/compiler_driver.h"
@@ -40,7 +42,7 @@
/* Default optimizer/debug setting for the compiler. */
static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations
- (1 << kLoadStoreElimination) | // TODO: this pass has been broken for awhile - fix or delete.
+ // (1 << kLoadStoreElimination) |
// (1 << kLoadHoisting) |
// (1 << kSuppressLoads) |
// (1 << kNullCheckElimination) |
@@ -94,12 +96,12 @@
~0U,
// 1 = kArm, unused (will use kThumb2).
~0U,
- // 2 = kArm64. TODO(Arm64): enable optimizations once backend is mature enough.
- (1 << kLoadStoreElimination) |
+ // 2 = kArm64.
0,
// 3 = kThumb2.
0,
// 4 = kX86.
+ (1 << kLoadStoreElimination) |
0,
// 5 = kX86_64.
(1 << kLoadStoreElimination) |
@@ -470,6 +472,10 @@
COMPILE_ASSERT(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
kUnsupportedOpcodesSize_unexp);
+// The maximum number of Dalvik registers in a method for which we will attempt compilation.
+// This avoids an abort when we would need to manage more SSA registers than we can handle.
+static constexpr size_t kMaxAllowedDalvikRegisters = INT16_MAX / 2;
+
CompilationUnit::CompilationUnit(ArenaPool* pool)
: compiler_driver(nullptr),
class_linker(nullptr),
@@ -548,6 +554,12 @@
// Skip the method that we do not support currently.
static bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
CompilationUnit& cu) {
+ // This is a limitation in mir_graph. See MirGraph::SetNumSSARegs.
+ if (cu.num_dalvik_registers > kMaxAllowedDalvikRegisters) {
+ VLOG(compiler) << "Too many dalvik registers : " << cu.num_dalvik_registers;
+ return false;
+ }
+
// Check whether we do have limitations at all.
if (kSupportedTypes[cu.instruction_set] == nullptr &&
kUnsupportedOpcodesSize[cu.instruction_set] == 0U) {
@@ -735,7 +747,7 @@
/* Free Arenas from the cu.arena_stack for reuse by the cu.arena in the codegen. */
if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
- if (cu.arena_stack.PeakBytesAllocated() > 256 * 1024) {
+ if (cu.arena_stack.PeakBytesAllocated() > 1 * 1024 * 1024) {
MemStats stack_stats(cu.arena_stack.GetPeakStats());
LOG(INFO) << method_name << " " << Dumpable<MemStats>(stack_stats);
}
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index 614e826..d7ef6f0 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -22,8 +22,10 @@
GlobalValueNumbering::GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
: cu_(cu),
+ mir_graph_(cu->mir_graph.get()),
allocator_(allocator),
- repeat_count_(0u),
+ bbs_processed_(0u),
+ max_bbs_to_process_(kMaxBbsToProcessMultiplyFactor * mir_graph_->GetNumReachableBlocks()),
last_value_(0u),
modifications_allowed_(false),
global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
@@ -32,53 +34,72 @@
array_location_map_(ArrayLocationComparator(), allocator->Adapter()),
array_location_reverse_map_(allocator->Adapter()),
ref_set_map_(std::less<ValueNameSet>(), allocator->Adapter()),
- lvns_(cu_->mir_graph->GetNumBlocks(), nullptr, allocator->Adapter()),
+ lvns_(mir_graph_->GetNumBlocks(), nullptr, allocator->Adapter()),
work_lvn_(nullptr),
merge_lvns_(allocator->Adapter()) {
- cu_->mir_graph->ClearAllVisitedFlags();
}
GlobalValueNumbering::~GlobalValueNumbering() {
STLDeleteElements(&lvns_);
}
-LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb) {
+LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
+ ScopedArenaAllocator* allocator) {
if (UNLIKELY(!Good())) {
return nullptr;
}
- if (bb->data_flow_info == nullptr) {
+ if (UNLIKELY(bb->data_flow_info == nullptr)) {
return nullptr;
}
- if (bb->block_type == kEntryBlock) {
- repeat_count_ += 1u;
- if (repeat_count_ > kMaxRepeatCount) {
- last_value_ = kNoValue; // Make bad.
- return nullptr;
- }
- }
- if (bb->block_type == kExitBlock) {
+ if (UNLIKELY(bb->block_type == kExitBlock)) {
DCHECK(bb->first_mir_insn == nullptr);
return nullptr;
}
- if (bb->visited) {
+ if (UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
+ last_value_ = kNoValue; // Make bad.
return nullptr;
}
+ if (allocator == nullptr) {
+ allocator = allocator_;
+ }
DCHECK(work_lvn_.get() == nullptr);
- work_lvn_.reset(new (allocator_) LocalValueNumbering(this, bb->id));
+ work_lvn_.reset(new (allocator) LocalValueNumbering(this, bb->id, allocator));
if (bb->block_type == kEntryBlock) {
if ((cu_->access_flags & kAccStatic) == 0) {
// If non-static method, mark "this" as non-null
int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
- work_lvn_->SetSRegNullChecked(this_reg);
+ uint16_t value_name = work_lvn_->GetSRegValueName(this_reg);
+ work_lvn_->SetValueNameNullChecked(value_name);
}
} else {
- // Merge all incoming arcs.
// To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
DCHECK(merge_lvns_.empty());
+ // If we're running the full GVN, the RepeatingTopologicalSortIterator keeps the loop
+ // head stack in the MIRGraph up to date and for a loop head we need to check whether
+ // we're making the initial computation and need to merge only preceding blocks in the
+ // topological order, or we're recalculating a loop head and need to merge all incoming
+ // LVNs. When we're not at a loop head (including having an empty loop head stack) all
+ // predecessors should be preceding blocks and we shall merge all of them anyway.
+ //
+ // If we're running the modification phase of the full GVN, the loop head stack will be
+ // empty and we need to merge all incoming LVNs. If we're running just a simple LVN,
+ // the loop head stack will also be empty and there will be nothing to merge anyway.
+ bool use_all_predecessors = true;
+ uint16_t loop_head_idx = 0u; // Used only if !use_all_predecessors.
+ if (mir_graph_->GetTopologicalSortOrderLoopHeadStack()->Size() != 0) {
+ // Full GVN inside a loop, see if we're at the loop head for the first time.
+ auto top = mir_graph_->GetTopologicalSortOrderLoopHeadStack()->Peek();
+ loop_head_idx = top.first;
+ bool recalculating = top.second;
+ use_all_predecessors = recalculating ||
+ loop_head_idx != mir_graph_->GetTopologicalSortOrderIndexes()->Get(bb->id);
+ }
GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
- for (BasicBlock* pred_bb = cu_->mir_graph->GetBasicBlock(iter.Next());
- pred_bb != nullptr; pred_bb = cu_->mir_graph->GetBasicBlock(iter.Next())) {
- if (lvns_[pred_bb->id] != nullptr) {
+ for (BasicBlock* pred_bb = mir_graph_->GetBasicBlock(iter.Next());
+ pred_bb != nullptr; pred_bb = mir_graph_->GetBasicBlock(iter.Next())) {
+ if (lvns_[pred_bb->id] != nullptr &&
+ (use_all_predecessors ||
+ mir_graph_->GetTopologicalSortOrderIndexes()->Get(pred_bb->id) < loop_head_idx)) {
merge_lvns_.push_back(lvns_[pred_bb->id]);
}
}
@@ -87,21 +108,26 @@
if (bb->catch_entry) {
merge_type = LocalValueNumbering::kCatchMerge;
} else if (bb->last_mir_insn != nullptr &&
- (bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN ||
+ (bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_VOID ||
+ bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN ||
bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_OBJECT ||
bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_WIDE) &&
(bb->first_mir_insn == bb->last_mir_insn ||
- (bb->first_mir_insn->next == bb->last_mir_insn &&
- static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpPhi))) {
+ (static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpPhi &&
+ (bb->first_mir_insn->next == bb->last_mir_insn ||
+ (static_cast<int>(bb->first_mir_insn->next->dalvikInsn.opcode) == kMirOpPhi &&
+ bb->first_mir_insn->next->next == bb->last_mir_insn))))) {
merge_type = LocalValueNumbering::kReturnMerge;
}
// At least one predecessor must have been processed before this bb.
CHECK(!merge_lvns_.empty());
if (merge_lvns_.size() == 1u) {
work_lvn_->MergeOne(*merge_lvns_[0], merge_type);
- BasicBlock* pred_bb = cu_->mir_graph->GetBasicBlock(merge_lvns_[0]->Id());
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(merge_lvns_[0]->Id());
if (HasNullCheckLastInsn(pred_bb, bb->id)) {
- work_lvn_->SetSRegNullChecked(pred_bb->last_mir_insn->ssa_rep->uses[0]);
+ int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
+ uint16_t value_name = merge_lvns_[0]->GetSRegValueName(s_reg);
+ work_lvn_->SetValueNameNullChecked(value_name);
}
} else {
work_lvn_->Merge(merge_type);
@@ -112,30 +138,16 @@
bool GlobalValueNumbering::FinishBasicBlock(BasicBlock* bb) {
DCHECK(work_lvn_ != nullptr);
- DCHECK(bb->id == work_lvn_->Id());
+ DCHECK_EQ(bb->id, work_lvn_->Id());
+ ++bbs_processed_;
merge_lvns_.clear();
- bool change = false;
- // Look for a branch to self or an already processed child.
- // (No need to repeat the LVN if all children are processed later.)
- ChildBlockIterator iter(bb, cu_->mir_graph.get());
- for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
- if (child == bb || lvns_[child->id] != nullptr) {
- // If we found an already processed child, check if the LVN actually differs.
- change = (lvns_[bb->id] == nullptr || !lvns_[bb->id]->Equals(*work_lvn_));
- break;
- }
- }
-
- std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
- lvns_[bb->id] = work_lvn_.release();
-
- bb->visited = true;
+ bool change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
if (change) {
- ChildBlockIterator iter(bb, cu_->mir_graph.get());
- for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
- child->visited = false;
- }
+ std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
+ lvns_[bb->id] = work_lvn_.release();
+ } else {
+ work_lvn_.reset();
}
return change;
}
@@ -188,7 +200,7 @@
uint16_t value_name = merge_names[i];
if (!pred_lvn->IsValueNullChecked(value_name)) {
// Check if the predecessor has an IF_EQZ/IF_NEZ as the last insn.
- const BasicBlock* pred_bb = cu_->mir_graph->GetBasicBlock(pred_lvn->Id());
+ const BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_lvn->Id());
if (!HasNullCheckLastInsn(pred_bb, work_lvn_->Id())) {
return false;
}
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index 7ab77b7..c06ff6f 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -31,7 +31,11 @@
GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator);
~GlobalValueNumbering();
- LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb);
+ // Prepare LVN for the basic block.
+ LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb,
+ ScopedArenaAllocator* allocator = nullptr);
+
+ // Finish processing the basic block.
bool FinishBasicBlock(BasicBlock* bb);
// Checks that the value names didn't overflow.
@@ -42,7 +46,6 @@
// Allow modifications.
void AllowModifications() {
DCHECK(Good());
- cu_->mir_graph->ClearAllVisitedFlags();
modifications_allowed_ = true;
}
@@ -53,7 +56,7 @@
// GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMIR);
+ return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
}
// Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
@@ -182,7 +185,7 @@
}
const BasicBlock* GetBasicBlock(uint16_t bb_id) const {
- return cu_->mir_graph->GetBasicBlock(bb_id);
+ return mir_graph_->GetBasicBlock(bb_id);
}
static bool HasNullCheckLastInsn(const BasicBlock* pred_bb, BasicBlockId succ_id);
@@ -194,7 +197,7 @@
}
MIRGraph* GetMirGraph() const {
- return cu_->mir_graph.get();
+ return mir_graph_;
}
ScopedArenaAllocator* Allocator() const {
@@ -202,12 +205,16 @@
}
CompilationUnit* const cu_;
+ MIRGraph* mir_graph_;
ScopedArenaAllocator* const allocator_;
- static constexpr uint32_t kMaxRepeatCount = 10u;
+ // The number of BBs that we need to process grows exponentially with the number
+ // of nested loops. Don't allow excessive processing for too many nested loops or
+ // otherwise expensive methods.
+ static constexpr uint32_t kMaxBbsToProcessMultiplyFactor = 20u;
- // Track the repeat count to make sure the GVN converges quickly and abort the GVN otherwise.
- uint32_t repeat_count_;
+ uint32_t bbs_processed_;
+ uint32_t max_bbs_to_process_;
// We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
// We usually don't check Good() until the end of LVN unless we're about to modify code.
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 40bd983..e8501cd 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -212,6 +212,7 @@
if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ bb->data_flow_info->live_in_v = live_in_v_;
}
}
cu_.mir_graph->num_blocks_ = count;
@@ -273,23 +274,20 @@
}
void PerformGVN() {
- cu_.mir_graph->SSATransformationStart();
- cu_.mir_graph->ComputeDFSOrders();
- cu_.mir_graph->ComputeDominators();
- cu_.mir_graph->ComputeTopologicalSortOrder();
- cu_.mir_graph->SSATransformationEnd();
- DoPerformGVN<RepeatingPreOrderDfsIterator>();
+ DoPerformGVN<LoopRepeatingTopologicalSortIterator>();
}
void PerformPreOrderDfsGVN() {
- cu_.mir_graph->SSATransformationStart();
- cu_.mir_graph->ComputeDFSOrders();
- cu_.mir_graph->SSATransformationEnd();
DoPerformGVN<RepeatingPreOrderDfsIterator>();
}
template <typename IteratorType>
void DoPerformGVN() {
+ cu_.mir_graph->SSATransformationStart();
+ cu_.mir_graph->ComputeDFSOrders();
+ cu_.mir_graph->ComputeDominators();
+ cu_.mir_graph->ComputeTopologicalSortOrder();
+ cu_.mir_graph->SSATransformationEnd();
ASSERT_TRUE(gvn_ == nullptr);
gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get()));
ASSERT_FALSE(gvn_->CanModify());
@@ -313,7 +311,7 @@
ASSERT_TRUE(gvn_->Good());
ASSERT_FALSE(gvn_->CanModify());
gvn_->AllowModifications();
- PreOrderDfsIterator iterator(cu_.mir_graph.get());
+ TopologicalSortIterator iterator(cu_.mir_graph.get());
for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
if (lvn != nullptr) {
@@ -336,13 +334,23 @@
ssa_reps_(),
allocator_(),
gvn_(),
- value_names_() {
+ value_names_(),
+ live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
cu_.access_flags = kAccStatic; // Don't let "this" interfere with this test.
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
- // gvn_->AllowModifications();
+ // Bind all possible sregs to live vregs for test purposes.
+ live_in_v_->SetInitialBits(kMaxSsaRegs);
+ cu_.mir_graph->ssa_base_vregs_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
+ cu_.mir_graph->ssa_subscripts_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
+ for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+ cu_.mir_graph->ssa_base_vregs_->Insert(i);
+ cu_.mir_graph->ssa_subscripts_->Insert(0);
+ }
}
+ static constexpr size_t kMaxSsaRegs = 16384u;
+
ArenaPool pool_;
CompilationUnit cu_;
size_t mir_count_;
@@ -351,6 +359,7 @@
std::unique_ptr<ScopedArenaAllocator> allocator_;
std::unique_ptr<GlobalValueNumbering> gvn_;
std::vector<uint16_t> value_names_;
+ ArenaBitVector* live_in_v_;
};
class GlobalValueNumberingTestDiamond : public GlobalValueNumberingTest {
@@ -1917,7 +1926,7 @@
PerformPreOrderDfsGVN();
}
-TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, DISABLED_IFieldAndPhi) {
+TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, IFieldAndPhi) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, false }, // Int.
};
@@ -1954,7 +1963,7 @@
EXPECT_EQ(value_names_[5], value_names_[12]);
}
-TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, DISABLED_NullCheck) {
+TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, NullCheck) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, false }, // Int.
};
@@ -2024,14 +2033,10 @@
EXPECT_NE(value_names_[2], value_names_[6]);
EXPECT_NE(value_names_[3], value_names_[7]);
EXPECT_NE(value_names_[4], value_names_[8]);
- EXPECT_NE(value_names_[0], value_names_[12]);
- EXPECT_NE(value_names_[1], value_names_[13]);
- EXPECT_NE(value_names_[2], value_names_[14]);
- EXPECT_NE(value_names_[3], value_names_[15]);
EXPECT_EQ(value_names_[4], value_names_[12]);
- EXPECT_NE(value_names_[5], value_names_[13]);
- EXPECT_NE(value_names_[6], value_names_[14]);
- EXPECT_NE(value_names_[7], value_names_[15]);
+ EXPECT_EQ(value_names_[5], value_names_[13]);
+ EXPECT_EQ(value_names_[6], value_names_[14]);
+ EXPECT_EQ(value_names_[7], value_names_[15]);
EXPECT_EQ(value_names_[12], value_names_[20]);
EXPECT_EQ(value_names_[13], value_names_[21]);
EXPECT_EQ(value_names_[14], value_names_[22]);
@@ -2049,7 +2054,7 @@
}
}
-TEST_F(GlobalValueNumberingTestTwoNestedLoops, DISABLED_IFieldAndPhi) {
+TEST_F(GlobalValueNumberingTestTwoNestedLoops, IFieldAndPhi) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, false }, // Int.
};
@@ -2090,4 +2095,37 @@
EXPECT_EQ(value_names_[3], value_names_[14]);
}
+TEST_F(GlobalValueNumberingTest, NormalPathToCatchEntry) {
+ // When there's an empty catch block, all the exception paths lead to the next block in
+ // the normal path and we can also have normal "taken" or "fall-through" branches to that
+ // path. Check that LocalValueNumbering::PruneNonAliasingRefsForCatch() can handle it.
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
+ };
+ static const MIRDef mirs[] = {
+ DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),
+ };
+ PrepareBasicBlocks(bbs);
+ BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
+ catch_handler->catch_entry = true;
+ // Add successor block info to the check block.
+ BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
+ check_bb->successor_block_list_type = kCatch;
+ check_bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
+ &cu_.arena, 2, kGrowableArraySuccessorBlocks);
+ SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
+ (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
+ successor_block_info->block = catch_handler->id;
+ check_bb->successor_blocks->Insert(successor_block_info);
+ BasicBlock* merge_block = cu_.mir_graph->GetBasicBlock(4u);
+ std::swap(merge_block->taken, merge_block->fall_through);
+ PrepareMIRs(mirs);
+ PerformGVN();
+}
+
} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index d5fd6fe..5997568 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -197,11 +197,7 @@
Map* map, const typename Map::key_type& key) {
auto lb = map->lower_bound(key);
if (lb == map->end() || map->key_comp()(key, lb->first)) {
- map->PutBefore(lb, key, AliasingValues(gvn_->allocator_));
- // The new entry was inserted before lb.
- DCHECK(lb != map->begin());
- --lb;
- DCHECK(!map->key_comp()(lb->first, key) && !map->key_comp()(key, lb->first));
+ lb = map->PutBefore(lb, key, AliasingValues(this));
}
return &lb->second;
}
@@ -308,25 +304,37 @@
return true;
}
-LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id)
+template <typename K>
+void LocalValueNumbering::CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
+ const ScopedArenaSafeMap<K, AliasingValues>& src) {
+ // We need each new AliasingValues (or rather its map members) to be constructed
+ // with our allocator, rather than the allocator of the source.
+ for (const auto& entry : src) {
+ auto it = dest->PutBefore(dest->end(), entry.first, AliasingValues(this));
+ it->second = entry.second; // Map assignments preserve current allocator.
+ }
+}
+
+LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id,
+ ScopedArenaAllocator* allocator)
: gvn_(gvn),
id_(id),
- sreg_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- sreg_wide_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- sfield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- non_aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- non_aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+ sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ sfield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ non_aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ non_aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
global_memory_version_(0u),
- non_aliasing_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- escaped_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), gvn->Allocator()->Adapter()),
- escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), gvn->Allocator()->Adapter()),
- range_checked_(RangeCheckKeyComparator() , gvn->Allocator()->Adapter()),
- null_checked_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- merge_names_(gvn->Allocator()->Adapter()),
- merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), gvn->Allocator()->Adapter()),
+ non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
+ escaped_refs_(std::less<uint16_t>(), allocator->Adapter()),
+ escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), allocator->Adapter()),
+ escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), allocator->Adapter()),
+ range_checked_(RangeCheckKeyComparator() , allocator->Adapter()),
+ null_checked_(std::less<uint16_t>(), allocator->Adapter()),
+ merge_names_(allocator->Adapter()),
+ merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), allocator->Adapter()),
merge_new_memory_version_(kNoValue) {
std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
@@ -352,8 +360,8 @@
}
void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType merge_type) {
- sreg_value_map_ = other.sreg_value_map_;
- sreg_wide_value_map_ = other.sreg_wide_value_map_;
+ CopyLiveSregValues(&sreg_value_map_, other.sreg_value_map_);
+ CopyLiveSregValues(&sreg_wide_value_map_, other.sreg_wide_value_map_);
if (merge_type == kReturnMerge) {
// RETURN or PHI+RETURN. We need only sreg value maps.
@@ -361,7 +369,7 @@
}
non_aliasing_ifield_value_map_ = other.non_aliasing_ifield_value_map_;
- non_aliasing_array_value_map_ = other.non_aliasing_array_value_map_;
+ CopyAliasingValuesMap(&non_aliasing_array_value_map_, other.non_aliasing_array_value_map_);
non_aliasing_refs_ = other.non_aliasing_refs_;
range_checked_ = other.range_checked_;
null_checked_ = other.null_checked_;
@@ -380,8 +388,8 @@
std::copy_n(other.unresolved_ifield_version_, kFieldTypeCount, unresolved_ifield_version_);
std::copy_n(other.unresolved_sfield_version_, kFieldTypeCount, unresolved_sfield_version_);
sfield_value_map_ = other.sfield_value_map_;
- aliasing_ifield_value_map_ = other.aliasing_ifield_value_map_;
- aliasing_array_value_map_ = other.aliasing_array_value_map_;
+ CopyAliasingValuesMap(&aliasing_ifield_value_map_, other.aliasing_ifield_value_map_);
+ CopyAliasingValuesMap(&aliasing_array_value_map_, other.aliasing_array_value_map_);
escaped_refs_ = other.escaped_refs_;
escaped_ifield_clobber_set_ = other.escaped_ifield_clobber_set_;
escaped_array_clobber_set_ = other.escaped_array_clobber_set_;
@@ -445,6 +453,11 @@
void LocalValueNumbering::PruneNonAliasingRefsForCatch() {
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
const BasicBlock* bb = gvn_->GetBasicBlock(lvn->Id());
+ if (UNLIKELY(bb->taken == id_) || UNLIKELY(bb->fall_through == id_)) {
+ // Non-exceptional path to a catch handler means that the catch block was actually
+ // empty and all exceptional paths lead to the shared path after that empty block.
+ continue;
+ }
DCHECK_EQ(bb->taken, kNullBlock);
DCHECK_NE(bb->fall_through, kNullBlock);
const BasicBlock* fall_through_bb = gvn_->GetBasicBlock(bb->fall_through);
@@ -488,8 +501,20 @@
}
}
-template <typename Map, Map LocalValueNumbering::* map_ptr>
-void LocalValueNumbering::IntersectMaps() {
+void LocalValueNumbering::CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src) {
+ auto dest_end = dest->end();
+ ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
+ DCHECK(live_in_v != nullptr);
+ for (const auto& entry : src) {
+ bool live = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
+ if (live) {
+ dest->PutBefore(dest_end, entry.first, entry.second);
+ }
+ }
+}
+
+template <LocalValueNumbering::SregValueMap LocalValueNumbering::* map_ptr>
+void LocalValueNumbering::IntersectSregValueMaps() {
DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
// Find the LVN with the least entries in the set.
@@ -501,18 +526,22 @@
}
// For each key check if it's in all the LVNs.
+ ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
+ DCHECK(live_in_v != nullptr);
for (const auto& entry : least_entries_lvn->*map_ptr) {
- bool checked = true;
- for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- if (lvn != least_entries_lvn) {
- auto it = (lvn->*map_ptr).find(entry.first);
- if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
- checked = false;
- break;
+ bool live_and_same = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
+ if (live_and_same) {
+ for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+ if (lvn != least_entries_lvn) {
+ auto it = (lvn->*map_ptr).find(entry.first);
+ if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
+ live_and_same = false;
+ break;
+ }
}
}
}
- if (checked) {
+ if (live_and_same) {
(this->*map_ptr).PutBefore((this->*map_ptr).end(), entry.first, entry.second);
}
}
@@ -529,6 +558,10 @@
(!cmp(entry, *work_it) && !(work_it->second == entry.second)))) {
work_it = work_map->erase(work_it);
}
+ if (work_it == work_end) {
+ return;
+ }
+ ++work_it;
}
}
@@ -712,11 +745,7 @@
typename Map::iterator hint) {
const typename Map::key_type& key = entry.first;
- (this->*map_ptr).PutBefore(hint, key, AliasingValues(gvn_->allocator_));
- DCHECK(hint != (this->*map_ptr).begin());
- AliasingIFieldValuesMap::iterator it = hint;
- --it;
- DCHECK_EQ(it->first, key);
+ auto it = (this->*map_ptr).PutBefore(hint, key, AliasingValues(this));
AliasingValues* my_values = &it->second;
const AliasingValues* cmp_values = nullptr;
@@ -840,8 +869,8 @@
void LocalValueNumbering::Merge(MergeType merge_type) {
DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
- IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_value_map_>();
- IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_wide_value_map_>();
+ IntersectSregValueMaps<&LocalValueNumbering::sreg_value_map_>();
+ IntersectSregValueMaps<&LocalValueNumbering::sreg_wide_value_map_>();
if (merge_type == kReturnMerge) {
// RETURN or PHI+RETURN. We need only sreg value maps.
return;
@@ -850,13 +879,18 @@
MergeMemoryVersions(merge_type == kCatchMerge);
// Merge non-aliasing maps/sets.
- MergeSets<IFieldLocToValueMap, &LocalValueNumbering::non_aliasing_ifield_value_map_,
- &LocalValueNumbering::MergeNonAliasingIFieldValues>();
- MergeSets<NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
- &LocalValueNumbering::MergeAliasingValues<
- NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
- NonAliasingArrayVersions>>();
IntersectSets<ValueNameSet, &LocalValueNumbering::non_aliasing_refs_>();
+ if (!non_aliasing_refs_.empty() && merge_type == kCatchMerge) {
+ PruneNonAliasingRefsForCatch();
+ }
+ if (!non_aliasing_refs_.empty()) {
+ MergeSets<IFieldLocToValueMap, &LocalValueNumbering::non_aliasing_ifield_value_map_,
+ &LocalValueNumbering::MergeNonAliasingIFieldValues>();
+ MergeSets<NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
+ &LocalValueNumbering::MergeAliasingValues<
+ NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
+ NonAliasingArrayVersions>>();
+ }
// We won't do anything complicated for range checks, just calculate the intersection.
IntersectSets<RangeCheckSet, &LocalValueNumbering::range_checked_>();
@@ -867,7 +901,6 @@
if (merge_type == kCatchMerge) {
// Memory is clobbered. New memory version already created, don't merge aliasing locations.
- PruneNonAliasingRefsForCatch();
return;
}
@@ -1356,8 +1389,8 @@
case Instruction::MONITOR_EXIT:
HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
// If we're running GVN and CanModify(), uneliminated null check indicates bytecode error.
- if ((gvn_->cu_->disable_opt & (1 << kGlobalValueNumbering)) == 0 && gvn_->CanModify() &&
- (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
+ if ((gvn_->GetCompilationUnit()->disable_opt & (1u << kGlobalValueNumbering)) == 0u &&
+ gvn_->CanModify() && (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
LOG(WARNING) << "Bytecode error: MONITOR_EXIT is still null checked at 0x" << std::hex
<< mir->offset << " in " << PrettyMethod(gvn_->cu_->method_idx, *gvn_->cu_->dex_file);
}
@@ -1372,7 +1405,7 @@
if (kLocalValueNumberingEnableFilledNewArrayTracking && mir->ssa_rep->num_uses != 0u) {
AliasingValues* values = GetAliasingValues(&non_aliasing_array_value_map_, array);
// Clear the value if we got a merged version in a loop.
- *values = AliasingValues(gvn_->allocator_);
+ *values = AliasingValues(this);
for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
DCHECK_EQ(High16Bits(i), 0u);
uint16_t index = gvn_->LookupValue(Instruction::CONST, i, 0u, 0);
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 190eab4..855d66d 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -36,7 +36,7 @@
static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
public:
- LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id);
+ LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id, ScopedArenaAllocator* allocator);
BasicBlockId Id() const {
return id_;
@@ -44,9 +44,11 @@
bool Equals(const LocalValueNumbering& other) const;
- // Set non-static method's "this".
- void SetSRegNullChecked(uint16_t s_reg) {
- uint16_t value_name = GetOperandValue(s_reg);
+ uint16_t GetSRegValueName(uint16_t s_reg) const {
+ return GetOperandValue(s_reg);
+ }
+
+ void SetValueNameNullChecked(uint16_t value_name) {
null_checked_.insert(value_name);
}
@@ -76,7 +78,7 @@
// LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMIR);
+ return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
}
// Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
@@ -225,12 +227,12 @@
// store or because they contained the last_stored_value before the store and thus could not
// have changed as a result.
struct AliasingValues {
- explicit AliasingValues(ScopedArenaAllocator* allocator)
+ explicit AliasingValues(LocalValueNumbering* lvn)
: memory_version_before_stores(kNoValue),
last_stored_value(kNoValue),
- store_loc_set(std::less<uint16_t>(), allocator->Adapter()),
+ store_loc_set(std::less<uint16_t>(), lvn->null_checked_.get_allocator()),
last_load_memory_version(kNoValue),
- load_value_map(std::less<uint16_t>(), allocator->Adapter()) {
+ load_value_map(std::less<uint16_t>(), lvn->null_checked_.get_allocator()) {
}
uint16_t memory_version_before_stores; // kNoValue if start version for the field.
@@ -286,6 +288,10 @@
bool HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
uint16_t location, uint16_t value);
+ template <typename K>
+ void CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
+ const ScopedArenaSafeMap<K, AliasingValues>& src);
+
uint16_t MarkNonAliasingNonNull(MIR* mir);
bool IsNonAliasing(uint16_t reg) const;
bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) const;
@@ -314,9 +320,11 @@
template <typename Set, Set LocalValueNumbering::* set_ptr>
void IntersectSets();
+ void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src);
+
// Intersect maps as sets. The value type must be equality-comparable.
- template <typename Map, Map LocalValueNumbering::* map_ptr>
- void IntersectMaps();
+ template <SregValueMap LocalValueNumbering::* map_ptr>
+ void IntersectSregValueMaps();
// Intersect maps as sets. The value type must be equality-comparable.
template <typename Map>
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index b3eae42..e4e944e 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -196,7 +196,7 @@
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get()));
- lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u));
+ lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
gvn_->AllowModifications();
}
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3de4483..0b05bbe 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -841,6 +841,54 @@
// 113 MIR_SELECT
AN_NONE,
+
+ // 114 MirOpConstVector
+ AN_NONE,
+
+ // 115 MirOpMoveVector
+ AN_NONE,
+
+ // 116 MirOpPackedMultiply
+ AN_NONE,
+
+ // 117 MirOpPackedAddition
+ AN_NONE,
+
+ // 118 MirOpPackedSubtract
+ AN_NONE,
+
+ // 119 MirOpPackedShiftLeft
+ AN_NONE,
+
+ // 120 MirOpPackedSignedShiftRight
+ AN_NONE,
+
+ // 121 MirOpPackedUnsignedShiftRight
+ AN_NONE,
+
+ // 122 MirOpPackedAnd
+ AN_NONE,
+
+ // 123 MirOpPackedOr
+ AN_NONE,
+
+ // 124 MirOpPackedXor
+ AN_NONE,
+
+ // 125 MirOpPackedAddReduce
+ AN_NONE,
+
+ // 126 MirOpPackedReduce
+ AN_NONE,
+
+ // 127 MirOpPackedSet
+ AN_NONE,
+
+ // 128 MirOpReserveVectorRegisters
+ AN_NONE,
+
+ // 129 MirOpReturnVectorRegisters
+ AN_NONE,
};
struct MethodStats {
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 1c8a9b5..6aee563 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -69,6 +69,7 @@
MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
: reg_location_(NULL),
+ block_id_map_(std::less<unsigned int>(), arena->Adapter()),
cu_(cu),
ssa_base_vregs_(NULL),
ssa_subscripts_(NULL),
@@ -84,6 +85,9 @@
dfs_post_order_(NULL),
dom_post_order_traversal_(NULL),
topological_order_(nullptr),
+ topological_order_loop_ends_(nullptr),
+ topological_order_indexes_(nullptr),
+ topological_order_loop_head_stack_(nullptr),
i_dom_list_(NULL),
def_block_matrix_(NULL),
temp_scoped_alloc_(),
@@ -98,11 +102,14 @@
num_blocks_(0),
current_code_item_(NULL),
dex_pc_to_block_map_(arena, 0, kGrowableArrayMisc),
+ m_units_(arena->Adapter()),
+ method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
opcode_count_(NULL),
num_ssa_regs_(0),
+ extended_basic_blocks_(arena->Adapter()),
method_sreg_(0),
attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke.
checkstats_(NULL),
@@ -792,7 +799,8 @@
} else if (flags & Instruction::kSwitch) {
cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
}
- if (verify_flags & Instruction::kVerifyVarArgRange) {
+ if (verify_flags & Instruction::kVerifyVarArgRange ||
+ verify_flags & Instruction::kVerifyVarArgRangeNonZero) {
/*
* The Quick backend's runtime model includes a gap between a method's
* argument ("in") vregs and the rest of its vregs. Handling a range instruction
@@ -1526,117 +1534,248 @@
temp_scoped_alloc_.reset();
}
-void MIRGraph::ComputeTopologicalSortOrder() {
- // Clear the nodes.
- ClearAllVisitedFlags();
-
- // Create the topological order if need be.
- if (topological_order_ == nullptr) {
- topological_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks());
- }
- topological_order_->Reset();
-
- ScopedArenaAllocator allocator(&cu_->arena_stack);
- ScopedArenaQueue<BasicBlock*> q(allocator.Adapter());
- ScopedArenaVector<size_t> visited_cnt_values(GetNumBlocks(), 0u, allocator.Adapter());
-
- // Set up visitedCntValues map for all BB. The default value for this counters in the map is zero.
- // also fill initial queue.
- GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
-
- size_t num_blocks = 0u;
- while (true) {
- BasicBlock* bb = iterator.Next();
-
- if (bb == nullptr) {
- break;
+static BasicBlock* SelectTopologicalSortOrderFallBack(
+ MIRGraph* mir_graph, const ArenaBitVector* current_loop,
+ const ScopedArenaVector<size_t>* visited_cnt_values, ScopedArenaAllocator* allocator,
+ ScopedArenaVector<BasicBlockId>* tmp_stack) {
+ // No true loop head has been found but there may be true loop heads after the mess we need
+ // to resolve. To avoid taking one of those, pick the candidate with the highest number of
+ // reachable unvisited nodes. That candidate will surely be a part of a loop.
+ BasicBlock* fall_back = nullptr;
+ size_t fall_back_num_reachable = 0u;
+ // Reuse the same bit vector for each candidate to mark reachable unvisited blocks.
+ ArenaBitVector candidate_reachable(allocator, mir_graph->GetNumBlocks(), false, kBitMapMisc);
+ AllNodesIterator iter(mir_graph);
+ for (BasicBlock* candidate = iter.Next(); candidate != nullptr; candidate = iter.Next()) {
+ if (candidate->hidden || // Hidden, or
+ candidate->visited || // already processed, or
+ (*visited_cnt_values)[candidate->id] == 0u || // no processed predecessors, or
+ (current_loop != nullptr && // outside current loop.
+ !current_loop->IsBitSet(candidate->id))) {
+ continue;
}
+ DCHECK(tmp_stack->empty());
+ tmp_stack->push_back(candidate->id);
+ candidate_reachable.ClearAllBits();
+ size_t num_reachable = 0u;
+ while (!tmp_stack->empty()) {
+ BasicBlockId current_id = tmp_stack->back();
+ tmp_stack->pop_back();
+ BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
+ DCHECK(current_bb != nullptr);
+ ChildBlockIterator child_iter(current_bb, mir_graph);
+ BasicBlock* child_bb = child_iter.Next();
+ for ( ; child_bb != nullptr; child_bb = child_iter.Next()) {
+ DCHECK(!child_bb->hidden);
+ if (child_bb->visited || // Already processed, or
+ (current_loop != nullptr && // outside current loop.
+ !current_loop->IsBitSet(child_bb->id))) {
+ continue;
+ }
+ if (!candidate_reachable.IsBitSet(child_bb->id)) {
+ candidate_reachable.SetBit(child_bb->id);
+ tmp_stack->push_back(child_bb->id);
+ num_reachable += 1u;
+ }
+ }
+ }
+ if (fall_back_num_reachable < num_reachable) {
+ fall_back_num_reachable = num_reachable;
+ fall_back = candidate;
+ }
+ }
+ return fall_back;
+}
+// Compute the set of unvisited blocks from which bb_id is reachable through unvisited blocks.
+static void ComputeUnvisitedReachableFrom(MIRGraph* mir_graph, BasicBlockId bb_id,
+ ArenaBitVector* reachable,
+ ScopedArenaVector<BasicBlockId>* tmp_stack) {
+ // NOTE: Loop heads are indicated by the "visited" flag.
+ DCHECK(tmp_stack->empty());
+ reachable->ClearAllBits();
+ tmp_stack->push_back(bb_id);
+ while (!tmp_stack->empty()) {
+ BasicBlockId current_id = tmp_stack->back();
+ tmp_stack->pop_back();
+ BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
+ DCHECK(current_bb != nullptr);
+ GrowableArray<BasicBlockId>::Iterator iter(current_bb->predecessors);
+ BasicBlock* pred_bb = mir_graph->GetBasicBlock(iter.Next());
+ for ( ; pred_bb != nullptr; pred_bb = mir_graph->GetBasicBlock(iter.Next())) {
+ if (!pred_bb->visited && !reachable->IsBitSet(pred_bb->id)) {
+ reachable->SetBit(pred_bb->id);
+ tmp_stack->push_back(pred_bb->id);
+ }
+ }
+ }
+}
+
+void MIRGraph::ComputeTopologicalSortOrder() {
+ ScopedArenaAllocator allocator(&cu_->arena_stack);
+ unsigned int num_blocks = GetNumBlocks();
+
+ ScopedArenaQueue<BasicBlock*> q(allocator.Adapter());
+ ScopedArenaVector<size_t> visited_cnt_values(num_blocks, 0u, allocator.Adapter());
+ ScopedArenaVector<BasicBlockId> loop_head_stack(allocator.Adapter());
+ size_t max_nested_loops = 0u;
+ ArenaBitVector loop_exit_blocks(&allocator, num_blocks, false, kBitMapMisc);
+ loop_exit_blocks.ClearAllBits();
+
+ // Count the number of blocks to process and add the entry block(s).
+ GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
+ unsigned int num_blocks_to_process = 0u;
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
if (bb->hidden == true) {
continue;
}
- num_blocks += 1u;
- size_t unvisited_predecessor_count = bb->predecessors->Size();
+ num_blocks_to_process += 1u;
- GrowableArray<BasicBlockId>::Iterator pred_iterator(bb->predecessors);
- // To process loops we should not wait for dominators.
- while (true) {
- BasicBlock* pred_bb = GetBasicBlock(pred_iterator.Next());
-
- if (pred_bb == nullptr) {
- break;
- }
-
- // Skip the backward branch or hidden predecessor.
- if (pred_bb->hidden ||
- (pred_bb->dominators != nullptr && pred_bb->dominators->IsBitSet(bb->id))) {
- unvisited_predecessor_count -= 1u;
- }
- }
-
- visited_cnt_values[bb->id] = unvisited_predecessor_count;
-
- // Add entry block to queue.
- if (unvisited_predecessor_count == 0) {
+ if (bb->predecessors->Size() == 0u) {
+ // Add entry block to the queue.
q.push(bb);
}
}
- // We can get a cycle where none of the blocks dominates the other. Therefore don't
- // stop when the queue is empty, continue until we've processed all the blocks.
- AllNodesIterator candidate_iter(this); // For the empty queue case.
- while (num_blocks != 0u) {
- num_blocks -= 1u;
+ // Create the topological order if need be.
+ if (topological_order_ == nullptr) {
+ topological_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, num_blocks);
+ topological_order_loop_ends_ = new (arena_) GrowableArray<uint16_t>(arena_, num_blocks);
+ topological_order_indexes_ = new (arena_) GrowableArray<uint16_t>(arena_, num_blocks);
+ }
+ topological_order_->Reset();
+ topological_order_loop_ends_->Reset();
+ topological_order_indexes_->Reset();
+ topological_order_loop_ends_->Resize(num_blocks);
+ topological_order_indexes_->Resize(num_blocks);
+ for (BasicBlockId i = 0; i != num_blocks; ++i) {
+ topological_order_loop_ends_->Insert(0u);
+ topological_order_indexes_->Insert(static_cast<uint16_t>(-1));
+ }
+
+ // Mark all blocks as unvisited.
+ ClearAllVisitedFlags();
+
+ // For loop heads, keep track of the blocks from which they are reachable without going
+ // through other loop heads. Other loop heads are excluded to detect the heads of nested
+ // loops. The children in this set go into the loop body; the other children jump over the loop.
+ ScopedArenaVector<ArenaBitVector*> loop_head_reachable_from(allocator.Adapter());
+ loop_head_reachable_from.resize(num_blocks, nullptr);
+ // Reuse the same temp stack whenever calculating a loop_head_reachable_from[loop_head_id].
+ ScopedArenaVector<BasicBlockId> tmp_stack(allocator.Adapter());
+
+ while (num_blocks_to_process != 0u) {
BasicBlock* bb = nullptr;
if (!q.empty()) {
+ num_blocks_to_process -= 1u;
// Get top.
bb = q.front();
q.pop();
- } else {
- // Find some block we didn't visit yet that has at least one visited predecessor.
- while (bb == nullptr) {
- BasicBlock* candidate = candidate_iter.Next();
- DCHECK(candidate != nullptr);
- if (candidate->visited || candidate->hidden) {
- continue;
- }
- GrowableArray<BasicBlockId>::Iterator iter(candidate->predecessors);
- for (BasicBlock* pred_bb = GetBasicBlock(iter.Next()); pred_bb != nullptr;
- pred_bb = GetBasicBlock(iter.Next())) {
- if (!pred_bb->hidden && pred_bb->visited) {
- bb = candidate;
- break;
+ if (bb->visited) {
+ // Loop head: it was already processed, mark end and copy exit blocks to the queue.
+ DCHECK(q.empty()) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ uint16_t idx = static_cast<uint16_t>(topological_order_->Size());
+ topological_order_loop_ends_->Put(topological_order_indexes_->Get(bb->id), idx);
+ DCHECK_EQ(loop_head_stack.back(), bb->id);
+ loop_head_stack.pop_back();
+ ArenaBitVector* reachable =
+ loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
+ for (BasicBlockId candidate_id : loop_exit_blocks.Indexes()) {
+ if (reachable == nullptr || reachable->IsBitSet(candidate_id)) {
+ q.push(GetBasicBlock(candidate_id));
+ // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
+ // so clearing the bit has no effect on the iterator.
+ loop_exit_blocks.ClearBit(candidate_id);
}
}
+ continue;
}
+ } else {
+ // Find the new loop head.
+ AllNodesIterator iter(this);
+ while (true) {
+ BasicBlock* candidate = iter.Next();
+ if (candidate == nullptr) {
+ // We did not find a true loop head, fall back to a reachable block in any loop.
+ ArenaBitVector* current_loop =
+ loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
+ bb = SelectTopologicalSortOrderFallBack(this, current_loop, &visited_cnt_values,
+ &allocator, &tmp_stack);
+ DCHECK(bb != nullptr) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ if (kIsDebugBuild && cu_->dex_file != nullptr) {
+ LOG(INFO) << "Topological sort order: Using fall-back in "
+ << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " BB #" << bb->id
+ << " @0x" << std::hex << bb->start_offset
+ << ", num_blocks = " << std::dec << num_blocks;
+ }
+ break;
+ }
+ if (candidate->hidden || // Hidden, or
+ candidate->visited || // already processed, or
+ visited_cnt_values[candidate->id] == 0u || // no processed predecessors, or
+ (!loop_head_stack.empty() && // outside current loop.
+ !loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(candidate->id))) {
+ continue;
+ }
+
+ GrowableArray<BasicBlockId>::Iterator pred_iter(candidate->predecessors);
+ BasicBlock* pred_bb = GetBasicBlock(pred_iter.Next());
+ for ( ; pred_bb != nullptr; pred_bb = GetBasicBlock(pred_iter.Next())) {
+ if (pred_bb != candidate && !pred_bb->visited &&
+ !pred_bb->dominators->IsBitSet(candidate->id)) {
+ break; // Keep non-null pred_bb to indicate failure.
+ }
+ }
+ if (pred_bb == nullptr) {
+ bb = candidate;
+ break;
+ }
+ }
+ // Compute blocks from which the loop head is reachable and process those blocks first.
+ ArenaBitVector* reachable =
+ new (&allocator) ArenaBitVector(&allocator, num_blocks, false, kBitMapMisc);
+ loop_head_reachable_from[bb->id] = reachable;
+ ComputeUnvisitedReachableFrom(this, bb->id, reachable, &tmp_stack);
+ // Now mark it as a loop head. (Even if it's only a fall-back when we don't find a true loop.)
+ loop_head_stack.push_back(bb->id);
+ max_nested_loops = std::max(max_nested_loops, loop_head_stack.size());
}
DCHECK_EQ(bb->hidden, false);
DCHECK_EQ(bb->visited, false);
-
- // We've visited all the predecessors. So, we can visit bb.
bb->visited = true;
// Now add the basic block.
+ uint16_t idx = static_cast<uint16_t>(topological_order_->Size());
+ topological_order_indexes_->Put(bb->id, idx);
topological_order_->Insert(bb->id);
- // Reduce visitedCnt for all the successors and add into the queue ones with visitedCnt equals to zero.
+ // Update visited_cnt_values for children.
ChildBlockIterator succIter(bb, this);
BasicBlock* successor = succIter.Next();
for ( ; successor != nullptr; successor = succIter.Next()) {
- if (successor->visited || successor->hidden) {
+ if (successor->hidden) {
continue;
}
- // one more predecessor was visited.
- DCHECK_NE(visited_cnt_values[successor->id], 0u);
- visited_cnt_values[successor->id] -= 1u;
- if (visited_cnt_values[successor->id] == 0u) {
- q.push(successor);
+ // One more predecessor was visited.
+ visited_cnt_values[successor->id] += 1u;
+ if (visited_cnt_values[successor->id] == successor->predecessors->Size()) {
+ if (loop_head_stack.empty() ||
+ loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(successor->id)) {
+ q.push(successor);
+ } else {
+ DCHECK(!loop_exit_blocks.IsBitSet(successor->id));
+ loop_exit_blocks.SetBit(successor->id);
+ }
}
}
}
+
+ // Prepare the loop head stack for iteration.
+ topological_order_loop_head_stack_ =
+ new (arena_) GrowableArray<std::pair<uint16_t, bool>>(arena_, max_nested_loops);
}
bool BasicBlock::IsExceptionBlock() const {
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 1556a19..491d72e 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -27,6 +27,8 @@
#include "mir_method_info.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
+#include "utils/arena_containers.h"
+#include "utils/scoped_arena_containers.h"
#include "reg_location.h"
#include "reg_storage.h"
@@ -689,6 +691,21 @@
return topological_order_;
}
+ GrowableArray<BasicBlockId>* GetTopologicalSortOrderLoopEnds() {
+ DCHECK(topological_order_loop_ends_ != nullptr);
+ return topological_order_loop_ends_;
+ }
+
+ GrowableArray<BasicBlockId>* GetTopologicalSortOrderIndexes() {
+ DCHECK(topological_order_indexes_ != nullptr);
+ return topological_order_indexes_;
+ }
+
+ GrowableArray<std::pair<uint16_t, bool>>* GetTopologicalSortOrderLoopHeadStack() {
+ DCHECK(topological_order_loop_head_stack_ != nullptr);
+ return topological_order_loop_head_stack_;
+ }
+
bool IsConst(int32_t s_reg) const {
return is_constant_v_->IsBitSet(s_reg);
}
@@ -727,7 +744,7 @@
* would be filtered out with current settings. When orig_sreg field is removed
* from RegLocation, expand s_reg_low to handle all possible cases and remove DCHECK().
*/
- DCHECK_EQ(new_num, static_cast<int16_t>(new_num));
+ CHECK_EQ(new_num, static_cast<int16_t>(new_num));
num_ssa_regs_ = new_num;
}
@@ -1035,8 +1052,8 @@
std::set<uint32_t> catches_;
// TODO: make these private.
- RegLocation* reg_location_; // Map SSA names to location.
- SafeMap<unsigned int, unsigned int> block_id_map_; // Block collapse lookup cache.
+ RegLocation* reg_location_; // Map SSA names to location.
+ ArenaSafeMap<unsigned int, unsigned int> block_id_map_; // Block collapse lookup cache.
static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
static const uint32_t analysis_attributes_[kMirOpLast];
@@ -1132,6 +1149,14 @@
GrowableArray<BasicBlockId>* dfs_post_order_;
GrowableArray<BasicBlockId>* dom_post_order_traversal_;
GrowableArray<BasicBlockId>* topological_order_;
+ // Indexes in topological_order_ need to be only as big as the BasicBlockId.
+ COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), assuming_16_bit_BasicBlockId);
+ // For each loop head, remember the past-the-end index of the loop in the topological order. 0 if not a loop head.
+ GrowableArray<uint16_t>* topological_order_loop_ends_;
+ // Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
+ GrowableArray<uint16_t>* topological_order_indexes_;
+ // Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
+ GrowableArray<std::pair<uint16_t, bool>>* topological_order_loop_head_stack_;
int* i_dom_list_;
ArenaBitVector** def_block_matrix_; // num_dalvik_register x num_blocks.
std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
@@ -1147,15 +1172,15 @@
unsigned int num_blocks_;
const DexFile::CodeItem* current_code_item_;
GrowableArray<uint16_t> dex_pc_to_block_map_; // FindBlock lookup cache.
- std::vector<DexCompilationUnit*> m_units_; // List of methods included in this graph
+ ArenaVector<DexCompilationUnit*> m_units_; // List of methods included in this graph
typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset)
- std::vector<MIRLocation> method_stack_; // Include stack
+ ArenaVector<MIRLocation> method_stack_; // Include stack
int current_method_;
DexOffset current_offset_; // Offset in code units
int def_count_; // Used to estimate size of ssa name storage.
int* opcode_count_; // Dex opcode coverage stats.
int num_ssa_regs_; // Number of names following SSA transformation.
- std::vector<BasicBlockId> extended_basic_blocks_; // Heads of block "traces".
+ ArenaVector<BasicBlockId> extended_basic_blocks_; // Heads of block "traces".
int method_sreg_;
unsigned int attributes_;
Checkstats* checkstats_;
@@ -1177,6 +1202,7 @@
friend class ClassInitCheckEliminationTest;
friend class GlobalValueNumberingTest;
friend class LocalValueNumberingTest;
+ friend class TopologicalSortOrderTest;
};
} // namespace art
diff --git a/compiler/dex/mir_graph_test.cc b/compiler/dex/mir_graph_test.cc
new file mode 100644
index 0000000..932f453
--- /dev/null
+++ b/compiler/dex/mir_graph_test.cc
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_graph.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+class TopologicalSortOrderTest : public testing::Test {
+ protected:
+ struct BBDef {
+ static constexpr size_t kMaxSuccessors = 4;
+ static constexpr size_t kMaxPredecessors = 4;
+
+ BBType type;
+ size_t num_successors;
+ BasicBlockId successors[kMaxPredecessors];
+ size_t num_predecessors;
+ BasicBlockId predecessors[kMaxPredecessors];
+ };
+
+#define DEF_SUCC0() \
+ 0u, { }
+#define DEF_SUCC1(s1) \
+ 1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+ 2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+ 3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+ 4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+ 0u, { }
+#define DEF_PRED1(p1) \
+ 1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+ 2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+ 3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+ 4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+ { type, succ, pred }
+
+ void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+ cu_.mir_graph->block_id_map_.clear();
+ cu_.mir_graph->block_list_.Reset();
+ ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
+ ASSERT_EQ(kNullBlock, defs[0].type);
+ ASSERT_EQ(kEntryBlock, defs[1].type);
+ ASSERT_EQ(kExitBlock, defs[2].type);
+ for (size_t i = 0u; i != count; ++i) {
+ const BBDef* def = &defs[i];
+ BasicBlock* bb = cu_.mir_graph->NewMemBB(def->type, i);
+ cu_.mir_graph->block_list_.Insert(bb);
+ if (def->num_successors <= 2) {
+ bb->successor_block_list_type = kNotUsed;
+ bb->successor_blocks = nullptr;
+ bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+ bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+ } else {
+ bb->successor_block_list_type = kPackedSwitch;
+ bb->fall_through = 0u;
+ bb->taken = 0u;
+ bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
+ &cu_.arena, def->num_successors, kGrowableArraySuccessorBlocks);
+ for (size_t j = 0u; j != def->num_successors; ++j) {
+ SuccessorBlockInfo* successor_block_info =
+ static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+ kArenaAllocSuccessor));
+ successor_block_info->block = j;
+ successor_block_info->key = 0u; // Not used by class init check elimination.
+ bb->successor_blocks->Insert(successor_block_info);
+ }
+ }
+ bb->predecessors = new (&cu_.arena) GrowableArray<BasicBlockId>(
+ &cu_.arena, def->num_predecessors, kGrowableArrayPredecessors);
+ for (size_t j = 0u; j != def->num_predecessors; ++j) {
+ ASSERT_NE(0u, def->predecessors[j]);
+ bb->predecessors->Insert(def->predecessors[j]);
+ }
+ if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+ cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ }
+ }
+ cu_.mir_graph->num_blocks_ = count;
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.Size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_.Get(1);
+ ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_.Get(2);
+ ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+ }
+
+ template <size_t count>
+ void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+ DoPrepareBasicBlocks(defs, count);
+ }
+
+ void ComputeTopologicalSortOrder() {
+ cu_.mir_graph->SSATransformationStart();
+ cu_.mir_graph->ComputeDFSOrders();
+ cu_.mir_graph->ComputeDominators();
+ cu_.mir_graph->ComputeTopologicalSortOrder();
+ cu_.mir_graph->SSATransformationEnd();
+ ASSERT_NE(cu_.mir_graph->topological_order_, nullptr);
+ ASSERT_NE(cu_.mir_graph->topological_order_loop_ends_, nullptr);
+ ASSERT_NE(cu_.mir_graph->topological_order_indexes_, nullptr);
+ ASSERT_EQ(cu_.mir_graph->GetNumBlocks(), cu_.mir_graph->topological_order_indexes_->Size());
+ for (size_t i = 0, size = cu_.mir_graph->GetTopologicalSortOrder()->Size(); i != size; ++i) {
+ ASSERT_LT(cu_.mir_graph->topological_order_->Get(i), cu_.mir_graph->GetNumBlocks());
+ BasicBlockId id = cu_.mir_graph->topological_order_->Get(i);
+ EXPECT_EQ(i, cu_.mir_graph->topological_order_indexes_->Get(id));
+ }
+ }
+
+ void DoCheckOrder(const BasicBlockId* ids, size_t count) {
+ ASSERT_EQ(count, cu_.mir_graph->GetTopologicalSortOrder()->Size());
+ for (size_t i = 0; i != count; ++i) {
+ EXPECT_EQ(ids[i], cu_.mir_graph->GetTopologicalSortOrder()->Get(i)) << i;
+ }
+ }
+
+ template <size_t count>
+ void CheckOrder(const BasicBlockId (&ids)[count]) {
+ DoCheckOrder(ids, count);
+ }
+
+ void DoCheckLoopEnds(const uint16_t* ends, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ ASSERT_LT(i, cu_.mir_graph->GetTopologicalSortOrderLoopEnds()->Size());
+ EXPECT_EQ(ends[i], cu_.mir_graph->GetTopologicalSortOrderLoopEnds()->Get(i)) << i;
+ }
+ }
+
+ template <size_t count>
+ void CheckLoopEnds(const uint16_t (&ends)[count]) {
+ DoCheckLoopEnds(ends, count);
+ }
+
+ TopologicalSortOrderTest()
+ : pool_(),
+ cu_(&pool_) {
+ cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ }
+
+ ArenaPool pool_;
+ CompilationUnit cu_;
+};
+
+TEST_F(TopologicalSortOrderTest, DoWhile) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 5, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 0, 3, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, While) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED2(1, 4)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(3), DEF_PRED1(3)), // Loops to 3.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 5, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 3, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, WhileWithTwoBackEdges) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 6), DEF_PRED3(1, 4, 5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 3), DEF_PRED1(3)), // Loops to 3.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(3), DEF_PRED1(4)), // Loops to 3.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 5, 6, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 4, 0, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, NestedLoop) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 7), DEF_PRED2(1, 6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 6), DEF_PRED2(3, 5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)), // Loops to 4.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(3), DEF_PRED1(4)), // Loops to 3.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 5, 6, 7, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 5, 4, 0, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, NestedLoopHeadLoops) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 6), DEF_PRED2(1, 4)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 3), DEF_PRED2(3, 5)), // Nested head, loops to 3.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)), // Loops to 4.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 5, 6, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 4, 4, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, NestedLoopSameBackBranchBlock) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 6), DEF_PRED2(1, 5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED2(3, 5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 3), DEF_PRED1(4)), // Loops to 4 and 3.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 5, 6, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 4, 4, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, TwoReorderedInnerLoops) {
+ // This is a simplified version of a real code graph where the branch from 8 to 5 must prevent
+ // block 5 from being considered a loop head before processing the loop 7-8.
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(9)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 9), DEF_PRED2(1, 5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 7), DEF_PRED1(3)), // Branch over loop in 5.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 3), DEF_PRED3(4, 6, 8)), // Loops to 4; inner loop.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)), // Loops to 5.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(8), DEF_PRED2(4, 8)), // Loop head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(7, 5), DEF_PRED1(7)), // Loops to 7; branches to 5.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 7, 8, 5, 6, 9, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 7, 0, 5, 0, 7, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, NestedLoopWithBackEdgeAfterOuterLoopBackEdge) {
+ // This is a simplified version of a real code graph. The back-edge from 7 to the inner
+ // loop head 4 comes after the back-edge from 6 to the outer loop head 3. To make this
+ // appear a bit more complex, there's also a back-edge from 5 to 4.
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED2(1, 6)), // Outer loop head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 6), DEF_PRED3(3, 5, 7)), // Inner loop head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)), // Loops to inner loop head 4.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(7, 3), DEF_PRED1(4)), // Loops to outer loop head 3.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(2, 4), DEF_PRED1(6)), // Loops to inner loop head 4.
+ };
+ const BasicBlockId expected_order[] = {
+ // NOTE: The 5 goes before 6 only because 5 is a "fall-through" from 4 while 6 is "taken".
+ 1, 3, 4, 5, 6, 7, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 6, 6, 0, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+TEST_F(TopologicalSortOrderTest, LoopWithTwoEntryPoints) {
+ const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED2(3, 6)), // Fall-back block is chosen as
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED2(3, 4)), // the earlier of these two.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 7), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(6)),
+ };
+ const BasicBlockId expected_order[] = {
+ 1, 3, 4, 5, 6, 7, 2
+ };
+ const uint16_t loop_ends[] = {
+ 0, 0, 5, 0, 0, 0, 0
+ };
+
+ PrepareBasicBlocks(bbs);
+ ComputeTopologicalSortOrder();
+ CheckOrder(expected_order);
+ CheckLoopEnds(loop_ends);
+}
+
+} // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 869c48f..23ceb56 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -37,6 +37,7 @@
void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) {
is_constant_v_->SetBit(ssa_reg);
+ is_constant_v_->SetBit(ssa_reg + 1);
constant_values_[ssa_reg] = Low32Bits(value);
constant_values_[ssa_reg + 1] = High32Bits(value);
}
@@ -321,14 +322,15 @@
return true;
}
// Don't do a separate LVN if we did the GVN.
- bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1 << kGlobalValueNumbering)) != 0;
+ bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1u << kGlobalValueNumbering)) != 0u;
std::unique_ptr<ScopedArenaAllocator> allocator;
std::unique_ptr<GlobalValueNumbering> global_valnum;
std::unique_ptr<LocalValueNumbering> local_valnum;
if (use_lvn) {
allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get()));
- local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id));
+ local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
+ allocator.get()));
}
while (bb != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -737,11 +739,9 @@
ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
if (do_nce) {
/*
- * Set initial state. Be conservative with catch
- * blocks and start with no assumptions about null check
- * status (except for "this").
+ * Set initial state. Catch blocks don't need any special treatment.
*/
- if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
+ if (bb->block_type == kEntryBlock) {
ssa_regs_to_check->ClearAllBits();
// Assume all ins are objects.
for (uint16_t in_reg = cu_->num_dalvik_registers - cu_->num_ins;
@@ -1047,12 +1047,11 @@
}
/*
- * Set initial state. Be conservative with catch
- * blocks and start with no assumptions about class init check status.
+ * Set initial state. Catch blocks don't need any special treatment.
*/
ArenaBitVector* classes_to_check = temp_bit_vector_;
DCHECK(classes_to_check != nullptr);
- if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
+ if (bb->block_type == kEntryBlock) {
classes_to_check->SetInitialBits(temp_bit_vector_size_);
} else if (bb->predecessors->Size() == 1) {
BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
@@ -1142,11 +1141,7 @@
}
bool MIRGraph::ApplyGlobalValueNumberingGate() {
- if ((cu_->disable_opt & (1 << kGlobalValueNumbering)) != 0) {
- return false;
- }
-
- if ((merged_df_flags_ & DF_LVN) == 0) {
+ if ((cu_->disable_opt & (1u << kGlobalValueNumbering)) != 0u) {
return false;
}
@@ -1176,13 +1171,14 @@
temp_gvn_->AllowModifications();
PreOrderDfsIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb);
+ ScopedArenaAllocator allocator(&cu_->arena_stack); // Reclaim memory after each LVN.
+ LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
if (lvn != nullptr) {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
lvn->GetValueNumber(mir);
}
bool change = temp_gvn_->FinishBasicBlock(bb);
- DCHECK(!change);
+ DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
}
} else {
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 8c70b5c..c510b52 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -195,7 +195,7 @@
cu_.mir_graph->SSATransformationEnd();
bool gate_result = cu_.mir_graph->EliminateClassInitChecksGate();
ASSERT_TRUE(gate_result);
- RepeatingTopologicalSortIterator iterator(cu_.mir_graph.get());
+ LoopRepeatingTopologicalSortIterator iterator(cu_.mir_graph.get());
bool change = false;
for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
change = cu_.mir_graph->EliminateClassInitChecks(bb);
@@ -373,30 +373,47 @@
static const SFieldDef sfields[] = {
{ 0u, 1u, 0u, 0u },
{ 1u, 1u, 1u, 1u },
+ { 2u, 1u, 2u, 2u },
+ { 3u, 1u, 3u, 3u },
};
static const BBDef bbs[] = {
DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
- DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
- DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED1(1)),
- DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)), // Catch handler.
- DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)), // The top.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // The throwing insn.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // Catch handler.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)), // The merged block.
};
static const MIRDef mirs[] = {
- DEF_MIR(Instruction::SGET, 3u, 0u),
- DEF_MIR(Instruction::SGET, 3u, 1u),
- DEF_MIR(Instruction::SGET, 4u, 1u),
- DEF_MIR(Instruction::SGET, 5u, 0u), // Not eliminated.
- DEF_MIR(Instruction::SGET, 5u, 1u), // Eliminated.
+ DEF_MIR(Instruction::SGET, 3u, 0u), // Before the exception edge.
+ DEF_MIR(Instruction::SGET, 3u, 1u), // Before the exception edge.
+ DEF_MIR(Instruction::SGET, 4u, 2u), // After the exception edge.
+ DEF_MIR(Instruction::SGET, 4u, 3u), // After the exception edge.
+ DEF_MIR(Instruction::SGET, 5u, 0u), // In catch handler; class init check eliminated.
+ DEF_MIR(Instruction::SGET, 5u, 2u), // In catch handler; class init check not eliminated.
+ DEF_MIR(Instruction::SGET, 6u, 0u), // Class init check eliminated.
+ DEF_MIR(Instruction::SGET, 6u, 1u), // Class init check eliminated.
+ DEF_MIR(Instruction::SGET, 6u, 2u), // Class init check eliminated.
+ DEF_MIR(Instruction::SGET, 6u, 3u), // Class init check not eliminated.
};
static const bool expected_ignore_clinit_check[] = {
- false, false, false, false, true
+ false, false, false, false, true, false, true, true, true, false
};
PrepareSFields(sfields);
PrepareBasicBlocks(bbs);
- BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(4u);
+ BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
catch_handler->catch_entry = true;
+ // Add successor block info to the check block.
+ BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
+ check_bb->successor_block_list_type = kCatch;
+ check_bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
+ &cu_.arena, 2, kGrowableArraySuccessorBlocks);
+ SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
+ (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
+ successor_block_info->block = catch_handler->id;
+ check_bb->successor_blocks->Insert(successor_block_info);
PrepareMIRs(mirs);
PerformClassInitCheckElimination();
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 031c5cf..133593c 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -68,6 +68,9 @@
case kRepeatingTopologicalSortTraversal:
DoWalkBasicBlocks<RepeatingTopologicalSortIterator>(&pass_me_data_holder_, me_pass);
break;
+ case kLoopRepeatingTopologicalSortTraversal:
+ DoWalkBasicBlocks<LoopRepeatingTopologicalSortIterator>(&pass_me_data_holder_, me_pass);
+ break;
case kAllNodes:
DoWalkBasicBlocks<AllNodesIterator>(&pass_me_data_holder_, me_pass);
break;
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index ff69865..c7276eb 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -55,6 +55,7 @@
kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
kTopologicalSortTraversal, /**< @brief Topological Order traversal. */
kRepeatingTopologicalSortTraversal, /**< @brief Repeating Topological Order traversal. */
+ kLoopRepeatingTopologicalSortTraversal, /**< @brief Loop-repeating Topological Order traversal. */
kNoNodes, /**< @brief Skip BasicBlock traversal. */
};
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 5083bbc..35c3597 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -217,7 +217,7 @@
"ldmia", "!0C!!, <!1R>", 2, kFixupNone),
ENCODING_MAP(kThumbLdrRRI5, 0x6800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4,
"ldr", "!0C, [!1C, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrRRR, 0x5800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -226,14 +226,14 @@
ENCODING_MAP(kThumbLdrPcRel, 0x4800,
kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
- | IS_LOAD | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
+ | IS_LOAD_OFF4 | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
ENCODING_MAP(kThumbLdrSpRel, 0x9800,
kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
- | IS_LOAD, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
+ | IS_LOAD_OFF4, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrbRRI5, 0x7800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrb", "!0C, [!1C, #2d]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrbRRR, 0x5c00,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -241,7 +241,7 @@
"ldrb", "!0C, [!1C, !2C]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrhRRI5, 0x8800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF2,
"ldrh", "!0C, [!1C, #!2F]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrhRRR, 0x5a00,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -283,19 +283,19 @@
ENCODING_MAP(kThumbMovRR, 0x1c00,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES | IS_MOVE,
"movs", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMovRR_H2H, 0x46c0,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMovRR_H2L, 0x4640,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMovRR_L2H, 0x4680,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMul, 0x4340,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
@@ -354,7 +354,7 @@
"stmia", "!0C!!, <!1R>", 2, kFixupNone),
ENCODING_MAP(kThumbStrRRI5, 0x6000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
"str", "!0C, [!1C, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbStrRRR, 0x5000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -363,10 +363,10 @@
ENCODING_MAP(kThumbStrSpRel, 0x9000,
kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
- | IS_STORE, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
+ | IS_STORE_OFF4, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbStrbRRI5, 0x7000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strb", "!0C, [!1C, #!2d]", 2, kFixupNone),
ENCODING_MAP(kThumbStrbRRR, 0x5400,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -374,7 +374,7 @@
"strb", "!0C, [!1C, !2C]", 2, kFixupNone),
ENCODING_MAP(kThumbStrhRRI5, 0x8000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF2,
"strh", "!0C, [!1C, #!2F]", 2, kFixupNone),
ENCODING_MAP(kThumbStrhRRR, 0x5200,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -423,11 +423,11 @@
*/
ENCODING_MAP(kThumb2Vldrs, 0xed900a00,
kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4 |
REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4, kFixupVLoad),
ENCODING_MAP(kThumb2Vldrd, 0xed900b00,
kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF |
REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4, kFixupVLoad),
ENCODING_MAP(kThumb2Vmuls, 0xee200a00,
kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -440,11 +440,11 @@
"vmuld", "!0S, !1S, !2S", 4, kFixupNone),
ENCODING_MAP(kThumb2Vstrs, 0xed800a00,
kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
"vstr", "!0s, [!1C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Vstrd, 0xed800b00,
kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
"vstr", "!0S, [!1C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Vsubs, 0xee300a40,
kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -520,19 +520,19 @@
"mov", "!0C, #!1M", 4, kFixupNone),
ENCODING_MAP(kThumb2StrRRI12, 0xf8c00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrRRI12, 0xf8d00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrRRI8Predec, 0xf8400c00,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0C, [!1C, #-!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrRRI8Predec, 0xf8500c00,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0C, [!1C, #-!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2Cbnz, 0xb900, /* Note: does not affect flags */
kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
@@ -549,15 +549,15 @@
"add", "!0C,!1C,#!2d", 4, kFixupNone),
ENCODING_MAP(kThumb2MovRR, 0xea4f0000, /* no setflags encoding */
kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 4, kFixupNone),
ENCODING_MAP(kThumb2Vmovs, 0xeeb00a40,
kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"vmov.f32 ", " !0s, !1s", 4, kFixupNone),
ENCODING_MAP(kThumb2Vmovd, 0xeeb00b40,
kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"vmov.f64 ", " !0S, !1S", 4, kFixupNone),
ENCODING_MAP(kThumb2Ldmia, 0xe8900000,
kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
@@ -613,59 +613,59 @@
"sbfx", "!0C, !1C, #!2d, #!3d", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrRRR, 0xf8500000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldr", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrhRRR, 0xf8300000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrshRRR, 0xf9300000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrsh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrbRRR, 0xf8100000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrsbRRR, 0xf9100000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrsb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrRRR, 0xf8400000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"str", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrhRRR, 0xf8200000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"strh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrbRRR, 0xf8000000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"strb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrhRRI12, 0xf8b00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrh", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrshRRI12, 0xf9b00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsh", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrbRRI12, 0xf8900000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrb", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrsbRRI12, 0xf9900000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsb", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrhRRI12, 0xf8a00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strh", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrbRRI12, 0xf8800000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strb", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2Pop, 0xe8bd0000,
kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -841,7 +841,7 @@
ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+ IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF | NEEDS_FIXUP,
"ldr", "!0C, [r15pc, #!1d]", 4, kFixupLoad),
ENCODING_MAP(kThumb2BCond, 0xf0008000,
kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
@@ -899,19 +899,19 @@
"umull", "!0C, !1C, !2C, !3C", 4, kFixupNone),
ENCODING_MAP(kThumb2Ldrex, 0xe8500f00,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOADX,
"ldrex", "!0C, [!1C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Ldrexd, 0xe8d0007f,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOADX,
"ldrexd", "!0C, !1C, [!2C]", 4, kFixupNone),
ENCODING_MAP(kThumb2Strex, 0xe8400000,
kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
- kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STORE,
+ kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STOREX,
"strex", "!0C, !1C, [!2C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Strexd, 0xe8c00070,
kFmtBitBlt, 3, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8,
- kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STORE,
+ kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STOREX,
"strexd", "!0C, !1C, !2C, [!3C]", 4, kFixupNone),
ENCODING_MAP(kThumb2Clrex, 0xf3bf8f2f,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -927,12 +927,12 @@
"bfc", "!0C,#!1d,#!2d", 4, kFixupNone),
ENCODING_MAP(kThumb2Dmb, 0xf3bf8f50,
kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
"dmb", "#!0B", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrPcReln12, 0xf85f0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
+ IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF,
"ldr", "!0C, [r15pc, -#!1d]", 4, kFixupNone),
ENCODING_MAP(kThumb2Stm, 0xe9000000,
kFmtBitBlt, 19, 16, kFmtBitBlt, 12, 0, kFmtUnused, -1, -1,
@@ -1023,17 +1023,17 @@
ENCODING_MAP(kThumb2LdrdPcRel8, 0xe9df0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 7, 0,
kFmtUnused, -1, -1,
- IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+ IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD_OFF4 | NEEDS_FIXUP,
"ldrd", "!0C, !1C, [pc, #!2E]", 4, kFixupLoad),
ENCODING_MAP(kThumb2LdrdI8, 0xe9d00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
kFmtBitBlt, 7, 0,
- IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD,
+ IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD_OFF4,
"ldrd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrdI8, 0xe9c00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
kFmtBitBlt, 7, 0,
- IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE,
+ IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE_OFF4,
"strd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
};
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 01e17bf..b133991 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -43,8 +43,7 @@
* add rARM_PC, r_disp ; This is the branch from which we compute displacement
* cbnz r_idx, lp
*/
-void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
- RegLocation rl_src) {
+void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
@@ -92,8 +91,7 @@
}
-void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
- RegLocation rl_src) {
+void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
@@ -190,7 +188,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
@@ -261,7 +259,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
@@ -358,11 +356,11 @@
*/
bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm);
NewLIR0(kPseudoMethodEntry);
- constexpr size_t kStackOverflowReservedUsableBytes = kArmStackOverflowReservedBytes -
+ const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm) -
Thread::kStackOverflowSignalReservedBytes;
bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
if (!large_frame) {
/* Load stack limit */
LockTemp(rs_r12);
@@ -381,7 +379,7 @@
// This is done before the callee save instructions to avoid any possibility
// of these overflowing. This uses r12 and that's never saved in a callee
// save.
- OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, kArmStackOverflowReservedBytes);
+ OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, GetStackOverflowReservedBytes(kArm));
Load32Disp(rs_r12, 0, rs_r12);
MarkPossibleStackOverflowException();
}
@@ -401,7 +399,7 @@
const int spill_size = spill_count * 4;
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index d4b0de7..cd6c9cc 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -31,22 +31,17 @@
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
- LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_src, OpSize size) OVERRIDE;
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
@@ -85,12 +80,12 @@
size_t GetInsnSize(LIR* lir) OVERRIDE;
bool IsUnconditionalBranch(LIR* lir);
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
// Get the register class for load/store of a field.
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
// Required for target - Dalvik-level generators.
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2);
void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
@@ -99,12 +94,6 @@
RegLocation rl_src, int scale, bool card_mark);
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -112,21 +101,14 @@
void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
bool GenInlinedSqrt(CallInfo* info);
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
- void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div);
+ bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -138,6 +120,9 @@
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMonitorEnter(int opt_flags, RegLocation rl_src);
void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -146,8 +131,8 @@
int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
@@ -165,7 +150,6 @@
void OpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
@@ -173,14 +157,9 @@
LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
- LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
- LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
LIR* OpVldm(RegStorage r_base, int count);
LIR* OpVstm(RegStorage r_base, int count);
- void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
void OpRegCopyWide(RegStorage dest, RegStorage src);
- void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
- void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
@@ -205,7 +184,13 @@
return false; // Wide FPRs are formed by pairing.
}
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+ size_t GetInstructionOffset(LIR* lir);
+
private:
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
ConditionCode ccode);
LIR* LoadFPConstantValue(int r_dest, int value);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index e06d814..2ad11da 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -49,8 +49,7 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmodf), rl_src1, rl_src2,
- false);
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
rl_result = GetReturn(kFPReg);
StoreValue(rl_dest, rl_result);
return;
@@ -92,8 +91,7 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmod), rl_src1, rl_src2,
- false);
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
rl_result = GetReturnWide(kFPReg);
StoreValueWide(rl_dest, rl_result);
return;
@@ -160,7 +158,7 @@
return;
}
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
+ GenConversionCall(kQuickF2l, rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT: {
rl_src = LoadValueWide(rl_src, kFPReg);
@@ -190,7 +188,7 @@
return;
}
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
+ GenConversionCall(kQuickD2l, rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -338,6 +336,60 @@
StoreValueWide(rl_dest, rl_result);
}
+static RegisterClass RegClassForAbsFP(RegLocation rl_src, RegLocation rl_dest) {
+ // If src is in a core reg or, unlikely, dest has been promoted to a core reg, use core reg.
+ if ((rl_src.location == kLocPhysReg && !rl_src.reg.IsFloat()) ||
+ (rl_dest.location == kLocPhysReg && !rl_dest.reg.IsFloat())) {
+ return kCoreReg;
+ }
+ // If src is in an fp reg or dest has been promoted to an fp reg, use fp reg.
+ if (rl_src.location == kLocPhysReg || rl_dest.location == kLocPhysReg) {
+ return kFPReg;
+ }
+ // With both src and dest in the stack frame we have to perform load+abs+store. Whether this
+ // is faster using a core reg or fp reg depends on the particular CPU. Without further
+ // investigation and testing we prefer core register. (If the result is subsequently used in
+ // another fp operation, the dalvik reg will probably get promoted and that should be handled
+ // by the cases above.)
+ return kCoreReg;
+}
+
+bool ArmMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
+ if (info->result.location == kLocInvalid) {
+ return true; // Result is unused: inlining successful, no code generated.
+ }
+ RegLocation rl_dest = info->result;
+ RegLocation rl_src = UpdateLoc(info->args[0]);
+ RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
+ rl_src = LoadValue(rl_src, reg_class);
+ RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
+ if (reg_class == kFPReg) {
+ NewLIR2(kThumb2Vabss, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
+ }
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
+bool ArmMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
+ if (info->result.location == kLocInvalid) {
+ return true; // Result is unused: inlining successful, no code generated.
+ }
+ RegLocation rl_dest = info->result;
+ RegLocation rl_src = UpdateLocWide(info->args[0]);
+ RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
+ rl_src = LoadValueWide(rl_src, reg_class);
+ RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
+ if (reg_class == kFPReg) {
+ NewLIR2(kThumb2Vabsd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
+ }
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
DCHECK_EQ(cu_->instruction_set, kThumb2);
RegLocation rl_src = info->args[0];
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 2fcc3a5..b9a17cc 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -203,6 +203,30 @@
OpCmpImmBranch(ccode, low_reg, val_lo, taken);
}
+void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) {
+ // TODO: Generalize the IT below to accept constants that need more than a one-instruction load.
+ DCHECK(InexpensiveConstantInt(true_val));
+ DCHECK(InexpensiveConstantInt(false_val));
+
+ if ((true_val == 0 && code == kCondEq) ||
+ (false_val == 0 && code == kCondNe)) {
+ OpRegRegReg(kOpSub, rs_dest, left_op, right_op);
+ DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
+ LIR* it = OpIT(kCondNe, "");
+ LoadConstant(rs_dest, code == kCondEq ? false_val : true_val);
+ OpEndIT(it);
+ return;
+ }
+
+ OpRegReg(kOpCmp, left_op, right_op); // Same?
+ LIR* it = OpIT(code, "E"); // if-convert the test
+ LoadConstant(rs_dest, true_val); // .eq case - load true
+ LoadConstant(rs_dest, false_val);     // .ne case - load false
+ OpEndIT(it);
+}
+
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
@@ -760,18 +784,6 @@
return true;
}
-void ArmMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
- LOG(FATAL) << "Unexpected use of OpLea for Arm";
-}
-
-void ArmMir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
- LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
-}
-
-void ArmMir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
- UNIMPLEMENTED(FATAL) << "Should not be called.";
-}
-
// Generate a CAS with memory_order_seq_cst semantics.
bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK_EQ(cu_->instruction_set, kThumb2);
@@ -937,6 +949,100 @@
return true;
}
+bool ArmMir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ constexpr int kLargeArrayThreshold = 256;
+
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_src_pos = info->args[1];
+ RegLocation rl_dst = info->args[2];
+ RegLocation rl_dst_pos = info->args[3];
+ RegLocation rl_length = info->args[4];
+ // Compile-time check; handle the exception in the non-inlined method to reduce related meta-data.
+ if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
+ (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
+ (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
+ return false;
+ }
+
+ ClobberCallerSave();
+ LockCallTemps(); // Prepare for explicit register usage.
+ LockTemp(rs_r12);
+ RegStorage rs_src = rs_r0;
+ RegStorage rs_dst = rs_r1;
+ LoadValueDirectFixed(rl_src, rs_src);
+ LoadValueDirectFixed(rl_dst, rs_dst);
+
+ // Handle null pointer exception in slow-path.
+ LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
+ LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
+ // Handle potential overlapping in slow-path.
+ LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
+ // Handle exception or big length in slow-path.
+ RegStorage rs_length = rs_r2;
+ LoadValueDirectFixed(rl_length, rs_length);
+ LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
+ // Src bounds check.
+ RegStorage rs_pos = rs_r3;
+ RegStorage rs_arr_length = rs_r12;
+ LoadValueDirectFixed(rl_src_pos, rs_pos);
+ LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_pos, 0, nullptr);
+ Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_pos);
+ LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+ // Dst bounds check.
+ LoadValueDirectFixed(rl_dst_pos, rs_pos);
+ LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_pos, 0, nullptr);
+ Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_pos);
+ LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+
+ // Everything is checked now.
+ OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
+ OpRegReg(kOpAdd, rs_dst, rs_pos);
+ OpRegReg(kOpAdd, rs_dst, rs_pos);
+ OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
+ LoadValueDirectFixed(rl_src_pos, rs_pos);
+ OpRegReg(kOpAdd, rs_src, rs_pos);
+ OpRegReg(kOpAdd, rs_src, rs_pos);
+
+ RegStorage rs_tmp = rs_pos;
+ OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
+
+ // Copy one element.
+ OpRegRegImm(kOpAnd, rs_tmp, rs_length, 2);
+ LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 2);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
+
+ // Copy two elements.
+ LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
+ LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 4);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
+ OpUnconditionalBranch(begin_loop);
+
+ LIR *check_failed = NewLIR0(kPseudoTargetLabel);
+ LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
+ LIR* return_point = NewLIR0(kPseudoTargetLabel);
+
+ src_check_branch->target = check_failed;
+ dst_check_branch->target = check_failed;
+ src_dst_same->target = check_failed;
+ len_neg_or_too_big->target = check_failed;
+ src_pos_negative->target = check_failed;
+ src_bad_len->target = check_failed;
+ dst_pos_negative->target = check_failed;
+ dst_bad_len->target = check_failed;
+ jmp_to_begin_loop->target = begin_loop;
+ jmp_to_ret->target = return_point;
+
+ AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+
+ return true;
+}
+
LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
}
@@ -1027,15 +1133,6 @@
#endif
}
-void ArmMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
- LOG(FATAL) << "Unexpected use GenNotLong()";
-}
-
-void ArmMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div) {
- LOG(FATAL) << "Unexpected use GenDivRemLong()";
-}
-
void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -1073,9 +1170,8 @@
*/
RegLocation rl_result;
if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
- ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
FlushAllRegs();
- CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
rl_result = GetReturnWide(kCoreReg);
StoreValueWide(rl_dest, rl_result);
return;
@@ -1162,29 +1258,23 @@
StoreValueWide(rl_dest, rl_result);
}
-void ArmMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
-}
+void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ switch (opcode) {
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
-void ArmMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
-}
+ default:
+ break;
+ }
-void ArmMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
-}
-
-void ArmMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
-}
-
-void ArmMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of genXoLong for Arm";
+ // Fallback for all other ops.
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
}
/*
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index ef94bbc..0509ad3 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -538,10 +538,6 @@
return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
}
-bool ArmMir2Lir::SupportsVolatileLoadStore(OpSize size) {
- return true;
-}
-
RegisterClass ArmMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
if (UNLIKELY(is_volatile)) {
// On arm, atomic 64-bit load/store requires a core register pair.
@@ -724,16 +720,11 @@
FreeTemp(rs_r3);
}
-RegStorage ArmMir2Lir::LoadHelper(ThreadOffset<4> offset) {
- LoadWordDisp(rs_rARM_SELF, offset.Int32Value(), rs_rARM_LR);
+RegStorage ArmMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ LoadWordDisp(rs_rARM_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rARM_LR);
return rs_rARM_LR;
}
-RegStorage ArmMir2Lir::LoadHelper(ThreadOffset<8> offset) {
- UNIMPLEMENTED(FATAL) << "Should not be called.";
- return RegStorage::InvalidReg();
-}
-
LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
RegStorage tmp = rs_r0;
Load32Disp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 9cbf7b8..bba1a8c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1107,7 +1107,7 @@
// take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
// in LDREXD and recalculate it from r_base.
RegStorage r_temp = AllocTemp();
- RegStorage r_temp_high = AllocFreeTemp(); // We may not have another temp.
+ RegStorage r_temp_high = AllocTemp(false); // We may not have another temp.
if (r_temp_high.Valid()) {
NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
FreeTemp(r_temp_high);
@@ -1160,36 +1160,26 @@
return res;
}
-LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
- LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
- return NULL;
-}
-
-LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
- UNIMPLEMENTED(FATAL) << "Should not be called.";
- return nullptr;
-}
-
LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
LOG(FATAL) << "Unexpected use of OpMem for Arm";
return NULL;
}
-LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_src, OpSize size) {
- LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
- return NULL;
+LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ return OpReg(op, r_tgt);
}
-LIR* ArmMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
- LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
- return NULL;
-}
+size_t ArmMir2Lir::GetInstructionOffset(LIR* lir) {
+ uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+ DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+ size_t offset = (check_flags & IS_TERTIARY_OP) ? lir->operands[2] : 0;
-LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_dest, OpSize size) {
- LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
- return NULL;
+ if (check_flags & SCALED_OFFSET_X2) {
+ offset = offset * 2;
+ } else if (check_flags & SCALED_OFFSET_X4) {
+ offset = offset * 4;
+ }
+ return offset;
}
} // namespace art
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index d0633af..a449cbd 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -142,6 +142,8 @@
rwsp = rw31,
// Aliases which are not defined in "ARM Architecture Reference, register names".
+ rxIP0 = rx16,
+ rxIP1 = rx17,
rxSUSPEND = rx19,
rxSELF = rx18,
rxLR = rx30,
@@ -150,6 +152,8 @@
* the 64-bit view. However, for now we'll define a 32-bit view to keep these from being
* allocated as 32-bit temp registers.
*/
+ rwIP0 = rw16,
+ rwIP1 = rw17,
rwSUSPEND = rw19,
rwSELF = rw18,
rwLR = rw30,
@@ -165,6 +169,10 @@
constexpr RegStorage rs_xzr(RegStorage::kValid | rxzr);
constexpr RegStorage rs_wzr(RegStorage::kValid | rwzr);
+constexpr RegStorage rs_xIP0(RegStorage::kValid | rxIP0);
+constexpr RegStorage rs_wIP0(RegStorage::kValid | rwIP0);
+constexpr RegStorage rs_xIP1(RegStorage::kValid | rxIP1);
+constexpr RegStorage rs_wIP1(RegStorage::kValid | rwIP1);
// Reserved registers.
constexpr RegStorage rs_xSUSPEND(RegStorage::kValid | rxSUSPEND);
constexpr RegStorage rs_xSELF(RegStorage::kValid | rxSELF);
@@ -259,6 +267,8 @@
kA64Fcvtzs2xf, // fcvtzs [100111100s111000000000] rn[9-5] rd[4-0].
kA64Fcvt2Ss, // fcvt [0001111000100010110000] rn[9-5] rd[4-0].
kA64Fcvt2sS, // fcvt [0001111001100010010000] rn[9-5] rd[4-0].
+ kA64Fcvtms2ws, // fcvtms [0001111000110000000000] rn[9-5] rd[4-0].
+ kA64Fcvtms2xS, // fcvtms [1001111001110000000000] rn[9-5] rd[4-0].
kA64Fdiv3fff, // fdiv[000111100s1] rm[20-16] [000110] rn[9-5] rd[4-0].
kA64Fmax3fff, // fmax[000111100s1] rm[20-16] [010010] rn[9-5] rd[4-0].
kA64Fmin3fff, // fmin[000111100s1] rm[20-16] [010110] rn[9-5] rd[4-0].
@@ -270,6 +280,9 @@
kA64Fmov2xS, // fmov[1001111001101111000000] rn[9-5] rd[4-0].
kA64Fmul3fff, // fmul[000111100s1] rm[20-16] [000010] rn[9-5] rd[4-0].
kA64Fneg2ff, // fneg[000111100s100001010000] rn[9-5] rd[4-0].
+ kA64Frintp2ff, // frintp [000111100s100100110000] rn[9-5] rd[4-0].
+ kA64Frintm2ff, // frintm [000111100s100101010000] rn[9-5] rd[4-0].
+ kA64Frintn2ff, // frintn [000111100s100100010000] rn[9-5] rd[4-0].
kA64Frintz2ff, // frintz [000111100s100101110000] rn[9-5] rd[4-0].
kA64Fsqrt2ff, // fsqrt[000111100s100001110000] rn[9-5] rd[4-0].
kA64Fsub3fff, // fsub[000111100s1] rm[20-16] [001110] rn[9-5] rd[4-0].
@@ -323,6 +336,7 @@
kA64Stp4ffXD, // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64Stp4rrXD, // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64StpPost4rrXD, // stp [s010100010] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
+ kA64StpPre4ffXD, // stp [0s10110110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64StpPre4rrXD, // stp [s010100110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64Str3fXD, // str [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
kA64Str4fXxG, // str [1s111100001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 462be54..15c89f2 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -214,7 +214,7 @@
"csneg", "!0r, !1r, !2r, !3c", kFixupNone),
ENCODING_MAP(kA64Dmb1B, NO_VARIANTS(0xd50330bf),
kFmtBitBlt, 11, 8, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
"dmb", "#!0B", kFixupNone),
ENCODING_MAP(WIDE(kA64Eor3Rrl), SF_VARIANTS(0x52000000),
kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
@@ -260,6 +260,14 @@
kFmtRegS, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"fcvt", "!0s, !1S", kFixupNone),
+ ENCODING_MAP(kA64Fcvtms2ws, NO_VARIANTS(0x1e300000),
+ kFmtRegW, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "fcvtms", "!0w, !1s", kFixupNone),
+ ENCODING_MAP(kA64Fcvtms2xS, NO_VARIANTS(0x9e700000),
+ kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "fcvtms", "!0x, !1S", kFixupNone),
ENCODING_MAP(FWIDE(kA64Fdiv3fff), FLOAT_VARIANTS(0x1e201800),
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -274,7 +282,7 @@
"fmin", "!0f, !1f, !2f", kFixupNone),
ENCODING_MAP(FWIDE(kA64Fmov2ff), FLOAT_VARIANTS(0x1e204000),
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"fmov", "!0f, !1f", kFixupNone),
ENCODING_MAP(FWIDE(kA64Fmov2fI), FLOAT_VARIANTS(0x1e201000),
kFmtRegF, 4, 0, kFmtBitBlt, 20, 13, kFmtUnused, -1, -1,
@@ -304,6 +312,18 @@
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"fneg", "!0f, !1f", kFixupNone),
+ ENCODING_MAP(FWIDE(kA64Frintp2ff), FLOAT_VARIANTS(0x1e24c000),
+ kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "frintp", "!0f, !1f", kFixupNone),
+ ENCODING_MAP(FWIDE(kA64Frintm2ff), FLOAT_VARIANTS(0x1e254000),
+ kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "frintm", "!0f, !1f", kFixupNone),
+ ENCODING_MAP(FWIDE(kA64Frintn2ff), FLOAT_VARIANTS(0x1e244000),
+ kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "frintn", "!0f, !1f", kFixupNone),
ENCODING_MAP(FWIDE(kA64Frintz2ff), FLOAT_VARIANTS(0x1e25c000),
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -318,7 +338,7 @@
"fsub", "!0f, !1f, !2f", kFixupNone),
ENCODING_MAP(kA64Ldrb3wXd, NO_VARIANTS(0x39400000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrb", "!0w, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(kA64Ldrb3wXx, NO_VARIANTS(0x38606800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -326,7 +346,7 @@
"ldrb", "!0w, [!1X, !2x]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsb3rXd), CUSTOM_VARIANTS(0x39c00000, 0x39800000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsb", "!0r, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsb3rXx), CUSTOM_VARIANTS(0x38e06800, 0x38a06800),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -334,19 +354,19 @@
"ldrsb", "!0r, [!1X, !2x]", kFixupNone),
ENCODING_MAP(kA64Ldrh3wXF, NO_VARIANTS(0x79400000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrh", "!0w, [!1X, #!2F]", kFixupNone),
ENCODING_MAP(kA64Ldrh4wXxd, NO_VARIANTS(0x78606800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
- kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrh", "!0w, [!1X, !2x, lsl #!3d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsh3rXF), CUSTOM_VARIANTS(0x79c00000, 0x79800000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsh", "!0r, [!1X, #!2F]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsh4rXxd), CUSTOM_VARIANTS(0x78e06800, 0x78906800),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
- kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrsh", "!0r, [!1X, !2x, lsl #!3d]", kFixupNone),
ENCODING_MAP(FWIDE(kA64Ldr2fp), SIZE_VARIANTS(0x1c000000),
kFmtRegF, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
@@ -360,11 +380,11 @@
"ldr", "!0r, !1p", kFixupLoad),
ENCODING_MAP(FWIDE(kA64Ldr3fXD), SIZE_VARIANTS(0xbd400000),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0f, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldr3rXD), SIZE_VARIANTS(0xb9400000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0r, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(FWIDE(kA64Ldr4fXxG), SIZE_VARIANTS(0xbc606800),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -380,11 +400,11 @@
"ldr", "!0r, [!1X], #!2d", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldp4ffXD), CUSTOM_VARIANTS(0x2d400000, 0x6d400000),
kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
"ldp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldp4rrXD), SF_VARIANTS(0x29400000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
"ldp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64LdpPost4rrXD), CUSTOM_VARIANTS(0x28c00000, 0xa8c00000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
@@ -400,11 +420,11 @@
"ldur", "!0r, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldxr2rX), SIZE_VARIANTS(0x885f7c00),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
"ldxr", "!0r, [!1X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldaxr2rX), SIZE_VARIANTS(0x885ffc00),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
"ldaxr", "!0r, [!1X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Lsl3rrr), SF_VARIANTS(0x1ac02000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
@@ -432,7 +452,7 @@
"movz", "!0r, #!1d!2M", kFixupNone),
ENCODING_MAP(WIDE(kA64Mov2rr), SF_VARIANTS(0x2a0003e0),
kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0r, !1r", kFixupNone),
ENCODING_MAP(WIDE(kA64Mvn2rr), SF_VARIANTS(0x2a2003e0),
kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
@@ -508,23 +528,27 @@
"smulh", "!0x, !1x, !2x", kFixupNone),
ENCODING_MAP(WIDE(kA64Stp4ffXD), CUSTOM_VARIANTS(0x2d000000, 0x6d000000),
kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"stp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Stp4rrXD), SF_VARIANTS(0x29000000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"stp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64StpPost4rrXD), CUSTOM_VARIANTS(0x28800000, 0xa8800000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
"stp", "!0r, !1r, [!2X], #!3D", kFixupNone),
+ ENCODING_MAP(WIDE(kA64StpPre4ffXD), CUSTOM_VARIANTS(0x2d800000, 0x6d800000),
+ kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
+ "stp", "!0f, !1f, [!2X, #!3D]!!", kFixupNone),
ENCODING_MAP(WIDE(kA64StpPre4rrXD), CUSTOM_VARIANTS(0x29800000, 0xa9800000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
"stp", "!0r, !1r, [!2X, #!3D]!!", kFixupNone),
ENCODING_MAP(FWIDE(kA64Str3fXD), CUSTOM_VARIANTS(0xbd000000, 0xfd000000),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0f, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(FWIDE(kA64Str4fXxG), CUSTOM_VARIANTS(0xbc206800, 0xfc206800),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -532,7 +556,7 @@
"str", "!0f, [!1X, !2x!3G]", kFixupNone),
ENCODING_MAP(WIDE(kA64Str3rXD), SIZE_VARIANTS(0xb9000000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0r, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Str4rXxG), SIZE_VARIANTS(0xb8206800),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -540,7 +564,7 @@
"str", "!0r, [!1X, !2x!3G]", kFixupNone),
ENCODING_MAP(kA64Strb3wXd, NO_VARIANTS(0x39000000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strb", "!0w, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(kA64Strb3wXx, NO_VARIANTS(0x38206800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -548,7 +572,7 @@
"strb", "!0w, [!1X, !2x]", kFixupNone),
ENCODING_MAP(kA64Strh3wXF, NO_VARIANTS(0x79000000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strh", "!0w, [!1X, #!2F]", kFixupNone),
ENCODING_MAP(kA64Strh4wXxd, NO_VARIANTS(0x78206800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -568,11 +592,11 @@
"stur", "!0r, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Stxr3wrX), SIZE_VARIANTS(0x88007c00),
kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
"stxr", "!0w, !1r, [!2X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Stlxr3wrX), SIZE_VARIANTS(0x8800fc00),
kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
"stlxr", "!0w, !1r, [!2X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Sub4RRdT), SF_VARIANTS(0x51000000),
kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
@@ -723,6 +747,7 @@
<< " @ 0x" << std::hex << lir->dalvik_offset;
if (kFailOnSizeError) {
LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+ << "(" << UNWIDE(encoder->opcode) << ", " << encoder->fmt << ")"
<< ". Expected " << expected << ", got 0x" << std::hex << operand;
} else {
LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 8117c62..28b747b 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -43,8 +43,7 @@
* br r_base
* quit:
*/
-void Arm64Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
- RegLocation rl_src) {
+void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
@@ -96,8 +95,7 @@
}
-void Arm64Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
- RegLocation rl_src) {
+void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
@@ -202,7 +200,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
}
}
@@ -250,7 +248,7 @@
null_check_branch = nullptr; // No null check.
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
}
}
@@ -319,8 +317,13 @@
LockTemp(rs_x5);
LockTemp(rs_x6);
LockTemp(rs_x7);
- LockTemp(rs_x8);
- LockTemp(rs_x9);
+ LockTemp(rs_xIP0);
+ LockTemp(rs_xIP1);
+
+ /* TUNING:
+ * Use AllocTemp() and reuse LR if possible to give us the freedom to adjust the number
+ * of temp registers.
+ */
/*
* We can safely skip the stack overflow check if we're
@@ -330,48 +333,39 @@
NewLIR0(kPseudoMethodEntry);
- constexpr size_t kStackOverflowReservedUsableBytes = kArm64StackOverflowReservedBytes -
- Thread::kStackOverflowSignalReservedBytes;
- const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
const int spill_count = num_core_spills_ + num_fp_spills_;
const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf; // SP 16 byte alignment.
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
- if (!large_frame) {
- // Load stack limit
- LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9);
- }
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ // Load stack limit
+ LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
} else {
- // TODO(Arm64) Implement implicit checks.
// Implicit stack overflow check.
// Generate a load from [sp, #-framesize]. If this is in the stack
// redzone we will get a segmentation fault.
- // Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
- // MarkPossibleStackOverflowException();
- LOG(FATAL) << "Implicit stack overflow checks not implemented.";
+
+ // TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
+ // so that we can avoid the following "sub sp" when spilling?
+ OpRegRegImm(kOpSub, rs_x8, rs_sp, GetStackOverflowReservedBytes(kArm64));
+ LoadWordDisp(rs_x8, 0, rs_x8);
+ MarkPossibleStackOverflowException();
}
}
- if (frame_size_ > 0) {
- OpRegImm64(kOpSub, rs_sp, spill_size);
+ int spilled_already = 0;
+ if (spill_size > 0) {
+ spilled_already = SpillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
+ DCHECK(spill_size == spilled_already || frame_size_ == spilled_already);
}
- /* Need to spill any FP regs? */
- if (fp_spill_mask_) {
- int spill_offset = spill_size - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
- SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
- }
-
- /* Spill core callee saves. */
- if (core_spill_mask_) {
- int spill_offset = spill_size - kArm64PointerSize*num_core_spills_;
- SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
+ if (spilled_already != frame_size_) {
+ OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
}
if (!skip_overflow_check) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
class StackOverflowSlowPath: public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
@@ -386,39 +380,19 @@
m2l_->OpRegImm(kOpAdd, rs_sp, sp_displace_);
m2l_->ClobberCallerSave();
ThreadOffset<8> func_offset = QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow);
- m2l_->LockTemp(rs_x8);
- m2l_->LoadWordDisp(rs_xSELF, func_offset.Int32Value(), rs_x8);
- m2l_->NewLIR1(kA64Br1x, rs_x8.GetReg());
- m2l_->FreeTemp(rs_x8);
+ m2l_->LockTemp(rs_xIP0);
+ m2l_->LoadWordDisp(rs_xSELF, func_offset.Int32Value(), rs_xIP0);
+ m2l_->NewLIR1(kA64Br1x, rs_xIP0.GetReg());
+ m2l_->FreeTemp(rs_xIP0);
}
private:
const size_t sp_displace_;
};
- if (large_frame) {
- // Compare Expected SP against bottom of stack.
- // Branch to throw target if there is not enough room.
- OpRegRegImm(kOpSub, rs_x9, rs_sp, frame_size_without_spills);
- LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x8);
- LIR* branch = OpCmpBranch(kCondUlt, rs_x9, rs_x8, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
- OpRegCopy(rs_sp, rs_x9); // Establish stack after checks.
- } else {
- /*
- * If the frame is small enough we are guaranteed to have enough space that remains to
- * handle signals on the user stack.
- * Establishes stack before checks.
- */
- OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
- LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_x9, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
- }
- } else {
- OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
+ LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
}
- } else {
- OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
}
FlushIns(ArgLocs, rl_method);
@@ -431,8 +405,8 @@
FreeTemp(rs_x5);
FreeTemp(rs_x6);
FreeTemp(rs_x7);
- FreeTemp(rs_x8);
- FreeTemp(rs_x9);
+ FreeTemp(rs_xIP0);
+ FreeTemp(rs_xIP1);
}
void Arm64Mir2Lir::GenExitSequence() {
@@ -445,57 +419,7 @@
NewLIR0(kPseudoMethodExit);
- // Restore saves and drop stack frame.
- // 2 versions:
- //
- // 1. (Original): Try to address directly, then drop the whole frame.
- // Limitation: ldp is a 7b signed immediate. There should have been a DCHECK!
- //
- // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
- // in range. Then drop the rest.
- //
- // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
- // in variant 1.
-
- if (frame_size_ <= 504) {
- // "Magic" constant, 63 (max signed 7b) * 8. Do variant 1.
- // Could be tighter, as the last load is below frame_size_ offset.
- if (fp_spill_mask_) {
- int spill_offset = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
- UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
- }
- if (core_spill_mask_) {
- int spill_offset = frame_size_ - kArm64PointerSize * num_core_spills_;
- UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
- }
-
- OpRegImm64(kOpAdd, rs_sp, frame_size_);
- } else {
- // Second variant. Drop the frame part.
- int drop = 0;
- // TODO: Always use the first formula, as num_fp_spills would be zero?
- if (fp_spill_mask_) {
- drop = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
- } else {
- drop = frame_size_ - kArm64PointerSize * num_core_spills_;
- }
-
- // Drop needs to be 16B aligned, so that SP keeps aligned.
- drop = RoundDown(drop, 16);
-
- OpRegImm64(kOpAdd, rs_sp, drop);
-
- if (fp_spill_mask_) {
- int offset = frame_size_ - drop - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
- UnSpillFPRegs(rs_sp, offset, fp_spill_mask_);
- }
- if (core_spill_mask_) {
- int offset = frame_size_ - drop - kArm64PointerSize * num_core_spills_;
- UnSpillCoreRegs(rs_sp, offset, core_spill_mask_);
- }
-
- OpRegImm64(kOpAdd, rs_sp, frame_size_ - drop);
- }
+ UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
// Finally return.
NewLIR0(kA64Ret);
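
Aside: the entry sequence above keeps the spill area a multiple of 16 bytes so SP stays aligned per the AArch64 ABI, and only subtracts the remainder of the frame separately. A standalone sketch of that arithmetic with made-up example values (illustrative only, not the LIR API):

  #include <cassert>
  #include <cstdio>

  int main() {
    const int kArm64PointerSize = 8;   // bytes per spilled register
    const int num_core_spills = 3;     // example values, not from a real method
    const int num_fp_spills = 2;
    const int frame_size = 96;         // total frame, already 16-byte aligned

    const int spill_count = num_core_spills + num_fp_spills;
    // Round the spill area up to a multiple of 16 so SP stays 16-byte aligned.
    const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;
    const int frame_size_without_spills = frame_size - spill_size;

    assert(spill_size % 16 == 0);
    std::printf("spill_size=%d rest=%d\n", spill_size, frame_size_without_spills);  // 48 and 48
    return 0;
  }
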
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index de97653..3e1c18b 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -59,331 +59,341 @@
bool initialized_;
};
- public:
- Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+ public:
+ Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
- // Required for target - codegen helpers.
- bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit) OVERRIDE;
- bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int64_t lit);
- bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int64_t lit);
- bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
- LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile)
- OVERRIDE;
- LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
- OpSize size) OVERRIDE;
- LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
- OVERRIDE;
- LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_dest, OpSize size) OVERRIDE;
- LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
- LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
- VolatileKind is_volatile) OVERRIDE;
- LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
- OpSize size) OVERRIDE;
- LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale)
- OVERRIDE;
- LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_src, OpSize size) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
- LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target) OVERRIDE;
+ // Required for target - codegen helpers.
+ bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int lit) OVERRIDE;
+ bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int64_t lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size, VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size) OVERRIDE;
+ LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
+ OVERRIDE;
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, VolatileKind is_volatile)
+ OVERRIDE;
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size) OVERRIDE;
+ LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) OVERRIDE;
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+ LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
+ int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
- // Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
- RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
- if (wide_kind == kWide || wide_kind == kRef) {
- return As64BitReg(TargetReg(symbolic_reg));
- } else {
- return Check32BitReg(TargetReg(symbolic_reg));
- }
- }
- RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+ // Required for target - register utilities.
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
+ if (wide_kind == kWide || wide_kind == kRef) {
return As64BitReg(TargetReg(symbolic_reg));
+ } else {
+ return Check32BitReg(TargetReg(symbolic_reg));
}
- RegStorage GetArgMappingToPhysicalReg(int arg_num);
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
- ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
- void AdjustSpillMask();
- void ClobberCallerSave();
- void FreeCallTemps();
- void LockCallTemps();
- void CompilerInitializeRegAlloc();
+ }
+ RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+ return As64BitReg(TargetReg(symbolic_reg));
+ }
+ RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+ RegLocation GetReturnAlt() OVERRIDE;
+ RegLocation GetReturnWideAlt() OVERRIDE;
+ RegLocation LocCReturn() OVERRIDE;
+ RegLocation LocCReturnRef() OVERRIDE;
+ RegLocation LocCReturnDouble() OVERRIDE;
+ RegLocation LocCReturnFloat() OVERRIDE;
+ RegLocation LocCReturnWide() OVERRIDE;
+ ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+ void AdjustSpillMask() OVERRIDE;
+ void ClobberCallerSave() OVERRIDE;
+ void FreeCallTemps() OVERRIDE;
+ void LockCallTemps() OVERRIDE;
+ void CompilerInitializeRegAlloc() OVERRIDE;
- // Required for target - miscellaneous.
- void AssembleLIR();
- uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
- int AssignInsnOffsets();
- void AssignOffsets();
- uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
- void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
- void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
- ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
- const char* GetTargetInstFmt(int opcode);
- const char* GetTargetInstName(int opcode);
- std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- ResourceMask GetPCUseDefEncoding() const OVERRIDE;
- uint64_t GetTargetInstFlags(int opcode);
- size_t GetInsnSize(LIR* lir) OVERRIDE;
- bool IsUnconditionalBranch(LIR* lir);
+ // Required for target - miscellaneous.
+ void AssembleLIR() OVERRIDE;
+ void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+ void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+ ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
+ const char* GetTargetInstFmt(int opcode) OVERRIDE;
+ const char* GetTargetInstName(int opcode) OVERRIDE;
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
+ ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+ uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
+ size_t GetInsnSize(LIR* lir) OVERRIDE;
+ bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
- // Get the register class for load/store of a field.
- RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
- // Required for target - Dalvik-level generators.
- void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation lr_shift);
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale);
- void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift);
- void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- bool GenInlinedReverseBits(CallInfo* info, OpSize size);
- bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
- bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
- bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
- bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
- bool GenInlinedSqrt(CallInfo* info);
- bool GenInlinedPeek(CallInfo* info, OpSize size);
- bool GenInlinedPoke(CallInfo* info, OpSize size);
- bool GenInlinedAbsLong(CallInfo* info);
- void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div);
- RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
- void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheckWide(RegStorage reg);
- void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- void GenExitSequence();
- void GenSpecialExitSequence();
- void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
- void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- void GenSelect(BasicBlock* bb, MIR* mir);
- bool GenMemBarrier(MemBarrierKind barrier_kind);
- void GenMonitorEnter(int opt_flags, RegLocation rl_src);
- void GenMonitorExit(int opt_flags, RegLocation rl_src);
- void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
- void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ // Required for target - Dalvik-level generators.
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation lr_shift) OVERRIDE;
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale) OVERRIDE;
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift) OVERRIDE;
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+ bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
+ bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
+ bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+ bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
+ bool GenInlinedCeil(CallInfo* info) OVERRIDE;
+ bool GenInlinedFloor(CallInfo* info) OVERRIDE;
+ bool GenInlinedRint(CallInfo* info) OVERRIDE;
+ bool GenInlinedRound(CallInfo* info, bool is_double) OVERRIDE;
+ bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
+ bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
+ OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div)
+ OVERRIDE;
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
+ void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
+ void GenExitSequence() OVERRIDE;
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
+ void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) OVERRIDE;
- uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2);
- void UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
- void SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
- void UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
- void SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
+ bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
+ void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
+ void GenMonitorExit(int opt_flags, RegLocation rl_src) OVERRIDE;
+ void GenMoveException(RegLocation rl_dest) OVERRIDE;
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit) OVERRIDE;
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+ void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
- // Required for target - single operation generators.
- LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
- LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
- LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpIT(ConditionCode cond, const char* guide);
- void OpEndIT(LIR* it);
- LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- LIR* OpPcRelLoad(RegStorage reg, LIR* target);
- LIR* OpReg(OpKind op, RegStorage r_dest_src);
- void OpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
- LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
- LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
- LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
- LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
- LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
- LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
- LIR* OpTestSuspend(LIR* target);
- LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
- LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
- LIR* OpVldm(RegStorage r_base, int count);
- LIR* OpVstm(RegStorage r_base, int count);
- void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
- void OpRegCopyWide(RegStorage dest, RegStorage src);
- void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
- void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
+ // Required for target - single operation generators.
+ LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
+ LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
+ void OpEndIT(LIR* it) OVERRIDE;
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+ LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
+ LIR* OpTestSuspend(LIR* target) OVERRIDE;
+ LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
+ LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
+ void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
- LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
- LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
- int shift);
- LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
- A64RegExtEncodings ext, uint8_t amount);
- LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
- LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
- static const ArmEncodingMap EncodingMap[kA64Last];
- int EncodeShift(int code, int amount);
- int EncodeExtend(int extend_type, int amount);
- bool IsExtendEncoding(int encoded_value);
- int EncodeLogicalImmediate(bool is_wide, uint64_t value);
- uint64_t DecodeLogicalImmediate(bool is_wide, int value);
+ bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+ bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) OVERRIDE;
+ bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+ bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+ bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
- ArmConditionCode ArmConditionEncoding(ConditionCode code);
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
+ void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
- void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
-
- int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
- NextCallInsn next_call_insn,
- const MethodReference& target_method,
- uint32_t vtable_idx,
- uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
- bool skip_this);
-
- int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
NextCallInsn next_call_insn,
const MethodReference& target_method,
uint32_t vtable_idx,
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
- bool skip_this);
- InToRegStorageMapping in_to_reg_storage_mapping_;
+ bool skip_this) OVERRIDE;
- bool WideGPRsAreAliases() OVERRIDE {
- return true; // 64b architecture.
- }
- bool WideFPRsAreAliases() OVERRIDE {
- return true; // 64b architecture.
- }
+ int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this) OVERRIDE;
- private:
- /**
- * @brief Given register xNN (dNN), returns register wNN (sNN).
- * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
- * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
- * @see As64BitReg
- */
- RegStorage As32BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 64b register";
- } else {
- LOG(WARNING) << "Expected 64b register";
- return reg;
- }
+ bool WideGPRsAreAliases() OVERRIDE {
+ return true; // 64b architecture.
+ }
+ bool WideFPRsAreAliases() OVERRIDE {
+ return true; // 64b architecture.
+ }
+
+ size_t GetInstructionOffset(LIR* lir) OVERRIDE;
+
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
+ private:
+ /**
+ * @brief Given register xNN (dNN), returns register wNN (sNN).
+ * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
+ * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
+ * @see As64BitReg
+ */
+ RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register";
+ } else {
+ LOG(WARNING) << "Expected 64b register";
+ return reg;
}
- RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
}
+ RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
- RegStorage Check32BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 32b register";
- } else {
- LOG(WARNING) << "Checked for 32b register";
- return As32BitReg(reg);
- }
+ RegStorage Check32BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 32b register";
+ } else {
+ LOG(WARNING) << "Checked for 32b register";
+ return As32BitReg(reg);
}
- return reg;
}
+ return reg;
+ }
- /**
- * @brief Given register wNN (sNN), returns register xNN (dNN).
- * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
- * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
- * @see As32BitReg
- */
- RegStorage As64BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 32b register";
- } else {
- LOG(WARNING) << "Expected 32b register";
- return reg;
- }
+ /**
+ * @brief Given register wNN (sNN), returns register xNN (dNN).
+ * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
+ * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
+ * @see As32BitReg
+ */
+ RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register";
+ } else {
+ LOG(WARNING) << "Expected 32b register";
+ return reg;
}
- RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
}
+ RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
- RegStorage Check64BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 64b register";
- } else {
- LOG(WARNING) << "Checked for 64b register";
- return As64BitReg(reg);
- }
+ RegStorage Check64BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 64b register";
+ } else {
+ LOG(WARNING) << "Checked for 64b register";
+ return As64BitReg(reg);
}
- return reg;
}
+ return reg;
+ }
- LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
- LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
- void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
- void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
- void AssignDataOffsets();
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, bool check_zero);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+ int32_t EncodeImmSingle(uint32_t bits);
+ int32_t EncodeImmDouble(uint64_t bits);
+ LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
+ LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
+ void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+ void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+ void AssignDataOffsets();
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, bool check_zero);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+ size_t GetLoadStoreSize(LIR* lir);
+
+ bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int64_t lit);
+
+ uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
+ int AssignInsnOffsets();
+ void AssignOffsets();
+ uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
+
+ // Spill core and FP registers. Returns the SP difference: either spill size, or whole
+ // frame size.
+ int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+ // Unspill core and FP registers.
+ void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+ void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+
+ LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
+ LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
+
+ LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
+ LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ int shift);
+ int EncodeShift(int code, int amount);
+
+ LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+ A64RegExtEncodings ext, uint8_t amount);
+ LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+ A64RegExtEncodings ext, uint8_t amount);
+ int EncodeExtend(int extend_type, int amount);
+ bool IsExtendEncoding(int encoded_value);
+
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+
+ int EncodeLogicalImmediate(bool is_wide, uint64_t value);
+ uint64_t DecodeLogicalImmediate(bool is_wide, int value);
+ ArmConditionCode ArmConditionEncoding(ConditionCode code);
+
+ // Helper used in the two GenSelect variants.
+ void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
+ int result_reg_class);
+
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div);
+
+ InToRegStorageMapping in_to_reg_storage_mapping_;
+ static const ArmEncodingMap EncodingMap[kA64Last];
};
} // namespace art
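
Aside: most of the header churn above adds the OVERRIDE marker to virtual methods. As a reminder of what that buys, a minimal sketch with hypothetical classes (not ART types) showing how an override specifier turns a signature mismatch into a compile error:

  #include <cstdio>

  struct Mir2LirBase {                 // hypothetical stand-in for the common codegen class
    virtual ~Mir2LirBase() {}
    virtual void GenExitSequence() {}
  };

  struct Arm64Backend : Mir2LirBase {
    // 'override' (OVERRIDE in the ART sources) makes the compiler verify this really
    // overrides a base method; a misspelled name or changed parameter type would no
    // longer compile silently as a new virtual.
    void GenExitSequence() override { std::printf("arm64 exit sequence\n"); }
  };

  int main() {
    Arm64Backend backend;
    Mir2LirBase* base = &backend;
    base->GenExitSequence();           // dispatches to the Arm64Backend override
    return 0;
  }
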
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index 6594c4b..d0b2636 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -17,6 +17,7 @@
#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "utils.h"
namespace art {
@@ -45,8 +46,7 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmodf), rl_src1, rl_src2,
- false);
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
rl_result = GetReturn(kFPReg);
StoreValue(rl_dest, rl_result);
return;
@@ -89,12 +89,11 @@
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
{
- ThreadOffset<8> helper_offset = QUICK_ENTRYPOINT_OFFSET(8, pFmod);
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(kQuickFmod);
LoadValueDirectWideFixed(rl_src1, rs_d0);
LoadValueDirectWideFixed(rl_src2, rs_d1);
ClobberCallerSave();
- CallHelper(r_tgt, helper_offset, false);
+ CallHelper(r_tgt, kQuickFmod, false);
}
rl_result = GetReturnWide(kFPReg);
StoreValueWide(rl_dest, rl_result);
@@ -323,12 +322,57 @@
StoreValueWide(rl_dest, rl_result);
}
+static RegisterClass RegClassForAbsFP(RegLocation rl_src, RegLocation rl_dest) {
+ // If src is in a core reg or, unlikely, dest has been promoted to a core reg, use core reg.
+ if ((rl_src.location == kLocPhysReg && !rl_src.reg.IsFloat()) ||
+ (rl_dest.location == kLocPhysReg && !rl_dest.reg.IsFloat())) {
+ return kCoreReg;
+ }
+ // If src is in an fp reg or dest has been promoted to an fp reg, use fp reg.
+ if (rl_src.location == kLocPhysReg || rl_dest.location == kLocPhysReg) {
+ return kFPReg;
+ }
+ // With both src and dest in the stack frame we have to perform load+abs+store. Whether this
+ // is faster using a core reg or fp reg depends on the particular CPU. For example, on A53
+ // it's faster using core reg while on A57 it's faster with fp reg, the difference being
+  // bigger on the A53. Without further investigation and testing, we prefer the core register.
+ // (If the result is subsequently used in another fp operation, the dalvik reg will probably
+ // get promoted and that should be handled by the cases above.)
+ return kCoreReg;
+}
+
+bool Arm64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
+ if (info->result.location == kLocInvalid) {
+ return true; // Result is unused: inlining successful, no code generated.
+ }
+ RegLocation rl_dest = info->result;
+ RegLocation rl_src = UpdateLoc(info->args[0]);
+ RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
+ rl_src = LoadValue(rl_src, reg_class);
+ RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
+ if (reg_class == kFPReg) {
+ NewLIR2(kA64Fabs2ff, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ NewLIR4(kA64Ubfm4rrdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 30);
+ }
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
bool Arm64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- RegLocation rl_src = info->args[0];
- rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_dest = InlineTargetWide(info);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR4(WIDE(kA64Ubfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 62);
+ if (info->result.location == kLocInvalid) {
+ return true; // Result is unused: inlining successful, no code generated.
+ }
+ RegLocation rl_dest = info->result;
+ RegLocation rl_src = UpdateLocWide(info->args[0]);
+ RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
+ rl_src = LoadValueWide(rl_src, reg_class);
+ RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
+ if (reg_class == kFPReg) {
+ NewLIR2(FWIDE(kA64Fabs2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ NewLIR4(WIDE(kA64Ubfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 62);
+ }
StoreValueWide(rl_dest, rl_result);
return true;
}
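
Aside: the core-register path above implements abs with UBFM #0,#30 / #0,#62, i.e. it keeps every bit except the sign bit. A minimal bit-level sketch of the same idea (plain C++, assuming IEEE-754 doubles; not the LIR API):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Clears bit 63 (the sign) -- the same effect as UBFM xd, xn, #0, #62 on the raw bits.
  double AbsViaBits(double x) {
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= ~(UINT64_C(1) << 63);
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
  }

  int main() {
    std::printf("%f %f\n", AbsViaBits(-2.5), AbsViaBits(3.25));  // 2.500000 3.250000
    return 0;
  }
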
@@ -343,6 +387,52 @@
return true;
}
+bool Arm64Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(info);
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(FWIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(info);
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(FWIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedRint(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(info);
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(FWIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ int32_t encoded_imm = EncodeImmSingle(bit_cast<float, uint32_t>(0.5f));
+ ArmOpcode wide = (is_double) ? FWIDE(0) : FUNWIDE(0);
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
+ rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ RegStorage r_tmp = (is_double) ? AllocTempDouble() : AllocTempSingle();
+ // 0.5f and 0.5d are encoded in the same way.
+ NewLIR2(kA64Fmov2fI | wide, r_tmp.GetReg(), encoded_imm);
+ NewLIR3(kA64Fadd3fff | wide, rl_src.reg.GetReg(), rl_src.reg.GetReg(), r_tmp.GetReg());
+ NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
+ return true;
+}
+
bool Arm64Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
DCHECK_EQ(cu_->instruction_set, kArm64);
int op = (is_min) ? kA64Fmin3fff : kA64Fmax3fff;
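
Aside: GenInlinedRound above materializes 0.5, adds it, then converts with fcvtms (round toward minus infinity), which matches the floor(x + 0.5) definition of Math.round. A scalar sketch of that sequence (plain C++, illustrative only):

  #include <cmath>
  #include <cstdint>
  #include <cstdio>

  // round(x) == floor(x + 0.5): add 0.5, then round toward minus infinity,
  // mirroring the fadd + fcvtms pair in the generated code.
  int64_t RoundLikeFcvtms(double x) {
    return static_cast<int64_t>(std::floor(x + 0.5));
  }

  int main() {
    std::printf("%lld %lld %lld\n",
                (long long)RoundLikeFcvtms(2.5),    // 3
                (long long)RoundLikeFcvtms(-2.5),   // -2
                (long long)RoundLikeFcvtms(1.4));   // 1
    return 0;
  }
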
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 6dc4a7a..d00c57d 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -22,6 +22,7 @@
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
+#include "utils.h"
namespace art {
@@ -85,141 +86,129 @@
StoreValueWide(rl_dest, rl_result);
}
-void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- RegLocation rl_result;
- RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
- RegLocation rl_dest = mir_graph_->GetDest(mir);
- RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
- RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
+static constexpr bool kUseDeltaEncodingInGenSelect = false;
- rl_src = LoadValue(rl_src, src_reg_class);
+void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
+ RegStorage rs_dest, int result_reg_class) {
+ if (false_val == 0 || // 0 is better as first operand.
+ true_val == 1 || // Potentially Csinc.
+ true_val == -1 || // Potentially Csinv.
+ true_val == false_val + 1) { // Potentially Csinc.
+ ccode = NegateComparison(ccode);
+ std::swap(true_val, false_val);
+ }
+
+ ArmConditionCode code = ArmConditionEncoding(ccode);
+
+ int opcode; // The opcode.
+ RegStorage left_op = RegStorage::InvalidReg(); // The operands.
+ RegStorage right_op = RegStorage::InvalidReg(); // The operands.
+
+ bool is_wide = rs_dest.Is64Bit();
+
+ RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;
+
+ if (true_val == 0) {
+ left_op = zero_reg;
+ } else {
+ left_op = rs_dest;
+ LoadConstantNoClobber(rs_dest, true_val);
+ }
+ if (false_val == 1) {
+ right_op = zero_reg;
+ opcode = kA64Csinc4rrrc;
+ } else if (false_val == -1) {
+ right_op = zero_reg;
+ opcode = kA64Csinv4rrrc;
+ } else if (false_val == true_val + 1) {
+ right_op = left_op;
+ opcode = kA64Csinc4rrrc;
+ } else if (false_val == -true_val) {
+ right_op = left_op;
+ opcode = kA64Csneg4rrrc;
+ } else if (false_val == ~true_val) {
+ right_op = left_op;
+ opcode = kA64Csinv4rrrc;
+ } else if (true_val == 0) {
+ // left_op is zero_reg.
+ right_op = rs_dest;
+ LoadConstantNoClobber(rs_dest, false_val);
+ opcode = kA64Csel4rrrc;
+ } else {
+ // Generic case.
+ RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
+ if (is_wide) {
+ if (t_reg2.Is32Bit()) {
+ t_reg2 = As64BitReg(t_reg2);
+ }
+ } else {
+ if (t_reg2.Is64Bit()) {
+ t_reg2 = As32BitReg(t_reg2);
+ }
+ }
+
+ if (kUseDeltaEncodingInGenSelect) {
+ int32_t delta = false_val - true_val;
+ uint32_t abs_val = delta < 0 ? -delta : delta;
+
+ if (abs_val < 0x1000) { // TODO: Replace with InexpensiveConstant with opcode.
+ // Can encode as immediate to an add.
+ right_op = t_reg2;
+ OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
+ }
+ }
+
+ // Load as constant.
+ if (!right_op.Valid()) {
+ LoadConstantNoClobber(t_reg2, false_val);
+ right_op = t_reg2;
+ }
+
+ opcode = kA64Csel4rrrc;
+ }
+
+ DCHECK(left_op.Valid() && right_op.Valid());
+ NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
+ code);
+}
+
+void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) {
+ DCHECK(rs_dest.Valid());
+ OpRegReg(kOpCmp, left_op, right_op);
+ GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
+}
+
+void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
+ rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
OpRegImm(kOpCmp, rl_src.reg, 0);
- ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);
+ RegLocation rl_dest = mir_graph_->GetDest(mir);
// The kMirOpSelect has two variants, one for constants and one for moves.
- bool is_wide = rl_dest.ref || rl_dest.wide;
-
if (mir->ssa_rep->num_uses == 1) {
- uint32_t true_val = mir->dalvikInsn.vB;
- uint32_t false_val = mir->dalvikInsn.vC;
-
- int opcode; // The opcode.
- int left_op, right_op; // The operands.
- bool rl_result_evaled = false;
-
- // Check some simple cases.
- // TODO: Improve this.
- int zero_reg = (is_wide ? rs_xzr : rs_wzr).GetReg();
-
- if ((true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0)) {
- // CSInc cheap based on wzr.
- if (true_val == 1) {
- // Negate.
- code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
- }
-
- left_op = right_op = zero_reg;
- opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
- } else if ((true_val == 0 && false_val == 0xFFFFFFFF) ||
- (true_val == 0xFFFFFFFF && false_val == 0)) {
- // CSneg cheap based on wzr.
- if (true_val == 0xFFFFFFFF) {
- // Negate.
- code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
- }
-
- left_op = right_op = zero_reg;
- opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
- } else if (true_val == 0 || false_val == 0) {
- // Csel half cheap based on wzr.
- rl_result = EvalLoc(rl_dest, result_reg_class, true);
- rl_result_evaled = true;
- if (false_val == 0) {
- // Negate.
- code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
- }
- LoadConstantNoClobber(rl_result.reg, true_val == 0 ? false_val : true_val);
- left_op = zero_reg;
- right_op = rl_result.reg.GetReg();
- opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
- } else if (true_val == 1 || false_val == 1) {
- // CSInc half cheap based on wzr.
- rl_result = EvalLoc(rl_dest, result_reg_class, true);
- rl_result_evaled = true;
- if (true_val == 1) {
- // Negate.
- code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
- }
- LoadConstantNoClobber(rl_result.reg, true_val == 1 ? false_val : true_val);
- left_op = rl_result.reg.GetReg();
- right_op = zero_reg;
- opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
- } else if (true_val == 0xFFFFFFFF || false_val == 0xFFFFFFFF) {
- // CSneg half cheap based on wzr.
- rl_result = EvalLoc(rl_dest, result_reg_class, true);
- rl_result_evaled = true;
- if (true_val == 0xFFFFFFFF) {
- // Negate.
- code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
- }
- LoadConstantNoClobber(rl_result.reg, true_val == 0xFFFFFFFF ? false_val : true_val);
- left_op = rl_result.reg.GetReg();
- right_op = zero_reg;
- opcode = is_wide ? WIDE(kA64Csinv4rrrc) : kA64Csinv4rrrc;
- } else if ((true_val + 1 == false_val) || (false_val + 1 == true_val)) {
- // Load a constant and use CSinc. Use rl_result.
- if (false_val + 1 == true_val) {
- // Negate.
- code = ArmConditionEncoding(NegateComparison(mir->meta.ccode));
- true_val = false_val;
- }
-
- rl_result = EvalLoc(rl_dest, result_reg_class, true);
- rl_result_evaled = true;
- LoadConstantNoClobber(rl_result.reg, true_val);
- left_op = right_op = rl_result.reg.GetReg();
- opcode = is_wide ? WIDE(kA64Csinc4rrrc) : kA64Csinc4rrrc;
- } else {
- // Csel. The rest. Use rl_result and a temp.
- // TODO: To minimize the constants being loaded, check whether one can be inexpensively
- // loaded as n - 1 or ~n.
- rl_result = EvalLoc(rl_dest, result_reg_class, true);
- rl_result_evaled = true;
- LoadConstantNoClobber(rl_result.reg, true_val);
- RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
- if (rl_dest.wide) {
- if (t_reg2.Is32Bit()) {
- t_reg2 = As64BitReg(t_reg2);
- }
- }
- LoadConstantNoClobber(t_reg2, false_val);
-
- // Use csel.
- left_op = rl_result.reg.GetReg();
- right_op = t_reg2.GetReg();
- opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
- }
-
- if (!rl_result_evaled) {
- rl_result = EvalLoc(rl_dest, result_reg_class, true);
- }
-
- NewLIR4(opcode, rl_result.reg.GetReg(), left_op, right_op, code);
+ RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
+ GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
+ rl_dest.ref ? kRefReg : kCoreReg);
+ StoreValue(rl_dest, rl_result);
} else {
RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
+ RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
rl_true = LoadValue(rl_true, result_reg_class);
rl_false = LoadValue(rl_false, result_reg_class);
- rl_result = EvalLoc(rl_dest, result_reg_class, true);
+ RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);
+ bool is_wide = rl_dest.ref || rl_dest.wide;
int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
NewLIR4(opcode, rl_result.reg.GetReg(),
- rl_true.reg.GetReg(), rl_false.reg.GetReg(), code);
+ rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
+ StoreValue(rl_dest, rl_result);
}
- StoreValue(rl_dest, rl_result);
}
void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
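
Aside: the rewritten GenSelect leans on the A64 conditional-select family; the identities it exploits (csinc when the false value is t + 1, csinv when it is ~t, csneg when it is -t) can be modelled with plain scalar helpers (illustrative semantics only, not generated code):

  #include <cassert>
  #include <cstdint>

  // Scalar models of the A64 conditional selects: the result is n when the condition
  // holds, otherwise a cheap function of m. This is why GenSelect tries to express the
  // false value as m + 1, ~m or -m before falling back to a plain csel.
  int32_t Csel (bool c, int32_t n, int32_t m) { return c ? n : m; }
  int32_t Csinc(bool c, int32_t n, int32_t m) { return c ? n : m + 1; }
  int32_t Csinv(bool c, int32_t n, int32_t m) { return c ? n : ~m; }
  int32_t Csneg(bool c, int32_t n, int32_t m) { return c ? n : -m; }

  int main() {
    // Select 0/1 without loading any constant: csinc on the zero register.
    assert(Csinc(true, 0, 0) == 0 && Csinc(false, 0, 0) == 1);
    // Select 0/-1 the same way with csinv.
    assert(Csinv(true, 0, 0) == 0 && Csinv(false, 0, 0) == -1);
    // Select t / -t with a single loaded constant and csneg.
    assert(Csneg(true, 7, 7) == 7 && Csneg(false, 7, 7) == -7);
    assert(Csel (true, 3, 9) == 3 && Csel (false, 3, 9) == 9);
    return 0;
  }
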
@@ -283,6 +272,7 @@
ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
}
+ // TODO: Use tbz/tbnz for < 0 or >= 0.
}
if (branch == nullptr) {
@@ -296,7 +286,8 @@
LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
RegStorage base_reg, int offset, int check_value,
- LIR* target) {
+ LIR* target, LIR** compare) {
+ DCHECK(compare == nullptr);
// It is possible that temp register is 64-bit. (ArgReg or RefReg)
// Always compare 32-bit value no matter what temp_reg is.
if (temp_reg.Is64Bit()) {
@@ -712,18 +703,6 @@
return true;
}
-void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
- LOG(FATAL) << "Unexpected use of OpLea for Arm64";
-}
-
-void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
- UNIMPLEMENTED(FATAL) << "Should not be used.";
-}
-
-void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
- LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
-}
-
bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK_EQ(cu_->instruction_set, kArm64);
// Unused - RegLocation rl_src_unsafe = info->args[0];
@@ -810,7 +789,123 @@
return true;
}
+bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ constexpr int kLargeArrayThreshold = 512;
+
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_src_pos = info->args[1];
+ RegLocation rl_dst = info->args[2];
+ RegLocation rl_dst_pos = info->args[3];
+ RegLocation rl_length = info->args[4];
+  // Compile-time check; exceptional cases are handled by the non-inlined method to reduce related meta-data.
+ if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
+ (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
+ (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
+ return false;
+ }
+
+ ClobberCallerSave();
+ LockCallTemps(); // Prepare for explicit register usage.
+ RegStorage rs_src = rs_x0;
+ RegStorage rs_dst = rs_x1;
+ LoadValueDirectFixed(rl_src, rs_src);
+ LoadValueDirectFixed(rl_dst, rs_dst);
+
+ // Handle null pointer exception in slow-path.
+ LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
+ LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
+ // Handle potential overlapping in slow-path.
+  // Handle potential overlap in the slow-path.
+ LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
+ // Handle exception or big length in slow-path.
+ RegStorage rs_length = rs_w2;
+ LoadValueDirectFixed(rl_length, rs_length);
+ LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
+ // Src bounds check.
+ RegStorage rs_src_pos = rs_w3;
+ RegStorage rs_arr_length = rs_w4;
+ LoadValueDirectFixed(rl_src_pos, rs_src_pos);
+ LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_src_pos, 0, nullptr);
+ Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_src_pos);
+ LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+ // Dst bounds check.
+ RegStorage rs_dst_pos = rs_w5;
+ LoadValueDirectFixed(rl_dst_pos, rs_dst_pos);
+ LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_dst_pos, 0, nullptr);
+ Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
+ OpRegReg(kOpSub, rs_arr_length, rs_dst_pos);
+ LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
+
+ // Everything is checked now.
+ // Set rs_src to the address of the first element to be copied.
+ rs_src_pos = As64BitReg(rs_src_pos);
+ OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
+ OpRegRegImm(kOpLsl, rs_src_pos, rs_src_pos, 1);
+ OpRegReg(kOpAdd, rs_src, rs_src_pos);
+  // Set rs_dst to the address of the first element to be copied.
+ rs_dst_pos = As64BitReg(rs_dst_pos);
+ OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
+ OpRegRegImm(kOpLsl, rs_dst_pos, rs_dst_pos, 1);
+ OpRegReg(kOpAdd, rs_dst, rs_dst_pos);
+
+  // rs_arr_length won't be used anymore.
+ RegStorage rs_tmp = rs_arr_length;
+ // Use 64-bit view since rs_length will be used as index.
+ rs_length = As64BitReg(rs_length);
+ OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
+
+ // Copy one element.
+ OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 2);
+ LIR* jmp_to_copy_two = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 2);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
+
+ // Copy two elements.
+ LIR *copy_two = NewLIR0(kPseudoTargetLabel);
+ OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 4);
+ LIR* jmp_to_copy_four = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ OpRegImm(kOpSub, rs_length, 4);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
+
+ // Copy four elements.
+ LIR *copy_four = NewLIR0(kPseudoTargetLabel);
+ LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
+ LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
+ OpRegImm(kOpSub, rs_length, 8);
+ rs_tmp = As64BitReg(rs_tmp);
+ LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k64);
+ StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k64);
+ LIR* jmp_to_loop = OpCmpImmBranch(kCondNe, rs_length, 0, nullptr);
+ LIR* loop_finished = OpUnconditionalBranch(nullptr);
+
+ LIR *check_failed = NewLIR0(kPseudoTargetLabel);
+ LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
+ LIR* return_point = NewLIR0(kPseudoTargetLabel);
+
+ src_check_branch->target = check_failed;
+ dst_check_branch->target = check_failed;
+ src_dst_same->target = check_failed;
+ len_neg_or_too_big->target = check_failed;
+ src_pos_negative->target = check_failed;
+ src_bad_len->target = check_failed;
+ dst_pos_negative->target = check_failed;
+ dst_bad_len->target = check_failed;
+ jmp_to_copy_two->target = copy_two;
+ jmp_to_copy_four->target = copy_four;
+ jmp_to_ret->target = return_point;
+ jmp_to_loop->target = begin_loop;
+ loop_finished->target = return_point;
+
+ AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+
+ return true;
+}
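For reference, the tail-copy order generated above is equivalent to the following standalone C++ sketch (illustrative only; the names and the memcpy-based formulation are not part of the patch, and overlap or bounds failures are assumed to have been routed to the slow path already):

    // Sketch of the copy order produced by the LIR above: convert the element
    // count to bytes, copy a stray halfword and then a stray word from the end,
    // and move the remainder in 8-byte chunks.
    #include <cstdint>
    #include <cstring>

    static void CopyCharArrayTailSketch(const uint16_t* src, uint16_t* dst, int len) {
      int64_t bytes = static_cast<int64_t>(len) << 1;   // element count -> bytes
      const uint8_t* s = reinterpret_cast<const uint8_t*>(src);
      uint8_t* d = reinterpret_cast<uint8_t*>(dst);
      if (bytes & 2) {       // one element: halfword load/store
        bytes -= 2;
        std::memcpy(d + bytes, s + bytes, 2);
      }
      if (bytes & 4) {       // two elements: 32-bit load/store
        bytes -= 4;
        std::memcpy(d + bytes, s + bytes, 4);
      }
      while (bytes != 0) {   // remaining multiple of four elements, 8 bytes at a time
        bytes -= 8;
        std::memcpy(d + bytes, s + bytes, 8);
      }
    }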
+
LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}
@@ -952,34 +1047,52 @@
StoreValueWide(rl_dest, rl_result);
}
-void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ GenNotLong(rl_dest, rl_src2);
+ return;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+ return;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+ return;
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::AND_LONG:
+ GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG: {
+ GenNegLong(rl_dest, rl_src2);
+ return;
+ }
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ return;
+ }
}
/*
@@ -1213,22 +1326,7 @@
void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
- if (!rl_src2.is_const) {
- return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
- }
- } else {
- // Associativity.
- if (!rl_src2.is_const) {
- DCHECK(rl_src1.is_const);
- std::swap(rl_src1, rl_src2);
- }
- }
- DCHECK(rl_src2.is_const);
-
OpKind op = kOpBkpt;
- int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-
switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
@@ -1254,12 +1352,34 @@
LOG(FATAL) << "Unexpected opcode";
}
+ if (op == kOpSub) {
+ if (!rl_src2.is_const) {
+ return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ }
+ } else {
+ // Associativity.
+ if (!rl_src2.is_const) {
+ DCHECK(rl_src1.is_const);
+ std::swap(rl_src1, rl_src2);
+ }
+ }
+ DCHECK(rl_src2.is_const);
+ int64_t val = mir_graph_->ConstantValueWide(rl_src2);
+
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
StoreValueWide(rl_dest, rl_result);
}
+static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
+ // Find first register.
+ int first_bit_set = CTZ(reg_mask) + 1;
+ *reg = *reg + first_bit_set;
+ reg_mask >>= first_bit_set;
+ return reg_mask;
+}
+
/**
* @brief Split a register list in pairs or registers.
*
@@ -1276,15 +1396,15 @@
* }
* @endcode
*/
-uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
+static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
// Find first register.
- int first_bit_set = __builtin_ctz(reg_mask) + 1;
+ int first_bit_set = CTZ(reg_mask) + 1;
int reg = *reg1 + first_bit_set;
reg_mask >>= first_bit_set;
if (LIKELY(reg_mask)) {
// Save the first register, find the second and use the pair opcode.
- int second_bit_set = __builtin_ctz(reg_mask) + 1;
+ int second_bit_set = CTZ(reg_mask) + 1;
*reg2 = reg;
reg_mask >>= second_bit_set;
*reg1 = reg + second_bit_set;
@@ -1297,68 +1417,274 @@
return reg_mask;
}
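To see how the pair-wise walk drives the str/stp selection in the spill and unspill loops, here is a minimal standalone sketch (assumption: the single-register tail of GenPairWise, elided by the hunk boundary above, leaves *reg2 at -1, which is what the reg2 < 0 checks in the callers rely on):

    #include <cstdint>
    #include <cstdio>

    static int CTZ(uint32_t x) { return __builtin_ctz(x); }   // stand-in for art's CTZ()

    static uint32_t GenPairWiseSketch(uint32_t reg_mask, int* reg1, int* reg2) {
      int first_bit_set = CTZ(reg_mask) + 1;
      int reg = *reg1 + first_bit_set;
      reg_mask >>= first_bit_set;
      if (reg_mask != 0) {
        int second_bit_set = CTZ(reg_mask) + 1;
        *reg2 = reg;
        reg_mask >>= second_bit_set;
        *reg1 = reg + second_bit_set;
      } else {
        *reg1 = reg;
        *reg2 = -1;   // single register left; callers emit str/ldr instead of stp/ldp
      }
      return reg_mask;
    }

    int main() {
      uint32_t mask = (1u << 19) | (1u << 20) | (1u << 22);   // spill x19, x20, x22
      int reg1 = -1, reg2 = -1;
      while (mask != 0) {
        mask = GenPairWiseSketch(mask, &reg1, &reg2);
        if (reg2 < 0) {
          std::printf("str x%d\n", reg1);               // prints: str x22
        } else {
          std::printf("stp x%d, x%d\n", reg2, reg1);    // prints: stp x19, x20
        }
      }
      return 0;
    }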
-void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
- int reg1 = -1, reg2 = -1;
- const int reg_log2_size = 3;
-
- for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
- reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
- if (UNLIKELY(reg2 < 0)) {
- NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
- } else {
- DCHECK_LE(offset, 63);
- NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
- RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
- }
- }
-}
-
-void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
int reg1 = -1, reg2 = -1;
const int reg_log2_size = 3;
for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
if (UNLIKELY(reg2 < 0)) {
- NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
} else {
- NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
- RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
- }
- }
-}
-
-void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
- int reg1 = -1, reg2 = -1;
- const int reg_log2_size = 3;
-
- for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
- reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
- if (UNLIKELY(reg2 < 0)) {
- NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
- } else {
- NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
- RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
}
}
}
// TODO(Arm64): consider using ld1 and st1?
-void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
int reg1 = -1, reg2 = -1;
const int reg_log2_size = 3;
for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
if (UNLIKELY(reg2 < 0)) {
- NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+ offset);
} else {
- NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
- RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
}
}
}
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+ uint32_t fp_reg_mask, int frame_size) {
+ m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
+
+ int core_count = POPCOUNT(core_reg_mask);
+
+ if (fp_reg_mask != 0) {
+ // Spill FP regs.
+ int fp_count = POPCOUNT(fp_reg_mask);
+ int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
+ SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
+ }
+
+ if (core_reg_mask != 0) {
+ // Spill core regs.
+ int spill_offset = frame_size - (core_count * kArm64PointerSize);
+ SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
+ }
+
+ return frame_size;
+}
+
+static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+ uint32_t fp_reg_mask, int frame_size) {
+ // Otherwise, spill both core and fp regs at the same time.
+ // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
+ // down. From then on, we fill upwards. This will generate overall the same number of instructions
+ // as the specialized code above in most cases (exception being odd number of core and even
+ // non-zero fp spills), but is more flexible, as the offsets are guaranteed small.
+ //
+ // Some demonstrative fill cases : (c) = core, (f) = fp
+ // cc    44   cc    44   cc    22   cc    33   fc => 1[1/2]
+ // fc => 23   fc => 23   ff => 11   ff => 22
+ // ff    11    f    11    f    11
+ //
+ int reg1 = -1, reg2 = -1;
+ int core_count = POPCOUNT(core_reg_mask);
+ int fp_count = POPCOUNT(fp_reg_mask);
+
+ int combined = fp_count + core_count;
+ int all_offset = RoundUp(combined, 2); // Needs to be 16B = 2-reg aligned.
+
+ int cur_offset = 2; // What's the starting offset after the first stp? We expect the base slot
+ // to be filled.
+
+ // First figure out whether the bottom is FP or core.
+ if (fp_count > 0) {
+ // Some FP spills.
+ //
+ // Four cases: (d0 is dummy to fill up stp)
+ // 1) Single FP, even number of core -> stp d0, fp_reg
+ // 2) Single FP, odd number of core -> stp fp_reg, d0
+ // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
+ // 4) More FP, odd number combined -> stp d0, fp_reg
+ if (fp_count == 1) {
+ fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+ DCHECK_EQ(fp_reg_mask, 0U);
+ if (core_count % 2 == 0) {
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ base.GetReg(), -all_offset);
+ } else {
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ base.GetReg(), -all_offset);
+ cur_offset = 0; // That core reg needs to go into the upper half.
+ }
+ } else {
+ if (combined % 2 == 0) {
+ fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
+ } else {
+ fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
+ base.GetReg(), -all_offset);
+ }
+ }
+ } else {
+ // No FP spills.
+ //
+ // Two cases:
+ // 1) Even number of core -> stp core1, core2
+ // 2) Odd number of core -> stp xzr, core1
+ if (core_count % 2 == 1) {
+ core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+ m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+ } else {
+ core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+ m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+ }
+ }
+
+ if (fp_count != 0) {
+ for (; fp_reg_mask != 0;) {
+ // Have some FP regs to do.
+ fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+ if (UNLIKELY(reg2 < 0)) {
+ m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+ cur_offset);
+ // Do not increment offset here, as the second half will be filled by a core reg.
+ } else {
+ m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
+ cur_offset += 2;
+ }
+ }
+
+ // Reset counting.
+ reg1 = -1;
+
+ // If there is an odd number of core registers, we need to store the bottom now.
+ if (core_count % 2 == 1) {
+ core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+ m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
+ cur_offset + 1);
+ cur_offset += 2; // Half-slot filled now.
+ }
+ }
+
+ // Spill the rest of the core regs. Their number is guaranteed to be even.
+ DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
+ for (; core_reg_mask != 0; cur_offset += 2) {
+ core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+ m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
+ }
+
+ DCHECK_EQ(cur_offset, all_offset);
+
+ return all_offset * 8;
+}
+
+int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
+ // If the frame size is small enough that all offsets would fit into the immediates, use that
+ // setup, as it decrements sp early (kind of instruction scheduling), and is not worse
+ // instruction-count wise than the complicated code below.
+ //
+ // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
+ // number of fp spills.
+ if ((RoundUp(frame_size, 8) / 8 <= 63)) {
+ return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ } else {
+ return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ }
+}
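The cut-off above can be sanity-checked in isolation: the binding limit is the 7-bit signed, 8-byte-scaled immediate of stp/ldp, i.e. at most 63 positive slots. A standalone sketch (not ART code):

    #include <cassert>

    // Same arithmetic as the check above: RoundUp(frame_size, 8) / 8 <= 63.
    static bool UsesPreSubPath(int frame_size_bytes) {
      return ((frame_size_bytes + 7) / 8) <= 63;
    }

    int main() {
      assert(UsesPreSubPath(504));    // 63 slots: direct offsets still fit
      assert(!UsesPreSubPath(512));   // 64 slots: fall back to the pre-indexed variant
      return 0;
    }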
+
+static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+ int reg1 = -1, reg2 = -1;
+ const int reg_log2_size = 3;
+
+ for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+ reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
+ if (UNLIKELY(reg2 < 0)) {
+ m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+ } else {
+ DCHECK_LE(offset, 63);
+ m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+ }
+ }
+}
+
+static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+ int reg1 = -1, reg2 = -1;
+ const int reg_log2_size = 3;
+
+ for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+ reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
+ if (UNLIKELY(reg2 < 0)) {
+ m2l->NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+ offset);
+ } else {
+ m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ }
+ }
+}
+
+void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
+ // Restore saves and drop stack frame.
+ // 2 versions:
+ //
+ // 1. (Original): Try to address directly, then drop the whole frame.
+ // Limitation: ldp is a 7b signed immediate.
+ //
+ // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
+ // in range. Then drop the rest.
+ //
+ // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
+ // in variant 1.
+
+ // "Magic" constant, 63 (max signed 7b) * 8.
+ static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;
+
+ const int num_core_spills = POPCOUNT(core_reg_mask);
+ const int num_fp_spills = POPCOUNT(fp_reg_mask);
+
+ int early_drop = 0;
+
+ if (frame_size > kMaxFramesizeForOffset) {
+ // Second variant. Drop the frame part.
+
+ // TODO: Always use the first formula, as num_fp_spills would be zero?
+ if (fp_reg_mask != 0) {
+ early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
+ } else {
+ early_drop = frame_size - kArm64PointerSize * num_core_spills;
+ }
+
+ // Drop needs to be 16B aligned, so that SP keeps aligned.
+ early_drop = RoundDown(early_drop, 16);
+
+ OpRegImm64(kOpAdd, rs_sp, early_drop);
+ }
+
+ // Unspill.
+ if (fp_reg_mask != 0) {
+ int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
+ UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
+ }
+ if (core_reg_mask != 0) {
+ int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
+ UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
+ }
+
+ // Drop the (rest of) the frame.
+ OpRegImm64(kOpAdd, rs_sp, frame_size - early_drop);
+}
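As a worked example of the second variant (numbers are illustrative only): with frame_size = 1024, three core and two FP spills, kMaxFramesizeForOffset (504) is exceeded, so early_drop = RoundDown(1024 - 8 * 5, 16) = 976 is added to sp first. The FP restore offset then becomes 1024 - 976 - 8 * 5 = 8 and the core restore offset 1024 - 976 - 8 * 3 = 24, both well inside the ldp immediate range, and the final add drops the remaining 48 bytes.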
+
bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
RegLocation rl_src_i = info->args[0];
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 6a27ad0..9b4546a 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -48,14 +48,12 @@
rs_d8, rs_d9, rs_d10, rs_d11, rs_d12, rs_d13, rs_d14, rs_d15,
rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
+// Note: we are not able to call C functions directly since rs_xSELF is a special register that
+// needs to be preserved but would be scratched by native functions following AAPCS64.
static constexpr RegStorage reserved_regs_arr[] =
{rs_wSUSPEND, rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
static constexpr RegStorage reserved64_regs_arr[] =
{rs_xSUSPEND, rs_xSELF, rs_sp, rs_xLR, rs_xzr};
-// TUNING: Are there too many temp registers and too less promote target?
-// This definition need to be matched with runtime.cc, quick entry assembly and JNI compiler
-// Note: we are not able to call to C function directly if it un-match C ABI.
-// Currently, rs_rA64_SELF is not a callee save register which does not match C ABI.
static constexpr RegStorage core_temps_arr[] =
{rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15, rs_w16,
@@ -132,7 +130,7 @@
case kRet0: res_reg = rs_w0; break;
case kRet1: res_reg = rs_w1; break;
case kInvokeTgt: res_reg = rs_wLR; break;
- case kHiddenArg: res_reg = rs_w12; break;
+ case kHiddenArg: res_reg = rs_wIP1; break;
case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
case kCount: res_reg = RegStorage::InvalidReg(); break;
default: res_reg = RegStorage::InvalidReg();
@@ -569,10 +567,6 @@
return (lir->opcode == kA64B1t);
}
-bool Arm64Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
- return true;
-}
-
RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
if (UNLIKELY(is_volatile)) {
// On arm64, fp register load/store is atomic only for single bytes.
@@ -762,15 +756,10 @@
FreeTemp(rs_f7);
}
-RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
- UNIMPLEMENTED(FATAL) << "Should not be called.";
- return RegStorage::InvalidReg();
-}
-
-RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
+RegStorage Arm64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
// TODO(Arm64): use LoadWordDisp instead.
// e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
- LoadBaseDisp(rs_xSELF, offset.Int32Value(), rs_xLR, k64, kNotVolatile);
+ LoadBaseDisp(rs_xSELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_xLR, k64, kNotVolatile);
return rs_xLR;
}
@@ -1195,7 +1184,7 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index fdebb92..5326e74 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -23,7 +23,7 @@
/* This file contains codegen for the A64 ISA. */
-static int32_t EncodeImmSingle(uint32_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmSingle(uint32_t bits) {
/*
* Valid values will have the form:
*
@@ -55,7 +55,7 @@
return (bit7 | bit6 | bit5_to_0);
}
-static int32_t EncodeImmDouble(uint64_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmDouble(uint64_t bits) {
/*
* Valid values will have the form:
*
@@ -87,6 +87,26 @@
return (bit7 | bit6 | bit5_to_0);
}
+size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
+ bool opcode_is_wide = IS_WIDE(lir->opcode);
+ ArmOpcode opcode = UNWIDE(lir->opcode);
+ DCHECK(!IsPseudoLirOp(opcode));
+ const ArmEncodingMap *encoder = &EncodingMap[opcode];
+ uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
+ return (bits >> 30);
+}
+
+size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
+ size_t offset = lir->operands[2];
+ uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+ DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+ if (check_flags & SCALED_OFFSET_X0) {
+ DCHECK(check_flags & IS_TERTIARY_OP);
+ offset = offset * (1 << GetLoadStoreSize(lir));
+ }
+ return offset;
+}
+
LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
DCHECK(r_dest.IsSingle());
if (value == 0) {
@@ -249,8 +269,47 @@
return (n << 12 | imm_r << 6 | imm_s);
}
+// Maximum number of instructions to use for encoding the immediate.
+static const int max_num_ops_per_const_load = 2;
+
+/**
+ * @brief Return the number of fast halfwords in the given uint64_t integer.
+ * @details The input integer is split into 4 halfwords (bits 0-15, 16-31, 32-47, 48-63). The
+ * number of fast halfwords (halfwords that are either 0 or 0xffff) is returned. See below for
+ * a more accurate description.
+ * @param value The input 64-bit integer.
+ * @return Return @c retval such that (retval & 0x7) is the maximum between n and m, where n is
+ * the number of halfwords with all bits unset (0) and m is the number of halfwords with all bits
+ * set (0xffff). Additionally (retval & 0x8) is set when m > n.
+ */
+static int GetNumFastHalfWords(uint64_t value) {
+ unsigned int num_0000_halfwords = 0;
+ unsigned int num_ffff_halfwords = 0;
+ for (int shift = 0; shift < 64; shift += 16) {
+ uint16_t halfword = static_cast<uint16_t>(value >> shift);
+ if (halfword == 0)
+ num_0000_halfwords++;
+ else if (halfword == UINT16_C(0xffff))
+ num_ffff_halfwords++;
+ }
+ if (num_0000_halfwords >= num_ffff_halfwords) {
+ DCHECK_LE(num_0000_halfwords, 4U);
+ return num_0000_halfwords;
+ } else {
+ DCHECK_LE(num_ffff_halfwords, 4U);
+ return num_ffff_halfwords | 0x8;
+ }
+}
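A few concrete values make the packed return value easier to read (standalone copy of the same counting logic, for illustration only):

    #include <cassert>
    #include <cstdint>

    static int GetNumFastHalfWordsSketch(uint64_t value) {
      unsigned num_0000 = 0, num_ffff = 0;
      for (int shift = 0; shift < 64; shift += 16) {
        uint16_t hw = static_cast<uint16_t>(value >> shift);
        if (hw == 0) num_0000++;
        else if (hw == 0xffff) num_ffff++;
      }
      return (num_0000 >= num_ffff) ? num_0000 : (num_ffff | 0x8);
    }

    int main() {
      assert(GetNumFastHalfWordsSketch(UINT64_C(0x0000000000001234)) == 3);          // three zero halfwords
      assert(GetNumFastHalfWordsSketch(UINT64_C(0xffffffffffff1234)) == (3 | 0x8));  // three 0xffff halfwords, flag set
      assert(GetNumFastHalfWordsSketch(UINT64_C(0x1234123412341234)) == 0);          // no fast halfwords
      return 0;
    }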
+
+// The InexpensiveConstantXXX variants below are used in the promotion algorithm to determine how a
+// constant is considered for promotion. If the constant is "inexpensive" then the promotion
+// algorithm will give it a low priority for promotion, even when it is referenced many times in
+// the code.
+
bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
- return false; // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
+ // A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
+ // We therefore return true and give it a low priority for promotion.
+ return true;
}
bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
@@ -258,13 +317,70 @@
}
bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
- return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
+ int num_slow_halfwords = 4 - (GetNumFastHalfWords(value) & 0x7);
+ if (num_slow_halfwords <= max_num_ops_per_const_load) {
+ return true;
+ }
+ return (EncodeLogicalImmediate(/*is_wide=*/true, value) >= 0);
}
bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
return EncodeImmDouble(value) >= 0;
}
+// The InexpensiveConstantXXX variants below are used to determine which A64 instructions to use
+// when one of the operands is an immediate (e.g. register version or immediate version of add).
+
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+ switch (opcode) {
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ // The code below is consistent with the implementation of OpRegRegImm().
+ {
+ int32_t abs_value = std::abs(value);
+ if (abs_value < 0x1000) {
+ return true;
+ } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
+ return true;
+ }
+ return false;
+ }
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ return true;
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::AND_INT_LIT16:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::XOR_INT_LIT8:
+ if (value == 0 || value == INT32_C(-1)) {
+ return true;
+ }
+ return (EncodeLogicalImmediate(/*is_wide=*/false, value) >= 0);
+ default:
+ return false;
+ }
+}
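The add/sub arm of this check mirrors the two A64 add/sub immediate forms: a 12-bit value, or a 12-bit value shifted left by 12. A standalone sketch of the same test (illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    static bool FitsAddSubImmediate(int32_t value) {
      int32_t abs_value = std::abs(value);
      if (abs_value < 0x1000) return true;                                      // add Rd, Rn, #imm12
      if ((abs_value & 0xfff) == 0 && (abs_value >> 12) < 0x1000) return true;  // add Rd, Rn, #imm12, lsl #12
      return false;
    }

    int main() {
      assert(FitsAddSubImmediate(4095));       // fits the plain 12-bit form
      assert(FitsAddSubImmediate(0x123000));   // fits the shifted form
      assert(!FitsAddSubImmediate(4097));      // needs a scratch register instead
      return 0;
    }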
+
/*
* Load a immediate using one single instruction when possible; otherwise
* use a pair of movz and movk instructions.
@@ -338,9 +454,6 @@
// TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- // Maximum number of instructions to use for encoding the immediate.
- const int max_num_ops = 2;
-
if (r_dest.IsFloat()) {
return LoadFPConstantValueWide(r_dest, value);
}
@@ -358,19 +471,12 @@
}
// At least one in value's halfwords is not 0x0, nor 0xffff: find out how many.
- int num_0000_halfwords = 0;
- int num_ffff_halfwords = 0;
uint64_t uvalue = static_cast<uint64_t>(value);
- for (int shift = 0; shift < 64; shift += 16) {
- uint16_t halfword = static_cast<uint16_t>(uvalue >> shift);
- if (halfword == 0)
- num_0000_halfwords++;
- else if (halfword == UINT16_C(0xffff))
- num_ffff_halfwords++;
- }
- int num_fast_halfwords = std::max(num_0000_halfwords, num_ffff_halfwords);
+ int num_fast_halfwords = GetNumFastHalfWords(uvalue);
+ int num_slow_halfwords = 4 - (num_fast_halfwords & 0x7);
+ bool more_ffff_halfwords = (num_fast_halfwords & 0x8) != 0;
- if (num_fast_halfwords < 3) {
+ if (num_slow_halfwords > 1) {
// A single movz/movn is not enough. Try the logical immediate route.
int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
if (log_imm >= 0) {
@@ -378,19 +484,19 @@
}
}
- if (num_fast_halfwords >= 4 - max_num_ops) {
+ if (num_slow_halfwords <= max_num_ops_per_const_load) {
// We can encode the number using a movz/movn followed by one or more movk.
ArmOpcode op;
uint16_t background;
LIR* res = nullptr;
// Decide whether to use a movz or a movn.
- if (num_0000_halfwords >= num_ffff_halfwords) {
- op = WIDE(kA64Movz3rdM);
- background = 0;
- } else {
+ if (more_ffff_halfwords) {
op = WIDE(kA64Movn3rdM);
background = 0xffff;
+ } else {
+ op = WIDE(kA64Movz3rdM);
+ background = 0;
}
// Emit the first instruction (movz, movn).
@@ -525,7 +631,8 @@
return NULL;
}
-LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int extend) {
+LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+ A64RegExtEncodings ext, uint8_t amount) {
ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
ArmOpcode opcode = kA64Brk1d;
@@ -536,6 +643,11 @@
case kOpCmp:
opcode = kA64Cmp3Rre;
break;
+ case kOpAdd:
+ // Note: intentional fallthrough
+ case kOpSub:
+ return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
+ break;
default:
LOG(FATAL) << "Bad Opcode: " << opcode;
break;
@@ -545,7 +657,8 @@
if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
if (kind == kFmtExtend) {
- return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), extend);
+ return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
+ EncodeExtend(ext, amount));
}
}
@@ -555,10 +668,10 @@
LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
/* RegReg operations with SP in first parameter need extended register instruction form.
- * Only CMN and CMP instructions are implemented.
+ * Only CMN, CMP, ADD & SUB instructions are implemented.
*/
if (r_dest_src1 == rs_sp) {
- return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
+ return OpRegRegExtend(op, r_dest_src1, r_src2, kA64Uxtx, 0);
} else {
return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
}
@@ -699,7 +812,7 @@
int64_t abs_value = (neg) ? -value : value;
ArmOpcode opcode = kA64Brk1d;
ArmOpcode alt_opcode = kA64Brk1d;
- int32_t log_imm = -1;
+ bool is_logical = false;
bool is_wide = r_dest.Is64Bit();
ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
int info = 0;
@@ -734,65 +847,89 @@
opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
} else {
- log_imm = -1;
alt_opcode = (op == kOpAdd) ? kA64Add4RRre : kA64Sub4RRre;
info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
}
break;
- // case kOpRsub:
- // opcode = kThumb2RsubRRI8M;
- // alt_opcode = kThumb2RsubRRR;
- // break;
case kOpAdc:
- log_imm = -1;
alt_opcode = kA64Adc3rrr;
break;
case kOpSbc:
- log_imm = -1;
alt_opcode = kA64Sbc3rrr;
break;
case kOpOr:
- log_imm = EncodeLogicalImmediate(is_wide, value);
+ is_logical = true;
opcode = kA64Orr3Rrl;
alt_opcode = kA64Orr4rrro;
break;
case kOpAnd:
- log_imm = EncodeLogicalImmediate(is_wide, value);
+ is_logical = true;
opcode = kA64And3Rrl;
alt_opcode = kA64And4rrro;
break;
case kOpXor:
- log_imm = EncodeLogicalImmediate(is_wide, value);
+ is_logical = true;
opcode = kA64Eor3Rrl;
alt_opcode = kA64Eor4rrro;
break;
case kOpMul:
// TUNING: power of 2, shift & add
- log_imm = -1;
alt_opcode = kA64Mul3rrr;
break;
default:
LOG(FATAL) << "Bad opcode: " << op;
}
- if (log_imm >= 0) {
- return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
- } else {
- RegStorage r_scratch;
- if (is_wide) {
- r_scratch = AllocTempWide();
- LoadConstantWide(r_scratch, value);
+ if (is_logical) {
+ int log_imm = EncodeLogicalImmediate(is_wide, value);
+ if (log_imm >= 0) {
+ return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
} else {
- r_scratch = AllocTemp();
- LoadConstant(r_scratch, value);
+ // When the immediate is either 0 or ~0, the logical operation can be trivially reduced
+ // to a - possibly negated - assignment.
+ if (value == 0) {
+ switch (op) {
+ case kOpOr:
+ case kOpXor:
+ // Or/Xor by zero reduces to an assignment.
+ return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+ default:
+ // And by zero reduces to a `mov rdest, xzr'.
+ DCHECK(op == kOpAnd);
+ return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
+ }
+ } else if (value == INT64_C(-1)
+ || (!is_wide && static_cast<uint32_t>(value) == ~UINT32_C(0))) {
+ switch (op) {
+ case kOpAnd:
+ // And by -1 reduces to an assignment.
+ return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+ case kOpXor:
+ // Xor by -1 reduces to an `mvn rdest, rsrc'.
+ return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+ default:
+ // Or by -1 reduces to a `mvn rdest, xzr'.
+ DCHECK(op == kOpOr);
+ return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
+ }
+ }
}
- if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
- res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
- else
- res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
- FreeTemp(r_scratch);
- return res;
}
+
+ RegStorage r_scratch;
+ if (is_wide) {
+ r_scratch = AllocTempWide();
+ LoadConstantWide(r_scratch, value);
+ } else {
+ r_scratch = AllocTemp();
+ LoadConstant(r_scratch, value);
+ }
+ if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+ res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
+ else
+ res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+ FreeTemp(r_scratch);
+ return res;
}
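Concretely, these reductions cover the immediates that can never be encoded as A64 bitmask immediates (0 and ~0): for instance, an xor with -1 now lowers to a single mvn of the source register instead of materializing -1 in a scratch register and emitting eor, and an and with 0 becomes a plain move from the zero register. Only immediates outside these trivial cases fall through to the scratch-register path below.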
LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
@@ -825,15 +962,6 @@
}
OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
- } else if (LIKELY(A64_REG_IS_SP(r_dest_src1.GetReg()) && (op == kOpAdd || op == kOpSub))) {
- // Note: "sub sp, sp, Xm" is not correct on arm64.
- // We need special instructions for SP.
- // Also operation on 32-bit SP should be avoided.
- DCHECK(IS_WIDE(wide));
- RegStorage r_tmp = AllocTempWide();
- OpRegRegImm(kOpAdd, r_tmp, r_dest_src1, 0);
- OpRegImm64(op, r_tmp, value);
- return OpRegRegImm(kOpAdd, r_dest_src1, r_tmp, 0);
} else {
RegStorage r_tmp;
LIR* res;
@@ -878,10 +1006,14 @@
}
int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
+ DCHECK_EQ(shift_type & 0x3, shift_type);
+ DCHECK_EQ(amount & 0x3f, amount);
return ((shift_type & 0x3) << 7) | (amount & 0x3f);
}
int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
+ DCHECK_EQ(extend_type & 0x7, extend_type);
+ DCHECK_EQ(amount & 0x7, amount);
return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
}
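For reference, the two packings can be checked in isolation; the new DCHECKs simply assert that the type and amount already fit their bit fields. A standalone sketch (the numeric type/amount values are illustrative and not tied to particular ART enum constants):

    #include <cassert>

    static int EncodeShiftSketch(int shift_type, int amount) {
      return ((shift_type & 0x3) << 7) | (amount & 0x3f);
    }
    static int EncodeExtendSketch(int extend_type, int amount) {
      return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
    }

    int main() {
      assert(EncodeShiftSketch(0, 12) == 12);              // type 0, amount 12: amount sits in the low 6 bits
      assert(EncodeExtendSketch(2, 0) == 0x50);            // bit 6 set, extend type in bits 3..5
      assert((EncodeShiftSketch(3, 63) & (1 << 6)) == 0);  // bit 6 clear distinguishes shift from extend
      return 0;
    }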
@@ -1262,36 +1394,13 @@
return NULL;
}
-LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
- UNIMPLEMENTED(FATAL) << "Should not be used.";
- return nullptr;
-}
-
-LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
- LOG(FATAL) << "Unexpected use of OpThreadMem for Arm64";
- return NULL;
-}
-
LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
return NULL;
}
-LIR* Arm64Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_src, OpSize size) {
- LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm64";
- return NULL;
-}
-
-LIR* Arm64Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
- LOG(FATAL) << "Unexpected use of OpRegMem for Arm64";
- return NULL;
-}
-
-LIR* Arm64Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_dest, OpSize size) {
- LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm64";
- return NULL;
+LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ return OpReg(op, r_tgt);
}
} // namespace art
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 60d2589..be79b63 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -394,6 +394,18 @@
return nullptr;
}
+/* Search the existing constants in the literal pool for an exact class match */
+LIR* Mir2Lir::ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx) {
+ while (data_target) {
+ if (static_cast<uint32_t>(data_target->operands[0]) == type_idx &&
+ UnwrapPointer(data_target->operands[1]) == &dex_file) {
+ return data_target;
+ }
+ data_target = data_target->next;
+ }
+ return nullptr;
+}
+
/*
* The following are building blocks to insert constants into the pool or
* instruction streams.
@@ -492,12 +504,15 @@
data_lir = class_literal_list_;
while (data_lir != NULL) {
uint32_t target_method_idx = data_lir->operands[0];
+ const DexFile* class_dex_file =
+ reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
cu_->compiler_driver->AddClassPatch(cu_->dex_file,
cu_->class_def_idx,
cu_->method_idx,
target_method_idx,
+ class_dex_file,
code_buffer_.size());
- const DexFile::TypeId& target_method_id = cu_->dex_file->GetTypeId(target_method_idx);
+ const DexFile::TypeId& target_method_id = class_dex_file->GetTypeId(target_method_idx);
// unique value based on target to ensure code deduplication works
PushPointer(code_buffer_, &target_method_id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
@@ -983,6 +998,8 @@
estimated_native_code_size_(0),
reg_pool_(NULL),
live_sreg_(0),
+ core_vmap_table_(mir_graph->GetArena()->Adapter()),
+ fp_vmap_table_(mir_graph->GetArena()->Adapter()),
num_core_spills_(0),
num_fp_spills_(0),
frame_size_(0),
@@ -1068,7 +1085,7 @@
vmap_encoder.PushBackUnsigned(0u); // Size is 0.
}
- std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnCallFrameInformation());
+ std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnFrameDescriptionEntry());
CompiledMethod* result =
new CompiledMethod(cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
@@ -1172,9 +1189,12 @@
}
LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target) {
+ int offset, int check_value, LIR* target, LIR** compare) {
// Handle this for architectures that can't compare to memory.
- Load32Disp(base_reg, offset, temp_reg);
+ LIR* inst = Load32Disp(base_reg, offset, temp_reg);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
return branch;
}
@@ -1217,18 +1237,20 @@
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}
-void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
+void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+ SpecialTargetRegister symbolic_reg) {
// Use the literal pool and a PC-relative load from a data word.
- LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0);
+ LIR* data_target = ScanLiteralPoolClass(class_literal_list_, dex_file, type_idx);
if (data_target == nullptr) {
data_target = AddWordData(&class_literal_list_, type_idx);
+ data_target->operands[1] = WrapPointer(const_cast<DexFile*>(&dex_file));
}
// Loads a Class pointer, which is a reference as it lives in the heap.
LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
AppendLIR(load_pc_rel);
}
-std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
+std::vector<uint8_t>* Mir2Lir::ReturnFrameDescriptionEntry() {
// Default case is to do nothing.
return nullptr;
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0e46c96..dbceaff 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -48,7 +48,12 @@
true, // kIntrinsicMinMaxFloat
true, // kIntrinsicMinMaxDouble
true, // kIntrinsicSqrt
- false, // kIntrinsicGet
+ true, // kIntrinsicCeil
+ true, // kIntrinsicFloor
+ true, // kIntrinsicRint
+ true, // kIntrinsicRoundFloat
+ true, // kIntrinsicRoundDouble
+ false, // kIntrinsicReferenceGet
false, // kIntrinsicCharAt
false, // kIntrinsicCompareTo
false, // kIntrinsicIsEmptyOrLength
@@ -75,7 +80,12 @@
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCeil], Ceil_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicReferenceGet], Get_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
@@ -155,7 +165,11 @@
"max", // kNameCacheMax
"min", // kNameCacheMin
"sqrt", // kNameCacheSqrt
- "get", // kNameCacheGet
+ "ceil", // kNameCacheCeil
+ "floor", // kNameCacheFloor
+ "rint", // kNameCacheRint
+ "round", // kNameCacheRound
+ "get", // kNameCacheReferenceGet
"charAt", // kNameCacheCharAt
"compareTo", // kNameCacheCompareTo
"isEmpty", // kNameCacheIsEmpty
@@ -314,7 +328,18 @@
INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
- INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
+ INTRINSIC(JavaLangMath, Ceil, D_D, kIntrinsicCeil, 0),
+ INTRINSIC(JavaLangStrictMath, Ceil, D_D, kIntrinsicCeil, 0),
+ INTRINSIC(JavaLangMath, Floor, D_D, kIntrinsicFloor, 0),
+ INTRINSIC(JavaLangStrictMath, Floor, D_D, kIntrinsicFloor, 0),
+ INTRINSIC(JavaLangMath, Rint, D_D, kIntrinsicRint, 0),
+ INTRINSIC(JavaLangStrictMath, Rint, D_D, kIntrinsicRint, 0),
+ INTRINSIC(JavaLangMath, Round, F_I, kIntrinsicRoundFloat, 0),
+ INTRINSIC(JavaLangStrictMath, Round, F_I, kIntrinsicRoundFloat, 0),
+ INTRINSIC(JavaLangMath, Round, D_J, kIntrinsicRoundDouble, 0),
+ INTRINSIC(JavaLangStrictMath, Round, D_J, kIntrinsicRoundDouble, 0),
+
+ INTRINSIC(JavaLangRefReference, ReferenceGet, _Object, kIntrinsicReferenceGet, 0),
INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
@@ -436,8 +461,18 @@
return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
case kIntrinsicSqrt:
return backend->GenInlinedSqrt(info);
- case kIntrinsicGet:
- return backend->GenInlinedGet(info);
+ case kIntrinsicCeil:
+ return backend->GenInlinedCeil(info);
+ case kIntrinsicFloor:
+ return backend->GenInlinedFloor(info);
+ case kIntrinsicRint:
+ return backend->GenInlinedRint(info);
+ case kIntrinsicRoundFloat:
+ return backend->GenInlinedRound(info, false /* is_double */);
+ case kIntrinsicRoundDouble:
+ return backend->GenInlinedRound(info, true /* is_double */);
+ case kIntrinsicReferenceGet:
+ return backend->GenInlinedReferenceGet(info);
case kIntrinsicCharAt:
return backend->GenInlinedCharAt(info);
case kIntrinsicCompareTo:
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index cb8c165..b875e2b 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -141,7 +141,11 @@
kNameCacheMax,
kNameCacheMin,
kNameCacheSqrt,
- kNameCacheGet,
+ kNameCacheCeil,
+ kNameCacheFloor,
+ kNameCacheRint,
+ kNameCacheRound,
+ kNameCacheReferenceGet,
kNameCacheCharAt,
kNameCacheCompareTo,
kNameCacheIsEmpty,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1fc0cff..3f22913 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -21,6 +21,7 @@
#include "mirror/array.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
+#include "mirror/object_reference.h"
#include "verifier/method_verifier.h"
#include <functional>
@@ -73,11 +74,7 @@
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- if (m2l_->cu_->target64) {
- m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true);
- } else {
- m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
- }
+ m2l_->CallRuntimeHelper(kQuickThrowDivZero, true);
}
};
@@ -96,13 +93,7 @@
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- if (m2l_->cu_->target64) {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- index_, length_, true);
- } else {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- index_, length_, true);
- }
+ m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, index_, length_, true);
}
private:
@@ -132,13 +123,7 @@
m2l_->OpRegCopy(arg1_32, length_);
m2l_->LoadConstant(arg0_32, index_);
- if (m2l_->cu_->target64) {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- arg0_32, arg1_32, true);
- } else {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- arg0_32, arg1_32, true);
- }
+ m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, arg0_32, arg1_32, true);
}
private:
@@ -161,11 +146,7 @@
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- if (m2l_->cu_->target64) {
- m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true);
- } else {
- m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
- }
+ m2l_->CallRuntimeHelper(kQuickThrowNullPointer, true);
}
};
@@ -176,7 +157,7 @@
/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
return GenExplicitNullCheck(m_reg, opt_flags);
}
return nullptr;
@@ -191,16 +172,17 @@
}
void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
+ // Insert after last instruction.
MarkSafepointPC(last_lir_insn_);
}
}
void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
@@ -209,13 +191,13 @@
}
void Mir2Lir::MarkPossibleStackOverflowException() {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitStackOverflowChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
MarkSafepointPC(last_lir_insn_);
}
}
void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
- if (!cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
}
@@ -272,13 +254,25 @@
if (rl_src2.is_const) {
// If it's already live in a register or not easily materialized, just keep going
RegLocation rl_temp = UpdateLoc(rl_src2);
+ int32_t constant_value = mir_graph_->ConstantValue(rl_src2);
if ((rl_temp.location == kLocDalvikFrame) &&
- InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
+ InexpensiveConstantInt(constant_value, opcode)) {
// OK - convert this to a compare immediate and branch
OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
return;
}
+
+ // It's also commonly more efficient to have a test against zero with Eq/Ne. This is not worse
+ // for x86, and allows a cbz/cbnz for Arm and Mips. At the same time, it works around a register
+ // mismatch for 64b systems, where a reference is compared against null, as dex bytecode uses
+ // the 32b literal 0 for null.
+ if (constant_value == 0 && (cond == kCondEq || cond == kCondNe)) {
+ // Use the OpCmpImmBranch and ignore the value in the register.
+ OpCmpImmBranch(cond, rl_src1.reg, 0, taken);
+ return;
+ }
}
+
rl_src2 = LoadValue(rl_src2);
OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}
@@ -347,16 +341,17 @@
StoreValue(rl_dest, rl_result);
}
-template <size_t pointer_size>
-static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
- uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src) {
- mir_to_lir->FlushAllRegs(); /* Everything to home location */
- ThreadOffset<pointer_size> func_offset(-1);
- const DexFile* dex_file = cu->dex_file;
- CompilerDriver* driver = cu->compiler_driver;
- if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *dex_file,
- type_idx)) {
+/*
+ * Let helper function take care of everything. Will call
+ * Array::AllocFromCode(type_idx, method, count);
+ * Note: AllocFromCode will handle checks for errNegativeArraySize.
+ */
+void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src) {
+ FlushAllRegs(); /* Everything to home location */
+ const DexFile* dex_file = cu_->dex_file;
+ CompilerDriver* driver = cu_->compiler_driver;
+ if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
bool is_type_initialized; // Ignored as an array does not have an initializer.
bool use_direct_type_ptr;
uintptr_t direct_type_ptr;
@@ -366,55 +361,22 @@
&direct_type_ptr, &is_finalizable)) {
// The fast path.
if (!use_direct_type_ptr) {
- mir_to_lir->LoadClassType(type_idx, kArg0);
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
- mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset,
- mir_to_lir->TargetReg(kArg0, kNotWide),
- rl_src, true);
+ LoadClassType(*dex_file, type_idx, kArg0);
+ CallRuntimeHelperRegMethodRegLocation(kQuickAllocArrayResolved, TargetReg(kArg0, kNotWide),
+ rl_src, true);
} else {
// Use the direct pointer.
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
- mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src,
- true);
+ CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayResolved, direct_type_ptr, rl_src,
+ true);
}
} else {
// The slow path.
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArray);
- mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
+ CallRuntimeHelperImmMethodRegLocation(kQuickAllocArray, type_idx, rl_src, true);
}
- DCHECK_NE(func_offset.Int32Value(), -1);
} else {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayWithAccessCheck);
- mir_to_lir->CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
+ CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayWithAccessCheck, type_idx, rl_src, true);
}
- RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
- mir_to_lir->StoreValue(rl_dest, rl_result);
-}
-
-/*
- * Let helper function take care of everything. Will call
- * Array::AllocFromCode(type_idx, method, count);
- * Note: AllocFromCode will handle checks for errNegativeArraySize.
- */
-void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src) {
- if (cu_->target64) {
- GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src);
- } else {
- GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src);
- }
-}
-
-template <size_t pointer_size>
-static void GenFilledNewArrayCall(Mir2Lir* mir_to_lir, CompilationUnit* cu, int elems, int type_idx) {
- ThreadOffset<pointer_size> func_offset(-1);
- if (cu->compiler_driver->CanAccessTypeWithoutChecks(cu->method_idx, *cu->dex_file,
- type_idx)) {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArray);
- } else {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pCheckAndAllocArrayWithAccessCheck);
- }
- mir_to_lir->CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
+ StoreValue(rl_dest, GetReturn(kRefReg));
}
/*
@@ -427,11 +389,14 @@
int elems = info->num_arg_words;
int type_idx = info->index;
FlushAllRegs(); /* Everything to home location */
- if (cu_->target64) {
- GenFilledNewArrayCall<8>(this, cu_, elems, type_idx);
+ QuickEntrypointEnum target;
+ if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
+ type_idx)) {
+ target = kQuickCheckAndAllocArray;
} else {
- GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
+ target = kQuickCheckAndAllocArrayWithAccessCheck;
}
+ CallRuntimeHelperImmMethodImm(target, type_idx, elems, true);
FreeTemp(TargetReg(kArg2, kNotWide));
FreeTemp(TargetReg(kArg1, kNotWide));
/*
@@ -546,13 +511,7 @@
void Compile() {
LIR* unresolved_target = GenerateTargetLabel();
uninit_->target = unresolved_target;
- if (cu_->target64) {
- m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage),
- storage_index_, true);
- } else {
- m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeStaticStorage),
- storage_index_, true);
- }
+ m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);
// Copy helper's result into r_base, a no-op on all but MIPS.
m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef));
@@ -565,24 +524,12 @@
const RegStorage r_base_;
};
-template <size_t pointer_size>
-static void GenSputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
- const MirSFieldLoweringInfo* field_info, RegLocation rl_src) {
- ThreadOffset<pointer_size> setter_offset =
- is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Static)
- : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjStatic)
- : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Static));
- mir_to_lir->CallRuntimeHelperImmRegLocation(setter_offset, field_info->FieldIndex(), rl_src,
- true);
-}
-
void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
bool is_object) {
const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastPut() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastPut()) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
RegStorage r_base;
if (field_info.IsReferrersClass()) {
@@ -623,7 +570,7 @@
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
- mirror::Class::kStatusInitialized, NULL);
+ mirror::Class::kStatusInitialized, nullptr, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -659,31 +606,19 @@
FreeTemp(r_base);
} else {
FlushAllRegs(); // Everything to home locations
- if (cu_->target64) {
- GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src);
- } else {
- GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src);
- }
+ QuickEntrypointEnum target =
+ is_long_or_double ? kQuickSet64Static
+ : (is_object ? kQuickSetObjStatic : kQuickSet32Static);
+ CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true);
}
}
-template <size_t pointer_size>
-static void GenSgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
- const MirSFieldLoweringInfo* field_info) {
- ThreadOffset<pointer_size> getter_offset =
- is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Static)
- : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjStatic)
- : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Static));
- mir_to_lir->CallRuntimeHelperImm(getter_offset, field_info->FieldIndex(), true);
-}
-
void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object) {
const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastGet() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastGet()) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
RegStorage r_base;
if (field_info.IsReferrersClass()) {
@@ -720,7 +655,7 @@
LockTemp(r_tmp);
LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
mirror::Class::StatusOffset().Int32Value(),
- mirror::Class::kStatusInitialized, NULL);
+ mirror::Class::kStatusInitialized, nullptr, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -753,11 +688,11 @@
}
} else {
FlushAllRegs(); // Everything to home locations
- if (cu_->target64) {
- GenSgetCall<8>(this, is_long_or_double, is_object, &field_info);
- } else {
- GenSgetCall<4>(this, is_long_or_double, is_object, &field_info);
- }
+ QuickEntrypointEnum target =
+ is_long_or_double ? kQuickGet64Static
+ : (is_object ? kQuickGetObjStatic : kQuickGet32Static);
+ CallRuntimeHelperImm(target, field_info.FieldIndex(), true);
+
// FIXME: pGetXXStatic always return an int or int64 regardless of rl_dest.fp.
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(kCoreReg);
@@ -780,26 +715,15 @@
slow_paths_.Reset();
}
-template <size_t pointer_size>
-static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
- const MirIFieldLoweringInfo* field_info, RegLocation rl_obj) {
- ThreadOffset<pointer_size> getter_offset =
- is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
- : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
- : QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
- mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
- true);
-}
-
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastGet() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastGet()) {
RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
+ // A load of the class will lead to an iget with offset 0.
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kRefReg);
GenNullCheck(rl_obj.reg, opt_flags);
@@ -820,11 +744,13 @@
StoreValue(rl_dest, rl_result);
}
} else {
- if (cu_->target64) {
- GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj);
- } else {
- GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj);
- }
+ QuickEntrypointEnum target =
+ is_long_or_double ? kQuickGet64Instance
+ : (is_object ? kQuickGetObjInstance : kQuickGet32Instance);
+ // Second argument of pGetXXInstance is always a reference.
+ DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
+ CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true);
+
// FIXME: pGetXXInstance always return an int or int64 regardless of rl_dest.fp.
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(kCoreReg);
@@ -836,28 +762,17 @@
}
}
-template <size_t pointer_size>
-static void GenIputCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_object,
- const MirIFieldLoweringInfo* field_info, RegLocation rl_obj,
- RegLocation rl_src) {
- ThreadOffset<pointer_size> setter_offset =
- is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet64Instance)
- : (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pSetObjInstance)
- : QUICK_ENTRYPOINT_OFFSET(pointer_size, pSet32Instance));
- mir_to_lir->CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info->FieldIndex(),
- rl_obj, rl_src, true);
-}
-
void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastPut() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastPut()) {
RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
- DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
+ // Dex code never writes to the class field.
+ DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()),
+ sizeof(mirror::HeapReference<mirror::Class>));
rl_obj = LoadValue(rl_obj, kRefReg);
if (is_long_or_double) {
rl_src = LoadValueWide(rl_src, reg_class);
@@ -879,35 +794,24 @@
MarkGCCard(rl_src.reg, rl_obj.reg);
}
} else {
- if (cu_->target64) {
- GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
- } else {
- GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
- }
+ QuickEntrypointEnum target =
+ is_long_or_double ? kQuickSet64Instance
+ : (is_object ? kQuickSetObjInstance : kQuickSet32Instance);
+ CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src,
+ true);
}
}
-template <size_t pointer_size>
-static void GenArrayObjPutCall(Mir2Lir* mir_to_lir, bool needs_range_check, bool needs_null_check,
- RegLocation rl_array, RegLocation rl_index, RegLocation rl_src) {
- ThreadOffset<pointer_size> helper = needs_range_check
- ? (needs_null_check ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithNullAndBoundCheck)
- : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObjectWithBoundCheck))
- : QUICK_ENTRYPOINT_OFFSET(pointer_size, pAputObject);
- mir_to_lir->CallRuntimeHelperRegLocationRegLocationRegLocation(helper, rl_array, rl_index, rl_src,
- true);
-}
-
void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src) {
bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
(opt_flags & MIR_IGNORE_NULL_CHECK));
- if (cu_->target64) {
- GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
- } else {
- GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
- }
+ QuickEntrypointEnum target = needs_range_check
+ ? (needs_null_check ? kQuickAputObjectWithNullAndBoundCheck
+ : kQuickAputObjectWithBoundCheck)
+ : kQuickAputObject;
+ CallRuntimeHelperRegLocationRegLocationRegLocation(target, rl_array, rl_index, rl_src, true);
}
void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
@@ -920,13 +824,7 @@
type_idx)) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
- if (cu_->target64) {
- CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
- type_idx, rl_method.reg, true);
- } else {
- CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
- type_idx, rl_method.reg, true);
- }
+ CallRuntimeHelperImmReg(kQuickInitializeTypeAndVerifyAccess, type_idx, rl_method.reg, true);
RegLocation rl_result = GetReturn(kRefReg);
StoreValue(rl_dest, rl_result);
} else {
@@ -955,15 +853,8 @@
void Compile() {
GenerateTargetLabel();
- if (cu_->target64) {
- m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
- rl_method_.reg, true);
- } else {
- m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
- rl_method_.reg, true);
- }
+ m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_, rl_method_.reg, true);
m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0, kRef));
-
m2l_->OpUnconditionalBranch(cont_);
}
@@ -1024,13 +915,7 @@
void Compile() {
GenerateTargetLabel();
- if (cu_->target64) {
- m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString),
- r_method_, string_idx_, true);
- } else {
- m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pResolveString),
- r_method_, string_idx_, true);
- }
+ m2l_->CallRuntimeHelperRegImm(kQuickResolveString, r_method_, string_idx_, true);
m2l_->OpUnconditionalBranch(cont_);
}
@@ -1055,17 +940,17 @@
}
}
-template <size_t pointer_size>
-static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_t type_idx,
- RegLocation rl_dest) {
- mir_to_lir->FlushAllRegs(); /* Everything to home location */
+/*
+ * Let helper function take care of everything. Will
+ * call Class::NewInstanceFromCode(type_idx, method);
+ */
+void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
+ FlushAllRegs(); /* Everything to home location */
// alloc will always check for resolution, do we also need to verify
// access because the verifier was unable to?
- ThreadOffset<pointer_size> func_offset(-1);
- const DexFile* dex_file = cu->dex_file;
- CompilerDriver* driver = cu->compiler_driver;
- if (driver->CanAccessInstantiableTypeWithoutChecks(
- cu->method_idx, *dex_file, type_idx)) {
+ const DexFile* dex_file = cu_->dex_file;
+ CompilerDriver* driver = cu_->compiler_driver;
+ if (driver->CanAccessInstantiableTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
bool is_type_initialized;
bool use_direct_type_ptr;
uintptr_t direct_type_ptr;
@@ -1076,60 +961,33 @@
!is_finalizable) {
// The fast path.
if (!use_direct_type_ptr) {
- mir_to_lir->LoadClassType(type_idx, kArg0);
+ LoadClassType(*dex_file, type_idx, kArg0);
if (!is_type_initialized) {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
- mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef),
- true);
+ CallRuntimeHelperRegMethod(kQuickAllocObjectResolved, TargetReg(kArg0, kRef), true);
} else {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
- mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef),
- true);
+ CallRuntimeHelperRegMethod(kQuickAllocObjectInitialized, TargetReg(kArg0, kRef), true);
}
} else {
// Use the direct pointer.
if (!is_type_initialized) {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
- mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
+ CallRuntimeHelperImmMethod(kQuickAllocObjectResolved, direct_type_ptr, true);
} else {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
- mir_to_lir->CallRuntimeHelperImmMethod(func_offset, direct_type_ptr, true);
+ CallRuntimeHelperImmMethod(kQuickAllocObjectInitialized, direct_type_ptr, true);
}
}
} else {
// The slow path.
- DCHECK_EQ(func_offset.Int32Value(), -1);
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObject);
- mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
+ CallRuntimeHelperImmMethod(kQuickAllocObject, type_idx, true);
}
- DCHECK_NE(func_offset.Int32Value(), -1);
} else {
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectWithAccessCheck);
- mir_to_lir->CallRuntimeHelperImmMethod(func_offset, type_idx, true);
+ CallRuntimeHelperImmMethod(kQuickAllocObjectWithAccessCheck, type_idx, true);
}
- RegLocation rl_result = mir_to_lir->GetReturn(kRefReg);
- mir_to_lir->StoreValue(rl_dest, rl_result);
-}
-
-/*
- * Let helper function take care of everything. Will
- * call Class::NewInstanceFromCode(type_idx, method);
- */
-void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
- if (cu_->target64) {
- GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest);
- } else {
- GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest);
- }
+ StoreValue(rl_dest, GetReturn(kRefReg));
}
void Mir2Lir::GenThrow(RegLocation rl_src) {
FlushAllRegs();
- if (cu_->target64) {
- CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true);
- } else {
- CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
- }
+ CallRuntimeHelperRegLocation(kQuickDeliverException, rl_src, true);
}
// For final classes there are no sub-classes to check and so we can answer the instance-of
@@ -1167,7 +1025,6 @@
LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
}
- LIR* ne_branchover = NULL;
// FIXME: what should we be comparing here? compressed or decompressed references?
if (cu_->instruction_set == kThumb2) {
OpRegReg(kOpCmp, check_class, object_class); // Same?
@@ -1175,14 +1032,10 @@
LoadConstant(result_reg, 1); // .eq case - load true
OpEndIT(it);
} else {
- ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
- LoadConstant(result_reg, 1); // eq case - load true
+ GenSelectConst32(check_class, object_class, kCondEq, 1, 0, result_reg, kCoreReg);
}
LIR* target = NewLIR0(kPseudoTargetLabel);
null_branchover->target = target;
- if (ne_branchover != NULL) {
- ne_branchover->target = target;
- }
FreeTemp(object_class);
FreeTemp(check_class);
if (IsTemp(result_reg)) {
@@ -1197,95 +1050,98 @@
bool can_assume_type_is_in_dex_cache,
uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src) {
- // X86 has its own implementation.
- DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
-
FlushAllRegs();
// May generate a call - use explicit registers
LockCallTemps();
RegStorage method_reg = TargetReg(kArg1, kRef);
LoadCurrMethodDirect(method_reg); // kArg1 <= current Method*
RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class*
+ RegStorage ref_reg = TargetReg(kArg0, kRef); // kArg0 will hold the ref.
+ RegStorage ret_reg = GetReturn(kRefReg).reg;
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
- if (cu_->target64) {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- } else {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- }
- OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref
+ CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
+ OpRegCopy(class_reg, ret_reg); // Align usage with fast path
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
} else if (use_declaring_class) {
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
class_reg, kNotVolatile);
} else {
+ if (can_assume_type_is_in_dex_cache) {
+      // Load the ref only in this case; on the other path it is loaded after the dex cache check.
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
+ }
+
// Load dex cache entry into class_reg (kArg2)
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref
LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
class_reg, kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
if (!can_assume_type_is_in_dex_cache) {
- // Need to test presence of type in dex cache at runtime
- LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
- // Not resolved
- // Call out to helper, which will return resolved type in kRet0
- if (cu_->target64) {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
- } else {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
- }
- OpRegCopy(TargetReg(kArg2, kRef), TargetReg(kRet0, kRef)); // Align usage with fast path
- LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); /* reload Ref */
- // Rejoin code paths
- LIR* hop_target = NewLIR0(kPseudoTargetLabel);
- hop_branch->target = hop_target;
+ LIR* slow_path_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
+ LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
+
+      // Load the ref here; the slow path rejoins above, so both paths execute this load.
+ LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
+
+ class InitTypeSlowPath : public Mir2Lir::LIRSlowPath {
+ public:
+ InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx,
+ RegLocation rl_src)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx),
+ rl_src_(rl_src) {
+ }
+
+ void Compile() OVERRIDE {
+ GenerateTargetLabel();
+
+ m2l_->CallRuntimeHelperImm(kQuickInitializeType, type_idx_, true);
+ m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kRef),
+ m2l_->TargetReg(kRet0, kRef)); // Align usage with fast path
+ m2l_->OpUnconditionalBranch(cont_);
+ }
+
+ private:
+ uint32_t type_idx_;
+ RegLocation rl_src_;
+ };
+
+ AddSlowPath(new (arena_) InitTypeSlowPath(this, slow_path_branch, slow_path_target,
+ type_idx, rl_src));
}
}
/* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
RegLocation rl_result = GetReturn(kCoreReg);
- if (cu_->instruction_set == kMips) {
- // On MIPS rArg0 != rl_result, place false in result if branch is taken.
+ if (!IsSameReg(rl_result.reg, ref_reg)) {
+ // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
LoadConstant(rl_result.reg, 0);
}
- LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
/* load object->klass_ */
+ RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref.
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
- TargetReg(kArg1, kRef), kNotVolatile);
+ LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
+ ref_class_reg, kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
LIR* branchover = NULL;
if (type_known_final) {
- // rl_result == ref == null == 0.
- if (cu_->instruction_set == kThumb2) {
- OpRegReg(kOpCmp, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef)); // Same?
- LIR* it = OpIT(kCondEq, "E"); // if-convert the test
- LoadConstant(rl_result.reg, 1); // .eq case - load true
- LoadConstant(rl_result.reg, 0); // .ne case - load false
- OpEndIT(it);
- } else {
- LoadConstant(rl_result.reg, 0); // ne case - load false
- branchover = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
- LoadConstant(rl_result.reg, 1); // eq case - load true
- }
+ // rl_result == ref == class.
+ GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
+ kCoreReg);
} else {
if (cu_->instruction_set == kThumb2) {
- RegStorage r_tgt = cu_->target64 ?
- LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
- LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
+ RegStorage r_tgt = LoadHelper(kQuickInstanceofNonTrivial);
LIR* it = nullptr;
if (!type_known_abstract) {
/* Uses conditional nullification */
- OpRegReg(kOpCmp, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef)); // Same?
+ OpRegReg(kOpCmp, ref_class_reg, class_reg); // Same?
it = OpIT(kCondEq, "EE"); // if-convert the test
- LoadConstant(TargetReg(kArg0, kNotWide), 1); // .eq case - load true
+ LoadConstant(rl_result.reg, 1); // .eq case - load true
}
- OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef)); // .ne case - arg0 <= class
+ OpRegCopy(ref_reg, class_reg); // .ne case - arg0 <= class
OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
if (it != nullptr) {
OpEndIT(it);
@@ -1297,12 +1153,9 @@
LoadConstant(rl_result.reg, 1); // assume true
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
}
- RegStorage r_tgt = cu_->target64 ?
- LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
- LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
- OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef)); // .ne case - arg0 <= class
- OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
- FreeTemp(r_tgt);
+
+ OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class
+ CallRuntimeHelper(kQuickInstanceofNonTrivial, false);
}
}
// TODO: only clobber when type isn't final?
@@ -1311,7 +1164,7 @@
LIR* target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
branch1->target = target;
- if (branchover != NULL) {
+ if (branchover != nullptr) {
branchover->target = target;
}
}
@@ -1361,13 +1214,7 @@
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
// InitializeTypeAndVerifyAccess(idx, method)
- if (cu_->target64) {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- } else {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- }
+ CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path
} else if (use_declaring_class) {
LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
@@ -1397,13 +1244,8 @@
// Call out to helper, which will return resolved type in kArg0
// InitializeTypeFromCode(idx, method)
- if (m2l_->cu_->target64) {
- m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
- m2l_->TargetReg(kArg1, kRef), true);
- } else {
- m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
- m2l_->TargetReg(kArg1, kRef), true);
- }
+ m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_,
+ m2l_->TargetReg(kArg1, kRef), true);
m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0, kRef)); // Align usage with fast path
m2l_->OpUnconditionalBranch(cont_);
}
@@ -1434,16 +1276,8 @@
m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
m2l_->TargetReg(kArg1, kRef), kNotVolatile);
}
- if (m2l_->cu_->target64) {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast),
- m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef),
- true);
- } else {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast),
- m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef),
- true);
- }
-
+ m2l_->CallRuntimeHelperRegReg(kQuickCheckCast, m2l_->TargetReg(kArg2, kRef),
+ m2l_->TargetReg(kArg1, kRef), true);
m2l_->OpUnconditionalBranch(cont_);
}
@@ -1524,39 +1358,28 @@
}
}
-
-template <size_t pointer_size>
-static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_shift) {
- ThreadOffset<pointer_size> func_offset(-1);
-
+void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift) {
+ QuickEntrypointEnum target;
switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShlLong);
+ target = kQuickShlLong;
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pShrLong);
+ target = kQuickShrLong;
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pUshrLong);
+ target = kQuickUshrLong;
break;
default:
LOG(FATAL) << "Unexpected case";
+ target = kQuickShlLong;
}
- mir_to_lir->FlushAllRegs(); /* Send everything to home location */
- mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
-}
-
-void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift) {
- if (cu_->target64) {
- GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift);
- } else {
- GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift);
- }
+ FlushAllRegs(); /* Send everything to home location */
+ CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_shift, false);
RegLocation rl_result = GetReturnWide(kCoreReg);
StoreValueWide(rl_dest, rl_result);
}
@@ -1685,19 +1508,13 @@
if (!done) {
FlushAllRegs(); /* Send everything to home location */
LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide));
- RegStorage r_tgt = cu_->target64 ?
- CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) :
- CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
+ RegStorage r_tgt = CallHelperSetup(kQuickIdivmod);
LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide));
if (check_zero) {
GenDivZeroCheck(TargetReg(kArg1, kNotWide));
}
// NOTE: callout here is not a safepoint.
- if (cu_->target64) {
- CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */);
- } else {
- CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */);
- }
+ CallHelper(r_tgt, kQuickIdivmod, false /* not a safepoint */);
if (op == kOpDiv)
rl_result = GetReturn(kCoreReg);
else
@@ -1956,13 +1773,7 @@
FlushAllRegs(); /* Everything to home location. */
LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide));
Clobber(TargetReg(kArg0, kNotWide));
- if (cu_->target64) {
- CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, kNotWide),
- lit, false);
- } else {
- CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, kNotWide),
- lit, false);
- }
+ CallRuntimeHelperRegImm(kQuickIdivmod, TargetReg(kArg0, kNotWide), lit, false);
if (is_div)
rl_result = GetReturn(kCoreReg);
else
@@ -1985,158 +1796,105 @@
StoreValue(rl_dest, rl_result);
}
-template <size_t pointer_size>
-static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
RegLocation rl_result;
OpKind first_op = kOpBkpt;
OpKind second_op = kOpBkpt;
bool call_out = false;
bool check_zero = false;
- ThreadOffset<pointer_size> func_offset(-1);
- int ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
+ int ret_reg = TargetReg(kRet0, kNotWide).GetReg();
+ QuickEntrypointEnum target;
switch (opcode) {
case Instruction::NOT_LONG:
- if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
- mir_to_lir->GenNotLong(rl_dest, rl_src2);
- return;
- }
- rl_src2 = mir_to_lir->LoadValueWide(rl_src2, kCoreReg);
- rl_result = mir_to_lir->EvalLoc(rl_dest, kCoreReg, true);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Check for destructive overlap
if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
- RegStorage t_reg = mir_to_lir->AllocTemp();
- mir_to_lir->OpRegCopy(t_reg, rl_src2.reg.GetHigh());
- mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
- mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
- mir_to_lir->FreeTemp(t_reg);
+ RegStorage t_reg = AllocTemp();
+ OpRegCopy(t_reg, rl_src2.reg.GetHigh());
+ OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
+ FreeTemp(t_reg);
} else {
- mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
- mir_to_lir->OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
+ OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
}
- mir_to_lir->StoreValueWide(rl_dest, rl_result);
+ StoreValueWide(rl_dest, rl_result);
return;
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
- if (cu->instruction_set != kThumb2) {
- mir_to_lir->GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- }
first_op = kOpAdd;
second_op = kOpAdc;
break;
case Instruction::SUB_LONG:
case Instruction::SUB_LONG_2ADDR:
- if (cu->instruction_set != kThumb2) {
- mir_to_lir->GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- }
first_op = kOpSub;
second_op = kOpSbc;
break;
case Instruction::MUL_LONG:
case Instruction::MUL_LONG_2ADDR:
- if (cu->instruction_set != kMips) {
- mir_to_lir->GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- } else {
- call_out = true;
- ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul);
- }
+ call_out = true;
+ ret_reg = TargetReg(kRet0, kNotWide).GetReg();
+ target = kQuickLmul;
break;
case Instruction::DIV_LONG:
case Instruction::DIV_LONG_2ADDR:
- if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
- mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
- return;
- }
call_out = true;
check_zero = true;
- ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv);
+ ret_reg = TargetReg(kRet0, kNotWide).GetReg();
+ target = kQuickLdiv;
break;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
- if (cu->instruction_set == kArm64 || cu->instruction_set == kX86_64) {
- mir_to_lir->GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
- return;
- }
call_out = true;
check_zero = true;
- func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
+ target = kQuickLmod;
/* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
- ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2, kNotWide).GetReg() :
- mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
+ ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2, kNotWide).GetReg() :
+ TargetReg(kRet0, kNotWide).GetReg();
break;
case Instruction::AND_LONG_2ADDR:
case Instruction::AND_LONG:
- if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
- cu->instruction_set == kArm64) {
- return mir_to_lir->GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
- }
first_op = kOpAnd;
second_op = kOpAnd;
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
- if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
- cu->instruction_set == kArm64) {
- mir_to_lir->GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- }
first_op = kOpOr;
second_op = kOpOr;
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
- if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64 ||
- cu->instruction_set == kArm64) {
- mir_to_lir->GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- }
first_op = kOpXor;
second_op = kOpXor;
break;
- case Instruction::NEG_LONG: {
- mir_to_lir->GenNegLong(rl_dest, rl_src2);
- return;
- }
default:
LOG(FATAL) << "Invalid long arith op";
}
if (!call_out) {
- mir_to_lir->GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
+ GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
} else {
- mir_to_lir->FlushAllRegs(); /* Send everything to home location */
+ FlushAllRegs(); /* Send everything to home location */
if (check_zero) {
- RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kWide);
- RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kWide);
- mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2);
- RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset);
- mir_to_lir->GenDivZeroCheckWide(r_tmp2);
- mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1);
+ RegStorage r_tmp1 = TargetReg(kArg0, kWide);
+ RegStorage r_tmp2 = TargetReg(kArg2, kWide);
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
+ RegStorage r_tgt = CallHelperSetup(target);
+ GenDivZeroCheckWide(r_tmp2);
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
// NOTE: callout here is not a safepoint
- mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */);
+ CallHelper(r_tgt, target, false /* not safepoint */);
} else {
- mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_src2, false);
}
// Adjust return regs in to handle case of rem returning kArg2/kArg3
- if (ret_reg == mir_to_lir->TargetReg(kRet0, kNotWide).GetReg())
- rl_result = mir_to_lir->GetReturnWide(kCoreReg);
+ if (ret_reg == TargetReg(kRet0, kNotWide).GetReg())
+ rl_result = GetReturnWide(kCoreReg);
else
- rl_result = mir_to_lir->GetReturnWideAlt();
- mir_to_lir->StoreValueWide(rl_dest, rl_result);
- }
-}
-
-void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- if (cu_->target64) {
- GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
- } else {
- GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
+ rl_result = GetReturnWideAlt();
+ StoreValueWide(rl_dest, rl_result);
}
}
@@ -2149,17 +1907,15 @@
}
}
-template <size_t pointer_size>
-void Mir2Lir::GenConversionCall(ThreadOffset<pointer_size> func_offset,
- RegLocation rl_dest, RegLocation rl_src) {
+void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+ RegLocation rl_src) {
/*
* Don't optimize the register usage since it calls out to support
* functions
*/
- DCHECK_EQ(pointer_size, GetInstructionSetPointerSize(cu_->instruction_set));
FlushAllRegs(); /* Send everything to home location */
- CallRuntimeHelperRegLocation(func_offset, rl_src, false);
+ CallRuntimeHelperRegLocation(trampoline, rl_src, false);
if (rl_dest.wide) {
RegLocation rl_result;
rl_result = GetReturnWide(LocToRegClass(rl_dest));
@@ -2170,10 +1926,6 @@
StoreValue(rl_dest, rl_result);
}
}
-template void Mir2Lir::GenConversionCall(ThreadOffset<4> func_offset,
- RegLocation rl_dest, RegLocation rl_src);
-template void Mir2Lir::GenConversionCall(ThreadOffset<8> func_offset,
- RegLocation rl_dest, RegLocation rl_src);
class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
@@ -2185,11 +1937,7 @@
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoSuspendTarget);
- if (cu_->target64) {
- m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true);
- } else {
- m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true);
- }
+ m2l_->CallRuntimeHelper(kQuickTestSuspend, true);
if (cont_ != nullptr) {
m2l_->OpUnconditionalBranch(cont_);
}
@@ -2198,7 +1946,7 @@
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
return;
}
@@ -2218,7 +1966,7 @@
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitSuspendChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
OpUnconditionalBranch(target);
return;
@@ -2244,21 +1992,13 @@
/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
FlushAllRegs();
- if (cu_->target64) {
- CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true);
- } else {
- CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
- }
+ CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
FlushAllRegs();
- if (cu_->target64) {
- CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true);
- } else {
- CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
- }
+ CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
/* Generic code for generating a wide constant into a VR. */
@@ -2268,4 +2008,92 @@
StoreValueWide(rl_dest, rl_result);
}
+void Mir2Lir::GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+ const uint16_t entries = table[1];
+ // Chained cmp-and-branch.
+ const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
+ int32_t current_key = as_int32[0];
+ const int32_t* targets = &as_int32[1];
+ rl_src = LoadValue(rl_src, kCoreReg);
+ int i = 0;
+ for (; i < entries; i++, current_key++) {
+ if (!InexpensiveConstantInt(current_key, Instruction::Code::IF_EQ)) {
+ // Switch to using a temp and add.
+ break;
+ }
+ BasicBlock* case_block =
+ mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
+ }
+ if (i < entries) {
+ // The rest do not seem to be inexpensive. Try to allocate a temp and use add.
+ RegStorage key_temp = AllocTypedTemp(false, kCoreReg, false);
+ if (key_temp.Valid()) {
+ LoadConstantNoClobber(key_temp, current_key);
+ for (; i < entries - 1; i++, current_key++) {
+ BasicBlock* case_block =
+ mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+ OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
+ OpRegImm(kOpAdd, key_temp, 1); // Increment key.
+ }
+ BasicBlock* case_block =
+ mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+ OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
+ } else {
+ // No free temp, just finish the old loop.
+ for (; i < entries; i++, current_key++) {
+ BasicBlock* case_block =
+ mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
+ }
+ }
+ }
+}
+
+void Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+ if (cu_->verbose) {
+ DumpSparseSwitchTable(table);
+ }
+
+ const uint16_t entries = table[1];
+ if (entries <= kSmallSwitchThreshold) {
+ GenSmallPackedSwitch(mir, table_offset, rl_src);
+ } else {
+ // Use the backend-specific implementation.
+ GenLargePackedSwitch(mir, table_offset, rl_src);
+ }
+}
+
+void Mir2Lir::GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+ const uint16_t entries = table[1];
+ // Chained cmp-and-branch.
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+ const int32_t* targets = &keys[entries];
+ rl_src = LoadValue(rl_src, kCoreReg);
+ for (int i = 0; i < entries; i++) {
+ int key = keys[i];
+ BasicBlock* case_block =
+ mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]);
+ }
+}
+
+void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+ if (cu_->verbose) {
+ DumpSparseSwitchTable(table);
+ }
+
+ const uint16_t entries = table[1];
+ if (entries <= kSmallSwitchThreshold) {
+ GenSmallSparseSwitch(mir, table_offset, rl_src);
+ } else {
+ // Use the backend-specific implementation.
+ GenLargeSparseSwitch(mir, table_offset, rl_src);
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 9dedeae..3cfc9a6 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -25,10 +25,8 @@
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
-#include "mirror/reference-inl.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
-#include "scoped_thread_state_change.h"
#include "x86/codegen_x86.h"
namespace art {
@@ -67,13 +65,6 @@
AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
}
-// Macro to help instantiate.
-// TODO: This might be used to only instantiate <4> on pure 32b systems.
-#define INSTANTIATE(sig_part1, ...) \
- template sig_part1(ThreadOffset<4>, __VA_ARGS__); \
- template sig_part1(ThreadOffset<8>, __VA_ARGS__); \
-
-
/*
* To save scheduling time, helper calls are broken into two parts: generation of
* the helper target address, and the actual call to the helper. Because x86
@@ -81,108 +72,73 @@
* load arguments between the two parts.
*/
// template <size_t pointer_size>
-RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
- // All CallRuntimeHelperXXX call this first. So make a central check here.
- DCHECK_EQ(4U, GetInstructionSetPointerSize(cu_->instruction_set));
-
+RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
return RegStorage::InvalidReg();
} else {
- return LoadHelper(helper_offset);
+ return LoadHelper(trampoline);
}
}
-RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<8> helper_offset) {
- // All CallRuntimeHelperXXX call this first. So make a central check here.
- DCHECK_EQ(8U, GetInstructionSetPointerSize(cu_->instruction_set));
+LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
+ bool use_link) {
+ LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- return RegStorage::InvalidReg();
- } else {
- return LoadHelper(helper_offset);
- }
-}
-
-/* NOTE: if r_tgt is a temp, it will be freed following use */
-template <size_t pointer_size>
-LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset,
- bool safepoint_pc, bool use_link) {
- LIR* call_inst;
- OpKind op = use_link ? kOpBlx : kOpBx;
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- call_inst = OpThreadMem(op, helper_offset);
- } else {
- call_inst = OpReg(op, r_tgt);
+ if (r_tgt.Valid()) {
FreeTemp(r_tgt);
}
+
if (safepoint_pc) {
MarkSafepointPC(call_inst);
}
return call_inst;
}
-template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset,
- bool safepoint_pc, bool use_link);
-template LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<8> helper_offset,
- bool safepoint_pc, bool use_link);
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
- bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImm, int arg0, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperReg, RegStorage arg0, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset,
- RegLocation arg0, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
+ bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
if (arg0.wide == 0) {
LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
} else {
LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
}
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocation, RegLocation arg0, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
LoadConstant(TargetReg(kArg1, kNotWide), arg1);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmImm, int arg0, int arg1, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
RegLocation arg1, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
- DCHECK(!arg1.fp);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
if (arg1.wide == 0) {
LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
} else {
@@ -191,61 +147,49 @@
}
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocation, int arg0, RegLocation arg1,
- bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset,
- RegLocation arg0, int arg1, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
+ int arg1, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
DCHECK(!arg0.wide);
LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
LoadConstant(TargetReg(kArg1, kNotWide), arg1);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationImm, RegLocation arg0, int arg1,
- bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
- RegStorage arg1, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
+ bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmReg, int arg0, RegStorage arg1, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
- int arg1, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
+ bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
LoadConstant(TargetReg(kArg1, kNotWide), arg1);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegImm, RegStorage arg0, int arg1, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
LoadCurrMethodDirect(TargetReg(kArg1, kRef));
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethod, int arg0, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
if (r_tmp.NotExactlyEquals(arg0)) {
@@ -253,15 +197,12 @@
}
LoadCurrMethodDirect(TargetReg(kArg1, kRef));
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethod, RegStorage arg0, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
- RegStorage arg0, RegLocation arg2,
- bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
+ RegLocation arg2, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
if (r_tmp.NotExactlyEquals(arg0)) {
@@ -270,16 +211,13 @@
LoadCurrMethodDirect(TargetReg(kArg1, kRef));
LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegMethodRegLocation, RegStorage arg0, RegLocation arg2,
- bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
RegLocation arg0, RegLocation arg1,
bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
@@ -328,10 +266,8 @@
}
}
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
- RegLocation arg1, bool safepoint_pc)
void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
WideKind arg0_kind = arg0.GetWideKind();
@@ -352,59 +288,47 @@
}
}
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
RegStorage arg1, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
CopyToArgumentRegs(arg0, arg1);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegReg, RegStorage arg0, RegStorage arg1,
- bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
+void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
RegStorage arg1, int arg2, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
CopyToArgumentRegs(arg0, arg1);
LoadConstant(TargetReg(kArg2, kNotWide), arg2);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegRegImm, RegStorage arg0, RegStorage arg1, int arg2,
- bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
- int arg0, RegLocation arg2, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
+ RegLocation arg2, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
LoadCurrMethodDirect(TargetReg(kArg1, kRef));
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodRegLocation, int arg0, RegLocation arg2,
- bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
- int arg2, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+void Mir2Lir::CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
+ bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
LoadCurrMethodDirect(TargetReg(kArg1, kRef));
LoadConstant(TargetReg(kArg2, kNotWide), arg2);
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmMethodImm, int arg0, int arg2, bool safepoint_pc)
-template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
- int arg0, RegLocation arg1,
+void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
+ RegLocation arg1,
RegLocation arg2, bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U); // The static_cast works around an
// instantiation bug in GCC.
LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
@@ -415,27 +339,22 @@
}
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0, RegLocation arg1,
- RegLocation arg2, bool safepoint_pc)
-template <size_t pointer_size>
void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
- ThreadOffset<pointer_size> helper_offset,
+ QuickEntrypointEnum trampoline,
RegLocation arg0,
RegLocation arg1,
RegLocation arg2,
bool safepoint_pc) {
- RegStorage r_tgt = CallHelperSetup(helper_offset);
+ RegStorage r_tgt = CallHelperSetup(trampoline);
LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
ClobberCallerSave();
- CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
+ CallHelper(r_tgt, trampoline, safepoint_pc);
}
-INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation, RegLocation arg0,
- RegLocation arg1, RegLocation arg2, bool safepoint_pc)
/*
* If there are any ins passed in registers that have not been promoted
@@ -727,11 +646,12 @@
return state + 1;
}
-template <size_t pointer_size>
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
- ThreadOffset<pointer_size> trampoline, int state,
+ QuickEntrypointEnum trampoline, int state,
const MethodReference& target_method, uint32_t method_idx) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+
+
/*
* This handles the case in which the base method is not fully
* resolved at compile time, we bail to a runtime helper.
@@ -739,8 +659,13 @@
if (state == 0) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
// Load trampoline target
- cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(),
- cg->TargetPtrReg(kInvokeTgt));
+ int32_t disp;
+ if (cu->target64) {
+ disp = GetThreadOffset<8>(trampoline).Int32Value();
+ } else {
+ disp = GetThreadOffset<4>(trampoline).Int32Value();
+ }
+ cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
}
// Load kArg0 with method index
CHECK_EQ(cu->dex_file, target_method.dex_file);
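The 32-/64-bit selection above recurs throughout this change; a minimal sketch of how it could be factored out, assuming only the GetThreadOffset<> helper already used in this hunk (the function name is invented):
    // Hypothetical helper: pick the TLS displacement of a quick entrypoint for the current target.
    static int32_t TrampolineDisp(const CompilationUnit* cu, QuickEntrypointEnum trampoline) {
      return cu->target64 ? GetThreadOffset<8>(trampoline).Int32Value()
                          : GetThreadOffset<4>(trampoline).Int32Value();
    }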
@@ -755,54 +680,32 @@
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (cu->target64) {
- ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeStaticTrampolineWithAccessCheck);
- return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
- } else {
- ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeStaticTrampolineWithAccessCheck);
- return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
- }
+ return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
+ target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (cu->target64) {
- ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeDirectTrampolineWithAccessCheck);
- return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
- } else {
- ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeDirectTrampolineWithAccessCheck);
- return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
- }
+ return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
+ target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (cu->target64) {
- ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeSuperTrampolineWithAccessCheck);
- return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
- } else {
- ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeSuperTrampolineWithAccessCheck);
- return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
- }
+ return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
+ target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (cu->target64) {
- ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
- pInvokeVirtualTrampolineWithAccessCheck);
- return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
- } else {
- ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
- pInvokeVirtualTrampolineWithAccessCheck);
- return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
- }
+ return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
+ target_method, 0);
}
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
@@ -810,15 +713,8 @@
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (cu->target64) {
- ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
- pInvokeInterfaceTrampolineWithAccessCheck);
- return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
- } else {
- ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
- pInvokeInterfaceTrampolineWithAccessCheck);
- return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
- }
+ return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
+ target_method, 0);
}
int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
@@ -957,21 +853,35 @@
type, skip_this);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
+ (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return call_state;
+ }
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
- RegStorage tmp = AllocTemp();
- Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
- MarkPossibleNullPointerException(info->opt_flags);
- FreeTemp(tmp);
+ GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
}
}
return call_state;
}
+// Default implementation of implicit null pointer check.
+// Overridden by arch-specific backends as necessary.
+void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ RegStorage tmp = AllocTemp();
+ Load32Disp(reg, 0, tmp);
+ MarkPossibleNullPointerException(opt_flags);
+ FreeTemp(tmp);
+}
+
+
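For illustration only, a backend with a spare scratch register could override this hook with a single faulting load; a sketch under that assumption (FooMir2Lir and rs_scratch are invented names):
    // The faulting load itself is the null check: the SIGSEGV handler rethrows it as an NPE.
    void FooMir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
      if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
        return;  // The elimination pass already proved the reference non-null.
      }
      Load32Disp(reg, 0, rs_scratch);  // rs_scratch: assumed always-available temp register.
      MarkPossibleNullPointerException(opt_flags);
    }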
/*
* May have 0+ arguments (also used for jumbo). Note that
* source virtual registers may be in physical registers, so may
@@ -1170,13 +1080,8 @@
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
- if (cu_->target64) {
- CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0, kRef),
- TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
- } else {
- CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0, kRef),
- TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
- }
+ CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
+ (info->num_arg_words - 3) * 4, false);
}
call_state = LoadArgRegs(info, call_state, next_call_insn,
@@ -1186,16 +1091,17 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
+ (info->opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return call_state;
+ }
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
- RegStorage tmp = AllocTemp();
- Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
- MarkPossibleNullPointerException(info->opt_flags);
- FreeTemp(tmp);
+ GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
}
}
return call_state;
@@ -1221,55 +1127,32 @@
return res;
}
-bool Mir2Lir::GenInlinedGet(CallInfo* info) {
+bool Mir2Lir::GenInlinedReferenceGet(CallInfo* info) {
if (cu_->instruction_set == kMips) {
// TODO - add Mips implementation
return false;
}
- // the refrence class is stored in the image dex file which might not be the same as the cu's
- // dex file. Query the reference class for the image dex file then reset to starting dex file
- // in after loading class type.
- uint16_t type_idx = 0;
- const DexFile* ref_dex_file = nullptr;
- {
- ScopedObjectAccess soa(Thread::Current());
- type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
- ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
- }
- CHECK(LIKELY(ref_dex_file != nullptr));
-
- // address is either static within the image file, or needs to be patched up after compilation.
- bool unused_type_initialized;
bool use_direct_type_ptr;
uintptr_t direct_type_ptr;
- bool is_finalizable;
- const DexFile* old_dex = cu_->dex_file;
- cu_->dex_file = ref_dex_file;
- RegStorage reg_class = TargetPtrReg(kArg1);
- if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
- &use_direct_type_ptr, &direct_type_ptr,
- &is_finalizable) || is_finalizable) {
- cu_->dex_file = old_dex;
- // address is not known and post-compile patch is not possible, cannot insert intrinsic.
+ ClassReference ref;
+ if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
+ &use_direct_type_ptr, &direct_type_ptr)) {
return false;
}
+
+ RegStorage reg_class = TargetReg(kArg1, kRef);
+ Clobber(reg_class);
+ LockTemp(reg_class);
if (use_direct_type_ptr) {
LoadConstant(reg_class, direct_type_ptr);
} else {
- LoadClassType(type_idx, kArg1);
+ uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
+ LoadClassType(*ref.first, type_idx, kArg1);
}
- cu_->dex_file = old_dex;
- // get the offset for flags in reference class.
- uint32_t slow_path_flag_offset = 0;
- uint32_t disable_flag_offset = 0;
- {
- ScopedObjectAccess soa(Thread::Current());
- mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
- slow_path_flag_offset = reference_class->GetSlowPathFlagOffset().Uint32Value();
- disable_flag_offset = reference_class->GetDisableIntrinsicFlagOffset().Uint32Value();
- }
+ uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
+ uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
CHECK(slow_path_flag_offset && disable_flag_offset &&
(slow_path_flag_offset != disable_flag_offset));
@@ -1281,11 +1164,19 @@
RegStorage reg_disabled = AllocTemp();
Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path);
Load32Disp(reg_class, disable_flag_offset, reg_disabled);
- OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
+ FreeTemp(reg_class);
+ LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
FreeTemp(reg_disabled);
// if slow path, jump to JNI path target
- LIR* slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
+ LIR* slow_path_branch;
+ if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
+ // Generate only the conditional branch: the OR already set the condition flags (we want the 'Z' flag).
+ slow_path_branch = OpCondBranch(kCondNe, nullptr);
+ } else {
+ // Generate compare and branch.
+ slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
+ }
FreeTemp(reg_slow_path);
// slow path not enabled, simply load the referent of the reference object
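Schematically, the fast path assembled here behaves like the following pseudo-C (field names are descriptive only, not the real offsets):
    //   int32_t slow = klass->slowPathEnabled | klass->disableIntrinsic;  // two 32-bit loads + OR
    //   if (slow != 0) goto jni_slow_path;     // reuse the OR's flags when the ISA sets them
    //   result = ref->referent;                // otherwise load the referent inline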
@@ -1320,52 +1211,30 @@
RegLocation rl_obj = info->args[0];
RegLocation rl_idx = info->args[1];
rl_obj = LoadValue(rl_obj, kRefReg);
- // X86 wants to avoid putting a constant index into a register.
- if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64)&& rl_idx.is_const)) {
- rl_idx = LoadValue(rl_idx, kCoreReg);
- }
+ rl_idx = LoadValue(rl_idx, kCoreReg);
RegStorage reg_max;
GenNullCheck(rl_obj.reg, info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* range_check_branch = nullptr;
RegStorage reg_off;
RegStorage reg_ptr;
- if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
- reg_off = AllocTemp();
- reg_ptr = AllocTempRef();
- if (range_check) {
- reg_max = AllocTemp();
- Load32Disp(rl_obj.reg, count_offset, reg_max);
- MarkPossibleNullPointerException(info->opt_flags);
- }
- Load32Disp(rl_obj.reg, offset_offset, reg_off);
+ reg_off = AllocTemp();
+ reg_ptr = AllocTempRef();
+ if (range_check) {
+ reg_max = AllocTemp();
+ Load32Disp(rl_obj.reg, count_offset, reg_max);
MarkPossibleNullPointerException(info->opt_flags);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
- if (range_check) {
- // Set up a slow path to allow retry in case of bounds violation */
- OpRegReg(kOpCmp, rl_idx.reg, reg_max);
- FreeTemp(reg_max);
- range_check_branch = OpCondBranch(kCondUge, nullptr);
- }
- OpRegImm(kOpAdd, reg_ptr, data_offset);
- } else {
- if (range_check) {
- // On x86, we can compare to memory directly
- // Set up a launch pad to allow retry in case of bounds violation */
- if (rl_idx.is_const) {
- range_check_branch = OpCmpMemImmBranch(
- kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
- mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
- } else {
- OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
- range_check_branch = OpCondBranch(kCondUge, nullptr);
- }
- }
- reg_off = AllocTemp();
- reg_ptr = AllocTempRef();
- Load32Disp(rl_obj.reg, offset_offset, reg_off);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
}
+ Load32Disp(rl_obj.reg, offset_offset, reg_off);
+ MarkPossibleNullPointerException(info->opt_flags);
+ LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
+ if (range_check) {
+ // Set up a slow path to allow retry in case of bounds violation.
+ OpRegReg(kOpCmp, rl_idx.reg, reg_max);
+ FreeTemp(reg_max);
+ range_check_branch = OpCondBranch(kCondUge, nullptr);
+ }
+ OpRegImm(kOpAdd, reg_ptr, data_offset);
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
} else {
@@ -1377,11 +1246,7 @@
}
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
- LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
- } else {
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
- }
+ LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
FreeTemp(reg_off);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -1525,20 +1390,6 @@
return true;
}
-bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
- return false;
- }
- RegLocation rl_src = info->args[0];
- rl_src = LoadValue(rl_src, kCoreReg);
- RegLocation rl_dest = InlineTarget(info);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
- StoreValue(rl_dest, rl_result);
- return true;
-}
-
bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
// Currently implemented only for ARM64
return false;
@@ -1549,20 +1400,20 @@
return false;
}
-bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
- return false;
- }
- RegLocation rl_src = info->args[0];
- rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_dest = InlineTargetWide(info);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ return false;
+}
- OpRegCopyWide(rl_result.reg, rl_src.reg);
- OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
- StoreValueWide(rl_dest, rl_result);
- return true;
+bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ return false;
+}
+
+bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+ return false;
+}
+
+bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ return false;
}
bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
@@ -1626,9 +1477,7 @@
RegLocation rl_start = info->args[2]; // 3rd arg only present in III flavor of IndexOf.
LoadValueDirectFixed(rl_start, reg_start);
}
- RegStorage r_tgt = cu_->target64 ?
- LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pIndexOf)) :
- LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
+ RegStorage r_tgt = LoadHelper(kQuickIndexOf);
GenExplicitNullCheck(reg_ptr, info->opt_flags);
LIR* high_code_point_branch =
rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
@@ -1667,11 +1516,7 @@
LoadValueDirectFixed(rl_cmp, reg_cmp);
RegStorage r_tgt;
if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
- if (cu_->target64) {
- r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
- } else {
- r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
- }
+ r_tgt = LoadHelper(kQuickStringCompareTo);
} else {
r_tgt = RegStorage::InvalidReg();
}
@@ -1681,15 +1526,7 @@
LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
AddIntrinsicSlowPath(info, cmp_null_check_branch);
// NOTE: not a safepoint
- if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
- OpReg(kOpBlx, r_tgt);
- } else {
- if (cu_->target64) {
- OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
- } else {
- OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
- }
- }
+ CallHelper(r_tgt, kQuickStringCompareTo, false, true);
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
StoreValue(rl_dest, rl_return);
@@ -1720,16 +1557,6 @@
kNotVolatile);
break;
- case kX86:
- reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
- Thread::PeerOffset<4>());
- break;
-
- case kX86_64:
- reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
- Thread::PeerOffset<8>());
- break;
-
default:
LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
}
@@ -1852,29 +1679,29 @@
GenInvokeNoInline(info);
}
-template <size_t pointer_size>
static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
- ThreadOffset<pointer_size> trampoline(-1);
+ QuickEntrypointEnum trampoline;
switch (type) {
case kInterface:
- trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeInterfaceTrampolineWithAccessCheck);
+ trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
break;
case kDirect:
- trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeDirectTrampolineWithAccessCheck);
+ trampoline = kQuickInvokeDirectTrampolineWithAccessCheck;
break;
case kStatic:
- trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeStaticTrampolineWithAccessCheck);
+ trampoline = kQuickInvokeStaticTrampolineWithAccessCheck;
break;
case kSuper:
- trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeSuperTrampolineWithAccessCheck);
+ trampoline = kQuickInvokeSuperTrampolineWithAccessCheck;
break;
case kVirtual:
- trampoline = QUICK_ENTRYPOINT_OFFSET(pointer_size, pInvokeVirtualTrampolineWithAccessCheck);
+ trampoline = kQuickInvokeVirtualTrampolineWithAccessCheck;
break;
default:
LOG(FATAL) << "Unexpected invoke type";
+ trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
}
- return mir_to_lir->OpThreadMem(kOpBlx, trampoline);
+ return mir_to_lir->InvokeTrampoline(kOpBlx, RegStorage::InvalidReg(), trampoline);
}
void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
@@ -1945,12 +1772,7 @@
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
}
} else {
- // TODO: Extract?
- if (cu_->target64) {
- call_inst = GenInvokeNoInlineCall<8>(this, info->type);
- } else {
- call_inst = GenInvokeNoInlineCall<4>(this, info->type);
- }
+ call_inst = GenInvokeNoInlineCall(this, info->type);
}
}
EndInvoke(info);
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 2893157..e0f4691 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -15,15 +15,43 @@
*/
#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
namespace art {
#define DEBUG_OPT(X)
+#define LOAD_STORE_CHECK_REG_DEP(mask, check) (mask.Intersects(*check->u.m.def_mask))
+
/* Check RAW, WAR, and WAW dependencies on the register operands */
#define CHECK_REG_DEP(use, def, check) (def.Intersects(*check->u.m.use_mask)) || \
(use.Union(def).Intersects(*check->u.m.def_mask))
+/* Load Store Elimination filter:
+ * - Wide Load/Store
+ * - Exclusive Load/Store
+ * - Quad operand Load/Store
+ * - List Load/Store
+ * - IT blocks
+ * - Branch
+ * - Dmb
+ */
+#define LOAD_STORE_FILTER(flags) ((flags & (IS_QUAD_OP|IS_STORE)) == (IS_QUAD_OP|IS_STORE) || \
+ (flags & (IS_QUAD_OP|IS_LOAD)) == (IS_QUAD_OP|IS_LOAD) || \
+ (flags & REG_USE012) == REG_USE012 || \
+ (flags & REG_DEF01) == REG_DEF01 || \
+ (flags & REG_DEF_LIST0) || \
+ (flags & REG_DEF_LIST1) || \
+ (flags & REG_USE_LIST0) || \
+ (flags & REG_USE_LIST1) || \
+ (flags & REG_DEF_FPCS_LIST0) || \
+ (flags & REG_DEF_FPCS_LIST2) || \
+ (flags & REG_USE_FPCS_LIST0) || \
+ (flags & REG_USE_FPCS_LIST2) || \
+ (flags & IS_VOLATILE) || \
+ (flags & IS_BRANCH) || \
+ (flags & IS_IT))
+
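Both the candidate instruction and every scanned instruction are screened with this filter in ApplyLoadStoreElimination below, roughly as:
    uint64_t flags = GetTargetInstFlags(lir->opcode);
    if (LOAD_STORE_FILTER(flags)) {
      continue;  // Too complex a shape (wide/exclusive/list/IT/branch/dmb) - skip it.
    }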
/* Scheduler heuristics */
#define MAX_HOIST_DISTANCE 20
#define LDLD_DISTANCE 4
@@ -43,6 +71,7 @@
/* Insert a move to replace the load */
LIR* move_lir;
move_lir = OpRegCopyNoInsert(dest, src);
+ move_lir->dalvik_offset = orig_lir->dalvik_offset;
/*
* Insert the converted instruction after the original since the
* optimization is scanning in the top-down order and the new instruction
@@ -52,8 +81,53 @@
InsertLIRAfter(orig_lir, move_lir);
}
+void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
+ LOG(INFO) << type;
+ LOG(INFO) << "Check LIR:";
+ DumpLIRInsn(check_lir, 0);
+ LOG(INFO) << "This LIR:";
+ DumpLIRInsn(this_lir, 0);
+}
+
+inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id) {
+ DCHECK(RegStorage::SameRegType(lir->operands[0], reg_id));
+ RegStorage dest_reg, src_reg;
+
+ /* Same Register - Nop */
+ if (lir->operands[0] == reg_id) {
+ NopLIR(lir);
+ return;
+ }
+
+ /* Different Register - Move + Nop */
+ switch (reg_id & RegStorage::kShapeTypeMask) {
+ case RegStorage::k32BitSolo | RegStorage::kCoreRegister:
+ dest_reg = RegStorage::Solo32(lir->operands[0]);
+ src_reg = RegStorage::Solo32(reg_id);
+ break;
+ case RegStorage::k64BitSolo | RegStorage::kCoreRegister:
+ dest_reg = RegStorage::Solo64(lir->operands[0]);
+ src_reg = RegStorage::Solo64(reg_id);
+ break;
+ case RegStorage::k32BitSolo | RegStorage::kFloatingPoint:
+ dest_reg = RegStorage::FloatSolo32(lir->operands[0]);
+ src_reg = RegStorage::FloatSolo32(reg_id);
+ break;
+ case RegStorage::k64BitSolo | RegStorage::kFloatingPoint:
+ dest_reg = RegStorage::FloatSolo64(lir->operands[0]);
+ src_reg = RegStorage::FloatSolo64(reg_id);
+ break;
+ default:
+ LOG(INFO) << "Load Store: Unsuported register type!";
+ return;
+ }
+ ConvertMemOpIntoMove(lir, dest_reg, src_reg);
+ NopLIR(lir);
+ return;
+}
+
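A comment-only worked example of EliminateLoad on a duplicated literal load (LIR shown schematically):
    //   ldr r2, [pc, #LiteralA]     <- this_lir, native_reg_id = r2
    //   ...                            r2 and the literal pool slot untouched in between
    //   ldr r5, [pc, #LiteralA]     <- check_lir, same target, compatible register type
    // EliminateLoad(check_lir, r2) rewrites the second load into "mov r5, r2" via
    // ConvertMemOpIntoMove() and then NopLIR()s it; had the destination also been r2,
    // the load would simply have been NopLIR()'d.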
/*
- * Perform a pass of top-down walk, from the second-last instruction in the
+ * Perform a top-down walk, from the first to the last instruction in the
* superblock, to eliminate redundant loads and stores.
*
* An earlier load can eliminate a later load iff
@@ -66,213 +140,172 @@
* 2) The native register is not clobbered in between
* 3) The memory location is not written to in between
*
- * A later store can be eliminated by an earlier store iff
+ * An earlier store can eliminate a later store iff
* 1) They are must-aliases
* 2) The memory location is not written to in between
*/
void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
- LIR* this_lir;
+ LIR* this_lir, *check_lir;
+ std::vector<int> alias_list;
if (head_lir == tail_lir) {
return;
}
- for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
- if (IsPseudoLirOp(this_lir->opcode)) {
+ for (this_lir = head_lir; this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
+ if (this_lir->flags.is_nop || IsPseudoLirOp(this_lir->opcode)) {
continue;
}
- int sink_distance = 0;
-
uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
+ /* Target LIR - skip if instr is:
+ * - NOP
+ * - Branch
+ * - Load and store
+ * - Wide load
+ * - Wide store
+ * - Exclusive load/store
+ */
+ if (LOAD_STORE_FILTER(target_flags) ||
+ ((target_flags & (IS_LOAD | IS_STORE)) == (IS_LOAD | IS_STORE)) ||
+ !(target_flags & (IS_LOAD | IS_STORE))) {
+ continue;
+ }
+ int native_reg_id = this_lir->operands[0];
+ int dest_reg_id = this_lir->operands[1];
+ bool is_this_lir_load = target_flags & IS_LOAD;
+ ResourceMask this_mem_mask = kEncodeMem.Intersection(this_lir->u.m.use_mask->Union(
+ *this_lir->u.m.def_mask));
- /* Skip non-interesting instructions */
- if ((this_lir->flags.is_nop == true) ||
- (target_flags & IS_BRANCH) ||
- ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) || // Skip wide loads.
- ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
- (REG_USE0 | REG_USE1 | REG_USE2)) || // Skip wide stores.
- // Skip instructions that are neither loads or stores.
- !(target_flags & (IS_LOAD | IS_STORE)) ||
- // Skip instructions that do both load and store.
- ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
+ /* Memory region */
+ if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg)) &&
+ (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeHeapRef)))) {
continue;
}
- int native_reg_id;
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- // If x86, location differs depending on whether memory/reg operation.
- native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
- } else {
- native_reg_id = this_lir->operands[0];
- }
- bool is_this_lir_load = target_flags & IS_LOAD;
- LIR* check_lir;
- /* Use the mem mask to determine the rough memory location */
- ResourceMask this_mem_mask = kEncodeMem.Intersection(
- this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask));
-
- /*
- * Currently only eliminate redundant ld/st for constant and Dalvik
- * register accesses.
- */
- if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg))) {
+ /* Does not redefine the address */
+ if (this_lir->u.m.def_mask->Intersects(*this_lir->u.m.use_mask)) {
continue;
}
ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
+ ResourceMask stop_use_reg_mask = this_lir->u.m.use_mask->Without(kEncodeMem);
- /*
- * Add pc to the resource mask to prevent this instruction
- * from sinking past branch instructions. Also take out the memory
- * region bits since stop_mask is used to check data/control
- * dependencies.
- *
- * Note: on x86(-64) and Arm64 we use the IsBranch bit, as the PC is not exposed.
- */
- ResourceMask pc_encoding = GetPCUseDefEncoding();
- if (pc_encoding == kEncodeNone) {
- // TODO: Stop the abuse of kIsBranch as a bit specification for ResourceMask.
- pc_encoding = ResourceMask::Bit(kIsBranch);
+ /* The ARM backend can load/store PC */
+ ResourceMask uses_pc = GetPCUseDefEncoding();
+ if (uses_pc.Intersects(this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask))) {
+ continue;
}
- ResourceMask stop_use_reg_mask = pc_encoding.Union(*this_lir->u.m.use_mask).
- Without(kEncodeMem);
+ /* Initialize alias list */
+ alias_list.clear();
+ ResourceMask alias_reg_list_mask = kEncodeNone;
+ if (!this_mem_mask.Intersects(kEncodeMem) && !this_mem_mask.Intersects(kEncodeLiteral)) {
+ alias_list.push_back(dest_reg_id);
+ SetupRegMask(&alias_reg_list_mask, dest_reg_id);
+ }
+
+ /* Scan through the BB for possible elimination candidates */
for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
- /*
- * Skip already dead instructions (whose dataflow information is
- * outdated and misleading).
- */
if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
continue;
}
- ResourceMask check_mem_mask = kEncodeMem.Intersection(
- check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask));
- ResourceMask alias_condition = this_mem_mask.Intersection(check_mem_mask);
- bool stop_here = false;
+ if (uses_pc.Intersects(check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask))) {
+ break;
+ }
- /*
- * Potential aliases seen - check the alias relations
- */
+ ResourceMask check_mem_mask = kEncodeMem.Intersection(check_lir->u.m.use_mask->Union(
+ *check_lir->u.m.def_mask));
+ ResourceMask alias_mem_mask = this_mem_mask.Intersection(check_mem_mask);
uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
- // TUNING: Support instructions with multiple register targets.
- if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
+ bool stop_here = false;
+ bool pass_over = false;
+
+ /* Check LIR - skip if instr is:
+ * - Wide Load
+ * - Wide Store
+ * - Branch
+ * - Dmb
+ * - Exclusive load/store
+ * - IT blocks
+ * - Quad loads
+ */
+ if (LOAD_STORE_FILTER(check_flags)) {
stop_here = true;
- } else if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
- bool is_check_lir_load = check_flags & IS_LOAD;
- if (alias_condition.Equals(kEncodeLiteral)) {
- /*
- * Should only see literal loads in the instruction
- * stream.
- */
- DCHECK(!(check_flags & IS_STORE));
- /* Same value && same register type */
- if (check_lir->flags.alias_info == this_lir->flags.alias_info &&
- RegStorage::SameRegType(check_lir->operands[0], native_reg_id)) {
- /*
- * Different destination register - insert
- * a move
- */
- if (check_lir->operands[0] != native_reg_id) {
- // TODO: update for 64-bit regs.
- ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
- RegStorage::Solo32(native_reg_id));
- }
- NopLIR(check_lir);
- }
- } else if (alias_condition.Equals(kEncodeDalvikReg)) {
- /* Must alias */
- if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
- /* Only optimize compatible registers */
- bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
- if ((is_this_lir_load && is_check_lir_load) ||
- (!is_this_lir_load && is_check_lir_load)) {
- /* RAR or RAW */
- if (reg_compatible) {
- /*
- * Different destination register -
- * insert a move
- */
- if (check_lir->operands[0] != native_reg_id) {
- // TODO: update for 64-bit regs.
- ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
- RegStorage::Solo32(native_reg_id));
- }
- NopLIR(check_lir);
- } else {
- /*
- * Destinaions are of different types -
- * something complicated going on so
- * stop looking now.
- */
- stop_here = true;
- }
- } else if (is_this_lir_load && !is_check_lir_load) {
- /* WAR - register value is killed */
- stop_here = true;
- } else if (!is_this_lir_load && !is_check_lir_load) {
- /* WAW - nuke the earlier store */
- NopLIR(this_lir);
- stop_here = true;
- }
- /* Partial overlap */
- } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
- /*
- * It is actually ok to continue if check_lir
- * is a read. But it is hard to make a test
- * case for this so we just stop here to be
- * conservative.
- */
- stop_here = true;
+ /* Possible alias or result of earlier pass */
+ } else if (check_flags & IS_MOVE) {
+ for (auto &reg : alias_list) {
+ if (RegStorage::RegNum(check_lir->operands[1]) == RegStorage::RegNum(reg)) {
+ pass_over = true;
+ alias_list.push_back(check_lir->operands[0]);
+ SetupRegMask(&alias_reg_list_mask, check_lir->operands[0]);
}
}
- /* Memory content may be updated. Stop looking now. */
+ /* Memory regions */
+ } else if (!alias_mem_mask.Equals(kEncodeNone)) {
+ DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+ bool is_check_lir_load = check_flags & IS_LOAD;
+ bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
+
+ if (!alias_mem_mask.Intersects(kEncodeMem) && alias_mem_mask.Equals(kEncodeLiteral)) {
+ DCHECK(check_flags & IS_LOAD);
+ /* Same value && same register type */
+ if (reg_compatible && (this_lir->target == check_lir->target)) {
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LITERAL"));
+ EliminateLoad(check_lir, native_reg_id);
+ }
+ } else if (((alias_mem_mask.Equals(kEncodeDalvikReg)) || (alias_mem_mask.Equals(kEncodeHeapRef))) &&
+ alias_reg_list_mask.Intersects((check_lir->u.m.use_mask)->Without(kEncodeMem))) {
+ bool same_offset = (GetInstructionOffset(this_lir) == GetInstructionOffset(check_lir));
+ if (same_offset && !is_check_lir_load) {
+ if (check_lir->operands[0] != native_reg_id) {
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "STORE STOP"));
+ stop_here = true;
+ break;
+ }
+ }
+
+ if (reg_compatible && same_offset &&
+ ((is_this_lir_load && is_check_lir_load) /* LDR - LDR */ ||
+ (!is_this_lir_load && is_check_lir_load) /* STR - LDR */ ||
+ (!is_this_lir_load && !is_check_lir_load) /* STR - STR */)) {
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LOAD STORE"));
+ EliminateLoad(check_lir, native_reg_id);
+ }
+ } else {
+ /* Unsupported memory region */
+ }
+ }
+
+ if (pass_over) {
+ continue;
+ }
+
+ if (stop_here == false) {
+ bool stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_list_mask, check_lir);
+ if (stop_alias) {
+ /* Scan through the alias list and remove any register redefined by check_lir. */
+ for (auto &reg : alias_list) {
+ stop_alias = false;
+ ResourceMask alias_reg_mask = kEncodeNone;
+ SetupRegMask(&alias_reg_mask, reg);
+ stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_mask, check_lir);
+ if (stop_alias) {
+ ClearRegMask(&alias_reg_list_mask, reg);
+ alias_list.erase(std::remove(alias_list.begin(), alias_list.end(),
+ reg), alias_list.end());
+ }
+ }
+ }
+ ResourceMask stop_search_mask = stop_def_reg_mask.Union(stop_use_reg_mask);
+ stop_search_mask = stop_search_mask.Union(alias_reg_list_mask);
+ stop_here = LOAD_STORE_CHECK_REG_DEP(stop_search_mask, check_lir);
if (stop_here) {
break;
- /* The check_lir has been transformed - check the next one */
- } else if (check_lir->flags.is_nop) {
- continue;
}
- }
-
-
- /*
- * this and check LIRs have no memory dependency. Now check if
- * their register operands have any RAW, WAR, and WAW
- * dependencies. If so, stop looking.
- */
- if (stop_here == false) {
- stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
- }
-
- if (stop_here == true) {
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- // Prevent stores from being sunk between ops that generate ccodes and
- // ops that use them.
- uint64_t flags = GetTargetInstFlags(check_lir->opcode);
- if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
- check_lir = PREV_LIR(check_lir);
- sink_distance--;
- }
- }
- DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
- /* Only sink store instructions */
- if (sink_distance && !is_this_lir_load) {
- LIR* new_store_lir =
- static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
- *new_store_lir = *this_lir;
- /*
- * Stop point found - insert *before* the check_lir
- * since the instruction list is scanned in the
- * top-down order.
- */
- InsertLIRBefore(check_lir, new_store_lir);
- NopLIR(this_lir);
- }
+ } else {
break;
- } else if (!check_lir->flags.is_nop) {
- sink_distance++;
}
}
}
@@ -385,7 +418,7 @@
/* Found a new place to put the load - move it here */
if (stop_here == true) {
- DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir "HOIST STOP"));
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "HOIST STOP"));
break;
}
}
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 26ea6a8..4577a4c 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -61,8 +61,7 @@
* done:
*
*/
-void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
- RegLocation rl_src) {
+void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
@@ -139,8 +138,7 @@
* jr rRA
* done:
*/
-void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
- RegLocation rl_src) {
+void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
@@ -245,7 +243,7 @@
GenBarrier();
NewLIR0(kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot with the helper load
- RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData));
+ RegStorage r_tgt = LoadHelper(kQuickHandleFillArrayData);
GenBarrier(); // Scheduling barrier
// Construct BaseLabel and set up table base register
@@ -332,9 +330,9 @@
m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
m2l_->ClobberCallerSave();
- ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
- RegStorage r_tgt = m2l_->CallHelperSetup(func_offset); // Doesn't clobber LR.
- m2l_->CallHelper(r_tgt, func_offset, false /* MarkSafepointPC */, false /* UseLink */);
+ RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow); // Doesn't clobber LR.
+ m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
+ false /* UseLink */);
}
private:
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 2c33377..43cbde7 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -31,22 +31,19 @@
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
- LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_src, OpSize size) OVERRIDE;
+ LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+ LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
@@ -83,8 +80,6 @@
size_t GetInsnSize(LIR* lir) OVERRIDE;
bool IsUnconditionalBranch(LIR* lir);
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
// Get the register class for load/store of a field.
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
@@ -97,12 +92,6 @@
RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_shift);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -110,21 +99,15 @@
void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
bool GenInlinedSqrt(CallInfo* info);
bool GenInlinedPeek(CallInfo* info, OpSize size);
bool GenInlinedPoke(CallInfo* info, OpSize size);
- void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div);
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -136,14 +119,17 @@
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
// Required for target - single operation generators.
@@ -161,7 +147,6 @@
void OpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
@@ -169,14 +154,9 @@
LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
- LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
- LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
LIR* OpVldm(RegStorage r_base, int count);
LIR* OpVstm(RegStorage r_base, int count);
- void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
void OpRegCopyWide(RegStorage dest, RegStorage src);
- void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
- void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
// TODO: collapse r_dest.
LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -199,7 +179,15 @@
return false; // Wide FPRs are formed by pairing.
}
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
private:
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+
void ConvertShortToLongBranch(LIR* lir);
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, bool check_zero);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 7087be9..3a4128a 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -50,8 +50,7 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmodf), rl_src1, rl_src2,
- false);
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
rl_result = GetReturn(kFPReg);
StoreValue(rl_dest, rl_result);
return;
@@ -93,8 +92,7 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmod), rl_src1, rl_src2,
- false);
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
rl_result = GetReturnWide(kFPReg);
StoreValueWide(rl_dest, rl_result);
return;
@@ -133,22 +131,22 @@
op = kMipsFcvtdw;
break;
case Instruction::FLOAT_TO_INT:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2iz), rl_dest, rl_src);
+ GenConversionCall(kQuickF2iz, rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_INT:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2iz), rl_dest, rl_src);
+ GenConversionCall(kQuickD2iz, rl_dest, rl_src);
return;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pL2d), rl_dest, rl_src);
+ GenConversionCall(kQuickL2d, rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
+ GenConversionCall(kQuickF2l, rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pL2f), rl_dest, rl_src);
+ GenConversionCall(kQuickL2f, rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
+ GenConversionCall(kQuickD2l, rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -170,25 +168,26 @@
void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
bool wide = true;
- ThreadOffset<4> offset(-1);
+ QuickEntrypointEnum target;
switch (opcode) {
case Instruction::CMPL_FLOAT:
- offset = QUICK_ENTRYPOINT_OFFSET(4, pCmplFloat);
+ target = kQuickCmplFloat;
wide = false;
break;
case Instruction::CMPG_FLOAT:
- offset = QUICK_ENTRYPOINT_OFFSET(4, pCmpgFloat);
+ target = kQuickCmpgFloat;
wide = false;
break;
case Instruction::CMPL_DOUBLE:
- offset = QUICK_ENTRYPOINT_OFFSET(4, pCmplDouble);
+ target = kQuickCmplDouble;
break;
case Instruction::CMPG_DOUBLE:
- offset = QUICK_ENTRYPOINT_OFFSET(4, pCmpgDouble);
+ target = kQuickCmpgDouble;
break;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
+ target = kQuickCmplFloat;
}
FlushAllRegs();
LockCallTemps();
@@ -201,7 +200,7 @@
LoadValueDirectFixed(rl_src1, rs_rMIPS_FARG0);
LoadValueDirectFixed(rl_src2, rs_rMIPS_FARG2);
}
- RegStorage r_tgt = LoadHelper(offset);
+ RegStorage r_tgt = LoadHelper(target);
// NOTE: not a safepoint
OpReg(kOpBlx, r_tgt);
RegLocation rl_result = GetReturn(kCoreReg);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index c3a4c17..ea56989 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -215,6 +215,18 @@
}
}
+void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) {
+ // Implement as a branch-over.
+ // TODO: Conditional move?
+ LoadConstant(rs_dest, false_val); // Favors false.
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LoadConstant(rs_dest, true_val);
+ LIR* target_label = NewLIR0(kPseudoTargetLabel);
+ ne_branchover->target = target_label;
+}
+
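The emitted MIPS sequence is a plain branch-over, schematically:
    //   li      rs_dest, false_val        # favored value
    //   b<code> left_op, right_op, done   # conditional branch over the true case
    //   li      rs_dest, true_val
    // done: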
void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
@@ -261,24 +273,21 @@
return rl_dest;
}
-void MipsMir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
- int offset) {
- LOG(FATAL) << "Unexpected use of OpLea for Arm";
-}
-
-void MipsMir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
- LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
-}
-
-void MipsMir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
- UNIMPLEMENTED(FATAL) << "Should not be called.";
-}
-
bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK_NE(cu_->instruction_set, kThumb2);
return false;
}
+bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
+ // TODO - add Mips implementation
+ return false;
+}
+
+bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
+ // TODO - add Mips implementation
+ return false;
+}
+
bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
DCHECK_NE(cu_->instruction_set, kThumb2);
return false;
@@ -383,11 +392,6 @@
}
-void MipsMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenMulLong for Mips";
-}
-
void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -432,13 +436,27 @@
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
- LOG(FATAL) << "Unexpected use GenNotLong()";
-}
+void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ switch (opcode) {
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
-void MipsMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div) {
- LOG(FATAL) << "Unexpected use GenDivRemLong()";
+ default:
+ break;
+ }
+
+ // Fallback for all other ops.
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
}
void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -461,22 +479,6 @@
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1,
- RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
-}
-
-void MipsMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
-}
-
-void MipsMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
-}
-
/*
* Generate array load
*/
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index a5b7824..bc91fbc 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -476,17 +476,12 @@
* ensure that all branch instructions can be restarted if
* there is a trap in the shadow. Allocate a temp register.
*/
-RegStorage MipsMir2Lir::LoadHelper(ThreadOffset<4> offset) {
+RegStorage MipsMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
// NOTE: native pointer.
- LoadWordDisp(rs_rMIPS_SELF, offset.Int32Value(), rs_rT9);
+ LoadWordDisp(rs_rMIPS_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rT9);
return rs_rT9;
}
-RegStorage MipsMir2Lir::LoadHelper(ThreadOffset<8> offset) {
- UNIMPLEMENTED(FATAL) << "Should not be called.";
- return RegStorage::InvalidReg();
-}
-
LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
RegStorage tmp = AllocTemp();
// NOTE: native pointer.
@@ -496,6 +491,39 @@
return inst;
}
+LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
+ DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
+ DCHECK(r_dest.IsPair());
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
+ RegStorage r_tgt = LoadHelper(kQuickA64Load);
+ LIR *ret = OpReg(kOpBlx, r_tgt);
+ RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
+ OpRegCopyWide(r_dest, reg_ret);
+ return ret;
+}
+
+LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
+ DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
+ DCHECK(r_src.IsPair());
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers
+ RegStorage temp_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+ RegStorage temp_value = AllocTempWide();
+ OpRegCopyWide(temp_value, r_src);
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegCopy(reg_ptr, temp_ptr);
+ RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ OpRegCopyWide(reg_value, temp_value);
+ FreeTemp(temp_ptr);
+ FreeTemp(temp_value);
+ RegStorage r_tgt = LoadHelper(kQuickA64Store);
+ return OpReg(kOpBlx, r_tgt);
+}
+
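The calling convention these two helpers assume mirrors the register moves above: the address goes in kArg0, a store's value in the kArg2/kArg3 pair, and a load's result comes back in the kRet0/kRet1 pair:
    //   GenAtomic64Load(r_base, displacement, r_dest);   // address -> kArg0, result <- kRet0/kRet1
    //   GenAtomic64Store(r_base, displacement, r_src);   // address -> kArg0, value -> kArg2/kArg3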
void MipsMir2Lir::SpillCoreRegs() {
if (num_core_spills_ == 0) {
return;
@@ -530,17 +558,12 @@
return (lir->opcode == kMipsB);
}
-bool MipsMir2Lir::SupportsVolatileLoadStore(OpSize size) {
- // No support for 64-bit atomic load/store on mips.
- return size != k64 && size != kDouble;
-}
-
RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
if (UNLIKELY(is_volatile)) {
- // On Mips, atomic 64-bit load/store requires an fp register.
+ // On Mips, atomic 64-bit load/store requires a core register.
// Smaller aligned load/store is atomic for both core and fp registers.
if (size == k64 || size == kDouble) {
- return kFPReg;
+ return kCoreReg;
}
}
// TODO: Verify that both core and fp registers are suitable for smaller sizes.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 75d3c5d..7178ede 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -551,8 +551,9 @@
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, VolatileKind is_volatile) {
- if (is_volatile == kVolatile) {
- DCHECK(size != k64 && size != kDouble);
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+ // Do atomic 64-bit load.
+ return GenAtomic64Load(r_base, displacement, r_dest);
}
// TODO: base this on target.
@@ -654,17 +655,21 @@
LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) {
if (is_volatile == kVolatile) {
- DCHECK(size != k64 && size != kDouble);
// Ensure that prior accesses become visible to other threads first.
GenMemBarrier(kAnyStore);
}
- // TODO: base this on target.
- if (size == kWord) {
- size = k32;
- }
LIR* store;
- store = StoreBaseDispBody(r_base, displacement, r_src, size);
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+ // Do atomic 64-bit store.
+ store = GenAtomic64Store(r_base, displacement, r_src);
+ } else {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
+ store = StoreBaseDispBody(r_base, displacement, r_src, size);
+ }
if (UNLIKELY(is_volatile == kVolatile)) {
// Preserve order with respect to any subsequent volatile loads.
@@ -675,41 +680,18 @@
return store;
}
-LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
- LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
- return NULL;
-}
-
-LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
- UNIMPLEMENTED(FATAL) << "Should not be called.";
- return nullptr;
-}
-
LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
return NULL;
}
-LIR* MipsMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_src, OpSize size) {
- LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
- return NULL;
-}
-
-LIR* MipsMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
- LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
- return NULL;
-}
-
-LIR* MipsMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_dest, OpSize size) {
- LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
- return NULL;
-}
-
LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
return NULL;
}
+LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ return OpReg(op, r_tgt);
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 9ce5bb7..ff5a46f 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -147,6 +147,15 @@
}
/*
+ * Clear the corresponding bit(s).
+ */
+inline void Mir2Lir::ClearRegMask(ResourceMask* mask, int reg) {
+ DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
+ DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
+ *mask = mask->ClearBits(reginfo_map_.Get(reg)->DefUseMask());
+}
+
+/*
* Set up the proper fields in the resource mask
*/
inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index edb3b23..e519011 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -226,9 +226,6 @@
bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
OpSize size = LoadStoreOpSize(wide, ref);
- if (data.is_volatile && !SupportsVolatileLoadStore(size)) {
- return false;
- }
// Point of no return - no aborts after this
GenPrintLabel(mir);
@@ -273,9 +270,6 @@
bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
OpSize size = LoadStoreOpSize(wide, ref);
- if (data.is_volatile && !SupportsVolatileLoadStore(size)) {
- return false;
- }
// Point of no return - no aborts after this
GenPrintLabel(mir);
@@ -932,11 +926,11 @@
case Instruction::XOR_INT:
case Instruction::XOR_INT_2ADDR:
if (rl_src[0].is_const &&
- InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]))) {
+ InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]), opcode)) {
GenArithOpIntLit(opcode, rl_dest, rl_src[1],
mir_graph_->ConstantValue(rl_src[0].orig_sreg));
} else if (rl_src[1].is_const &&
- InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+ InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
GenArithOpIntLit(opcode, rl_dest, rl_src[0],
mir_graph_->ConstantValue(rl_src[1].orig_sreg));
} else {
@@ -957,7 +951,7 @@
case Instruction::USHR_INT:
case Instruction::USHR_INT_2ADDR:
if (rl_src[1].is_const &&
- InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+ InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
} else {
GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
@@ -1317,4 +1311,9 @@
rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
}
+size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
+ UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+ return 0;
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index c68ad6b..4b8f794 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -28,10 +28,13 @@
#include "driver/compiler_driver.h"
#include "instruction_set.h"
#include "leb128.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "safe_map.h"
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
+#include "utils/arena_containers.h"
#include "utils/growable_array.h"
+#include "utils/stack_checks.h"
namespace art {
@@ -49,6 +52,7 @@
#define IS_BINARY_OP (1ULL << kIsBinaryOp)
#define IS_BRANCH (1ULL << kIsBranch)
#define IS_IT (1ULL << kIsIT)
+#define IS_MOVE (1ULL << kIsMoveOp)
#define IS_LOAD (1ULL << kMemLoad)
#define IS_QUAD_OP (1ULL << kIsQuadOp)
#define IS_QUIN_OP (1ULL << kIsQuinOp)
@@ -56,6 +60,7 @@
#define IS_STORE (1ULL << kMemStore)
#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP (1ULL << kIsUnaryOp)
+#define IS_VOLATILE (1ULL << kMemVolatile)
#define NEEDS_FIXUP (1ULL << kPCRelFixup)
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
@@ -92,6 +97,20 @@
#define REG_USE_HI (1ULL << kUseHi)
#define REG_DEF_LO (1ULL << kDefLo)
#define REG_DEF_HI (1ULL << kDefHi)
+#define SCALED_OFFSET_X0 (1ULL << kMemScaledx0)
+#define SCALED_OFFSET_X2 (1ULL << kMemScaledx2)
+#define SCALED_OFFSET_X4 (1ULL << kMemScaledx4)
+
+// Special load/stores
+#define IS_LOADX (IS_LOAD | IS_VOLATILE)
+#define IS_LOAD_OFF (IS_LOAD | SCALED_OFFSET_X0)
+#define IS_LOAD_OFF2 (IS_LOAD | SCALED_OFFSET_X2)
+#define IS_LOAD_OFF4 (IS_LOAD | SCALED_OFFSET_X4)
+
+#define IS_STOREX (IS_STORE | IS_VOLATILE)
+#define IS_STORE_OFF (IS_STORE | SCALED_OFFSET_X0)
+#define IS_STORE_OFF2 (IS_STORE | SCALED_OFFSET_X2)
+#define IS_STORE_OFF4 (IS_STORE | SCALED_OFFSET_X4)
// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
@@ -205,41 +224,14 @@
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))
-// Size of a frame that we definitely consider large. Anything larger than this should
-// definitely get a stack overflow check.
-static constexpr size_t kLargeFrameSize = 2 * KB;
-
-// Size of a frame that should be small. Anything leaf method smaller than this should run
-// without a stack overflow check.
-// The constant is from experience with frameworks code.
-static constexpr size_t kSmallFrameSize = 1 * KB;
-
-// Determine whether a frame is small or large, used in the decision on whether to elide a
-// stack overflow check on method entry.
-//
-// A frame is considered large when it's either above kLargeFrameSize, or a quarter of the
-// overflow-usable stack space.
-static constexpr bool IsLargeFrame(size_t size, InstructionSet isa) {
- return size >= kLargeFrameSize || size >= GetStackOverflowReservedBytes(isa) / 4;
-}
-
-// We want to ensure that on all systems kSmallFrameSize will lead to false in IsLargeFrame.
-COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kArm),
- kSmallFrameSize_is_not_a_small_frame_arm);
-COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kArm64),
- kSmallFrameSize_is_not_a_small_frame_arm64);
-COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kMips),
- kSmallFrameSize_is_not_a_small_frame_mips);
-COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kX86),
- kSmallFrameSize_is_not_a_small_frame_x86);
-COMPILE_ASSERT(!IsLargeFrame(kSmallFrameSize, kX86_64),
- kSmallFrameSize_is_not_a_small_frame_x64_64);
-
class Mir2Lir : public Backend {
public:
static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
static constexpr bool kReportSizeError = true && kIsDebugBuild;
+ // TODO: If necessary, this could be made target-dependent.
+ static constexpr uint16_t kSmallSwitchThreshold = 5;
+
/*
* Auxiliary information describing the location of data embedded in the Dalvik
* byte code stream.
@@ -580,6 +572,12 @@
virtual ~Mir2Lir() {}
+ /**
+ * @brief Decodes the LIR offset.
+ * @return Returns the scaled offset of LIR.
+ */
+ virtual size_t GetInstructionOffset(LIR* lir);
+
int32_t s4FromSwitchData(const void* switch_data) {
return *reinterpret_cast<const int32_t*>(switch_data);
}
@@ -669,7 +667,10 @@
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
void SetupRegMask(ResourceMask* mask, int reg);
+ void ClearRegMask(ResourceMask* mask, int reg);
void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
+ void EliminateLoad(LIR* lir, int reg_id);
+ void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
void DumpPromotionMap();
void CodegenDump();
LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
@@ -684,6 +685,7 @@
LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
+ LIR* ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx);
LIR* AddWordData(LIR* *constant_list_p, int value);
LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
void ProcessSwitchTables();
@@ -745,14 +747,13 @@
virtual RegStorage AllocPreservedSingle(int s_reg);
virtual RegStorage AllocPreservedDouble(int s_reg);
RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
- virtual RegStorage AllocFreeTemp();
- virtual RegStorage AllocTemp();
- virtual RegStorage AllocTempWide();
- virtual RegStorage AllocTempRef();
- virtual RegStorage AllocTempSingle();
- virtual RegStorage AllocTempDouble();
- virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class);
- virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
+ virtual RegStorage AllocTemp(bool required = true);
+ virtual RegStorage AllocTempWide(bool required = true);
+ virtual RegStorage AllocTempRef(bool required = true);
+ virtual RegStorage AllocTempSingle(bool required = true);
+ virtual RegStorage AllocTempDouble(bool required = true);
+ virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true);
+ virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true);
void FlushReg(RegStorage reg);
void FlushRegWide(RegStorage reg);
RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
@@ -835,9 +836,9 @@
void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
void MarkPossibleStackOverflowException();
void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
- LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
+ virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
RegLocation rl_src2, LIR* taken, LIR* fall_through);
void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
@@ -871,11 +872,9 @@
RegLocation rl_src1, RegLocation rl_shift);
void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src, int lit);
- void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2);
- template <size_t pointer_size>
- void GenConversionCall(ThreadOffset<pointer_size> func_offset, RegLocation rl_dest,
- RegLocation rl_src);
+ virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
virtual void GenSuspendTest(int opt_flags);
virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);
@@ -885,66 +884,44 @@
RegLocation rl_src1, RegLocation rl_src2);
// Shared by all targets - implemented in gen_invoke.cc.
- template <size_t pointer_size>
- LIR* CallHelper(RegStorage r_tgt, ThreadOffset<pointer_size> helper_offset, bool safepoint_pc,
+ LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
bool use_link = true);
- RegStorage CallHelperSetup(ThreadOffset<4> helper_offset);
- RegStorage CallHelperSetup(ThreadOffset<8> helper_offset);
- template <size_t pointer_size>
- void CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_offset, RegLocation arg0,
+ RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);
+
+ void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
+ void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
+ void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
+ void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
+ void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
- RegLocation arg1, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_offset, RegLocation arg0,
- int arg1, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0, RegStorage arg1,
+ void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0, RegLocation arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0, int arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, int arg1,
+ void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
+ void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
+ void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
- bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> helper_offset,
- RegStorage arg0, RegLocation arg2, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
- RegLocation arg0, RegLocation arg1,
- bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
- RegStorage arg1, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
- RegStorage arg1, int arg2, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
+ void CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
RegLocation arg2, bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg2,
+ void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
+ RegLocation arg1, bool safepoint_pc);
+ void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
+ RegStorage arg1, int arg2, bool safepoint_pc);
+ void CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
+ RegLocation arg2, bool safepoint_pc);
+ void CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
- int arg0, RegLocation arg1, RegLocation arg2,
+ void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
+ RegLocation arg1, RegLocation arg2,
bool safepoint_pc);
- template <size_t pointer_size>
- void CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
+ void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
RegLocation arg0, RegLocation arg1,
RegLocation arg2,
bool safepoint_pc);
@@ -982,21 +959,25 @@
*/
RegLocation InlineTargetWide(CallInfo* info);
- bool GenInlinedGet(CallInfo* info);
- bool GenInlinedCharAt(CallInfo* info);
+ bool GenInlinedReferenceGet(CallInfo* info);
+ virtual bool GenInlinedCharAt(CallInfo* info);
bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
bool GenInlinedAbsInt(CallInfo* info);
virtual bool GenInlinedAbsLong(CallInfo* info);
- virtual bool GenInlinedAbsFloat(CallInfo* info);
- virtual bool GenInlinedAbsDouble(CallInfo* info);
+ virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
+ virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
bool GenInlinedFloatCvt(CallInfo* info);
bool GenInlinedDoubleCvt(CallInfo* info);
+ virtual bool GenInlinedCeil(CallInfo* info);
+ virtual bool GenInlinedFloor(CallInfo* info);
+ virtual bool GenInlinedRint(CallInfo* info);
+ virtual bool GenInlinedRound(CallInfo* info, bool is_double);
virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
bool GenInlinedStringCompareTo(CallInfo* info);
- bool GenInlinedCurrentThread(CallInfo* info);
+ virtual bool GenInlinedCurrentThread(CallInfo* info);
bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
bool is_volatile, bool is_ordered);
@@ -1133,11 +1114,13 @@
/*
* @brief Load the Class* of a Dex Class type into the register.
+ * @param dex DexFile that contains the class type.
* @param type How the method will be invoked.
* @param register that will contain the code address.
* @note register will be passed to TargetReg to get physical register.
*/
- virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
+ virtual void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+ SpecialTargetRegister symbolic_reg);
// Routines that work for the generic case, but may be overriden by target.
/*
@@ -1148,10 +1131,12 @@
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
+ * @param target branch target (or nullptr)
+ * @param compare output for getting LIR for comparison (or nullptr)
* @returns The branch instruction that was generated.
*/
virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target);
+ int offset, int check_value, LIR* target, LIR** compare);
// Required for target - codegen helpers.
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
@@ -1159,23 +1144,18 @@
virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
virtual LIR* CheckSuspendUsingLoad() = 0;
- virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0;
- virtual RegStorage LoadHelper(ThreadOffset<8> offset) = 0;
+ virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;
virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, VolatileKind is_volatile) = 0;
virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) = 0;
- virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_dest, OpSize size) = 0;
virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) = 0;
virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) = 0;
- virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
- int displacement, RegStorage r_src, OpSize size) = 0;
virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;
// Required for target - register utilities.
@@ -1271,23 +1251,12 @@
virtual size_t GetInsnSize(LIR* lir) = 0;
virtual bool IsUnconditionalBranch(LIR* lir) = 0;
- // Check support for volatile load/store of a given size.
- virtual bool SupportsVolatileLoadStore(OpSize size) = 0;
// Get the register class for load/store of a field.
virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;
// Required for target - Dalvik-level generators.
virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) = 0;
- virtual void GenMulLong(Instruction::Code,
- RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) = 0;
- virtual void GenAddLong(Instruction::Code,
- RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) = 0;
- virtual void GenAndLong(Instruction::Code,
- RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) = 0;
virtual void GenArithOpDouble(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
@@ -1315,16 +1284,6 @@
virtual bool GenInlinedSqrt(CallInfo* info) = 0;
virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
- virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
- virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
- virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) = 0;
- virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) = 0;
- virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) = 0;
- virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div) = 0;
virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) = 0;
virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
@@ -1380,6 +1339,13 @@
virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
/**
+ * @brief Generates code to select one of the given constants depending on the given opcode.
+ */
+ virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) = 0;
+
+ /**
* @brief Used to generate a memory barrier in an architecture specific way.
* @details The last generated LIR will be considered for use as barrier. Namely,
* if the last LIR can be updated in a way where it will serve the semantics of
@@ -1395,8 +1361,19 @@
int first_bit, int second_bit) = 0;
virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
- virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
- virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
+
+ // Create code for switch statements. Will decide between short and long versions below.
+ void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+
+ // Potentially backend-specific versions of switch instructions for shorter switch statements.
+ // The default implementation will create a chained compare-and-branch.
+ virtual void GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ virtual void GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ // Backend-specific versions of switch instructions for longer switch statements.
+ virtual void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
+ virtual void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
+
virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
@@ -1421,7 +1398,6 @@
virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
- virtual LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) = 0;
virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;
/**
@@ -1461,19 +1437,16 @@
virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
RegStorage r_src2) = 0;
virtual LIR* OpTestSuspend(LIR* target) = 0;
- virtual LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) = 0;
- virtual LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) = 0;
virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
- virtual void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
- int offset) = 0;
virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
- virtual void OpTlsCmp(ThreadOffset<4> offset, int val) = 0;
- virtual void OpTlsCmp(ThreadOffset<8> offset, int val) = 0;
virtual bool InexpensiveConstantInt(int32_t value) = 0;
virtual bool InexpensiveConstantFloat(int32_t value) = 0;
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
+ virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+ return InexpensiveConstantInt(value);
+ }
// May be optimized by targets.
virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
@@ -1482,6 +1455,8 @@
// Temp workaround
void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);
+ virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;
+
protected:
Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
@@ -1535,16 +1510,28 @@
void AddSlowPath(LIRSlowPath* slowpath);
- virtual void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
- bool type_known_abstract, bool use_declaring_class,
- bool can_assume_type_is_in_dex_cache,
- uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src);
/*
- * @brief Generate the debug_frame FDE information if possible.
- * @returns pointer to vector containg CFE information, or NULL.
+ *
+ * @brief Set up the instanceof check for a class.
+ * @param needs_access_check 'true' if we must check the access.
+ * @param type_known_final 'true' if the type is known to be a final class.
+ * @param type_known_abstract 'true' if the type is known to be an abstract class.
+ * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
+ * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
+ * @param type_idx Type index to use if use_declaring_class is 'false'.
+ * @param rl_dest Result to be set to 0 or 1.
+ * @param rl_src Object to be tested.
*/
- virtual std::vector<uint8_t>* ReturnCallFrameInformation();
+ void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
+ bool type_known_abstract, bool use_declaring_class,
+ bool can_assume_type_is_in_dex_cache,
+ uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src);
+ /*
+ * @brief Generate the eh_frame FDE information if possible.
+ * @returns pointer to vector containing FDE information, or NULL.
+ */
+ virtual std::vector<uint8_t>* ReturnFrameDescriptionEntry();
/**
* @brief Used to insert marker that can be used to associate MIR with LIR.
@@ -1730,8 +1717,8 @@
CodeBuffer code_buffer_;
// The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
std::vector<uint8_t> encoded_mapping_table_;
- std::vector<uint32_t> core_vmap_table_;
- std::vector<uint32_t> fp_vmap_table_;
+ ArenaVector<uint32_t> core_vmap_table_;
+ ArenaVector<uint32_t> fp_vmap_table_;
std::vector<uint8_t> native_gc_map_;
int num_core_spills_;
int num_fp_spills_;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index fa1c36e..be966e1 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -408,64 +408,67 @@
return RegStorage::InvalidReg(); // No register available
}
-/* Return a temp if one is available, -1 otherwise */
-RegStorage Mir2Lir::AllocFreeTemp() {
- return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, false);
+RegStorage Mir2Lir::AllocTemp(bool required) {
+ return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, required);
}
-RegStorage Mir2Lir::AllocTemp() {
- return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, true);
-}
-
-RegStorage Mir2Lir::AllocTempWide() {
+RegStorage Mir2Lir::AllocTempWide(bool required) {
RegStorage res;
if (reg_pool_->core64_regs_.Size() != 0) {
- res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, true);
+ res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, required);
} else {
RegStorage low_reg = AllocTemp();
RegStorage high_reg = AllocTemp();
res = RegStorage::MakeRegPair(low_reg, high_reg);
}
- CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kIgnoreRef, FPCheck::kCheckNotFP);
+ if (required) {
+ CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kIgnoreRef, FPCheck::kCheckNotFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTempRef() {
- RegStorage res = AllocTempBody(*reg_pool_->ref_regs_, reg_pool_->next_ref_reg_, true);
- DCHECK(!res.IsPair());
- CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
+RegStorage Mir2Lir::AllocTempRef(bool required) {
+ RegStorage res = AllocTempBody(*reg_pool_->ref_regs_, reg_pool_->next_ref_reg_, required);
+ if (required) {
+ DCHECK(!res.IsPair());
+ CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTempSingle() {
- RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, true);
- DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
- CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+RegStorage Mir2Lir::AllocTempSingle(bool required) {
+ RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, required);
+ if (required) {
+ DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
+ CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTempDouble() {
- RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, true);
- DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
- CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+RegStorage Mir2Lir::AllocTempDouble(bool required) {
+ RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, required);
+ if (required) {
+ DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
+ CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
+ }
return res;
}
-RegStorage Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
+RegStorage Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class, bool required) {
DCHECK_NE(reg_class, kRefReg); // NOTE: the Dalvik width of a reference is always 32 bits.
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
- return AllocTempDouble();
+ return AllocTempDouble(required);
}
- return AllocTempWide();
+ return AllocTempWide(required);
}
-RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class, bool required) {
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
- return AllocTempSingle();
+ return AllocTempSingle(required);
} else if (reg_class == kRefReg) {
- return AllocTempRef();
+ return AllocTempRef(required);
}
- return AllocTemp();
+ return AllocTemp(required);
}
RegStorage Mir2Lir::FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg) {
@@ -1168,12 +1171,13 @@
} else {
counts[p_map_idx].count += use_count;
}
- } else if (!IsInexpensiveConstant(loc)) {
+ } else {
if (loc.wide && WideGPRsAreAliases()) {
- // Longs and doubles can be counted together.
i++;
}
- counts[p_map_idx].count += use_count;
+ if (!IsInexpensiveConstant(loc)) {
+ counts[p_map_idx].count += use_count;
+ }
}
}
}
@@ -1182,9 +1186,10 @@
static int SortCounts(const void *val1, const void *val2) {
const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1);
const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2);
- // Note that we fall back to sorting on reg so we get stable output
- // on differing qsort implementations (such as on host and target or
- // between local host and build servers).
+ // Note that we fall back to sorting on reg so we get stable output on differing qsort
+ // implementations (such as on host and target or between local host and build servers).
+ // Note also that if a wide val1 and a non-wide val2 have the same count, then val1 always
+ // ``loses'' (as STARTING_WIDE_SREG is or-ed in val1->s_reg).
return (op1->count == op2->count)
? (op1->s_reg - op2->s_reg)
: (op1->count < op2->count ? 1 : -1);
@@ -1227,8 +1232,8 @@
* TUNING: replace with linear scan once we have the ability
* to describe register live ranges for GC.
*/
- size_t core_reg_count_size = cu_->target64 ? num_regs * 2 : num_regs;
- size_t fp_reg_count_size = num_regs * 2;
+ size_t core_reg_count_size = WideGPRsAreAliases() ? num_regs : num_regs * 2;
+ size_t fp_reg_count_size = WideFPRsAreAliases() ? num_regs : num_regs * 2;
RefCounts *core_regs =
static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * core_reg_count_size,
kArenaAllocRegAlloc));
@@ -1258,7 +1263,6 @@
// Sum use counts of SSA regs by original Dalvik vreg.
CountRefs(core_regs, fp_regs, num_regs);
-
// Sort the count arrays
qsort(core_regs, core_reg_count_size, sizeof(RefCounts), SortCounts);
qsort(fp_regs, fp_reg_count_size, sizeof(RefCounts), SortCounts);
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 8df5b6d..93b7999 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -35,16 +35,16 @@
rm32_i32, rm32_i32_modrm, \
rm32_i8, rm32_i8_modrm) \
{ kX86 ## opname ## 8MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0, true }, #opname "8MR", "[!0r+!1d],!2r" }, \
-{ kX86 ## opname ## 8AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0, true}, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 8AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0, true }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
{ kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0, 0, 0, true }, #opname "8TR", "fs:[!0d],!1r" }, \
{ kX86 ## opname ## 8RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0, true }, #opname "8RR", "!0r,!1r" }, \
{ kX86 ## opname ## 8RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0, true }, #opname "8RM", "!0r,[!1r+!2d]" }, \
{ kX86 ## opname ## 8RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0, true }, #opname "8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
{ kX86 ## opname ## 8RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r8_rm8, 0, 0, 0, 0, 0, true }, #opname "8RT", "!0r,fs:[!1d]" }, \
{ kX86 ## opname ## 8RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, ax8_i8, 1, true }, #opname "8RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 8MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1, true }, #opname "8MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 8AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1, true }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1, true }, #opname "8TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 8MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1, false}, #opname "8MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 8AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1, false}, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1, false}, #opname "8TI", "fs:[!0d],!1d" }, \
\
{ kX86 ## opname ## 16MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0, false }, #opname "16MR", "[!0r+!1d],!2r" }, \
{ kX86 ## opname ## 16AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0, false }, #opname "16AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
@@ -170,9 +170,9 @@
{ kX86Mov8RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8A, 0, 0, 0, 0, 0, true }, "Mov8RA", "!0r,[!1r+!2r<<!3d+!4d]" },
{ kX86Mov8RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8A, 0, 0, 0, 0, 0, true }, "Mov8RT", "!0r,fs:[!1d]" },
{ kX86Mov8RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB0, 0, 0, 0, 0, 1, true }, "Mov8RI", "!0r,!1d" },
- { kX86Mov8MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC6, 0, 0, 0, 0, 1, true }, "Mov8MI", "[!0r+!1d],!2d" },
- { kX86Mov8AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC6, 0, 0, 0, 0, 1, true }, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" },
- { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1, true }, "Mov8TI", "fs:[!0d],!1d" },
+ { kX86Mov8MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC6, 0, 0, 0, 0, 1, false}, "Mov8MI", "[!0r+!1d],!2d" },
+ { kX86Mov8AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC6, 0, 0, 0, 0, 1, false}, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1, false}, "Mov8TI", "fs:[!0d],!1d" },
{ kX86Mov16MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x89, 0, 0, 0, 0, 0, false }, "Mov16MR", "[!0r+!1d],!2r" },
{ kX86Mov16AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x66, 0, 0x89, 0, 0, 0, 0, 0, false }, "Mov16AR", "[!0r+!1r<<!2d+!3d],!4r" },
@@ -271,21 +271,22 @@
{ kX86Shrd64RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { REX_W, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64RRI", "!0r,!1r,!2d" },
{ kX86Shrd64MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64MRI", "[!0r+!1d],!2r,!3d" },
- { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8RI", "!0r,!1d" },
- { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8MI", "[!0r+!1d],!2d" },
- { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
- { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16RI", "!0r,!1d" },
- { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16MI", "[!0r+!1d],!2d" },
- { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
- { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32RI", "!0r,!1d" },
- { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32MI", "[!0r+!1d],!2d" },
- { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8RI", "!0r,!1d" },
+ { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8MI", "[!0r+!1d],!2d" },
+ { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16RI", "!0r,!1d" },
+ { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16MI", "[!0r+!1d],!2d" },
+ { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32RI", "!0r,!1d" },
+ { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32MI", "[!0r+!1d],!2d" },
+ { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
{ kX86Test64RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64RI", "!0r,!1d" },
{ kX86Test64MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64MI", "[!0r+!1d],!2d" },
{ kX86Test64AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64AI", "[!0r+!1r<<!2d+!3d],!4d" },
- { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" },
+ { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" },
{ kX86Test64RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { REX_W, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test64RR", "!0r,!1r" },
+ { kX86Test32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RM", "!0r,[!1r+!2d]" },
#define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
reg, reg_kind, reg_flags, \
@@ -366,7 +367,11 @@
EXT_0F_ENCODING_MAP(Ucomiss, 0x00, 0x2E, SETS_CCODES|REG_USE0),
EXT_0F_ENCODING_MAP(Comisd, 0x66, 0x2F, SETS_CCODES|REG_USE0),
EXT_0F_ENCODING_MAP(Comiss, 0x00, 0x2F, SETS_CCODES|REG_USE0),
+ EXT_0F_ENCODING_MAP(Orpd, 0x66, 0x56, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Orps, 0x00, 0x56, REG_DEF0_USE0),
+ EXT_0F_ENCODING_MAP(Andpd, 0x66, 0x54, REG_DEF0_USE0),
+ EXT_0F_ENCODING_MAP(Andps, 0x00, 0x54, REG_DEF0_USE0),
+ EXT_0F_ENCODING_MAP(Xorpd, 0x66, 0x57, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Xorps, 0x00, 0x57, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Addsd, 0xF2, 0x58, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Addss, 0xF3, 0x58, REG_DEF0_USE0),
@@ -402,9 +407,9 @@
EXT_0F_ENCODING_MAP(Haddpd, 0x66, 0x7C, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Haddps, 0xF2, 0x7C, REG_DEF0_USE0),
- { kX86PextrbRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x14, 0, 0, 1, false }, "PextbRRI", "!0r,!1r,!2d" },
+ { kX86PextrbRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x14, 0, 0, 1, false }, "PextbRRI", "!0r,!1r,!2d" },
{ kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1, false }, "PextwRRI", "!0r,!1r,!2d" },
- { kX86PextrdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextdRRI", "!0r,!1r,!2d" },
+ { kX86PextrdRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextdRRI", "!0r,!1r,!2d" },
{ kX86PextrbMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "kX86PextrbMRI", "[!0r+!1d],!2r,!3d" },
{ kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "kX86PextrwMRI", "[!0r+!1d],!2r,!3d" },
{ kX86PextrdMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "kX86PextrdMRI", "[!0r+!1d],!2r,!3d" },
@@ -473,7 +478,7 @@
{ kX86MovsxdRM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { REX_W, 0, 0x63, 0, 0, 0, 0, 0, false }, "MovsxdRM", "!0r,[!1r+!2d]" },
{ kX86MovsxdRA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0 | REG_USE12, { REX_W, 0, 0x63, 0, 0, 0, 0, 0, false }, "MovsxdRA", "!0r,[!1r+!2r<<!3d+!4d]" },
- { kX86Set8R, kRegCond, IS_BINARY_OP | REG_DEF0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, true }, "Set8R", "!1c !0r" },
+ { kX86Set8R, kRegCond, IS_BINARY_OP | REG_DEF0 | REG_USE0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, true }, "Set8R", "!1c !0r" },
{ kX86Set8M, kMemCond, IS_STORE | IS_TERTIARY_OP | REG_USE0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, false }, "Set8M", "!2c [!0r+!1d]" },
{ kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, false }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" },
@@ -521,7 +526,7 @@
{ kX86StartOfMethod, kMacro, IS_UNARY_OP | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0, false }, "StartOfMethod", "!0r" },
{ kX86PcRelLoadRA, kPcRel, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "PcRelLoadRA", "!0r,[!1r+!2r<<!3d+!4p]" },
- { kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4, false }, "PcRelAdr", "!0r,!1d" },
+ { kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4, false }, "PcRelAdr", "!0r,!1p" },
{ kX86RepneScasw, kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
};
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index b7441d7..f5f8671 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -27,8 +27,7 @@
* The sparse table in the literal pool is an array of <key,displacement>
* pairs.
*/
-void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
- RegLocation rl_src) {
+void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
@@ -61,8 +60,7 @@
* jmp r_start_of_method
* done:
*/
-void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
- RegLocation rl_src) {
+void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
@@ -171,13 +169,7 @@
}
NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
OpRegReg(kOpAdd, payload, method_start);
- if (cu_->target64) {
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), array_ptr,
- payload, true);
- } else {
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), array_ptr,
- payload, true);
- }
+ CallRuntimeHelperRegReg(kQuickHandleFillArrayData, array_ptr, payload, true);
}
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
@@ -222,15 +214,28 @@
LockTemp(rs_rX86_ARG1);
LockTemp(rs_rX86_ARG2);
- /* Build frame, return address already on stack */
- stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
-
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- const bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
- !IsLargeFrame(frame_size_, cu_->target64 ? kX86_64 : kX86);
+ InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
+ const bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, isa);
+
+ // If we are doing an implicit stack overflow check, perform the load immediately
+ // before the stack pointer is decremented and anything is saved.
+ if (!skip_overflow_check &&
+ cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ // Implicit stack overflow check.
+ // test eax,[esp + -overflow]
+ int overflow = GetStackOverflowReservedBytes(isa);
+ NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
+ MarkPossibleStackOverflowException();
+ }
+
+ /* Build frame, return address already on stack */
+ stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
+ GetInstructionSetPointerSize(cu_->instruction_set));
+
NewLIR0(kPseudoMethodEntry);
/* Spill core callee saves */
SpillCoreRegs();
@@ -248,37 +253,34 @@
m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
m2l_->ClobberCallerSave();
// Assumes codegen and target are in thumb2 mode.
- if (cu_->target64) {
- m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow),
- false /* MarkSafepointPC */, false /* UseLink */);
- } else {
- m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow),
- false /* MarkSafepointPC */, false /* UseLink */);
- }
+ m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
+ false /* MarkSafepointPC */, false /* UseLink */);
}
private:
const size_t sp_displace_;
};
- // TODO: for large frames we should do something like:
- // spill ebp
- // lea ebp, [esp + frame_size]
- // cmp ebp, fs:[stack_end_]
- // jcc stack_overflow_exception
- // mov esp, ebp
- // in case a signal comes in that's not using an alternate signal stack and the large frame may
- // have moved us outside of the reserved area at the end of the stack.
- // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
- if (cu_->target64) {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
- } else {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
- }
- LIR* branch = OpCondBranch(kCondUlt, nullptr);
- AddSlowPath(
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
+ // TODO: for large frames we should do something like:
+ // spill ebp
+ // lea ebp, [esp + frame_size]
+ // cmp ebp, fs:[stack_end_]
+ // jcc stack_overflow_exception
+ // mov esp, ebp
+ // in case a signal comes in that's not using an alternate signal stack and the large frame
+ // may have moved us outside of the reserved area at the end of the stack.
+ // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
+ if (cu_->target64) {
+ OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
+ } else {
+ OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
+ }
+ LIR* branch = OpCondBranch(kCondUlt, nullptr);
+ AddSlowPath(
new(arena_)StackOverflowSlowPath(this, branch,
frame_size_ -
GetInstructionSetPointerSize(cu_->instruction_set)));
+ }
}
FlushIns(ArgLocs, rl_method);
@@ -318,4 +320,14 @@
NewLIR0(kX86Ret);
}
+void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ // Implicit null pointer check.
+ // test eax,[arg1+0]
+ NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
+ MarkPossibleNullPointerException(opt_flags);
+}
+
} // namespace art
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index f4fa1b4..24a3fe3 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -65,30 +65,25 @@
// Required for target - codegen helpers.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
+ RegLocation rl_dest, int lit) OVERRIDE;
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
- LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
- RegStorage r_src, OpSize size) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+ void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
- RegStorage TargetReg32(SpecialTargetRegister reg);
RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
if (wide_kind == kWide) {
if (cu_->target64) {
@@ -110,108 +105,78 @@
RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
}
- RegStorage GetArgMappingToPhysicalReg(int arg_num);
- RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
+
+ RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+
+ RegLocation GetReturnAlt() OVERRIDE;
+ RegLocation GetReturnWideAlt() OVERRIDE;
+ RegLocation LocCReturn() OVERRIDE;
+ RegLocation LocCReturnRef() OVERRIDE;
+ RegLocation LocCReturnDouble() OVERRIDE;
+ RegLocation LocCReturnFloat() OVERRIDE;
+ RegLocation LocCReturnWide() OVERRIDE;
+
ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
- void AdjustSpillMask();
- void ClobberCallerSave();
- void FreeCallTemps();
- void LockCallTemps();
- void CompilerInitializeRegAlloc();
- int VectorRegisterSize();
- int NumReservableVectorRegisters(bool fp_used);
+ void AdjustSpillMask() OVERRIDE;
+ void ClobberCallerSave() OVERRIDE;
+ void FreeCallTemps() OVERRIDE;
+ void LockCallTemps() OVERRIDE;
+
+ void CompilerInitializeRegAlloc() OVERRIDE;
+ int VectorRegisterSize() OVERRIDE;
+ int NumReservableVectorRegisters(bool fp_used) OVERRIDE;
// Required for target - miscellaneous.
- void AssembleLIR();
- int AssignInsnOffsets();
- void AssignOffsets();
- AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+ void AssembleLIR() OVERRIDE;
void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
- const char* GetTargetInstFmt(int opcode);
- const char* GetTargetInstName(int opcode);
- std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ const char* GetTargetInstFmt(int opcode) OVERRIDE;
+ const char* GetTargetInstName(int opcode) OVERRIDE;
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
ResourceMask GetPCUseDefEncoding() const OVERRIDE;
- uint64_t GetTargetInstFlags(int opcode);
+ uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
size_t GetInsnSize(LIR* lir) OVERRIDE;
- bool IsUnconditionalBranch(LIR* lir);
+ bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
// Get the register class for load/store of a field.
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
// Required for target - Dalvik-level generators.
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_dest, int scale);
+ RegLocation rl_dest, int scale) OVERRIDE;
void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift);
- void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
+ RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
+
void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
+ RegLocation rl_src2) OVERRIDE;
void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
+ RegLocation rl_src2) OVERRIDE;
void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
- bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
- bool GenInlinedSqrt(CallInfo* info);
+ RegLocation rl_src2) OVERRIDE;
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+
+ bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
+ bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
+ bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+ bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
- bool GenInlinedPeek(CallInfo* info, OpSize size);
- bool GenInlinedPoke(CallInfo* info, OpSize size);
- void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div);
- // TODO: collapse reg_lo, reg_hi
- RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
- void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheckWide(RegStorage reg);
- void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
- void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
- void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- void GenExitSequence();
- void GenSpecialExitSequence();
- void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
- void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- void GenSelect(BasicBlock* bb, MIR* mir);
- bool GenMemBarrier(MemBarrierKind barrier_kind);
- void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
- void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
+ bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
+
+ // Long instructions.
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) OVERRIDE;
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
/*
* @brief Generate a two address long operation with a constant value
@@ -221,6 +186,7 @@
* @return success or not
*/
bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
+
/*
* @brief Generate a three address long operation with a constant value
* @param rl_dest location of result
@@ -231,7 +197,6 @@
*/
bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
Instruction::Code op);
-
/**
* @brief Generate a long arithmetic operation.
* @param rl_dest The destination.
@@ -259,6 +224,31 @@
*/
virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
+
+ // TODO: collapse reg_lo, reg_hi
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
+ OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) OVERRIDE;
+ void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
+ void GenExitSequence() OVERRIDE;
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
+ void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) OVERRIDE;
+ bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
+ void GenMoveException(RegLocation rl_dest) OVERRIDE;
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit) OVERRIDE;
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+ void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+
/**
* @brief Implement instanceof a final class with x86 specific code.
* @param use_declaring_class 'true' if we can use the class itself.
@@ -267,72 +257,39 @@
* @param rl_src Object to be tested.
*/
void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src);
- /*
- *
- * @brief Implement Set up instanceof a class with x86 specific code.
- * @param needs_access_check 'true' if we must check the access.
- * @param type_known_final 'true' if the type is known to be a final class.
- * @param type_known_abstract 'true' if the type is known to be an abstract class.
- * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
- * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
- * @param type_idx Type index to use if use_declaring_class is 'false'.
- * @param rl_dest Result to be set to 0 or 1.
- * @param rl_src Object to be tested.
- */
- void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
- bool type_known_abstract, bool use_declaring_class,
- bool can_assume_type_is_in_dex_cache,
- uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
-
- void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift);
+ RegLocation rl_src) OVERRIDE;
// Single operation generators.
- LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
- LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
- LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpIT(ConditionCode cond, const char* guide);
- void OpEndIT(LIR* it);
- LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- LIR* OpPcRelLoad(RegStorage reg, LIR* target);
- LIR* OpReg(OpKind op, RegStorage r_dest_src);
- void OpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
- LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
- LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
- LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
- LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
- LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
- LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
- LIR* OpTestSuspend(LIR* target);
- LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
- LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
- LIR* OpVldm(RegStorage r_base, int count);
- LIR* OpVstm(RegStorage r_base, int count);
- void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
- void OpRegCopyWide(RegStorage dest, RegStorage src);
- void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
- void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
+ LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
+ LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
+ void OpEndIT(LIR* it) OVERRIDE;
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+ LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
+ LIR* OpTestSuspend(LIR* target) OVERRIDE;
+ LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
+ LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
+ void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
+ bool GenInlinedCurrentThread(CallInfo* info) OVERRIDE;
- void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
- void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
- void SpillCoreRegs();
- void UnSpillCoreRegs();
- void UnSpillFPRegs();
- void SpillFPRegs();
- static const X86EncodingMap EncodingMap[kX86Last];
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
+ bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+ bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+ bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+ bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
/*
* @brief Should try to optimize for two address instructions?
@@ -348,13 +305,7 @@
* @param rl_rhs Right hand operand.
*/
void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
- RegLocation rl_rhs);
-
- /*
- * @brief Dump a RegLocation using printf
- * @param loc Register location to dump
- */
- static void DumpRegLocation(RegLocation loc);
+ RegLocation rl_rhs) OVERRIDE;
/*
* @brief Load the Method* of a dex method into the register.
@@ -364,31 +315,33 @@
* @note register will be passed to TargetReg to get physical register.
*/
void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
- SpecialTargetRegister symbolic_reg);
+ SpecialTargetRegister symbolic_reg) OVERRIDE;
/*
* @brief Load the Class* of a Dex Class type into the register.
+ * @param dex_file DexFile that contains the class type.
* @param type How the method will be invoked.
* @param register that will contain the code address.
* @note register will be passed to TargetReg to get physical register.
*/
- void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
+ void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+ SpecialTargetRegister symbolic_reg) OVERRIDE;
- void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
+ void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
NextCallInsn next_call_insn,
const MethodReference& target_method,
uint32_t vtable_idx,
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
- bool skip_this);
+ bool skip_this) OVERRIDE;
int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
NextCallInsn next_call_insn,
const MethodReference& target_method,
uint32_t vtable_idx,
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
- bool skip_this);
+ bool skip_this) OVERRIDE;
/*
* @brief Generate a relative call to the method that will be patched at link time.
@@ -401,21 +354,18 @@
/*
* @brief Handle x86 specific literals
*/
- void InstallLiteralPools();
-
- /*
- * @brief Generate the debug_frame CFI information.
- * @returns pointer to vector containing CFE information
- */
- static std::vector<uint8_t>* ReturnCommonCallFrameInformation();
+ void InstallLiteralPools() OVERRIDE;
/*
* @brief Generate the debug_frame FDE information.
* @returns pointer to vector containing CFE information
*/
- std::vector<uint8_t>* ReturnCallFrameInformation();
+ std::vector<uint8_t>* ReturnFrameDescriptionEntry() OVERRIDE;
+
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
protected:
+ RegStorage TargetReg32(SpecialTargetRegister reg);
// Casting of RegStorage
RegStorage As32BitReg(RegStorage reg) {
DCHECK(!reg.IsPair());
@@ -453,6 +403,17 @@
return ret_val;
}
+ LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_dest, OpSize size);
+ LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+ RegStorage r_src, OpSize size);
+
+ RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
+
+ int AssignInsnOffsets();
+ void AssignOffsets();
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+
size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
int32_t raw_base, int32_t displacement);
void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
@@ -539,6 +500,9 @@
* @returns true if a register is byte addressable.
*/
bool IsByteRegister(RegStorage reg);
+
+ void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
+
bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
/*
@@ -747,8 +711,9 @@
* @param divisor divisor number for calculation
* @param magic hold calculated magic number
* @param shift hold calculated shift
+ * @param is_long 'true' if divisor is jlong, 'false' for jint.
*/
- void CalculateMagicAndShift(int divisor, int& magic, int& shift);
+ void CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long);
/*
* @brief Generate an integer div or rem operation.
@@ -805,9 +770,13 @@
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
+ * @param target branch target (or nullptr)
+ * @param compare output parameter that receives the comparison LIR (or nullptr)
*/
LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target);
+ int offset, int check_value, LIR* target, LIR** compare);
+
+ void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
/*
* Can this operation be using core registers without temporaries?
@@ -825,6 +794,36 @@
*/
virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
+ void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
+ void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
+
+ LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
+ LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
+ LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
+ LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
+ LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
+ void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
+ void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
+ void OpTlsCmp(ThreadOffset<4> offset, int val);
+ void OpTlsCmp(ThreadOffset<8> offset, int val);
+
+ void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
+
+ // Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
+ // in which case false will be returned.
+ bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val);
+ void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div);
+
+ void SpillCoreRegs();
+ void UnSpillCoreRegs();
+ void UnSpillFPRegs();
+ void SpillFPRegs();
+
/*
* @brief Perform MIR analysis before compiling method.
* @note Invokes Mir2LiR::Materialize after analysis.
@@ -947,6 +946,14 @@
return true; // xmm registers have 64b views even on x86.
}
+ /*
+ * @brief Dump a RegLocation using printf
+ * @param loc Register location to dump
+ */
+ static void DumpRegLocation(RegLocation loc);
+
+ static const X86EncodingMap EncodingMap[kX86Last];
+
private:
// The number of vector registers [0..N] reserved by a call to ReserveVectorRegisters
int num_reserved_vector_regs_;
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index fc65deb..2920fb6 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -292,7 +292,7 @@
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
+ GenConversionCall(kQuickF2l, rl_dest, rl_src);
}
return;
case Instruction::DOUBLE_TO_LONG:
@@ -317,7 +317,7 @@
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
+ GenConversionCall(kQuickD2l, rl_dest, rl_src);
}
return;
default:
@@ -705,4 +705,77 @@
}
}
+bool X86Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
+ if (is_double) {
+ RegLocation rl_src1 = LoadValueWide(info->args[0], kFPReg);
+ RegLocation rl_src2 = LoadValueWide(info->args[2], kFPReg);
+ RegLocation rl_dest = InlineTargetWide(info);
+ RegLocation rl_result = EvalLocWide(rl_dest, kFPReg, true);
+
+ // Avoid src2 corruption by OpRegCopyWide.
+ if (rl_result.reg == rl_src2.reg) {
+ std::swap(rl_src2.reg, rl_src1.reg);
+ }
+
+ OpRegCopyWide(rl_result.reg, rl_src1.reg);
+ NewLIR2(kX86UcomisdRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ // If either arg is NaN, return NaN.
+ LIR* branch_nan = NewLIR2(kX86Jcc8, 0, kX86CondP);
+ // Min/Max branches.
+ LIR* branch_cond1 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondA : kX86CondB);
+ LIR* branch_cond2 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondB : kX86CondA);
+ // If equal, we need to resolve situations like min/max(0.0, -0.0) == -0.0/0.0.
+ NewLIR2((is_min) ? kX86OrpdRR : kX86AndpdRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ LIR* branch_exit_equal = NewLIR1(kX86Jmp8, 0);
+ // Handle NaN.
+ branch_nan->target = NewLIR0(kPseudoTargetLabel);
+ LoadConstantWide(rl_result.reg, INT64_C(0x7ff8000000000000));
+ LIR* branch_exit_nan = NewLIR1(kX86Jmp8, 0);
+ // Handle Min/Max. Copy greater/lesser value from src2.
+ branch_cond1->target = NewLIR0(kPseudoTargetLabel);
+ OpRegCopyWide(rl_result.reg, rl_src2.reg);
+ // Right operand is already in result reg.
+ branch_cond2->target = NewLIR0(kPseudoTargetLabel);
+ // Exit.
+ branch_exit_nan->target = NewLIR0(kPseudoTargetLabel);
+ branch_exit_equal->target = NewLIR0(kPseudoTargetLabel);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ RegLocation rl_src1 = LoadValue(info->args[0], kFPReg);
+ RegLocation rl_src2 = LoadValue(info->args[1], kFPReg);
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+
+ // Avoid src2 corruption by OpRegCopy.
+ if (rl_result.reg == rl_src2.reg) {
+ std::swap(rl_src2.reg, rl_src1.reg);
+ }
+
+ OpRegCopy(rl_result.reg, rl_src1.reg);
+ NewLIR2(kX86UcomissRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ // If either arg is NaN, return NaN.
+ LIR* branch_nan = NewLIR2(kX86Jcc8, 0, kX86CondP);
+ // Min/Max branches.
+ LIR* branch_cond1 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondA : kX86CondB);
+ LIR* branch_cond2 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondB : kX86CondA);
+ // If equal, we need to resolve situations like min/max(0.0, -0.0) == -0.0/0.0.
+ NewLIR2((is_min) ? kX86OrpsRR : kX86AndpsRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ LIR* branch_exit_equal = NewLIR1(kX86Jmp8, 0);
+ // Handle NaN.
+ branch_nan->target = NewLIR0(kPseudoTargetLabel);
+ LoadConstantNoClobber(rl_result.reg, 0x7fc00000);
+ LIR* branch_exit_nan = NewLIR1(kX86Jmp8, 0);
+ // Handle Min/Max. Copy greater/lesser value from src2.
+ branch_cond1->target = NewLIR0(kPseudoTargetLabel);
+ OpRegCopy(rl_result.reg, rl_src2.reg);
+ // Right operand is already in result reg.
+ branch_cond2->target = NewLIR0(kPseudoTargetLabel);
+ // Exit.
+ branch_exit_nan->target = NewLIR0(kPseudoTargetLabel);
+ branch_exit_equal->target = NewLIR0(kPseudoTargetLabel);
+ StoreValue(rl_dest, rl_result);
+ }
+ return true;
+}
+
} // namespace art
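
For reference, a minimal standalone C++ sketch (not part of the patch) of the semantics the inlined min/max sequence above must reproduce: if either operand is NaN, the canonical NaN constant loaded on the NaN path is returned, and on the equal path the bit patterns are OR-ed (min) or AND-ed (max) so that min(0.0f, -0.0f) is -0.0f. Only the min/float case is shown; all names here are illustrative.

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Scalar reference for the Java Math.min(float, float) contract.
static float JavaMinFloat(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    uint32_t canonical_nan = 0x7fc00000;  // same constant as the NaN path above
    float r;
    std::memcpy(&r, &canonical_nan, sizeof(r));
    return r;
  }
  if (a == b) {
    // min(0.0f, -0.0f) must be -0.0f; OR-ing the bit patterns (cf. kX86OrpsRR) does this.
    uint32_t ua, ub, ur;
    std::memcpy(&ua, &a, sizeof(ua));
    std::memcpy(&ub, &b, sizeof(ub));
    ur = ua | ub;
    float r;
    std::memcpy(&r, &ur, sizeof(r));
    return r;
  }
  return a < b ? a : b;
}

int main() {
  assert(std::signbit(JavaMinFloat(0.0f, -0.0f)));
  assert(std::isnan(JavaMinFloat(1.0f, std::nanf(""))));
  assert(JavaMinFloat(1.0f, 2.0f) == 1.0f);
  return 0;
}
```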
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 2f27482..fdc46e2 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -206,6 +206,67 @@
}
}
+void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ int dest_reg_class) {
+ DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
+ DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
+
+ // We really need this check for correctness; otherwise the non-zero/one case below
+ // would require additional checks.
+ if (true_val == false_val) {
+ LoadConstantNoClobber(rs_dest, true_val);
+ return;
+ }
+
+ const bool dest_intersect = IsSameReg(rs_dest, left_op) || IsSameReg(rs_dest, right_op);
+
+ const bool zero_one_case = (true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0);
+ if (zero_one_case && IsByteRegister(rs_dest)) {
+ if (!dest_intersect) {
+ LoadConstantNoClobber(rs_dest, 0);
+ }
+ OpRegReg(kOpCmp, left_op, right_op);
+ // Set the low byte of the result to 0 or 1 from the compare condition code.
+ NewLIR2(kX86Set8R, rs_dest.GetReg(),
+ X86ConditionEncoding(true_val == 1 ? code : FlipComparisonOrder(code)));
+ if (dest_intersect) {
+ NewLIR2(rs_dest.Is64Bit() ? kX86Movzx8qRR : kX86Movzx8RR, rs_dest.GetReg(), rs_dest.GetReg());
+ }
+ } else {
+ // Be careful: rs_dest can be changed only after the cmp because it may alias one of the ops,
+ // and we cannot zero it with xor because that would dirty the condition flags.
+ RegStorage temp_reg = AllocTypedTemp(false, dest_reg_class, false);
+ if (temp_reg.Valid()) {
+ if (false_val == 0 && dest_intersect) {
+ code = FlipComparisonOrder(code);
+ std::swap(true_val, false_val);
+ }
+ if (!dest_intersect) {
+ LoadConstantNoClobber(rs_dest, false_val);
+ }
+ LoadConstantNoClobber(temp_reg, true_val);
+ OpRegReg(kOpCmp, left_op, right_op);
+ if (dest_intersect) {
+ LoadConstantNoClobber(rs_dest, false_val);
+ DCHECK(!last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
+ }
+ OpCondRegReg(kOpCmov, code, rs_dest, temp_reg);
+ FreeTemp(temp_reg);
+ } else {
+ // slow path
+ LIR* cmp_branch = OpCmpBranch(code, left_op, right_op, nullptr);
+ LoadConstantNoClobber(rs_dest, false_val);
+ LIR* that_is_it = NewLIR1(kX86Jmp8, 0);
+ LIR* true_case = NewLIR0(kPseudoTargetLabel);
+ cmp_branch->target = true_case;
+ LoadConstantNoClobber(rs_dest, true_val);
+ LIR* end = NewLIR0(kPseudoTargetLabel);
+ that_is_it->target = end;
+ }
+ }
+}
+
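
As a plain-C++ illustration of the two lowerings GenSelectConst32 chooses between, written out for the concrete condition "left < right" (a sketch only; the generator emits LIR, and the fixed condition here is an assumption for readability):

```cpp
#include <cassert>

// SETcc path (true_val == 1, false_val == 0):
//   xor dest,dest ; cmp left,right ; setl dest_low
int SelectZeroOne(int left, int right) {
  return left < right;  // materializes exactly 0 or 1
}

// CMOV path: load false_val into dest, true_val into a temp, compare, conditional move.
int SelectGeneral(int left, int right, int true_val, int false_val) {
  int dest = false_val;
  const int temp = true_val;
  if (left < right) {  // compiled as cmovl dest, temp in the generated code
    dest = temp;
  }
  return dest;
}

int main() {
  assert(SelectZeroOne(1, 2) == 1 && SelectZeroOne(3, 2) == 0);
  assert(SelectGeneral(1, 2, 7, 9) == 7 && SelectGeneral(3, 2, 7, 9) == 9);
  return 0;
}
```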
void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
@@ -452,7 +513,7 @@
OpCondBranch(ccode, taken);
}
-void X86Mir2Lir::CalculateMagicAndShift(int divisor, int& magic, int& shift) {
+void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long) {
// It does not make sense to calculate magic and shift for zero divisor.
DCHECK_NE(divisor, 0);
@@ -464,8 +525,8 @@
* Let nc be the most negative value of numerator(n) such that nc = kd + 1,
* where divisor(d) <= -2.
* Thus nc can be calculated like:
- * nc = 2^31 + 2^31 % d - 1, where d >= 2
- * nc = -2^31 + (2^31 + 1) % d, where d >= 2.
+ * nc = exp + exp % d - 1, where d >= 2 and exp = 2^31 for int or 2^63 for long
+ * nc = -exp + (exp + 1) % d, where d >= 2 and exp = 2^31 for int or 2^63 for long
*
* So the shift p is the smallest p satisfying
* 2^p > nc * (d - 2^p % d), where d >= 2
@@ -475,27 +536,28 @@
* M = (2^p + d - 2^p % d) / d, where d >= 2
* M = (2^p - d - 2^p % d) / d, where d <= -2.
*
- * Notice that p is always bigger than or equal to 32, so we just return 32-p as
+ * Notice that p is always bigger than or equal to 32/64, so we just return p-32/p-64 as
* the shift number S.
*/
- int32_t p = 31;
- const uint32_t two31 = 0x80000000U;
+ int64_t p = (is_long) ? 63 : 31;
+ const uint64_t exp = (is_long) ? 0x8000000000000000ULL : 0x80000000U;
// Initialize the computations.
- uint32_t abs_d = (divisor >= 0) ? divisor : -divisor;
- uint32_t tmp = two31 + (static_cast<uint32_t>(divisor) >> 31);
- uint32_t abs_nc = tmp - 1 - tmp % abs_d;
- uint32_t quotient1 = two31 / abs_nc;
- uint32_t remainder1 = two31 % abs_nc;
- uint32_t quotient2 = two31 / abs_d;
- uint32_t remainder2 = two31 % abs_d;
+ uint64_t abs_d = (divisor >= 0) ? divisor : -divisor;
+ uint64_t tmp = exp + ((is_long) ? static_cast<uint64_t>(divisor) >> 63 :
+ static_cast<uint32_t>(divisor) >> 31);
+ uint64_t abs_nc = tmp - 1 - tmp % abs_d;
+ uint64_t quotient1 = exp / abs_nc;
+ uint64_t remainder1 = exp % abs_nc;
+ uint64_t quotient2 = exp / abs_d;
+ uint64_t remainder2 = exp % abs_d;
/*
* To avoid handling both positive and negative divisor, Hacker's Delight
* introduces a method to handle these 2 cases together to avoid duplication.
*/
- uint32_t delta;
+ uint64_t delta;
do {
p++;
quotient1 = 2 * quotient1;
@@ -514,7 +576,12 @@
} while (quotient1 < delta || (quotient1 == delta && remainder1 == 0));
magic = (divisor > 0) ? (quotient2 + 1) : (-quotient2 - 1);
- shift = p - 32;
+
+ if (!is_long) {
+ magic = static_cast<int>(magic);
+ }
+
+ shift = (is_long) ? p - 64 : p - 32;
}
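
For illustration, here is a standalone 32-bit-only version of the routine above (following Hacker's Delight, Figure 10-1); the patched method folds the 32- and 64-bit cases into one. This sketch is not ART code. For divisor 7 it should print magic = 0x92492493 and shift = 2.

```cpp
#include <cstdint>
#include <cstdio>

static void MagicSigned32(int32_t d, int32_t* magic, int* shift) {
  const uint32_t two31 = 0x80000000u;
  uint32_t ad = (d >= 0) ? static_cast<uint32_t>(d) : 0u - static_cast<uint32_t>(d);
  uint32_t t = two31 + (static_cast<uint32_t>(d) >> 31);
  uint32_t anc = t - 1 - t % ad;                 // absolute value of nc
  int p = 31;
  uint32_t q1 = two31 / anc, r1 = two31 % anc;   // 2^p / |nc| and remainder
  uint32_t q2 = two31 / ad,  r2 = two31 % ad;    // 2^p / |d| and remainder
  uint32_t delta;
  do {
    p++;
    q1 *= 2; r1 *= 2;
    if (r1 >= anc) { q1++; r1 -= anc; }
    q2 *= 2; r2 *= 2;
    if (r2 >= ad) { q2++; r2 -= ad; }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));
  uint32_t m = q2 + 1;
  *magic = (d > 0) ? static_cast<int32_t>(m) : -static_cast<int32_t>(m);
  *shift = p - 32;
}

int main() {
  int32_t m;
  int s;
  MagicSigned32(7, &m, &s);
  std::printf("magic=0x%08x shift=%d\n", static_cast<uint32_t>(m), s);  // expect 0x92492493 2
  return 0;
}
```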
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
@@ -525,52 +592,57 @@
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
int imm, bool is_div) {
// Use a multiply (and fixup) to perform an int div/rem by a constant.
+ RegLocation rl_result;
- // We have to use fixed registers, so flush all the temps.
- FlushAllRegs();
- LockCallTemps(); // Prepare for explicit register usage.
-
- // Assume that the result will be in EDX.
- RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};
-
- // handle div/rem by 1 special case.
if (imm == 1) {
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
// x / 1 == x.
- StoreValue(rl_result, rl_src);
+ LoadValueDirectFixed(rl_src, rl_result.reg);
} else {
// x % 1 == 0.
- LoadConstantNoClobber(rs_r0, 0);
- // For this case, return the result in EAX.
- rl_result.reg.SetReg(r0);
+ LoadConstantNoClobber(rl_result.reg, 0);
}
} else if (imm == -1) { // handle 0x80000000 / -1 special case.
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- LIR *minint_branch = 0;
- LoadValueDirectFixed(rl_src, rs_r0);
- OpRegImm(kOpCmp, rs_r0, 0x80000000);
- minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+ LoadValueDirectFixed(rl_src, rl_result.reg);
+ OpRegImm(kOpCmp, rl_result.reg, 0x80000000);
+ LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
// for x != MIN_INT, x / -1 == -x.
- NewLIR1(kX86Neg32R, r0);
+ NewLIR1(kX86Neg32R, rl_result.reg.GetReg());
- LIR* branch_around = NewLIR1(kX86Jmp8, 0);
- // The target for cmp/jmp above.
- minint_branch->target = NewLIR0(kPseudoTargetLabel);
// The result register already contains the right value (0x80000000).
- branch_around->target = NewLIR0(kPseudoTargetLabel);
+ minint_branch->target = NewLIR0(kPseudoTargetLabel);
} else {
// x % -1 == 0.
- LoadConstantNoClobber(rs_r0, 0);
+ LoadConstantNoClobber(rl_result.reg, 0);
}
- // For this case, return the result in EAX.
- rl_result.reg.SetReg(r0);
+ } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
+ // Division using shifting.
+ rl_src = LoadValue(rl_src, kCoreReg);
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ if (IsSameReg(rl_result.reg, rl_src.reg)) {
+ RegStorage rs_temp = AllocTypedTemp(false, kCoreReg);
+ rl_result.reg.SetReg(rs_temp.GetReg());
+ }
+ NewLIR3(kX86Lea32RM, rl_result.reg.GetReg(), rl_src.reg.GetReg(), std::abs(imm) - 1);
+ NewLIR2(kX86Test32RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
+ OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
+ int shift_amount = LowestSetBit(imm);
+ OpRegImm(kOpAsr, rl_result.reg, shift_amount);
+ if (imm < 0) {
+ OpReg(kOpNeg, rl_result.reg);
+ }
} else {
CHECK(imm <= -2 || imm >= 2);
+
// Use H. S. Warren's Hacker's Delight Chapter 10 and
// T. Granlund and P. L. Montgomery's "Division by Invariant Integers Using Multiplication".
- int magic, shift;
- CalculateMagicAndShift(imm, magic, shift);
+ int64_t magic;
+ int shift;
+ CalculateMagicAndShift((int64_t)imm, magic, shift, false /* is_long */);
/*
* For imm >= 2,
@@ -588,18 +660,22 @@
* 5. Thus, EDX is the quotient
*/
+ FlushReg(rs_r0);
+ Clobber(rs_r0);
+ LockTemp(rs_r0);
+ FlushReg(rs_r2);
+ Clobber(rs_r2);
+ LockTemp(rs_r2);
+
+ // Assume that the result will be in EDX.
+ rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};
+
// Numerator into EAX.
RegStorage numerator_reg;
if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
// We will need the value later.
- if (rl_src.location == kLocPhysReg) {
- // We can use it directly.
- DCHECK(rl_src.reg.GetReg() != rs_r0.GetReg() && rl_src.reg.GetReg() != rs_r2.GetReg());
- numerator_reg = rl_src.reg;
- } else {
- numerator_reg = rs_r1;
- LoadValueDirectFixed(rl_src, numerator_reg);
- }
+ rl_src = LoadValue(rl_src, kCoreReg);
+ numerator_reg = rl_src.reg;
OpRegCopy(rs_r0, numerator_reg);
} else {
// Only need this once. Just put it into EAX.
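
A worked example (illustration only, not ART code) of the five-step fixup described in the comment above, specialized to imm == 7, for which CalculateMagicAndShift yields magic = 0x92492493 (negative as an int32) and shift = 2. An arithmetic right shift of negative signed values is assumed, as on the targeted compilers.

```cpp
#include <cassert>
#include <cstdint>

static int32_t DivBy7(int32_t n) {
  const int32_t magic = static_cast<int32_t>(0x92492493);  // M < 0, imm > 0
  const int shift = 2;
  // Step 1: high 32 bits of magic * n (what ends up in EDX after the imul).
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);
  // Step 2: imm > 0 and magic < 0, so add the numerator.
  q += n;
  // Step 3: arithmetic shift right by S.
  q >>= shift;
  // Step 4: add 1 if the intermediate quotient is negative (add its sign bit).
  q += static_cast<uint32_t>(q) >> 31;
  // Step 5: q is the quotient.
  return q;
}

int main() {
  for (int32_t n : {-100, -7, -1, 0, 1, 6, 7, 8, 100, 2147483647}) {
    assert(DivBy7(n) == n / 7);
  }
  return 0;
}
```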
@@ -1081,14 +1157,10 @@
}
}
// Load array length to kArg1.
- m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
- if (cu_->target64) {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- new_index, m2l_->TargetReg(kArg1, kNotWide), true);
- } else {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- new_index, m2l_->TargetReg(kArg1, kNotWide), true);
- }
+ X86Mir2Lir* x86_m2l = static_cast<X86Mir2Lir*>(m2l_);
+ x86_m2l->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
+ x86_m2l->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, new_index,
+ m2l_->TargetReg(kArg1, kNotWide), true);
}
private:
@@ -1098,6 +1170,7 @@
};
OpRegMem(kOpCmp, index, array_base, len_offset);
+ MarkPossibleNullPointerException(0);
LIR* branch = OpCondBranch(kCondUge, nullptr);
AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
index, array_base, len_offset));
@@ -1120,17 +1193,11 @@
GenerateTargetLabel(kPseudoThrowTarget);
// Load array length to kArg1.
- m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
- m2l_->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
- if (cu_->target64) {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- m2l_->TargetReg(kArg0, kNotWide),
- m2l_->TargetReg(kArg1, kNotWide), true);
- } else {
- m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- m2l_->TargetReg(kArg0, kNotWide),
- m2l_->TargetReg(kArg1, kNotWide), true);
- }
+ X86Mir2Lir* x86_m2l = static_cast<X86Mir2Lir*>(m2l_);
+ x86_m2l->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
+ x86_m2l->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
+ x86_m2l->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, m2l_->TargetReg(kArg0, kNotWide),
+ m2l_->TargetReg(kArg1, kNotWide), true);
}
private:
@@ -1140,6 +1207,7 @@
};
NewLIR3(IS_SIMM8(index) ? kX86Cmp32MI8 : kX86Cmp32MI, array_base.GetReg(), len_offset, index);
+ MarkPossibleNullPointerException(0);
LIR* branch = OpCondBranch(kCondLs, nullptr);
AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
index, array_base, len_offset));
@@ -1215,91 +1283,113 @@
}
}
-void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
+void X86Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ if (!cu_->target64) {
+ // Some long ops fall back to the generic Mir2Lir implementation on 32-bit x86.
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+ return;
+
+ default:
+ // Everything else we can handle.
+ break;
+ }
+ }
+
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ GenNotLong(rl_dest, rl_src2);
+ return;
+
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+ return;
+
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
+ return;
+
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+ return;
+
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+ return;
+
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+ return;
+
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::AND_LONG:
+ GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+ return;
+
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+ return;
+
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+ return;
+
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
+
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ return;
+ }
+}
+
+bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val) {
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- if (cu_->target64) {
- if (rl_src1.is_const) {
- std::swap(rl_src1, rl_src2);
- }
- // Are we multiplying by a constant?
- if (rl_src2.is_const) {
- int64_t val = mir_graph_->ConstantValueWide(rl_src2);
- if (val == 0) {
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
- StoreValueWide(rl_dest, rl_result);
- return;
- } else if (val == 1) {
- StoreValueWide(rl_dest, rl_src1);
- return;
- } else if (val == 2) {
- GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
- return;
- } else if (IsPowerOfTwo(val)) {
- int shift_amount = LowestSetBit(val);
- if (!BadOverlap(rl_src1, rl_dest)) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
- rl_src1, shift_amount);
- StoreValueWide(rl_dest, rl_result);
- return;
- }
- }
- }
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ if (val == 0) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
- rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
- NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
- } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
- rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
- NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
- } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
- rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
- NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ if (cu_->target64) {
+ OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
} else {
- OpRegCopy(rl_result.reg, rl_src1.reg);
- NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- }
- StoreValueWide(rl_dest, rl_result);
- return;
- }
-
- if (rl_src1.is_const) {
- std::swap(rl_src1, rl_src2);
- }
- // Are we multiplying by a constant?
- if (rl_src2.is_const) {
- // Do special compare/branch against simple const operand
- int64_t val = mir_graph_->ConstantValueWide(rl_src2);
- if (val == 0) {
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
OpRegReg(kOpXor, rl_result.reg.GetLow(), rl_result.reg.GetLow());
OpRegReg(kOpXor, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());
- StoreValueWide(rl_dest, rl_result);
- return;
- } else if (val == 1) {
- StoreValueWide(rl_dest, rl_src1);
- return;
- } else if (val == 2) {
- GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
- return;
- } else if (IsPowerOfTwo(val)) {
- int shift_amount = LowestSetBit(val);
- if (!BadOverlap(rl_src1, rl_dest)) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
- rl_src1, shift_amount);
- StoreValueWide(rl_dest, rl_result);
- return;
- }
}
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+ } else if (val == 1) {
+ StoreValueWide(rl_dest, rl_src1);
+ return true;
+ } else if (val == 2) {
+ GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
+ return true;
+ } else if (IsPowerOfTwo(val)) {
+ int shift_amount = LowestSetBit(val);
+ if (!BadOverlap(rl_src1, rl_dest)) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest, rl_src1,
+ shift_amount);
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+ }
+ }
- // Okay, just bite the bullet and do it.
+ // Okay, on 32b just bite the bullet and do it, still better than the general case.
+ if (!cu_->target64) {
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
FlushAllRegs();
@@ -1340,10 +1430,48 @@
RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
+ return true;
+ }
+ return false;
+}
+
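
The constant-multiply strength reductions applied by GenMulLongConst above, restated as plain C++ for clarity. This is a sketch under the assumption of a non-negative constant on the power-of-two path; __builtin_ctzll is a GCC/Clang intrinsic used here only to mirror LowestSetBit.

```cpp
#include <cassert>
#include <cstdint>

int64_t MulByConst(int64_t x, int64_t val) {
  if (val == 0) return 0;                 // xor of the result registers
  if (val == 1) return x;                 // plain move of the wide value
  if (val == 2) return x + x;             // reuse the ADD_LONG path
  if (val > 0 && (val & (val - 1)) == 0) {  // power of two: shift left
    int shift = __builtin_ctzll(static_cast<uint64_t>(val));
    return x << shift;
  }
  return x * val;                         // otherwise the general multiply sequence
}

int main() {
  assert(MulByConst(5, 0) == 0);
  assert(MulByConst(5, 1) == 5);
  assert(MulByConst(5, 2) == 10);
  assert(MulByConst(5, 8) == 40);
  assert(MulByConst(5, 12) == 60);
  return 0;
}
```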
+void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ if (rl_src1.is_const) {
+ std::swap(rl_src1, rl_src2);
+ }
+
+ if (rl_src2.is_const) {
+ if (GenMulLongConst(rl_dest, rl_src1, mir_graph_->ConstantValueWide(rl_src2))) {
+ return;
+ }
+ }
+
+ // All memory accesses below reference dalvik regs.
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
+ if (cu_->target64) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
+ rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+ NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
+ rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+ NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
+ rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
+ NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ } else {
+ OpRegCopy(rl_result.reg, rl_src1.reg);
+ NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ }
+ StoreValueWide(rl_dest, rl_result);
return;
}
- // Nope. Do it the hard way
+ // Not multiplying by a constant. Do it the hard way
// Check for V*V. We can eliminate a multiply in that case, as 2L*1H == 2H*1L.
bool is_square = mir_graph_->SRegToVReg(rl_src1.s_reg_low) ==
mir_graph_->SRegToVReg(rl_src2.s_reg_low);
@@ -1613,31 +1741,6 @@
StoreFinalValueWide(rl_dest, rl_src1);
}
-void X86Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
-}
-
-void X86Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
if (cu_->target64) {
rl_src = LoadValueWide(rl_src, kCoreReg);
@@ -1651,13 +1754,191 @@
}
}
+void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
+ int64_t imm, bool is_div) {
+ if (imm == 0) {
+ GenDivZeroException();
+ } else if (imm == 1) {
+ if (is_div) {
+ // x / 1 == x.
+ StoreValueWide(rl_dest, rl_src);
+ } else {
+ // x % 1 == 0.
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ LoadConstantWide(rl_result.reg, 0);
+ StoreValueWide(rl_dest, rl_result);
+ }
+ } else if (imm == -1) { // handle 0x8000000000000000 / -1 special case.
+ if (is_div) {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ RegStorage rs_temp = AllocTempWide();
+
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ LoadConstantWide(rs_temp, 0x8000000000000000);
+
+ // If x == MIN_LONG, return MIN_LONG.
+ OpRegReg(kOpCmp, rl_src.reg, rs_temp);
+ LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+
+ // For x != MIN_LONG, x / -1 == -x.
+ OpReg(kOpNeg, rl_result.reg);
+
+ minint_branch->target = NewLIR0(kPseudoTargetLabel);
+ FreeTemp(rs_temp);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ // x % -1 == 0.
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ LoadConstantWide(rl_result.reg, 0);
+ StoreValueWide(rl_dest, rl_result);
+ }
+ } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
+ // Division using shifting.
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ if (IsSameReg(rl_result.reg, rl_src.reg)) {
+ RegStorage rs_temp = AllocTypedTempWide(false, kCoreReg);
+ rl_result.reg.SetReg(rs_temp.GetReg());
+ }
+ LoadConstantWide(rl_result.reg, std::abs(imm) - 1);
+ OpRegReg(kOpAdd, rl_result.reg, rl_src.reg);
+ NewLIR2(kX86Test64RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
+ OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
+ int shift_amount = LowestSetBit(imm);
+ OpRegImm(kOpAsr, rl_result.reg, shift_amount);
+ if (imm < 0) {
+ OpReg(kOpNeg, rl_result.reg);
+ }
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ CHECK(imm <= -2 || imm >= 2);
+
+ FlushReg(rs_r0q);
+ Clobber(rs_r0q);
+ LockTemp(rs_r0q);
+ FlushReg(rs_r2q);
+ Clobber(rs_r2q);
+ LockTemp(rs_r2q);
+
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_r2q, INVALID_SREG, INVALID_SREG};
+
+ // Use H. S. Warren's Hacker's Delight Chapter 10 and
+ // T. Granlund and P. L. Montgomery's "Division by Invariant Integers Using Multiplication".
+ int64_t magic;
+ int shift;
+ CalculateMagicAndShift(imm, magic, shift, true /* is_long */);
+
+ /*
+ * For imm >= 2,
+ * int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n > 0
+ * int(n/imm) = ceil(n/imm) = floor(M*n/2^S) +1, while n < 0.
+ * For imm <= -2,
+ * int(n/imm) = ceil(n/imm) = floor(M*n/2^S) +1 , while n > 0
+ * int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n < 0.
+ * We implement this algorithm in the following way:
+ * 1. multiply magic number m and numerator n, get the higher 64bit result in RDX
+ * 2. if imm > 0 and magic < 0, add numerator to RDX
+ * if imm < 0 and magic > 0, sub numerator from RDX
+ * 3. if S !=0, SAR S bits for RDX
+ * 4. add 1 to RDX if RDX < 0
+ * 5. Thus, RDX is the quotient
+ */
+
+ // Numerator into RAX.
+ RegStorage numerator_reg;
+ if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
+ // We will need the value later.
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ numerator_reg = rl_src.reg;
+ OpRegCopyWide(rs_r0q, numerator_reg);
+ } else {
+ // Only need this once. Just put it into RAX.
+ LoadValueDirectWideFixed(rl_src, rs_r0q);
+ }
+
+ // RDX = magic.
+ LoadConstantWide(rs_r2q, magic);
+
+ // RDX:RAX = magic * dividend.
+ NewLIR1(kX86Imul64DaR, rs_r2q.GetReg());
+
+ if (imm > 0 && magic < 0) {
+ // Add numerator to RDX.
+ DCHECK(numerator_reg.Valid());
+ OpRegReg(kOpAdd, rs_r2q, numerator_reg);
+ } else if (imm < 0 && magic > 0) {
+ DCHECK(numerator_reg.Valid());
+ OpRegReg(kOpSub, rs_r2q, numerator_reg);
+ }
+
+ // Do we need the shift?
+ if (shift != 0) {
+ // Shift RDX by 'shift' bits.
+ OpRegImm(kOpAsr, rs_r2q, shift);
+ }
+
+ // Move RDX to RAX.
+ OpRegCopyWide(rs_r0q, rs_r2q);
+
+ // Move sign bit to bit 0, zeroing the rest.
+ OpRegImm(kOpLsr, rs_r2q, 63);
+
+ // RDX = RDX + RAX.
+ OpRegReg(kOpAdd, rs_r2q, rs_r0q);
+
+ // Quotient is in RDX.
+ if (!is_div) {
+ // We need to compute the remainder.
+ // Remainder is numerator - (quotient * imm).
+ DCHECK(numerator_reg.Valid());
+ OpRegCopyWide(rs_r0q, numerator_reg);
+
+ // Imul doesn't support 64-bit imms.
+ if (imm > std::numeric_limits<int32_t>::max() ||
+ imm < std::numeric_limits<int32_t>::min()) {
+ RegStorage rs_temp = AllocTempWide();
+ LoadConstantWide(rs_temp, imm);
+
+ // RDX = quotient * imm.
+ NewLIR2(kX86Imul64RR, rs_r2q.GetReg(), rs_temp.GetReg());
+
+ FreeTemp(rs_temp);
+ } else {
+ // RDX = quotient * imm.
+ int short_imm = static_cast<int>(imm);
+ NewLIR3(kX86Imul64RRI, rs_r2q.GetReg(), rs_r2q.GetReg(), short_imm);
+ }
+
+ // RAX -= RDX, leaving the remainder (numerator - quotient * imm) in RAX.
+ OpRegReg(kOpSub, rs_r0q, rs_r2q);
+
+ // Store result.
+ OpRegCopyWide(rl_result.reg, rs_r0q);
+ } else {
+ // Store result.
+ OpRegCopyWide(rl_result.reg, rs_r2q);
+ }
+ StoreValueWide(rl_dest, rl_result);
+ FreeTemp(rs_r0q);
+ FreeTemp(rs_r2q);
+ }
+}
+
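
A self-contained sketch (not ART code) of the branch-free power-of-two division used on both the 32-bit and 64-bit constant paths above, here for imm == -8: negative dividends are biased by |imm| - 1 so the arithmetic shift rounds toward zero, and the result is negated because the divisor is negative. An arithmetic right shift of signed values is assumed.

```cpp
#include <cassert>
#include <cstdint>

static int64_t DivByMinus8(int64_t x) {
  const int shift = 3;                                 // LowestSetBit(|imm|)
  int64_t biased = x + ((int64_t{1} << shift) - 1);    // add |imm| - 1 (lea on 32-bit)
  int64_t q = (x >= 0) ? x : biased;                   // test x,x ; cmov on the sign
  q >>= shift;                                         // sar by shift
  return -q;                                           // neg, because imm < 0
}

int main() {
  for (int64_t x : {-17, -8, -1, 0, 7, 8, 23}) {
    assert(DivByMinus8(x) == x / -8);                  // C++ division truncates like Java
  }
  return 0;
}
```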
void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div) {
+ RegLocation rl_src2, bool is_div) {
if (!cu_->target64) {
LOG(FATAL) << "Unexpected use GenDivRemLong()";
return;
}
+ if (rl_src2.is_const) {
+ DCHECK(rl_src2.wide);
+ int64_t imm = mir_graph_->ConstantValueWide(rl_src2);
+ GenDivRemLongLit(rl_dest, rl_src1, imm, is_div);
+ return;
+ }
+
// We have to use fixed registers, so flush all the temps.
FlushAllRegs();
LockCallTemps(); // Prepare for explicit register usage.
@@ -1681,7 +1962,7 @@
// RHS is -1.
LoadConstantWide(rs_r6q, 0x8000000000000000);
NewLIR2(kX86Cmp64RR, rs_r0q.GetReg(), rs_r6q.GetReg());
- LIR * minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
+ LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
// In 0x8000000000000000/-1 case.
if (!is_div) {
@@ -1968,7 +2249,7 @@
} else if (shift_amount == 1 &&
(opcode == Instruction::SHL_LONG || opcode == Instruction::SHL_LONG_2ADDR)) {
// Need to handle this here to avoid calling StoreValueWide twice.
- GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
+ GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
return;
}
if (BadOverlap(rl_src, rl_dest)) {
@@ -2000,7 +2281,7 @@
if (rl_src2.is_const) {
isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
} else {
- GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
isConstSuccess = true;
}
break;
@@ -2397,110 +2678,6 @@
StoreValue(rl_dest, rl_result);
}
-void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
- bool type_known_abstract, bool use_declaring_class,
- bool can_assume_type_is_in_dex_cache,
- uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src) {
- FlushAllRegs();
- // May generate a call - use explicit registers.
- LockCallTemps();
- RegStorage method_reg = TargetReg(kArg1, kRef); // kArg1 gets current Method*.
- LoadCurrMethodDirect(method_reg);
- RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class*.
- RegStorage ref_reg = TargetReg(kArg0, kRef); // kArg2 will hold the ref.
- // Reference must end up in kArg0.
- if (needs_access_check) {
- // Check we have access to type_idx and if not throw IllegalAccessError,
- // Caller function returns Class* in kArg0.
- if (cu_->target64) {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- } else {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
- type_idx, true);
- }
- OpRegCopy(class_reg, TargetReg(kRet0, kRef));
- LoadValueDirectFixed(rl_src, ref_reg);
- } else if (use_declaring_class) {
- LoadValueDirectFixed(rl_src, ref_reg);
- LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg, kNotVolatile);
- } else {
- // Load dex cache entry into class_reg (kArg2).
- LoadValueDirectFixed(rl_src, ref_reg);
- LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg, kNotVolatile);
- int32_t offset_of_type =
- mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
- (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
- LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
- if (!can_assume_type_is_in_dex_cache) {
- // Need to test presence of type in dex cache at runtime.
- LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
- // Type is not resolved. Call out to helper, which will return resolved type in kRet0/kArg0.
- if (cu_->target64) {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
- } else {
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
- }
- OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path.
- LoadValueDirectFixed(rl_src, ref_reg); /* Reload Ref. */
- // Rejoin code paths
- LIR* hop_target = NewLIR0(kPseudoTargetLabel);
- hop_branch->target = hop_target;
- }
- }
- /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result. */
- RegLocation rl_result = GetReturn(kRefReg);
-
- // On x86-64 kArg0 is not EAX, so we have to copy ref from kArg0 to EAX.
- if (cu_->target64) {
- OpRegCopy(rl_result.reg, ref_reg);
- }
-
- // For 32-bit, SETcc only works with EAX..EDX.
- DCHECK_LT(rl_result.reg.GetRegNum(), 4);
-
- // Is the class NULL?
- LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
-
- RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg2 will hold the Class*.
- /* Load object->klass_. */
- DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg,
- kNotVolatile);
- /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
- LIR* branchover = nullptr;
- if (type_known_final) {
- // Ensure top 3 bytes of result are 0.
- LoadConstant(rl_result.reg, 0);
- OpRegReg(kOpCmp, ref_class_reg, class_reg);
- // Set the low byte of the result to 0 or 1 from the compare condition code.
- NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
- } else {
- if (!type_known_abstract) {
- LoadConstant(rl_result.reg, 1); // Assume result succeeds.
- branchover = OpCmpBranch(kCondEq, ref_class_reg, class_reg, NULL);
- }
- OpRegCopy(TargetReg(kArg0, kRef), class_reg);
- if (cu_->target64) {
- OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
- } else {
- OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
- }
- }
- // TODO: only clobber when type isn't final?
- ClobberCallerSave();
- /* Branch targets here. */
- LIR* target = NewLIR0(kPseudoTargetLabel);
- StoreValue(rl_dest, rl_result);
- branch1->target = target;
- if (branchover != nullptr) {
- branchover->target = target;
- }
-}
-
void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_lhs, RegLocation rl_rhs) {
OpKind op = kOpBkpt;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index bb1f379..95941e0 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -24,6 +24,7 @@
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"
+#include "utils/dwarf_cfi.h"
namespace art {
@@ -362,6 +363,7 @@
int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
static_cast<uint32_t>(lir->operands[operand_number+1]));
buf += StringPrintf("%" PRId64, value);
+ break;
}
case 'p': {
EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
@@ -755,10 +757,6 @@
return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}
-bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
- return true;
-}
-
RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
// X86_64 can handle any size.
if (cu_->target64) {
@@ -875,21 +873,23 @@
return new X86Mir2Lir(cu, mir_graph, arena);
}
-// Not used in x86
-RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
- LOG(FATAL) << "Unexpected use of LoadHelper in x86";
- return RegStorage::InvalidReg();
-}
-
-// Not used in x86
-RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
+// Not used in x86(-64)
+RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
return RegStorage::InvalidReg();
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
- LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
- return nullptr;
+ // First load the pointer in fs:[suspend-trigger] into eax
+ // Then use a test instruction to indirect via that address.
+ if (cu_->target64) {
+ NewLIR2(kX86Mov64RT, rs_rAX.GetReg(),
+ Thread::ThreadSuspendTriggerOffset<8>().Int32Value());
+ } else {
+ NewLIR2(kX86Mov32RT, rs_rAX.GetReg(),
+ Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
+ }
+ return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
}
uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
@@ -977,19 +977,21 @@
method_address_insns_.Insert(move);
}
-void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
+void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+ SpecialTargetRegister symbolic_reg) {
/*
* For x86, just generate a 32 bit move immediate instruction, that will be filled
* in at 'link time'. For now, put a unique value based on target to ensure that
* code deduplication works.
*/
- const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
+ const DexFile::TypeId& id = dex_file.GetTypeId(type_idx);
uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
// Generate the move instruction with the unique pointer and save index and type.
LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
TargetReg(symbolic_reg, kNotWide).GetReg(),
- static_cast<int>(ptr), type_idx);
+ static_cast<int>(ptr), type_idx,
+ WrapPointer(const_cast<DexFile*>(&dex_file)));
AppendLIR(move);
class_type_address_insns_.Insert(move);
}
@@ -1013,19 +1015,6 @@
return call;
}
-/*
- * @brief Enter a 32 bit quantity into a buffer
- * @param buf buffer.
- * @param data Data value.
- */
-
-static void PushWord(std::vector<uint8_t>&buf, int32_t data) {
- buf.push_back(data & 0xff);
- buf.push_back((data >> 8) & 0xff);
- buf.push_back((data >> 16) & 0xff);
- buf.push_back((data >> 24) & 0xff);
-}
-
void X86Mir2Lir::InstallLiteralPools() {
// These are handled differently for x86.
DCHECK(code_literal_list_ == nullptr);
@@ -1046,10 +1035,10 @@
align_size--;
}
for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
- PushWord(code_buffer_, p->operands[0]);
- PushWord(code_buffer_, p->operands[1]);
- PushWord(code_buffer_, p->operands[2]);
- PushWord(code_buffer_, p->operands[3]);
+ PushWord(&code_buffer_, p->operands[0]);
+ PushWord(&code_buffer_, p->operands[1]);
+ PushWord(&code_buffer_, p->operands[2]);
+ PushWord(&code_buffer_, p->operands[3]);
}
}
@@ -1074,12 +1063,16 @@
for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
LIR* p = class_type_address_insns_.Get(i);
DCHECK_EQ(p->opcode, kX86Mov32RI);
+
+ const DexFile* class_dex_file =
+ reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));
uint32_t target_method_idx = p->operands[2];
// The offset to patch is the last 4 bytes of the instruction.
int patch_offset = p->offset + p->flags.size - 4;
cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
- cu_->method_idx, target_method_idx, patch_offset);
+ cu_->method_idx, target_method_idx, class_dex_file,
+ patch_offset);
}
// And now the PC-relative calls to methods.
@@ -1104,11 +1097,6 @@
}
bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
- if (cu_->target64) {
- // TODO: Implement ArrayCOpy intrinsic for x86_64
- return false;
- }
-
RegLocation rl_src = info->args[0];
RegLocation rl_srcPos = info->args[1];
RegLocation rl_dst = info->args[2];
@@ -1121,31 +1109,32 @@
return false;
}
ClobberCallerSave();
- LockCallTemps(); // Using fixed registers
- LoadValueDirectFixed(rl_src , rs_rAX);
- LoadValueDirectFixed(rl_dst , rs_rCX);
- LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX , rs_rCX, nullptr);
- LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX , 0, nullptr);
- LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX , 0, nullptr);
- LoadValueDirectFixed(rl_length , rs_rDX);
- LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX , 0, nullptr);
- LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX , 128, nullptr);
- LoadValueDirectFixed(rl_src , rs_rAX);
- LoadWordDisp(rs_rAX , mirror::Array::LengthOffset().Int32Value(), rs_rAX);
+ LockCallTemps(); // Using fixed registers.
+ RegStorage tmp_reg = cu_->target64 ? rs_r11 : rs_rBX;
+ LoadValueDirectFixed(rl_src, rs_rAX);
+ LoadValueDirectFixed(rl_dst, rs_rCX);
+ LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
+ LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
+ LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
+ LoadValueDirectFixed(rl_length, rs_rDX);
+ // If the length of the copy is > 128 characters (256 bytes) or negative then go slow path.
+ LIR* len_too_big = OpCmpImmBranch(kCondHi, rs_rDX, 128, nullptr);
+ LoadValueDirectFixed(rl_src, rs_rAX);
+ LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
LIR* src_bad_len = nullptr;
LIR* srcPos_negative = nullptr;
if (!rl_srcPos.is_const) {
- LoadValueDirectFixed(rl_srcPos , rs_rBX);
- srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr);
- OpRegReg(kOpAdd, rs_rBX, rs_rDX);
- src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ LoadValueDirectFixed(rl_srcPos, tmp_reg);
+ srcPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
+ OpRegReg(kOpAdd, tmp_reg, rs_rDX);
+ src_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
} else {
- int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
+ int32_t pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
if (pos_val == 0) {
- src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr);
+ src_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
} else {
- OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
- src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ OpRegRegImm(kOpAdd, tmp_reg, rs_rDX, pos_val);
+ src_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
}
}
LIR* dstPos_negative = nullptr;
@@ -1153,49 +1142,49 @@
LoadValueDirectFixed(rl_dst, rs_rAX);
LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
if (!rl_dstPos.is_const) {
- LoadValueDirectFixed(rl_dstPos , rs_rBX);
- dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr);
- OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX);
- dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ LoadValueDirectFixed(rl_dstPos, tmp_reg);
+ dstPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
+ OpRegRegReg(kOpAdd, tmp_reg, tmp_reg, rs_rDX);
+ dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
} else {
- int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
+ int32_t pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
if (pos_val == 0) {
- dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr);
+ dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
} else {
- OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
- dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ OpRegRegImm(kOpAdd, tmp_reg, rs_rDX, pos_val);
+ dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
}
}
- // everything is checked now
- LoadValueDirectFixed(rl_src , rs_rAX);
- LoadValueDirectFixed(rl_dst , rs_rBX);
- LoadValueDirectFixed(rl_srcPos , rs_rCX);
+ // Everything is checked now.
+ LoadValueDirectFixed(rl_src, rs_rAX);
+ LoadValueDirectFixed(rl_dst, tmp_reg);
+ LoadValueDirectFixed(rl_srcPos, rs_rCX);
NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
- rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value());
- // RAX now holds the address of the first src element to be copied
+ rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
+ // RAX now holds the address of the first src element to be copied.
- LoadValueDirectFixed(rl_dstPos , rs_rCX);
- NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
- rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value() );
- // RBX now holds the address of the first dst element to be copied
+ LoadValueDirectFixed(rl_dstPos, rs_rCX);
+ NewLIR5(kX86Lea32RA, tmp_reg.GetReg(), tmp_reg.GetReg(),
+ rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
+ // RBX now holds the address of the first dst element to be copied.
- // check if the number of elements to be copied is odd or even. If odd
+ // Check if the number of elements to be copied is odd or even. If odd
// then copy the first element (so that the remaining number of elements
// is even).
- LoadValueDirectFixed(rl_length , rs_rCX);
+ LoadValueDirectFixed(rl_length, rs_rCX);
OpRegImm(kOpAnd, rs_rCX, 1);
LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
OpRegImm(kOpSub, rs_rDX, 1);
LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
- StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
+ StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
- // since the remaining number of elements is even, we will copy by
+ // Since the remaining number of elements is even, we will copy by
// two elements at a time.
- LIR *beginLoop = NewLIR0(kPseudoTargetLabel);
- LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX , 0, nullptr);
+ LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
+ LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
OpRegImm(kOpSub, rs_rDX, 2);
LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
- StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
+ StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSingle);
OpUnconditionalBranch(beginLoop);
LIR *check_failed = NewLIR0(kPseudoTargetLabel);
LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
@@ -1203,7 +1192,6 @@
jmp_to_ret->target = return_point;
jmp_to_begin_loop->target = beginLoop;
src_dst_same->target = check_failed;
- len_negative->target = check_failed;
len_too_big->target = check_failed;
src_null_branch->target = check_failed;
if (srcPos_negative != nullptr)
@@ -1225,19 +1213,12 @@
* otherwise bails to standard library code.
*/
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
- ClobberCallerSave();
- LockCallTemps(); // Using fixed registers
-
- // EAX: 16 bit character being searched.
- // ECX: count: number of words to be searched.
- // EDI: String being searched.
- // EDX: temporary during execution.
- // EBX or R11: temporary during execution (depending on mode).
-
RegLocation rl_obj = info->args[0];
RegLocation rl_char = info->args[1];
RegLocation rl_start; // Note: only present in III flavor of IndexOf.
- RegStorage tmpReg = cu_->target64 ? rs_r11 : rs_rBX;
+ // RBX is callee-save register in 64-bit mode.
+ RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
+ int start_value = -1;
uint32_t char_value =
rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;
@@ -1248,6 +1229,31 @@
}
// Okay, we are committed to inlining this.
+ // EAX: 16 bit character being searched.
+ // ECX: count: number of words to be searched.
+ // EDI: String being searched.
+ // EDX: temporary during execution.
+ // EBX or R11: temporary during execution (depending on mode).
+ // REP SCASW: search instruction.
+
+ FlushReg(rs_rAX);
+ Clobber(rs_rAX);
+ LockTemp(rs_rAX);
+ FlushReg(rs_rCX);
+ Clobber(rs_rCX);
+ LockTemp(rs_rCX);
+ FlushReg(rs_rDX);
+ Clobber(rs_rDX);
+ LockTemp(rs_rDX);
+ FlushReg(rs_tmp);
+ Clobber(rs_tmp);
+ LockTemp(rs_tmp);
+ if (cu_->target64) {
+ FlushReg(rs_rDI);
+ Clobber(rs_rDI);
+ LockTemp(rs_rDI);
+ }
+
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
@@ -1256,13 +1262,13 @@
GenNullCheck(rs_rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
- // Does the character fit in 16 bits?
- LIR* slowpath_branch = nullptr;
+ LIR *slowpath_branch = nullptr, *length_compare = nullptr;
+
+ // We need the value in EAX.
if (rl_char.is_const) {
- // We need the value in EAX.
LoadConstantNoClobber(rs_rAX, char_value);
} else {
- // Character is not a constant; compare at runtime.
+ // Does the character fit in 16 bits? Compare it at runtime.
LoadValueDirectFixed(rl_char, rs_rAX);
slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
}
@@ -1277,23 +1283,33 @@
// Start of char data with array_.
int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
- // Character is in EAX.
- // Object pointer is in EDX.
-
- // We need to preserve EDI, but have no spare registers, so push it on the stack.
- // We have to remember that all stack addresses after this are offset by sizeof(EDI).
- NewLIR1(kX86Push32R, rs_rDI.GetReg());
-
// Compute the number of words to search in to rCX.
Load32Disp(rs_rDX, count_offset, rs_rCX);
- LIR *length_compare = nullptr;
- int start_value = 0;
- bool is_index_on_stack = false;
+
+ // Possible signal here due to null pointer dereference.
+ // Note that the signal handler will expect the top word of
+ // the stack to be the ArtMethod*. If the PUSH edi instruction
+ // below is ahead of the load above then this will not be true
+ // and the signal handler will not work.
+ MarkPossibleNullPointerException(0);
+
+ if (!cu_->target64) {
+ // EDI is callee-save register in 32-bit mode.
+ NewLIR1(kX86Push32R, rs_rDI.GetReg());
+ }
+
if (zero_based) {
+ // Start index is not present.
// We have to handle an empty string. Use special instruction JECXZ.
length_compare = NewLIR0(kX86Jecxz8);
+
+ // Copy the number of words to search in a temporary register.
+ // We will use the register at the end to calculate result.
+ OpRegReg(kOpMov, rs_tmp, rs_rCX);
} else {
+ // Start index is present.
rl_start = info->args[2];
+
// We have to offset by the start index.
if (rl_start.is_const) {
start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
@@ -1301,73 +1317,55 @@
// Is the start > count?
length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);
+ OpRegImm(kOpMov, rs_rDI, start_value);
+
+ // Copy the number of words to search in a temporary register.
+ // We will use the register at the end to calculate result.
+ OpRegReg(kOpMov, rs_tmp, rs_rCX);
if (start_value != 0) {
+ // Decrease the number of words to search by the start index.
OpRegImm(kOpSub, rs_rCX, start_value);
}
} else {
- // Runtime start index.
- rl_start = UpdateLocTyped(rl_start, kCoreReg);
- if (rl_start.location == kLocPhysReg) {
- // Handle "start index < 0" case.
- OpRegReg(kOpXor, tmpReg, tmpReg);
- OpRegReg(kOpCmp, rl_start.reg, tmpReg);
- OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, tmpReg);
-
- // The length of the string should be greater than the start index.
- length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
- OpRegReg(kOpSub, rs_rCX, rl_start.reg);
- if (rl_start.reg == rs_rDI) {
- // The special case. We will use EDI further, so lets put start index to stack.
- NewLIR1(kX86Push32R, rs_rDI.GetReg());
- is_index_on_stack = true;
- }
- } else {
+ // Handle "start index < 0" case.
+ if (!cu_->target64 && rl_start.location != kLocPhysReg) {
// Load the start index from stack, remembering that we pushed EDI.
- int displacement = SRegOffset(rl_start.s_reg_low) +
- (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
+ int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
{
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Load32Disp(rs_rX86_SP, displacement, tmpReg);
+ Load32Disp(rs_rX86_SP, displacement, rs_rDI);
}
- OpRegReg(kOpXor, rs_rDI, rs_rDI);
- OpRegReg(kOpCmp, tmpReg, rs_rDI);
- OpCondRegReg(kOpCmov, kCondLt, tmpReg, rs_rDI);
-
- length_compare = OpCmpBranch(kCondLe, rs_rCX, tmpReg, nullptr);
- OpRegReg(kOpSub, rs_rCX, tmpReg);
- // Put the start index to stack.
- NewLIR1(kX86Push32R, tmpReg.GetReg());
- is_index_on_stack = true;
+ } else {
+ LoadValueDirectFixed(rl_start, rs_rDI);
}
+ OpRegReg(kOpXor, rs_tmp, rs_tmp);
+ OpRegReg(kOpCmp, rs_rDI, rs_tmp);
+ OpCondRegReg(kOpCmov, kCondLt, rs_rDI, rs_tmp);
+
+ // The length of the string should be greater than the start index.
+ length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rDI, nullptr);
+
+ // Copy the number of words to search in a temporary register.
+ // We will use the register at the end to calculate result.
+ OpRegReg(kOpMov, rs_tmp, rs_rCX);
+
+ // Decrease the number of words to search by the start index.
+ OpRegReg(kOpSub, rs_rCX, rs_rDI);
}
}
- DCHECK(length_compare != nullptr);
- // ECX now contains the count in words to be searched.
-
- // Load the address of the string into R11 or EBX (depending on mode).
+ // Load the address of the string into EDI.
+ // In case of start index we have to add the address to existing value in EDI.
// The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
- Load32Disp(rs_rDX, value_offset, rs_rDI);
- Load32Disp(rs_rDX, offset_offset, tmpReg);
- OpLea(tmpReg, rs_rDI, tmpReg, 1, data_offset);
-
- // Now compute into EDI where the search will start.
- if (zero_based || rl_start.is_const) {
- if (start_value == 0) {
- OpRegCopy(rs_rDI, tmpReg);
- } else {
- NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), tmpReg.GetReg(), 2 * start_value);
- }
+ if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) {
+ Load32Disp(rs_rDX, offset_offset, rs_rDI);
} else {
- if (is_index_on_stack == true) {
- // Load the start index from stack.
- NewLIR1(kX86Pop32R, rs_rDX.GetReg());
- OpLea(rs_rDI, tmpReg, rs_rDX, 1, 0);
- } else {
- OpLea(rs_rDI, tmpReg, rl_start.reg, 1, 0);
- }
+ OpRegMem(kOpAdd, rs_rDI, rs_rDX, offset_offset);
}
+ OpRegImm(kOpLsl, rs_rDI, 1);
+ OpRegMem(kOpAdd, rs_rDI, rs_rDX, value_offset);
+ OpRegImm(kOpAdd, rs_rDI, data_offset);
// EDI now contains the start of the string to be searched.
// We are all prepared to do the search for the character.
@@ -1377,10 +1375,9 @@
LIR* failed_branch = OpCondBranch(kCondNe, nullptr);
// yes, we matched. Compute the index of the result.
- // index = ((curr_ptr - orig_ptr) / 2) - 1.
- OpRegReg(kOpSub, rs_rDI, tmpReg);
- OpRegImm(kOpAsr, rs_rDI, 1);
- NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
+ OpRegReg(kOpSub, rs_tmp, rs_rCX);
+ NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_tmp.GetReg(), -1);
+
LIR *all_done = NewLIR1(kX86Jmp8, 0);
// Failed to match; return -1.
@@ -1391,8 +1388,9 @@
// And join up at the end.
all_done->target = NewLIR0(kPseudoTargetLabel);
- // Restore EDI from the stack.
- NewLIR1(kX86Pop32R, rs_rDI.GetReg());
+
+ if (!cu_->target64)
+ NewLIR1(kX86Pop32R, rs_rDI.GetReg());
// Out of line code returns here.
if (slowpath_branch != nullptr) {
@@ -1401,130 +1399,92 @@
}
StoreValue(rl_dest, rl_return);
+
+ FreeTemp(rs_rAX);
+ FreeTemp(rs_rCX);
+ FreeTemp(rs_rDX);
+ FreeTemp(rs_tmp);
+ if (cu_->target64) {
+ FreeTemp(rs_rDI);
+ }
+
return true;
}
-/*
- * @brief Enter an 'advance LOC' into the FDE buffer
- * @param buf FDE buffer.
- * @param increment Amount by which to increase the current location.
- */
-static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) {
- if (increment < 64) {
- // Encoding in opcode.
- buf.push_back(0x1 << 6 | increment);
- } else if (increment < 256) {
- // Single byte delta.
- buf.push_back(0x02);
- buf.push_back(increment);
- } else if (increment < 256 * 256) {
- // Two byte delta.
- buf.push_back(0x03);
- buf.push_back(increment & 0xff);
- buf.push_back((increment >> 8) & 0xff);
+static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_id) {
+ if (is_x86_64) {
+ switch (art_reg_id) {
+ case 3 : *dwarf_reg_id = 3; return true; // %rbx
+ // This is the only discrepancy between ART & DWARF register numbering.
+ case 5 : *dwarf_reg_id = 6; return true; // %rbp
+ case 12: *dwarf_reg_id = 12; return true; // %r12
+ case 13: *dwarf_reg_id = 13; return true; // %r13
+ case 14: *dwarf_reg_id = 14; return true; // %r14
+ case 15: *dwarf_reg_id = 15; return true; // %r15
+ default: return false; // Should not get here
+ }
} else {
- // Four byte delta.
- buf.push_back(0x04);
- PushWord(buf, increment);
+ switch (art_reg_id) {
+ case 5: *dwarf_reg_id = 5; return true; // %ebp
+ case 6: *dwarf_reg_id = 6; return true; // %esi
+ case 7: *dwarf_reg_id = 7; return true; // %edi
+ default: return false; // Should not get here
+ }
}
}
-
-std::vector<uint8_t>* X86CFIInitialization() {
- return X86Mir2Lir::ReturnCommonCallFrameInformation();
-}
-
-std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
- std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
-
- // Length of the CIE (except for this field).
- PushWord(*cfi_info, 16);
-
- // CIE id.
- PushWord(*cfi_info, 0xFFFFFFFFU);
-
- // Version: 3.
- cfi_info->push_back(0x03);
-
- // Augmentation: empty string.
- cfi_info->push_back(0x0);
-
- // Code alignment: 1.
- cfi_info->push_back(0x01);
-
- // Data alignment: -4.
- cfi_info->push_back(0x7C);
-
- // Return address register (R8).
- cfi_info->push_back(0x08);
-
- // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
- cfi_info->push_back(0x0C);
- cfi_info->push_back(0x04);
- cfi_info->push_back(0x04);
-
- // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
- cfi_info->push_back(0x2 << 6 | 0x08);
- cfi_info->push_back(0x01);
-
- // And 2 Noops to align to 4 byte boundary.
- cfi_info->push_back(0x0);
- cfi_info->push_back(0x0);
-
- DCHECK_EQ(cfi_info->size() & 3, 0U);
- return cfi_info;
-}
-
-static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
- uint8_t buffer[12];
- uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
- for (uint8_t *p = buffer; p < ptr; p++) {
- buf.push_back(*p);
- }
-}
-
-std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
- std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
+std::vector<uint8_t>* X86Mir2Lir::ReturnFrameDescriptionEntry() {
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
// Generate the FDE for the method.
DCHECK_NE(data_offset_, 0U);
- // Length (will be filled in later in this routine).
- PushWord(*cfi_info, 0);
-
- // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
- // one CIE for the whole debug_frame section.
- PushWord(*cfi_info, 0);
-
- // 'initial_location' (filled in by linker).
- PushWord(*cfi_info, 0);
-
- // 'address_range' (number of bytes in the method).
- PushWord(*cfi_info, data_offset_);
+ WriteFDEHeader(cfi_info);
+ WriteFDEAddressRange(cfi_info, data_offset_);
// The instructions in the FDE.
if (stack_decrement_ != nullptr) {
// Advance LOC to just past the stack decrement.
uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
- AdvanceLoc(*cfi_info, pc);
+ DW_CFA_advance_loc(cfi_info, pc);
// Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
- cfi_info->push_back(0x0e);
- EncodeUnsignedLeb128(*cfi_info, frame_size_);
+ DW_CFA_def_cfa_offset(cfi_info, frame_size_);
+
+ // Handle register spills
+ const uint32_t kSpillInstLen = (cu_->target64) ? 5 : 4;
+ const int kDataAlignmentFactor = (cu_->target64) ? -8 : -4;
+ uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
+ int offset = -(GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ pc += kSpillInstLen;
+
+ // Advance LOC past this instruction
+ DW_CFA_advance_loc(cfi_info, kSpillInstLen);
+
+ int dwarf_reg_id;
+ if (ARTRegIDToDWARFRegID(cu_->target64, reg, &dwarf_reg_id)) {
+ // DW_CFA_offset_extended_sf reg offset
+ DW_CFA_offset_extended_sf(cfi_info, dwarf_reg_id, offset / kDataAlignmentFactor);
+ }
+
+ offset += GetInstructionSetPointerSize(cu_->instruction_set);
+ }
+ }
// We continue with that stack until the epilogue.
if (stack_increment_ != nullptr) {
uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
- AdvanceLoc(*cfi_info, new_pc - pc);
+ DW_CFA_advance_loc(cfi_info, new_pc - pc);
// We probably have code snippets after the epilogue, so save the
// current state: DW_CFA_remember_state.
- cfi_info->push_back(0x0a);
+ DW_CFA_remember_state(cfi_info);
- // We have now popped the stack: DW_CFA_def_cfa_offset 4. There is only the return
- // PC on the stack now.
- cfi_info->push_back(0x0e);
- EncodeUnsignedLeb128(*cfi_info, 4);
+ // We have now popped the stack: DW_CFA_def_cfa_offset 4/8.
+ // There is only the return PC on the stack now.
+ DW_CFA_def_cfa_offset(cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
// Everything after that is the same as before the epilogue.
// Stack bump was followed by RET instruction.
@@ -1532,25 +1492,16 @@
if (post_ret_insn != nullptr) {
pc = new_pc;
new_pc = post_ret_insn->offset;
- AdvanceLoc(*cfi_info, new_pc - pc);
+ DW_CFA_advance_loc(cfi_info, new_pc - pc);
// Restore the state: DW_CFA_restore_state.
- cfi_info->push_back(0x0b);
+ DW_CFA_restore_state(cfi_info);
}
}
}
- // Padding to a multiple of 4
- while ((cfi_info->size() & 3) != 0) {
- // DW_CFA_nop is encoded as 0.
- cfi_info->push_back(0);
- }
+ PadCFI(cfi_info);
+ WriteCFILength(cfi_info);
- // Set the length of the FDE inside the generated bytes.
- uint32_t length = cfi_info->size() - 4;
- (*cfi_info)[0] = length;
- (*cfi_info)[1] = length >> 8;
- (*cfi_info)[2] = length >> 16;
- (*cfi_info)[3] = length >> 24;
return cfi_info;
}
@@ -2478,7 +2429,7 @@
in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
- in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
+ info->args[last_mapped_in].wide ? 2 : 1;
int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
// First of all, check whether it makes sense to use bulk copying
@@ -2682,7 +2633,7 @@
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
*pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
@@ -2697,4 +2648,89 @@
return call_state;
}
+bool X86Mir2Lir::GenInlinedCharAt(CallInfo* info) {
+ // Location of reference to data array
+ int value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count
+ int count_offset = mirror::String::CountOffset().Int32Value();
+ // Starting offset within data array
+ int offset_offset = mirror::String::OffsetOffset().Int32Value();
+ // Start of char data with array_
+ int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
+
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_idx = info->args[1];
+ rl_obj = LoadValue(rl_obj, kRefReg);
+ // X86 wants to avoid putting a constant index into a register.
+ if (!rl_idx.is_const) {
+ rl_idx = LoadValue(rl_idx, kCoreReg);
+ }
+ RegStorage reg_max;
+ GenNullCheck(rl_obj.reg, info->opt_flags);
+ bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
+ LIR* range_check_branch = nullptr;
+ RegStorage reg_off;
+ RegStorage reg_ptr;
+ if (range_check) {
+ // On x86, we can compare to memory directly
+ // Set up a launch pad to allow retry in case of bounds violation.
+ if (rl_idx.is_const) {
+ LIR* comparison;
+ range_check_branch = OpCmpMemImmBranch(
+ kCondUlt, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
+ mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
+ MarkPossibleNullPointerExceptionAfter(0, comparison);
+ } else {
+ OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
+ MarkPossibleNullPointerException(0);
+ range_check_branch = OpCondBranch(kCondUge, nullptr);
+ }
+ }
+ reg_off = AllocTemp();
+ reg_ptr = AllocTempRef();
+ Load32Disp(rl_obj.reg, offset_offset, reg_off);
+ LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
+ if (rl_idx.is_const) {
+ OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
+ } else {
+ OpRegReg(kOpAdd, reg_off, rl_idx.reg);
+ }
+ FreeTemp(rl_obj.reg);
+ if (rl_idx.location == kLocPhysReg) {
+ FreeTemp(rl_idx.reg);
+ }
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
+ FreeTemp(reg_off);
+ FreeTemp(reg_ptr);
+ StoreValue(rl_dest, rl_result);
+ if (range_check) {
+ DCHECK(range_check_branch != nullptr);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've already null checked.
+ AddIntrinsicSlowPath(info, range_check_branch);
+ }
+ return true;
+}
+
+bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
+ RegLocation rl_dest = InlineTarget(info);
+
+ // Early exit if the result is unused.
+ if (rl_dest.orig_sreg < 0) {
+ return true;
+ }
+
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+
+ if (cu_->target64) {
+ OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>());
+ } else {
+ OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>());
+ }
+
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
} // namespace art
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 045e58e..a48613f 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -565,6 +565,7 @@
bool is_fp = r_dest.IsFloat();
// TODO: clean this up once we fully recognize 64-bit storage containers.
if (is_fp) {
+ DCHECK(r_dest.IsDouble());
if (value == 0) {
return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
} else if (base_of_code_ != nullptr) {
@@ -591,18 +592,26 @@
kDouble, kNotVolatile);
res->target = data_target;
res->flags.fixup = kFixupLoad;
+ Clobber(rl_method.reg);
store_method_addr_used_ = true;
} else {
- if (val_lo == 0) {
- res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
+ if (r_dest.IsPair()) {
+ if (val_lo == 0) {
+ res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
+ } else {
+ res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
+ }
+ if (val_hi != 0) {
+ RegStorage r_dest_hi = AllocTempDouble();
+ LoadConstantNoClobber(r_dest_hi, val_hi);
+ NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
+ FreeTemp(r_dest_hi);
+ }
} else {
- res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
- }
- if (val_hi != 0) {
- RegStorage r_dest_hi = AllocTempDouble();
- LoadConstantNoClobber(r_dest_hi, val_hi);
- NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
- FreeTemp(r_dest_hi);
+ RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
+ res = LoadConstantWide(r_temp, value);
+ OpRegCopyWide(r_dest, r_temp);
+ FreeTemp(r_temp);
}
}
} else {
@@ -684,9 +693,9 @@
} else {
DCHECK(!r_dest.IsFloat()); // Make sure we're not still using a pair here.
if (r_base == r_dest.GetLow()) {
- load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
+ load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
displacement + HIWORD_OFFSET);
- load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
+ load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
} else {
load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
@@ -712,16 +721,16 @@
if (r_dest.GetHigh() == r_index) {
// We can't use either register for the first load.
RegStorage temp = AllocTemp();
- load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
- load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
OpRegCopy(r_dest.GetHigh(), temp);
FreeTemp(temp);
} else {
- load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + HIWORD_OFFSET);
- load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
+ load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
displacement + LOWORD_OFFSET);
}
} else {
@@ -744,6 +753,7 @@
}
}
+ // Always return first load generated as this might cause a fault if base is nullptr.
return load;
}
@@ -878,9 +888,12 @@
}
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target) {
- NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,
- check_value);
+ int offset, int check_value, LIR* target, LIR** compare) {
+ LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+ offset, check_value);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
LIR* branch = OpCondBranch(cond, target);
return branch;
}
@@ -1003,8 +1016,8 @@
}
void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
- // If this is a double literal, we will want it in the literal pool.
- if (use.is_const) {
+ // If this is a double literal, we will want it in the literal pool on 32b platforms.
+ if (use.is_const && !cu_->target64) {
store_method_addr_ = true;
}
}
@@ -1038,14 +1051,21 @@
}
void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+ // For now this is only actual for x86-32.
+ if (cu_->target64) {
+ return;
+ }
+
uint32_t index = mir->dalvikInsn.vB;
if (!(mir->optimization_flags & MIR_INLINED)) {
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+ DexFileMethodInliner* method_inliner =
+ cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
InlineMethod method;
- if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
- ->IsIntrinsic(index, &method)) {
+ if (method_inliner->IsIntrinsic(index, &method)) {
switch (method.opcode) {
case kIntrinsicAbsDouble:
+ case kIntrinsicMinMaxDouble:
store_method_addr_ = true;
break;
default:
@@ -1054,4 +1074,13 @@
}
}
}
+
+LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ if (cu_->target64) {
+ return OpThreadMem(op, GetThreadOffset<8>(trampoline));
+ } else {
+ return OpThreadMem(op, GetThreadOffset<4>(trampoline));
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 5657381..500c6b8 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -499,6 +499,7 @@
UnaryOpcode(kX86Test, RI, MI, AI),
kX86Test32RR,
kX86Test64RR,
+ kX86Test32RM,
UnaryOpcode(kX86Not, R, M, A),
UnaryOpcode(kX86Neg, R, M, A),
UnaryOpcode(kX86Mul, DaR, DaM, DaA),
@@ -533,10 +534,14 @@
Binary0fOpCode(kX86Ucomiss), // unordered float compare
Binary0fOpCode(kX86Comisd), // double compare
Binary0fOpCode(kX86Comiss), // float compare
- Binary0fOpCode(kX86Orps), // or of floating point registers
- Binary0fOpCode(kX86Xorps), // xor of floating point registers
- Binary0fOpCode(kX86Addsd), // double add
- Binary0fOpCode(kX86Addss), // float add
+ Binary0fOpCode(kX86Orpd), // double logical OR
+ Binary0fOpCode(kX86Orps), // float logical OR
+ Binary0fOpCode(kX86Andpd), // double logical AND
+ Binary0fOpCode(kX86Andps), // float logical AND
+ Binary0fOpCode(kX86Xorpd), // double logical XOR
+ Binary0fOpCode(kX86Xorps), // float logical XOR
+ Binary0fOpCode(kX86Addsd), // double ADD
+ Binary0fOpCode(kX86Addss), // float ADD
Binary0fOpCode(kX86Mulsd), // double multiply
Binary0fOpCode(kX86Mulss), // float multiply
Binary0fOpCode(kX86Cvtsd2ss), // double to float
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 7c9614f..cdf71b6 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -41,6 +41,11 @@
void ClassRejected(ClassReference ref) OVERRIDE;
+ // We are running in an environment where we can call patchoat safely so we should.
+ bool IsRelocationPossible() OVERRIDE {
+ return true;
+ }
+
private:
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index addd628..706933a 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -192,6 +192,18 @@
return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
}
+ static constexpr bool Is32Bit(uint16_t reg) {
+ return ((reg & kShapeMask) == k32BitSolo);
+ }
+
+ static constexpr bool Is64Bit(uint16_t reg) {
+ return ((reg & k64BitMask) == k64Bits);
+ }
+
+ static constexpr bool Is64BitSolo(uint16_t reg) {
+ return ((reg & kShapeMask) == k64BitSolo);
+ }
+
// Used to retrieve either the low register of a pair, or the only register.
int GetReg() const {
DCHECK(!IsPair()) << "reg_ = 0x" << std::hex << reg_;
@@ -265,11 +277,11 @@
}
static constexpr bool SameRegType(RegStorage reg1, RegStorage reg2) {
- return (reg1.IsDouble() == reg2.IsDouble()) && (reg1.IsSingle() == reg2.IsSingle());
+ return ((reg1.reg_ & kShapeTypeMask) == (reg2.reg_ & kShapeTypeMask));
}
static constexpr bool SameRegType(int reg1, int reg2) {
- return (IsDouble(reg1) == IsDouble(reg2)) && (IsSingle(reg1) == IsSingle(reg2));
+ return ((reg1 & kShapeTypeMask) == (reg2 & kShapeTypeMask));
}
// Create a 32-bit solo.
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 01c8f80..1b3f2a1 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -216,7 +216,7 @@
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) ||
(inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
- const verifier::RegType&
+ verifier::RegType&
reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
if (!reg_type.HasClass()) {
@@ -284,18 +284,18 @@
const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
bool is_safe_cast = false;
if (code == Instruction::CHECK_CAST) {
- const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
- const verifier::RegType& cast_type =
+ verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+ verifier::RegType& cast_type =
method_verifier->ResolveCheckedClass(inst->VRegB_21c());
is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
} else {
- const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
+ verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
// We only know it's safe to assign to an array if the array type is precise. For example,
// an Object[] can have any type of object stored in it, but it may also be assigned a
// String[] in which case the stores need to be of Strings.
if (array_type.IsPreciseReference()) {
- const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
- const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
+ verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
+ verifier::RegType& component_type = method_verifier->GetRegTypeCache()
->GetComponentType(array_type, method_verifier->GetClassLoader());
is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
}
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 892b302..4a3e071 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -324,13 +324,15 @@
}
for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
- if (ssa_rep->fp_use[i])
+ if (ssa_rep->fp_use[i]) {
changed |= SetFp(uses[i]);
}
+ }
for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
- if (ssa_rep->fp_def[i])
+ if (ssa_rep->fp_def[i]) {
changed |= SetFp(defs[i]);
}
+ }
// Special-case handling for moves & Phi
if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
/*
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 89295f2..022ec6b 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -231,8 +231,8 @@
// the super class.
bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
(referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
- resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() &&
- (methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method);
+ resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
+ (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method);
if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 9e88c8d..1233a0d 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -353,7 +353,6 @@
compiler_enable_auto_elf_loading_(NULL),
compiler_get_method_code_addr_(NULL),
support_boot_image_fixup_(instruction_set != kMips),
- cfi_info_(nullptr),
dedupe_code_("dedupe code"),
dedupe_mapping_table_("dedupe mapping table"),
dedupe_vmap_table_("dedupe vmap table"),
@@ -376,11 +375,6 @@
CHECK(image_classes_.get() == nullptr);
}
- // Are we generating CFI information?
- if (compiler_options->GetGenerateGDBInformation()) {
- cfi_info_.reset(compiler_->GetCallFrameInformationInitialization(*this));
- }
-
// Read the profile file if one is provided.
if (!profile_file.empty()) {
profile_present_ = profile_file_.LoadFile(profile_file);
@@ -597,7 +591,7 @@
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != nullptr);
- ResolveDexFile(class_loader, *dex_file, thread_pool, timings);
+ ResolveDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
@@ -811,11 +805,15 @@
bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx) {
if (IsImage() &&
IsImageClass(dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_))) {
- if (kIsDebugBuild) {
+ {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- CHECK(resolved_class != NULL);
+ if (resolved_class == nullptr) {
+ // Erroneous class.
+ stats_->TypeNotInDexCache();
+ return false;
+ }
}
stats_->TypeInDexCache();
return true;
@@ -933,13 +931,13 @@
}
*out_is_finalizable = resolved_class->IsFinalizable();
const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+ const bool support_boot_image_fixup = GetSupportBootImageFixup();
if (compiling_boot) {
// boot -> boot class pointers.
// True if the class is in the image at boot compiling time.
const bool is_image_class = IsImage() && IsImageClass(
dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_));
// True if pc relative load works.
- const bool support_boot_image_fixup = GetSupportBootImageFixup();
if (is_image_class && support_boot_image_fixup) {
*is_type_initialized = resolved_class->IsInitialized();
*use_direct_type_ptr = false;
@@ -952,10 +950,11 @@
// True if the class is in the image at app compiling time.
const bool class_in_image =
Runtime::Current()->GetHeap()->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
- if (class_in_image) {
+ if (class_in_image && support_boot_image_fixup) {
// boot -> app class pointers.
*is_type_initialized = resolved_class->IsInitialized();
- *use_direct_type_ptr = true;
+ // TODO This is somewhat hacky. We should refactor all of this invoke codepath.
+ *use_direct_type_ptr = !GetCompilerOptions().GetIncludePatchInformation();
*direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
return true;
} else {
@@ -968,6 +967,43 @@
}
}
+bool CompilerDriver::CanEmbedReferenceTypeInCode(ClassReference* ref,
+ bool* use_direct_ptr,
+ uintptr_t* direct_type_ptr) {
+ CHECK(ref != nullptr);
+ CHECK(use_direct_ptr != nullptr);
+ CHECK(direct_type_ptr != nullptr);
+
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
+ bool is_initialized;
+ bool unused_finalizable;
+ // Make sure we have a finished Reference class object before attempting to use it.
+ if (!CanEmbedTypeInCode(*reference_class->GetDexCache()->GetDexFile(),
+ reference_class->GetDexTypeIndex(), &is_initialized,
+ use_direct_ptr, direct_type_ptr, &unused_finalizable) ||
+ !is_initialized) {
+ return false;
+ }
+ ref->first = &reference_class->GetDexFile();
+ ref->second = reference_class->GetDexClassDefIndex();
+ return true;
+}
+
+uint32_t CompilerDriver::GetReferenceSlowFlagOffset() const {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+ DCHECK(klass->IsInitialized());
+ return klass->GetSlowPathFlagOffset().Uint32Value();
+}
+
+uint32_t CompilerDriver::GetReferenceDisableFlagOffset() const {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+ DCHECK(klass->IsInitialized());
+ return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
+}
+
void CompilerDriver::ProcessedInstanceField(bool resolved) {
if (!resolved) {
stats_->UnresolvedInstanceField();
@@ -1099,6 +1135,9 @@
*direct_method = 0;
bool use_dex_cache = false;
const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+ // TODO This is somewhat hacky. We should refactor all of this invoke codepath.
+ const bool force_relocations = (compiling_boot ||
+ GetCompilerOptions().GetIncludePatchInformation());
if (compiler_->IsPortable()) {
if (sharp_type != kStatic && sharp_type != kDirect) {
return;
@@ -1109,7 +1148,7 @@
return;
}
// TODO: support patching on all architectures.
- use_dex_cache = compiling_boot && !support_boot_image_fixup_;
+ use_dex_cache = force_relocations && !support_boot_image_fixup_;
}
bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
if (!use_dex_cache) {
@@ -1128,8 +1167,8 @@
if (method_code_in_boot) {
*stats_flags |= kFlagDirectCallToBoot | kFlagDirectMethodToBoot;
}
- if (!use_dex_cache && compiling_boot) {
- if (!IsImageClass(method->GetDeclaringClassDescriptor())) {
+ if (!use_dex_cache && force_relocations) {
+ if (!IsImage() || !IsImageClass(method->GetDeclaringClassDescriptor())) {
// We can only branch directly to Methods that are resolved in the DexCache.
// Otherwise we won't invoke the resolution trampoline.
use_dex_cache = true;
@@ -1150,7 +1189,7 @@
if (dex_method_idx != DexFile::kDexNoIndex) {
target_method->dex_method_index = dex_method_idx;
} else {
- if (compiling_boot && !use_dex_cache) {
+ if (force_relocations && !use_dex_cache) {
target_method->dex_method_index = method->GetDexMethodIndex();
target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
}
@@ -1167,19 +1206,26 @@
*type = sharp_type;
}
} else {
- bool method_in_image = compiling_boot ||
+ bool method_in_image =
Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
- if (method_in_image) {
+ if (method_in_image || compiling_boot) {
+ // We know we must be able to get to the method in the image, so use that pointer.
CHECK(!method->IsAbstract());
*type = sharp_type;
- *direct_method = compiling_boot ? -1 : reinterpret_cast<uintptr_t>(method);
- *direct_code = compiling_boot ? -1 : compiler_->GetEntryPointOf(method);
+ *direct_method = force_relocations ? -1 : reinterpret_cast<uintptr_t>(method);
+ *direct_code = force_relocations ? -1 : compiler_->GetEntryPointOf(method);
target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
target_method->dex_method_index = method->GetDexMethodIndex();
} else if (!must_use_direct_pointers) {
// Set the code and rely on the dex cache for the method.
*type = sharp_type;
- *direct_code = compiler_->GetEntryPointOf(method);
+ if (force_relocations) {
+ *direct_code = -1;
+ target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ target_method->dex_method_index = method->GetDexMethodIndex();
+ } else {
+ *direct_code = compiler_->GetEntryPointOf(method);
+ }
} else {
// Direct pointers were required but none were available.
VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
@@ -1329,12 +1375,14 @@
uint16_t referrer_class_def_idx,
uint32_t referrer_method_idx,
uint32_t target_type_idx,
+ const DexFile* target_type_dex_file,
size_t literal_offset) {
MutexLock mu(Thread::Current(), compiled_methods_lock_);
classes_to_patch_.push_back(new TypePatchInformation(dex_file,
referrer_class_def_idx,
referrer_method_idx,
target_type_idx,
+ target_type_dex_file,
literal_offset));
}
@@ -1346,12 +1394,14 @@
jobject class_loader,
CompilerDriver* compiler,
const DexFile* dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool)
: index_(0),
class_linker_(class_linker),
class_loader_(class_loader),
compiler_(compiler),
dex_file_(dex_file),
+ dex_files_(dex_files),
thread_pool_(thread_pool) {}
ClassLinker* GetClassLinker() const {
@@ -1373,6 +1423,10 @@
return dex_file_;
}
+ const std::vector<const DexFile*>& GetDexFiles() const {
+ return dex_files_;
+ }
+
void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) {
Thread* self = Thread::Current();
self->AssertNoPendingException();
@@ -1430,11 +1484,24 @@
const jobject class_loader_;
CompilerDriver* const compiler_;
const DexFile* const dex_file_;
+ const std::vector<const DexFile*>& dex_files_;
ThreadPool* const thread_pool_;
DISALLOW_COPY_AND_ASSIGN(ParallelCompilationManager);
};
+static bool SkipClassCheckClassPath(const char* descriptor, const DexFile& dex_file,
+ const std::vector<const DexFile*>& classpath) {
+ DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, classpath);
+ CHECK(pair.second != NULL);
+ if (pair.first != &dex_file) {
+ LOG(WARNING) << "Skipping class " << descriptor << " from " << dex_file.GetLocation()
+ << " previously found in " << pair.first->GetLocation();
+ return true;
+ }
+ return false;
+}
+
// Return true if the class should be skipped during compilation.
//
// The first case where we skip is for redundant class definitions in
@@ -1443,20 +1510,23 @@
// The second case where we skip is when an app bundles classes found
// in the boot classpath. Since at runtime we will select the class from
// the boot classpath, we ignore the one from the app.
+//
+// The third case is if the app itself has the class defined in multiple dex files. Then we skip
+// it if it is not the first occurrence.
static bool SkipClass(ClassLinker* class_linker, jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
const DexFile::ClassDef& class_def) {
const char* descriptor = dex_file.GetClassDescriptor(class_def);
+
if (class_loader == NULL) {
- DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, class_linker->GetBootClassPath());
- CHECK(pair.second != NULL);
- if (pair.first != &dex_file) {
- LOG(WARNING) << "Skipping class " << descriptor << " from " << dex_file.GetLocation()
- << " previously found in " << pair.first->GetLocation();
- return true;
- }
- return false;
+ return SkipClassCheckClassPath(descriptor, dex_file, class_linker->GetBootClassPath());
}
- return class_linker->IsInBootClassPath(descriptor);
+
+ if (class_linker->IsInBootClassPath(descriptor)) {
+ return true;
+ }
+
+ return SkipClassCheckClassPath(descriptor, dex_file, dex_files);
}
// A fast version of SkipClass above if the class pointer is available
@@ -1514,7 +1584,7 @@
// definitions, since many of them many never be referenced by
// generated code.
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- if (!SkipClass(class_linker, jclass_loader, dex_file, class_def)) {
+ if (!SkipClass(class_linker, jclass_loader, dex_file, manager->GetDexFiles(), class_def)) {
ScopedObjectAccess soa(self);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -1621,13 +1691,15 @@
}
void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: we could resolve strings here, although the string table is largely filled with class
// and method names.
- ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
+ ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
+ thread_pool);
if (IsImage()) {
// For images we resolve all types, such as array, whereas for applications just those with
// classdefs are resolved by ResolveClassFieldsAndMethods.
@@ -1644,7 +1716,7 @@
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
- VerifyDexFile(class_loader, *dex_file, thread_pool, timings);
+ VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
@@ -1696,10 +1768,12 @@
}
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
TimingLogger::ScopedTiming t("Verify Dex File", timings);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
+ ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
+ thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
}
@@ -1789,10 +1863,12 @@
}
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
TimingLogger::ScopedTiming t("InitializeNoClinit", timings);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool);
+ ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, dex_files,
+ thread_pool);
size_t thread_count;
if (IsImage()) {
// TODO: remove this when transactional mode supports multithreading.
@@ -1813,7 +1889,7 @@
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
- InitializeClasses(class_loader, *dex_file, thread_pool, timings);
+ InitializeClasses(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
@@ -1822,7 +1898,7 @@
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
- CompileDexFile(class_loader, *dex_file, thread_pool, timings);
+ CompileDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
@@ -1832,7 +1908,7 @@
const DexFile& dex_file = *manager->GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ClassLinker* class_linker = manager->GetClassLinker();
- if (SkipClass(class_linker, jclass_loader, dex_file, class_def)) {
+ if (SkipClass(class_linker, jclass_loader, dex_file, manager->GetDexFiles(), class_def)) {
return;
}
ClassReference ref(&dex_file, class_def_index);
@@ -1901,10 +1977,11 @@
}
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
TimingLogger::ScopedTiming t("Compile Dex File", timings);
ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
- &dex_file, thread_pool);
+ &dex_file, dex_files, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 6dae398..2a5cdb9 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -213,6 +213,12 @@
bool* is_type_initialized, bool* use_direct_type_ptr,
uintptr_t* direct_type_ptr, bool* out_is_finalizable);
+ // Query methods for the java.lang.ref.Reference class.
+ bool CanEmbedReferenceTypeInCode(ClassReference* ref,
+ bool* use_direct_type_ptr, uintptr_t* direct_type_ptr);
+ uint32_t GetReferenceSlowFlagOffset() const;
+ uint32_t GetReferenceDisableFlagOffset() const;
+
// Get the DexCache for the compilation unit.
mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -356,6 +362,7 @@
uint16_t referrer_class_def_idx,
uint32_t referrer_method_idx,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
size_t literal_offset)
LOCKS_EXCLUDED(compiled_methods_lock_);
@@ -402,10 +409,6 @@
return dump_passes_;
}
- bool DidIncludeDebugSymbols() const {
- return compiler_options_->GetIncludeDebugSymbols();
- }
-
CumulativeLogger* GetTimingsLogger() const {
return timings_logger_;
}
@@ -549,6 +552,10 @@
class TypePatchInformation : public PatchInformation {
public:
+ const DexFile& GetTargetTypeDexFile() const {
+ return *target_type_dex_file_;
+ }
+
uint32_t GetTargetTypeIdx() const {
return target_type_idx_;
}
@@ -565,13 +572,15 @@
uint16_t referrer_class_def_idx,
uint32_t referrer_method_idx,
uint32_t target_type_idx,
+ const DexFile* target_type_dex_file,
size_t literal_offset)
: PatchInformation(dex_file, referrer_class_def_idx,
referrer_method_idx, literal_offset),
- target_type_idx_(target_type_idx) {
+ target_type_idx_(target_type_idx), target_type_dex_file_(target_type_dex_file) {
}
const uint32_t target_type_idx_;
+ const DexFile* target_type_dex_file_;
friend class CompilerDriver;
DISALLOW_COPY_AND_ASSIGN(TypePatchInformation);
@@ -599,14 +608,6 @@
std::vector<uint8_t>* DeduplicateGCMap(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateCFIInfo(const std::vector<uint8_t>* cfi_info);
- /*
- * @brief return the pointer to the Call Frame Information.
- * @return pointer to call frame information for this compilation.
- */
- std::vector<uint8_t>* GetCallFrameInformation() const {
- return cfi_info_.get();
- }
-
ProfileFile profile_file_;
bool profile_present_;
@@ -658,12 +659,14 @@
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -671,6 +674,7 @@
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
@@ -681,6 +685,7 @@
void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings);
void CompileDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
@@ -766,9 +771,6 @@
bool support_boot_image_fixup_;
- // Call Frame Information, which might be generated to help stack tracebacks.
- std::unique_ptr<std::vector<uint8_t>> cfi_info_;
-
// DeDuplication data structures, these own the corresponding byte arrays.
class DedupeHashFunc {
public:
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 92b2fee..c0f91d1 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -56,9 +56,9 @@
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
include_debug_symbols_(kDefaultIncludeDebugSymbols),
- explicit_null_checks_(true),
- explicit_so_checks_(true),
- explicit_suspend_checks_(true)
+ implicit_null_checks_(false),
+ implicit_so_checks_(false),
+ implicit_suspend_checks_(false)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(false)
#endif
@@ -74,9 +74,9 @@
bool include_patch_information,
double top_k_profile_threshold,
bool include_debug_symbols,
- bool explicit_null_checks,
- bool explicit_so_checks,
- bool explicit_suspend_checks
+ bool implicit_null_checks,
+ bool implicit_so_checks,
+ bool implicit_suspend_checks
#ifdef ART_SEA_IR_MODE
, bool sea_ir_mode
#endif
@@ -91,9 +91,9 @@
include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
include_debug_symbols_(include_debug_symbols),
- explicit_null_checks_(explicit_null_checks),
- explicit_so_checks_(explicit_so_checks),
- explicit_suspend_checks_(explicit_suspend_checks)
+ implicit_null_checks_(implicit_null_checks),
+ implicit_so_checks_(implicit_so_checks),
+ implicit_suspend_checks_(implicit_suspend_checks)
#ifdef ART_SEA_IR_MODE
, sea_ir_mode_(sea_ir_mode)
#endif
@@ -160,28 +160,28 @@
return include_debug_symbols_;
}
- bool GetExplicitNullChecks() const {
- return explicit_null_checks_;
+ bool GetImplicitNullChecks() const {
+ return implicit_null_checks_;
}
- void SetExplicitNullChecks(bool new_val) {
- explicit_null_checks_ = new_val;
+ void SetImplicitNullChecks(bool new_val) {
+ implicit_null_checks_ = new_val;
}
- bool GetExplicitStackOverflowChecks() const {
- return explicit_so_checks_;
+ bool GetImplicitStackOverflowChecks() const {
+ return implicit_so_checks_;
}
- void SetExplicitStackOverflowChecks(bool new_val) {
- explicit_so_checks_ = new_val;
+ void SetImplicitStackOverflowChecks(bool new_val) {
+ implicit_so_checks_ = new_val;
}
- bool GetExplicitSuspendChecks() const {
- return explicit_suspend_checks_;
+ bool GetImplicitSuspendChecks() const {
+ return implicit_suspend_checks_;
}
- void SetExplicitSuspendChecks(bool new_val) {
- explicit_suspend_checks_ = new_val;
+ void SetImplicitSuspendChecks(bool new_val) {
+ implicit_suspend_checks_ = new_val;
}
#ifdef ART_SEA_IR_MODE
@@ -208,9 +208,9 @@
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
bool include_debug_symbols_;
- bool explicit_null_checks_;
- bool explicit_so_checks_;
- bool explicit_suspend_checks_;
+ bool implicit_null_checks_;
+ bool implicit_so_checks_;
+ bool implicit_suspend_checks_;
#ifdef ART_SEA_IR_MODE
bool sea_ir_mode_;
#endif
diff --git a/compiler/elf_patcher.cc b/compiler/elf_patcher.cc
new file mode 100644
index 0000000..9ae755d
--- /dev/null
+++ b/compiler/elf_patcher.cc
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_patcher.h"
+
+#include <vector>
+#include <set>
+
+#include "elf_file.h"
+#include "elf_utils.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/string-inl.h"
+#include "oat.h"
+#include "os.h"
+#include "utils.h"
+
+namespace art {
+
+bool ElfPatcher::Patch(const CompilerDriver* driver, ElfFile* elf_file,
+ const std::string& oat_location,
+ ImageAddressCallback cb, void* cb_data,
+ std::string* error_msg) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const OatFile* oat_file = class_linker->FindOpenedOatFileFromOatLocation(oat_location);
+ if (oat_file == nullptr) {
+ CHECK(Runtime::Current()->IsCompiler());
+ oat_file = OatFile::Open(oat_location, oat_location, NULL, false, error_msg);
+ if (oat_file == nullptr) {
+ *error_msg = StringPrintf("Unable to find or open oat file at '%s': %s", oat_location.c_str(),
+ error_msg->c_str());
+ return false;
+ }
+ CHECK_EQ(class_linker->RegisterOatFile(oat_file), oat_file);
+ }
+ return ElfPatcher::Patch(driver, elf_file, oat_file,
+ reinterpret_cast<uintptr_t>(oat_file->Begin()), cb, cb_data, error_msg);
+}
+
+bool ElfPatcher::Patch(const CompilerDriver* driver, ElfFile* elf, const OatFile* oat_file,
+ uintptr_t oat_data_start, ImageAddressCallback cb, void* cb_data,
+ std::string* error_msg) {
+ Elf32_Shdr* data_sec = elf->FindSectionByName(".rodata");
+ if (data_sec == nullptr) {
+ *error_msg = "Unable to find .rodata section and oat header";
+ return false;
+ }
+ OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf->Begin() + data_sec->sh_offset);
+ if (!oat_header->IsValid()) {
+ *error_msg = "Oat header was not valid";
+ return false;
+ }
+
+ ElfPatcher p(driver, elf, oat_file, oat_header, oat_data_start, cb, cb_data, error_msg);
+ return p.PatchElf();
+}
+
+mirror::ArtMethod* ElfPatcher::GetTargetMethod(const CompilerDriver::CallPatchInformation* patch) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
+ mirror::ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
+ patch->GetTargetMethodIdx(),
+ dex_cache,
+ NullHandle<mirror::ClassLoader>(),
+ NullHandle<mirror::ArtMethod>(),
+ patch->GetTargetInvokeType());
+ CHECK(method != NULL)
+ << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
+ CHECK(!method->IsRuntimeMethod())
+ << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
+ CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method)
+ << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
+ << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " "
+ << PrettyMethod(method);
+ return method;
+}
+
+mirror::Class* ElfPatcher::GetTargetType(const CompilerDriver::TypePatchInformation* patch) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
+ patch->GetTargetTypeDexFile())));
+ mirror::Class* klass = class_linker->ResolveType(patch->GetTargetTypeDexFile(),
+ patch->GetTargetTypeIdx(),
+ dex_cache, NullHandle<mirror::ClassLoader>());
+ CHECK(klass != NULL)
+ << patch->GetTargetTypeDexFile().GetLocation() << " " << patch->GetTargetTypeIdx();
+ CHECK(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx()) == klass)
+ << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
+ << PrettyClass(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx())) << " "
+ << PrettyClass(klass);
+ return klass;
+}
+
+void ElfPatcher::AddPatch(uintptr_t p) {
+ if (write_patches_ && patches_set_.find(p) == patches_set_.end()) {
+ patches_set_.insert(p);
+ patches_.push_back(p);
+ }
+}
+
+uint32_t* ElfPatcher::GetPatchLocation(uintptr_t patch_ptr) {
+ CHECK_GE(patch_ptr, reinterpret_cast<uintptr_t>(oat_file_->Begin()));
+ CHECK_LE(patch_ptr, reinterpret_cast<uintptr_t>(oat_file_->End()));
+ uintptr_t off = patch_ptr - reinterpret_cast<uintptr_t>(oat_file_->Begin());
+ uintptr_t ret = reinterpret_cast<uintptr_t>(oat_header_) + off;
+
+ CHECK_GE(ret, reinterpret_cast<uintptr_t>(elf_file_->Begin()));
+ CHECK_LT(ret, reinterpret_cast<uintptr_t>(elf_file_->End()));
+ return reinterpret_cast<uint32_t*>(ret);
+}
+
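A standalone sketch of the translation GetPatchLocation performs: a patch location known by its address inside the loaded oat file is mapped to the corresponding byte inside the mmap'd ELF copy by keeping the offset from the start of the oat data constant. All addresses below are hypothetical.

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t oat_begin  = 0x70000000;  // oat_file_->Begin(): the loaded oat data
  uintptr_t elf_header = 0x12340000;  // oat_header_: the same data inside the mmap'd ELF
  uintptr_t patch_ptr  = 0x70001234;  // location to patch, in loaded-oat terms
  uintptr_t off = patch_ptr - oat_begin;
  uintptr_t in_elf = elf_header + off;  // same byte, in ELF-file terms
  std::printf("%#lx\n", static_cast<unsigned long>(in_elf));
  return 0;
}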
+void ElfPatcher::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const void* quick_oat_code = class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
+ patch->GetReferrerClassDefIdx(),
+ patch->GetReferrerMethodIdx());
+ // TODO: make this Thumb2 specific
+ uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(quick_oat_code) & ~0x1);
+ uintptr_t patch_ptr = reinterpret_cast<uintptr_t>(base + patch->GetLiteralOffset());
+ uint32_t* patch_location = GetPatchLocation(patch_ptr);
+ if (kIsDebugBuild) {
+ if (patch->IsCall()) {
+ const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall();
+ const DexFile::MethodId& id =
+ cpatch->GetTargetDexFile()->GetMethodId(cpatch->GetTargetMethodIdx());
+ uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
+ uint32_t actual = *patch_location;
+ CHECK(actual == expected || actual == value) << "Patching call failed: " << std::hex
+ << " actual=" << actual
+ << " expected=" << expected
+ << " value=" << value;
+ }
+ if (patch->IsType()) {
+ const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
+ const DexFile::TypeId& id = tpatch->GetTargetTypeDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
+ uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
+ uint32_t actual = *patch_location;
+ CHECK(actual == expected || actual == value) << "Patching type failed: " << std::hex
+ << " actual=" << actual
+ << " expected=" << expected
+ << " value=" << value;
+ }
+ }
+ *patch_location = value;
+ oat_header_->UpdateChecksum(patch_location, sizeof(value));
+
+ if (patch->IsCall() && patch->AsCall()->IsRelative()) {
+ // We never record relative patches.
+ return;
+ }
+ uintptr_t loc = patch_ptr - (reinterpret_cast<uintptr_t>(oat_file_->Begin()) +
+ oat_header_->GetExecutableOffset());
+ CHECK_GT(patch_ptr, reinterpret_cast<uintptr_t>(oat_file_->Begin()) +
+ oat_header_->GetExecutableOffset());
+ CHECK_LT(loc, oat_file_->Size() - oat_header_->GetExecutableOffset());
+ AddPatch(loc);
+}
+
+bool ElfPatcher::PatchElf() {
+ // TODO if we are adding patches the resulting ELF file might have a
+ // potentially rather large amount of free space where patches might have been
+ // placed. We should adjust the ELF file to get rid of this excess space.
+ if (write_patches_) {
+ patches_.reserve(compiler_driver_->GetCodeToPatch().size() +
+ compiler_driver_->GetMethodsToPatch().size() +
+ compiler_driver_->GetClassesToPatch().size());
+ }
+ Thread* self = Thread::Current();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const char* old_cause = self->StartAssertNoThreadSuspension("ElfPatcher");
+
+ typedef std::vector<const CompilerDriver::CallPatchInformation*> CallPatches;
+ const CallPatches& code_to_patch = compiler_driver_->GetCodeToPatch();
+ for (size_t i = 0; i < code_to_patch.size(); i++) {
+ const CompilerDriver::CallPatchInformation* patch = code_to_patch[i];
+
+ mirror::ArtMethod* target = GetTargetMethod(patch);
+ uintptr_t quick_code = reinterpret_cast<uintptr_t>(class_linker->GetQuickOatCodeFor(target));
+ DCHECK_NE(quick_code, 0U) << PrettyMethod(target);
+ const OatFile* target_oat = class_linker->FindOpenedOatFileForDexFile(*patch->GetTargetDexFile());
+ // Get where the data actually starts. If target is this oat_file_ it is oat_data_begin_,
+ // otherwise it is wherever target_oat is loaded.
+ uintptr_t oat_data_addr = GetBaseAddressFor(target_oat);
+ uintptr_t code_base = reinterpret_cast<uintptr_t>(target_oat->Begin());
+ uintptr_t code_offset = quick_code - code_base;
+ bool is_quick_offset = false;
+ if (quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge())) {
+ is_quick_offset = true;
+ code_offset = oat_header_->GetQuickToInterpreterBridgeOffset();
+ } else if (quick_code ==
+ reinterpret_cast<uintptr_t>(class_linker->GetQuickGenericJniTrampoline())) {
+ CHECK(target->IsNative());
+ is_quick_offset = true;
+ code_offset = oat_header_->GetQuickGenericJniTrampolineOffset();
+ }
+ uintptr_t value;
+ if (patch->IsRelative()) {
+ // value to patch is relative to the location being patched
+ const void* quick_oat_code =
+ class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
+ patch->GetReferrerClassDefIdx(),
+ patch->GetReferrerMethodIdx());
+ if (is_quick_offset) {
+ // If it's a quick offset it means that we are doing a relative patch from the class linker
+ // oat_file to the elf_patcher oat_file, so we need to adjust the quick oat code to be the
+ // one in the output oat_file (i.e. where it is actually going to be loaded).
+ quick_code = PointerToLowMemUInt32(reinterpret_cast<void*>(oat_data_addr + code_offset));
+ quick_oat_code =
+ reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(quick_oat_code) +
+ oat_data_addr - code_base);
+ }
+ uintptr_t base = reinterpret_cast<uintptr_t>(quick_oat_code);
+ uintptr_t patch_location = base + patch->GetLiteralOffset();
+ value = quick_code - patch_location + patch->RelativeOffset();
+ } else if (code_offset != 0) {
+ value = PointerToLowMemUInt32(reinterpret_cast<void*>(oat_data_addr + code_offset));
+ } else {
+ value = 0;
+ }
+ SetPatchLocation(patch, value);
+ }
+
+ const CallPatches& methods_to_patch = compiler_driver_->GetMethodsToPatch();
+ for (size_t i = 0; i < methods_to_patch.size(); i++) {
+ const CompilerDriver::CallPatchInformation* patch = methods_to_patch[i];
+ mirror::ArtMethod* target = GetTargetMethod(patch);
+ SetPatchLocation(patch, PointerToLowMemUInt32(get_image_address_(cb_data_, target)));
+ }
+
+ const std::vector<const CompilerDriver::TypePatchInformation*>& classes_to_patch =
+ compiler_driver_->GetClassesToPatch();
+ for (size_t i = 0; i < classes_to_patch.size(); i++) {
+ const CompilerDriver::TypePatchInformation* patch = classes_to_patch[i];
+ mirror::Class* target = GetTargetType(patch);
+ SetPatchLocation(patch, PointerToLowMemUInt32(get_image_address_(cb_data_, target)));
+ }
+
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ if (write_patches_) {
+ return WriteOutPatchData();
+ }
+ return true;
+}
+
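A standalone sketch of the relative-call case handled in PatchElf above: the value written at the patch site is the distance from the patch location to the target code, plus an instruction-set-specific bias. All numbers below are hypothetical.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t quick_code      = 0x7000a000;  // where the callee's code will live at run time
  uint32_t caller_code     = 0x70001000;  // start of the caller's quick code
  uint32_t literal_offset  = 0x40;        // offset of the call literal inside the caller
  uint32_t relative_offset = 8;           // assumed PC bias for the instruction set
  uint32_t patch_location  = caller_code + literal_offset;
  uint32_t value = quick_code - patch_location + relative_offset;
  std::printf("write %#x at %#x\n", value, patch_location);
  return 0;
}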
+bool ElfPatcher::WriteOutPatchData() {
+ Elf32_Shdr* shdr = elf_file_->FindSectionByName(".oat_patches");
+ if (shdr != nullptr) {
+ CHECK_EQ(shdr, elf_file_->FindSectionByType(SHT_OAT_PATCH))
+ << "Incorrect type for .oat_patches section";
+ CHECK_LE(patches_.size() * sizeof(uintptr_t), shdr->sh_size)
+ << "We got more patches than anticipated";
+ CHECK_LE(reinterpret_cast<uintptr_t>(elf_file_->Begin()) + shdr->sh_offset + shdr->sh_size,
+ reinterpret_cast<uintptr_t>(elf_file_->End())) << "section is too large";
+ CHECK(shdr == &elf_file_->GetSectionHeader(elf_file_->GetSectionHeaderNum() - 1) ||
+ shdr->sh_offset + shdr->sh_size <= (shdr + 1)->sh_offset)
+ << "Section overlaps onto next section";
+ // It's mmap'd so we can just memcpy.
+ memcpy(elf_file_->Begin() + shdr->sh_offset, patches_.data(),
+ patches_.size() * sizeof(uintptr_t));
+ // TODO We should fill in the newly empty space between the last patch and
+ // the start of the next section by moving the following sections down if
+ // possible.
+ shdr->sh_size = patches_.size() * sizeof(uintptr_t);
+ return true;
+ } else {
+ LOG(ERROR) << "Unable to find section header for SHT_OAT_PATCH";
+ *error_msg_ = "Unable to find section to write patch information to in ";
+ *error_msg_ += elf_file_->GetFile().GetPath();
+ return false;
+ }
+}
+
+} // namespace art
diff --git a/compiler/elf_patcher.h b/compiler/elf_patcher.h
new file mode 100644
index 0000000..0a9f0a01
--- /dev/null
+++ b/compiler/elf_patcher.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_ELF_PATCHER_H_
+#define ART_COMPILER_ELF_PATCHER_H_
+
+#include "base/mutex.h"
+#include "driver/compiler_driver.h"
+#include "elf_file.h"
+#include "mirror/art_method.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "oat_file.h"
+#include "oat.h"
+#include "os.h"
+
+namespace art {
+
+class ElfPatcher {
+ public:
+ typedef void* (*ImageAddressCallback)(void* data, mirror::Object* obj);
+
+ static bool Patch(const CompilerDriver* driver, ElfFile* elf_file,
+ const std::string& oat_location,
+ ImageAddressCallback cb, void* cb_data,
+ std::string* error_msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static bool Patch(const CompilerDriver* driver, ElfFile* elf_file,
+ const OatFile* oat_file, uintptr_t oat_data_begin,
+ ImageAddressCallback cb, void* cb_data,
+ std::string* error_msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static bool Patch(const CompilerDriver* driver, ElfFile* elf_file,
+ const std::string& oat_location,
+ std::string* error_msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ElfPatcher::Patch(driver, elf_file, oat_location,
+ DefaultImageAddressCallback, nullptr, error_msg);
+ }
+
+ static bool Patch(const CompilerDriver* driver, ElfFile* elf_file,
+ const OatFile* oat_file, uintptr_t oat_data_begin,
+ std::string* error_msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ElfPatcher::Patch(driver, elf_file, oat_file, oat_data_begin,
+ DefaultImageAddressCallback, nullptr, error_msg);
+ }
+
+ private:
+ ElfPatcher(const CompilerDriver* driver, ElfFile* elf_file, const OatFile* oat_file,
+ OatHeader* oat_header, uintptr_t oat_data_begin,
+ ImageAddressCallback cb, void* cb_data, std::string* error_msg)
+ : compiler_driver_(driver), elf_file_(elf_file), oat_file_(oat_file),
+ oat_header_(oat_header), oat_data_begin_(oat_data_begin), get_image_address_(cb),
+ cb_data_(cb_data), error_msg_(error_msg),
+ write_patches_(compiler_driver_->GetCompilerOptions().GetIncludePatchInformation()) {}
+ ~ElfPatcher() {}
+
+ static void* DefaultImageAddressCallback(void* data_unused, mirror::Object* obj) {
+ return static_cast<void*>(obj);
+ }
+
+ bool PatchElf()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void AddPatch(uintptr_t off);
+
+ void SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Takes a pointer into the oat_file_ and gets the corresponding pointer into the ElfFile.
+ uint32_t* GetPatchLocation(uintptr_t patch_ptr);
+
+ bool WriteOutPatchData();
+
+ uintptr_t GetBaseAddressFor(const OatFile* f) {
+ if (f == oat_file_) {
+ return oat_data_begin_;
+ } else {
+ return reinterpret_cast<uintptr_t>(f->Begin());
+ }
+ }
+
+ const CompilerDriver* compiler_driver_;
+
+ // The elf_file containing the oat_data we are patching up.
+ ElfFile* elf_file_;
+
+ // The oat_file that is actually loaded.
+ const OatFile* oat_file_;
+
+ // The oat_header_ within the elf_file_.
+ OatHeader* oat_header_;
+
+ // Where the elf_file will be loaded during normal runs.
+ uintptr_t oat_data_begin_;
+
+ // Callback to get image addresses.
+ ImageAddressCallback get_image_address_;
+ void* cb_data_;
+
+ std::string* error_msg_;
+ std::vector<uintptr_t> patches_;
+ std::set<uintptr_t> patches_set_;
+ bool write_patches_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfPatcher);
+};
+
+} // namespace art
+#endif // ART_COMPILER_ELF_PATCHER_H_
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 4274386..bb5f7e0 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -24,6 +24,7 @@
#include "elf_utils.h"
#include "file_output_stream.h"
#include "globals.h"
+#include "leb128.h"
#include "oat.h"
#include "oat_writer.h"
#include "utils.h"
@@ -38,6 +39,18 @@
return ((binding) << 4) + ((type) & 0xf);
}
+static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
+ (*buf)[offset+0] = data;
+ (*buf)[offset+1] = data >> 8;
+ (*buf)[offset+2] = data >> 16;
+ (*buf)[offset+3] = data >> 24;
+}
+
+static void PushHalf(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+}
+
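The two helpers above append or back-patch values in little-endian byte order; the same pattern is used further down to fix up the CIE length and the FDE fields. A self-contained usage sketch (the buffer contents are illustrative only):

#include <cstdint>
#include <cstdio>
#include <vector>

static void PushHalf(std::vector<uint8_t>* buf, int data) {
  buf->push_back(data & 0xff);
  buf->push_back((data >> 8) & 0xff);
}

static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
  (*buf)[offset + 0] = data;
  (*buf)[offset + 1] = data >> 8;
  (*buf)[offset + 2] = data >> 16;
  (*buf)[offset + 3] = data >> 24;
}

int main() {
  std::vector<uint8_t> buf(4, 0);    // reserve a 32-bit slot to back-patch later
  PushHalf(&buf, 0x1234);            // appends 0x34, 0x12
  UpdateWord(&buf, 0, 0x11223344);   // fills the reserved slot: 44 33 22 11
  for (uint8_t b : buf) std::printf("%02x ", b);
  std::printf("\n");
  return 0;
}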
bool ElfWriterQuick::ElfBuilder::Write() {
// The basic layout of the elf file. Order may be different in final output.
// +-------------------------+
@@ -106,14 +119,14 @@
// | .strtab\0 | (Optional)
// | .debug_str\0 | (Optional)
// | .debug_info\0 | (Optional)
- // | .debug_frame\0 | (Optional)
+ // | .eh_frame\0 | (Optional)
// | .debug_abbrev\0 | (Optional)
// +-------------------------+ (Optional)
// | .debug_str | (Optional)
// +-------------------------+ (Optional)
// | .debug_info | (Optional)
// +-------------------------+ (Optional)
- // | .debug_frame | (Optional)
+ // | .eh_frame | (Optional)
// +-------------------------+ (Optional)
// | .debug_abbrev | (Optional)
// +-------------------------+
@@ -127,7 +140,7 @@
// | Elf32_Shdr .shstrtab |
// | Elf32_Shdr .debug_str | (Optional)
// | Elf32_Shdr .debug_info | (Optional)
- // | Elf32_Shdr .debug_frame | (Optional)
+ // | Elf32_Shdr .eh_frame | (Optional)
// | Elf32_Shdr .debug_abbrev| (Optional)
// +-------------------------+
@@ -822,42 +835,118 @@
}
}
+std::vector<uint8_t>* ConstructCIEFrameX86(bool is_x86_64) {
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
+
+ // Length (will be filled in later in this routine).
+ PushWord(cfi_info, 0);
+
+ // CIE id: always 0.
+ PushWord(cfi_info, 0);
+
+ // Version: always 1.
+ cfi_info->push_back(0x01);
+
+ // Augmentation: 'zR\0'
+ cfi_info->push_back(0x7a);
+ cfi_info->push_back(0x52);
+ cfi_info->push_back(0x0);
+
+ // Code alignment: 1.
+ EncodeUnsignedLeb128(1, cfi_info);
+
+ // Data alignment.
+ if (is_x86_64) {
+ EncodeSignedLeb128(-8, cfi_info);
+ } else {
+ EncodeSignedLeb128(-4, cfi_info);
+ }
+
+ // Return address register.
+ if (is_x86_64) {
+ // R16(RIP)
+ cfi_info->push_back(0x10);
+ } else {
+ // R8(EIP)
+ cfi_info->push_back(0x08);
+ }
+
+ // Augmentation length: 1.
+ cfi_info->push_back(1);
+
+ // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
+ cfi_info->push_back(0x03);
+
+ // Initial instructions.
+ if (is_x86_64) {
+ // DW_CFA_def_cfa R7(RSP) 8.
+ cfi_info->push_back(0x0c);
+ cfi_info->push_back(0x07);
+ cfi_info->push_back(0x08);
+
+ // DW_CFA_offset R16(RIP) 1 (* -8).
+ cfi_info->push_back(0x90);
+ cfi_info->push_back(0x01);
+ } else {
+ // DW_CFA_def_cfa R4(ESP) 4.
+ cfi_info->push_back(0x0c);
+ cfi_info->push_back(0x04);
+ cfi_info->push_back(0x04);
+
+ // DW_CFA_offset R8(EIP) 1 (* -4).
+ cfi_info->push_back(0x88);
+ cfi_info->push_back(0x01);
+ }
+
+ // Padding to a multiple of 4
+ while ((cfi_info->size() & 3) != 0) {
+ // DW_CFA_nop is encoded as 0.
+ cfi_info->push_back(0);
+ }
+
+ // Set the length of the CIE inside the generated bytes.
+ uint32_t length = cfi_info->size() - 4;
+ (*cfi_info)[0] = length;
+ (*cfi_info)[1] = length >> 8;
+ (*cfi_info)[2] = length >> 16;
+ (*cfi_info)[3] = length >> 24;
+ return cfi_info;
+}
+
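The code and data alignment factors in the CIE above are LEB128-encoded. A simplified, self-contained stand-in for the EncodeUnsignedLeb128/EncodeSignedLeb128 helpers pulled in through leb128.h, shown only to make the byte values visible (it assumes arithmetic right shift of negative values, as GCC/Clang provide):

#include <cstdint>
#include <vector>

static void EncodeUnsignedLeb128(uint32_t value, std::vector<uint8_t>* out) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // more bytes follow
    out->push_back(byte);
  } while (value != 0);
}

static void EncodeSignedLeb128(int32_t value, std::vector<uint8_t>* out) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7;  // arithmetic shift keeps the sign
    // Stop once the remaining bits and the sign bit of this byte agree.
    if ((value == 0 && (byte & 0x40) == 0) || (value == -1 && (byte & 0x40) != 0)) {
      more = false;
    } else {
      byte |= 0x80;
    }
    out->push_back(byte);
  }
}

int main() {
  std::vector<uint8_t> buf;
  EncodeUnsignedLeb128(1, &buf);  // code alignment factor -> 0x01
  EncodeSignedLeb128(-4, &buf);   // 32-bit data alignment factor -> 0x7c
  EncodeSignedLeb128(-8, &buf);   // 64-bit data alignment factor -> 0x78
  return 0;
}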
+std::vector<uint8_t>* ConstructCIEFrame(InstructionSet isa) {
+ switch (isa) {
+ case kX86:
+ return ConstructCIEFrameX86(false);
+ case kX86_64:
+ return ConstructCIEFrameX86(true);
+
+ default:
+ // Not implemented.
+ return nullptr;
+ }
+}
+
bool ElfWriterQuick::Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files_unused,
const std::string& android_root_unused,
bool is_host_unused) {
- const bool debug = false;
- const bool add_symbols = oat_writer->DidAddSymbols();
+ constexpr bool debug = false;
const OatHeader& oat_header = oat_writer->GetOatHeader();
Elf32_Word oat_data_size = oat_header.GetExecutableOffset();
uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
ElfBuilder builder(oat_writer, elf_file_, compiler_driver_->GetInstructionSet(), 0,
- oat_data_size, oat_data_size, oat_exec_size, add_symbols, debug);
+ oat_data_size, oat_data_size, oat_exec_size,
+ compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
+ debug);
- if (add_symbols) {
- AddDebugSymbols(builder, oat_writer, debug);
- }
-
- bool generateDebugInformation = compiler_driver_->GetCallFrameInformation() != nullptr;
- if (generateDebugInformation) {
- ElfRawSectionBuilder debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder debug_frame(".debug_frame", SHT_PROGBITS, 0, nullptr, 0, 4, 0);
- debug_frame.SetBuffer(*compiler_driver_->GetCallFrameInformation());
-
- FillInCFIInformation(oat_writer, debug_info.GetBuffer(),
- debug_abbrev.GetBuffer(), debug_str.GetBuffer());
- builder.RegisterRawSection(debug_info);
- builder.RegisterRawSection(debug_abbrev);
- builder.RegisterRawSection(debug_frame);
- builder.RegisterRawSection(debug_str);
+ if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ WriteDebugSymbols(builder, oat_writer);
}
if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation()) {
ElfRawSectionBuilder oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0,
- sizeof(size_t), sizeof(size_t));
+ sizeof(uintptr_t), sizeof(uintptr_t));
ReservePatchSpace(oat_patches.GetBuffer(), debug);
builder.RegisterRawSection(oat_patches);
}
@@ -865,32 +954,62 @@
return builder.Write();
}
-void ElfWriterQuick::AddDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer, bool debug) {
+void ElfWriterQuick::WriteDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer) {
+ std::unique_ptr<std::vector<uint8_t>> cfi_info(
+ ConstructCIEFrame(compiler_driver_->GetInstructionSet()));
+
+ // Iterate over the compiled methods.
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
ElfSymtabBuilder* symtab = &builder.symtab_builder_;
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
symtab->AddSymbol(it->method_name_, &builder.text_builder_, it->low_pc_, true,
it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
+
+ // Include CFI for compiled method, if possible.
+ if (cfi_info.get() != nullptr) {
+ DCHECK(it->compiled_method_ != nullptr);
+
+ // Copy in the FDE, if present
+ const std::vector<uint8_t>* fde = it->compiled_method_->GetCFIInfo();
+ if (fde != nullptr) {
+ // Copy the information into cfi_info and then fix the address in the new copy.
+ int cur_offset = cfi_info->size();
+ cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
+
+ // Set the 'CIE_pointer' field to cur_offset+4.
+ uint32_t CIE_pointer = cur_offset + 4;
+ uint32_t offset_to_update = cur_offset + sizeof(uint32_t);
+ (*cfi_info)[offset_to_update+0] = CIE_pointer;
+ (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+ (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+ (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+
+ // Set the 'initial_location' field to address the start of the method.
+ offset_to_update = cur_offset + 2*sizeof(uint32_t);
+ const uint32_t quick_code_start = it->low_pc_;
+ (*cfi_info)[offset_to_update+0] = quick_code_start;
+ (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
+ (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
+ (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+ }
+ }
}
-}
-static void UpdateWord(std::vector<uint8_t>*buf, int offset, int data) {
- (*buf)[offset+0] = data;
- (*buf)[offset+1] = data >> 8;
- (*buf)[offset+2] = data >> 16;
- (*buf)[offset+3] = data >> 24;
-}
+ if (cfi_info.get() != nullptr) {
+ // Now lay down the Elf sections.
+ ElfRawSectionBuilder debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ ElfRawSectionBuilder debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ ElfRawSectionBuilder eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+ eh_frame.SetBuffer(std::move(*cfi_info.get()));
-static void PushWord(std::vector<uint8_t>*buf, int data) {
- buf->push_back(data & 0xff);
- buf->push_back((data >> 8) & 0xff);
- buf->push_back((data >> 16) & 0xff);
- buf->push_back((data >> 24) & 0xff);
-}
-
-static void PushHalf(std::vector<uint8_t>*buf, int data) {
- buf->push_back(data & 0xff);
- buf->push_back((data >> 8) & 0xff);
+ FillInCFIInformation(oat_writer, debug_info.GetBuffer(), debug_abbrev.GetBuffer(),
+ debug_str.GetBuffer());
+ builder.RegisterRawSection(debug_info);
+ builder.RegisterRawSection(debug_abbrev);
+ builder.RegisterRawSection(eh_frame);
+ builder.RegisterRawSection(debug_str);
+ }
}
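A standalone sketch of the FDE fix-up in WriteDebugSymbols above: once a method's FDE has been appended at cur_offset, the word at cur_offset + 4 is pointed back at the CIE and the word at cur_offset + 8 receives the method's start address. Sizes and addresses below are hypothetical.

#include <cstdint>
#include <cstdio>
#include <vector>

static void WriteWordAt(std::vector<uint8_t>* buf, size_t offset, uint32_t data) {
  for (int i = 0; i < 4; ++i) {
    (*buf)[offset + i] = static_cast<uint8_t>(data >> (8 * i));  // little-endian
  }
}

int main() {
  std::vector<uint8_t> cfi_info(32, 0);  // pretend the CIE already occupies these bytes
  std::vector<uint8_t> fde(24, 0);       // a compiled method's FDE, copied as-is
  size_t cur_offset = cfi_info.size();
  cfi_info.insert(cfi_info.end(), fde.begin(), fde.end());
  WriteWordAt(&cfi_info, cur_offset + 4, static_cast<uint32_t>(cur_offset + 4));  // CIE_pointer
  WriteWordAt(&cfi_info, cur_offset + 8, 0x7000abc0u);  // initial_location = low_pc (hypothetical)
  std::printf("eh_frame bytes: %zu\n", cfi_info.size());
  return 0;
}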
void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index a0d36df..8cfe550 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -48,9 +48,7 @@
~ElfWriterQuick() {}
class ElfBuilder;
- void AddDebugSymbols(ElfBuilder& builder,
- OatWriter* oat_writer,
- bool debug);
+ void WriteDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer);
void ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug);
class ElfSectionBuilder {
@@ -126,7 +124,7 @@
: ElfSectionBuilder(sec_name, type, flags, link, info, align, entsize) {}
~ElfRawSectionBuilder() {}
std::vector<uint8_t>* GetBuffer() { return &buf_; }
- void SetBuffer(std::vector<uint8_t> buf) { buf_ = buf; }
+ void SetBuffer(std::vector<uint8_t>&& buf) { buf_ = buf; }
protected:
std::vector<uint8_t> buf_;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 982e6d4..6b23345 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -25,7 +25,6 @@
#include "elf_fixup.h"
#include "gc/space/image_space.h"
#include "image_writer.h"
-#include "implicit_check_options.h"
#include "lock_word.h"
#include "mirror/object-inl.h"
#include "oat_writer.h"
@@ -81,9 +80,7 @@
t.NewTiming("WriteElf");
ScopedObjectAccess soa(Thread::Current());
SafeMap<std::string, std::string> key_value_store;
- key_value_store.Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey,
- ImplicitCheckOptions::Serialize(true, true, true));
- OatWriter oat_writer(class_linker->GetBootClassPath(), 0, 0, compiler_driver_.get(), &timings,
+ OatWriter oat_writer(class_linker->GetBootClassPath(), 0, 0, 0, compiler_driver_.get(), &timings,
&key_value_store);
bool success = compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
@@ -144,9 +141,8 @@
std::string image("-Ximage:");
image.append(image_location.GetFilename());
options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
- // Turn off implicit checks for this runtime, as we compiled the image with them off.
- std::string explicit_checks("-implicit-checks:none");
- options.push_back(std::make_pair(explicit_checks.c_str(), reinterpret_cast<void*>(NULL)));
+ // By default the compiler this creates will not include patch information.
+ options.push_back(std::make_pair("-Xnorelocate", nullptr));
if (!Runtime::Create(options, false)) {
LOG(FATAL) << "Failed to create runtime";
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 8ef2964..d102bbc 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -29,6 +29,7 @@
#include "driver/compiler_driver.h"
#include "elf_file.h"
#include "elf_utils.h"
+#include "elf_patcher.h"
#include "elf_writer.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
@@ -84,7 +85,7 @@
return false;
}
std::string error_msg;
- oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location, &error_msg);
+ oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, &error_msg);
if (oat_file_ == nullptr) {
LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
<< ": " << error_msg;
@@ -801,214 +802,35 @@
}
}
-static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
- ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
- patch->GetTargetMethodIdx(),
- dex_cache,
- NullHandle<mirror::ClassLoader>(),
- NullHandle<mirror::ArtMethod>(),
- patch->GetTargetInvokeType());
- CHECK(method != NULL)
- << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
- CHECK(!method->IsRuntimeMethod())
- << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
- CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method)
- << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
- << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " "
- << PrettyMethod(method);
- return method;
-}
-
-static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
- Class* klass = class_linker->ResolveType(patch->GetDexFile(), patch->GetTargetTypeIdx(),
- dex_cache, NullHandle<mirror::ClassLoader>());
- CHECK(klass != NULL)
- << patch->GetDexFile().GetLocation() << " " << patch->GetTargetTypeIdx();
- CHECK(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx()) == klass)
- << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
- << PrettyClass(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx())) << " "
- << PrettyClass(klass);
- return klass;
+static OatHeader* GetOatHeaderFromElf(ElfFile* elf) {
+ Elf32_Shdr* data_sec = elf->FindSectionByName(".rodata");
+ if (data_sec == nullptr) {
+ return nullptr;
+ }
+ return reinterpret_cast<OatHeader*>(elf->Begin() + data_sec->sh_offset);
}
void ImageWriter::PatchOatCodeAndMethods(File* elf_file) {
- std::vector<uintptr_t> patches;
- std::set<uintptr_t> patches_set;
- auto maybe_push = [&patches, &patches_set] (uintptr_t p) {
- if (patches_set.find(p) == patches_set.end()) {
- patches.push_back(p);
- patches_set.insert(p);
- }
- };
- const bool add_patches = compiler_driver_.GetCompilerOptions().GetIncludePatchInformation();
- if (add_patches) {
- // TODO if we are adding patches the resulting ELF file might have a potentially rather large
- // amount of free space where patches might have been placed. We should adjust the ELF file to
- // get rid of this excess space.
- patches.reserve(compiler_driver_.GetCodeToPatch().size() +
- compiler_driver_.GetMethodsToPatch().size() +
- compiler_driver_.GetClassesToPatch().size());
+ std::string error_msg;
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(elf_file, PROT_READ|PROT_WRITE,
+ MAP_SHARED, &error_msg));
+ if (elf.get() == nullptr) {
+ LOG(FATAL) << "Unable to patch oat file: " << error_msg;
+ return;
}
- uintptr_t loc = 0;
- Thread* self = Thread::Current();
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
-
- typedef std::vector<const CompilerDriver::CallPatchInformation*> CallPatches;
- const CallPatches& code_to_patch = compiler_driver_.GetCodeToPatch();
- for (size_t i = 0; i < code_to_patch.size(); i++) {
- const CompilerDriver::CallPatchInformation* patch = code_to_patch[i];
- ArtMethod* target = GetTargetMethod(patch);
- uintptr_t quick_code = reinterpret_cast<uintptr_t>(class_linker->GetQuickOatCodeFor(target));
- DCHECK_NE(quick_code, 0U) << PrettyMethod(target);
- uintptr_t code_base = reinterpret_cast<uintptr_t>(&oat_file_->GetOatHeader());
- uintptr_t code_offset = quick_code - code_base;
- bool is_quick_offset = false;
- if (quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge())) {
- is_quick_offset = true;
- code_offset = quick_to_interpreter_bridge_offset_;
- } else if (quick_code ==
- reinterpret_cast<uintptr_t>(class_linker->GetQuickGenericJniTrampoline())) {
- CHECK(target->IsNative());
- is_quick_offset = true;
- code_offset = quick_generic_jni_trampoline_offset_;
- }
- uintptr_t value;
- if (patch->IsRelative()) {
- // value to patch is relative to the location being patched
- const void* quick_oat_code =
- class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
- patch->GetReferrerClassDefIdx(),
- patch->GetReferrerMethodIdx());
- if (is_quick_offset) {
- // If its a quick offset it means that we are doing a relative patch from the class linker
- // oat_file to the image writer oat_file so we need to adjust the quick oat code to be the
- // one in the image writer oat_file.
- quick_code = PointerToLowMemUInt32(GetOatAddress(code_offset));
- quick_oat_code =
- reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(quick_oat_code) +
- reinterpret_cast<uintptr_t>(oat_data_begin_) - code_base);
- }
- uintptr_t base = reinterpret_cast<uintptr_t>(quick_oat_code);
- uintptr_t patch_location = base + patch->GetLiteralOffset();
- value = quick_code - patch_location + patch->RelativeOffset();
- } else {
- value = PointerToLowMemUInt32(GetOatAddress(code_offset));
- }
- SetPatchLocation(patch, value, &loc);
- if (add_patches && !patch->AsCall()->IsRelative()) {
- maybe_push(loc);
- }
+ if (!ElfPatcher::Patch(&compiler_driver_, elf.get(), oat_file_,
+ reinterpret_cast<uintptr_t>(oat_data_begin_),
+ GetImageAddressCallback, reinterpret_cast<void*>(this),
+ &error_msg)) {
+ LOG(FATAL) << "Unable to patch oat file: " << error_msg;
+ return;
}
+ OatHeader* oat_header = GetOatHeaderFromElf(elf.get());
+ CHECK(oat_header != nullptr);
+ CHECK(oat_header->IsValid());
- const CallPatches& methods_to_patch = compiler_driver_.GetMethodsToPatch();
- for (size_t i = 0; i < methods_to_patch.size(); i++) {
- const CompilerDriver::CallPatchInformation* patch = methods_to_patch[i];
- ArtMethod* target = GetTargetMethod(patch);
- SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)), &loc);
- if (add_patches && !patch->AsCall()->IsRelative()) {
- maybe_push(loc);
- }
- }
-
- const std::vector<const CompilerDriver::TypePatchInformation*>& classes_to_patch =
- compiler_driver_.GetClassesToPatch();
- for (size_t i = 0; i < classes_to_patch.size(); i++) {
- const CompilerDriver::TypePatchInformation* patch = classes_to_patch[i];
- Class* target = GetTargetType(patch);
- SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)), &loc);
- if (add_patches) {
- maybe_push(loc);
- }
- }
-
- // Update the image header with the new checksum after patching
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- image_header->SetOatChecksum(oat_file_->GetOatHeader().GetChecksum());
- self->EndAssertNoThreadSuspension(old_cause);
-
- // Update the ElfFiles SHT_OAT_PATCH section to include the patches.
- if (add_patches) {
- std::string err;
- // TODO we are mapping in the contents of this file twice. We should be able
- // to do it only once, which would be better.
- std::unique_ptr<ElfFile> file(ElfFile::Open(elf_file, true, false, &err));
- if (file == nullptr) {
- LOG(ERROR) << err;
- }
- Elf32_Shdr* shdr = file->FindSectionByName(".oat_patches");
- if (shdr != nullptr) {
- CHECK_EQ(shdr, file->FindSectionByType(SHT_OAT_PATCH))
- << "Incorrect type for .oat_patches section";
- CHECK_LE(patches.size() * sizeof(uintptr_t), shdr->sh_size)
- << "We got more patches than anticipated";
- CHECK_LE(reinterpret_cast<uintptr_t>(file->Begin()) + shdr->sh_offset + shdr->sh_size,
- reinterpret_cast<uintptr_t>(file->End())) << "section is too large";
- CHECK(shdr == &file->GetSectionHeader(file->GetSectionHeaderNum() - 1) ||
- shdr->sh_offset + shdr->sh_size <= (shdr + 1)->sh_offset)
- << "Section overlaps onto next section";
- // It's mmap'd so we can just memcpy.
- memcpy(file->Begin() + shdr->sh_offset, patches.data(), patches.size()*sizeof(uintptr_t));
- // TODO We should fill in the newly empty space between the last patch and the start of the
- // next section by moving the following sections down if possible.
- shdr->sh_size = patches.size() * sizeof(uintptr_t);
- } else {
- LOG(ERROR) << "Unable to find section header for SHT_OAT_PATCH";
- }
- }
-}
-
-void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value,
- uintptr_t* patched_ptr) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const void* quick_oat_code = class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
- patch->GetReferrerClassDefIdx(),
- patch->GetReferrerMethodIdx());
- OatHeader& oat_header = const_cast<OatHeader&>(oat_file_->GetOatHeader());
- // TODO: make this Thumb2 specific
- uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(quick_oat_code) & ~0x1);
- uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset());
- if (kIsDebugBuild) {
- if (patch->IsCall()) {
- const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall();
- const DexFile::MethodId& id = cpatch->GetTargetDexFile()->GetMethodId(cpatch->GetTargetMethodIdx());
- uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
- uint32_t actual = *patch_location;
- CHECK(actual == expected || actual == value) << std::hex
- << "actual=" << actual
- << "expected=" << expected
- << "value=" << value;
- }
- if (patch->IsType()) {
- const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
- const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
- uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
- uint32_t actual = *patch_location;
- CHECK(actual == expected || actual == value) << std::hex
- << "actual=" << actual
- << "expected=" << expected
- << "value=" << value;
- }
- }
- *patch_location = value;
- oat_header.UpdateChecksum(patch_location, sizeof(value));
-
- uintptr_t loc = reinterpret_cast<uintptr_t>(patch_location) -
- (reinterpret_cast<uintptr_t>(oat_file_->Begin()) + oat_header.GetExecutableOffset());
- CHECK_GT(reinterpret_cast<uintptr_t>(patch_location),
- reinterpret_cast<uintptr_t>(oat_file_->Begin()) + oat_header.GetExecutableOffset());
- CHECK_LT(loc, oat_file_->Size() - oat_header.GetExecutableOffset());
-
- *patched_ptr = loc;
+ image_header->SetOatChecksum(oat_header->GetChecksum());
}
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index cf5bc93..e8bcf7f 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -70,6 +70,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
+ }
+
mirror::Object* GetImageAddress(mirror::Object* object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (object == NULL) {
@@ -159,9 +164,6 @@
// Patches references in OatFile to expect runtime addresses.
void PatchOatCodeAndMethods(File* elf_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value,
- uintptr_t* patched_location)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const CompilerDriver& compiler_driver_;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index b4d863b..75d3030 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -155,13 +155,9 @@
SetUpForTest(false, "bar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_bar
- ScopedObjectAccess soa(Thread::Current());
std::string reason;
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
- ASSERT_TRUE(
- Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
+ ASSERT_TRUE(Runtime::Current()->GetJavaVM()->LoadNativeLibrary(env_, "", class_loader_, &reason))
+ << reason;
jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
EXPECT_EQ(25, result);
@@ -172,13 +168,9 @@
SetUpForTest(true, "sbar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_sbar
- ScopedObjectAccess soa(Thread::Current());
std::string reason;
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
- ASSERT_TRUE(
- Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
+ ASSERT_TRUE(Runtime::Current()->GetJavaVM()->LoadNativeLibrary(env_, "", class_loader_, &reason))
+ << reason;
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
EXPECT_EQ(43, result);
@@ -720,10 +712,33 @@
EXPECT_EQ(result, 42);
}
+int gJava_MyClassNatives_GetSinkProperties_calls = 0;
+jarray Java_MyClassNatives_GetSinkProperties(JNIEnv* env, jobject thisObj, jstring s) {
+ // 1 = thisObj
+ Thread* self = Thread::Current();
+ EXPECT_EQ(kNative, self->GetState());
+ Locks::mutator_lock_->AssertNotHeld(self);
+ EXPECT_EQ(self->GetJniEnv(), env);
+ EXPECT_TRUE(thisObj != nullptr);
+ EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+ EXPECT_EQ(s, nullptr);
+ gJava_MyClassNatives_GetSinkProperties_calls++;
+ ScopedObjectAccess soa(self);
+ EXPECT_EQ(2U, self->NumStackReferences());
+ EXPECT_TRUE(self->HoldsLock(soa.Decode<mirror::Object*>(thisObj)));
+ return nullptr;
+}
+
TEST_F(JniCompilerTest, GetSinkPropertiesNative) {
TEST_DISABLED_FOR_PORTABLE();
- SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;", nullptr);
- // This space intentionally left blank. Just testing compilation succeeds.
+ SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;",
+ reinterpret_cast<void*>(&Java_MyClassNatives_GetSinkProperties));
+
+ EXPECT_EQ(0, gJava_MyClassNatives_GetSinkProperties_calls);
+ jarray result = down_cast<jarray>(
+ env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, nullptr));
+ EXPECT_EQ(nullptr, result);
+ EXPECT_EQ(1, gJava_MyClassNatives_GetSinkProperties_calls);
}
// This should return jclass, but we're imitating a bug pattern.
@@ -748,10 +763,10 @@
check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()");
// Here, we just call the method incorrectly; we should catch that too.
- env_->CallVoidMethod(jobj_, jmethod_);
+ env_->CallObjectMethod(jobj_, jmethod_);
check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()");
- env_->CallStaticVoidMethod(jklass_, jmethod_);
- check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticVoidMethodV");
+ env_->CallStaticObjectMethod(jklass_, jmethod_);
+ check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticObjectMethodV");
}
TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Static) {
@@ -766,10 +781,10 @@
check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()");
// Here, we just call the method incorrectly; we should catch that too.
- env_->CallStaticVoidMethod(jklass_, jmethod_);
+ env_->CallStaticObjectMethod(jklass_, jmethod_);
check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()");
- env_->CallVoidMethod(jobj_, jmethod_);
- check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallVoidMethodV");
+ env_->CallObjectMethod(jobj_, jmethod_);
+ check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallObjectMethodV");
}
// This should take jclass, but we're imitating a bug pattern.
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 0a00d7d..b95dad2 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -152,7 +152,8 @@
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
: JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
- callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X19));
+ // TODO: These callee-save registers are hard-coded.
+ // They should be generated from the spill mask automatically.
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X20));
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X21));
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X22));
@@ -164,30 +165,28 @@
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X28));
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X29));
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X30));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D8));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D9));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D10));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D11));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D12));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D13));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D14));
- callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D15));
}
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
- uint32_t result = 0;
- result = 1 << X19 | 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 |
- 1 << X25 | 1 << X26 | 1 << X27 | 1 << X28 | 1 << X29 | 1 << LR;
- return result;
+ // Note: The native JNI function may call into VM runtime functions that can suspend
+ // execution or trigger GC, and in those cases the JNI method frame becomes the top quick frame.
+ // To satisfy GC we therefore save LR and the callee-save registers, similar to the
+ // CalleeSaveMethod(RefOnly) frame.
+ // The JNI function is the native function that the Java code wants to call.
+ // The JNI method is the stub compiled by the JNI compiler.
+ // Call chain: managed code (Java) --> JNI method --> JNI function.
+ // The thread register (X18, scratched by AAPCS64) is not saved on the stack; it is saved in ETR (X21).
+ // The suspend register (X19) is preserved by AAPCS64 and is not used in the JNI method.
+ return 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 | 1 << X25 |
+ 1 << X26 | 1 << X27 | 1 << X28 | 1 << X29 | 1 << LR;
}
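The returned mask packs one bit per core register, so the number of saved registers and the spill space they occupy fall out of a popcount. A standalone check, using the same register numbers as the list above and the GCC/Clang __builtin_popcount intrinsic:

#include <cstdint>
#include <cstdio>

int main() {
  enum { X20 = 20, X21, X22, X23, X24, X25, X26, X27, X28, X29, LR = 30 };
  uint32_t mask = 1u << X20 | 1u << X21 | 1u << X22 | 1u << X23 | 1u << X24 |
                  1u << X25 | 1u << X26 | 1u << X27 | 1u << X28 | 1u << X29 | 1u << LR;
  int spills = __builtin_popcount(mask);  // 11 registers
  std::printf("callee saves: %d, spill bytes: %d\n", spills, spills * 8);  // 8 bytes per X register
  return 0;
}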
uint32_t Arm64JniCallingConvention::FpSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
- uint32_t result = 0;
- result = 1 << D8 | 1 << D9 | 1 << D10 | 1 << D11 | 1 << D12 | 1 << D13 |
- 1 << D14 | 1 << D15;
- return result;
+ // Note: All callee-save FP registers are preserved by AAPCS64 and are not used in the
+ // JNI method.
+ return 0;
}
ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index dec84f1..8e87021 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -27,7 +27,7 @@
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "jni_internal.h"
+#include "jni_env_ext.h"
#include "mirror/art_method.h"
#include "utils/assembler.h"
#include "utils/managed_register.h"
@@ -90,6 +90,7 @@
// Assembler that holds generated instructions
std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set));
+ jni_asm->InitializeFrameDescriptionEntry();
// Offsets into data structures
// TODO: if cross compiling these offsets are for the host not the target
@@ -183,9 +184,8 @@
// 5. Move frame down to allow space for out going args.
const size_t main_out_arg_size = main_jni_conv->OutArgSize();
- const size_t end_out_arg_size = end_jni_conv->OutArgSize();
- const size_t max_out_arg_size = std::max(main_out_arg_size, end_out_arg_size);
- __ IncreaseFrameSize(max_out_arg_size);
+ size_t current_out_arg_size = main_out_arg_size;
+ __ IncreaseFrameSize(main_out_arg_size);
// 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
// can occur. The result is the saved JNI local state that is restored by the exit call. We
@@ -244,7 +244,7 @@
// NULL (which must be encoded as NULL).
// Note: we do this prior to materializing the JNIEnv* and static's jclass to
// give as many free registers for the shuffle as possible
- mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
+ mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
uint32_t args_count = 0;
while (mr_conv->HasNext()) {
args_count++;
@@ -270,7 +270,7 @@
}
if (is_static) {
// Create argument for Class
- mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
+ mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv*
FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
@@ -333,10 +333,21 @@
// Ensure doubles are 8-byte aligned for MIPS
return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
}
- CHECK_LT(return_save_location.Uint32Value(), frame_size+main_out_arg_size);
+ CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
}
+ // Increase frame size for out args if needed by the end_jni_conv.
+ const size_t end_out_arg_size = end_jni_conv->OutArgSize();
+ if (end_out_arg_size > current_out_arg_size) {
+ size_t out_arg_size_diff = end_out_arg_size - current_out_arg_size;
+ current_out_arg_size = end_out_arg_size;
+ __ IncreaseFrameSize(out_arg_size_diff);
+ saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff);
+ locked_object_handle_scope_offset =
+ FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff);
+ return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff);
+ }
// thread.
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
ThreadOffset<4> jni_end32(-1);
@@ -403,7 +414,7 @@
}
// 14. Move frame up now we're done with the out arg space.
- __ DecreaseFrameSize(max_out_arg_size);
+ __ DecreaseFrameSize(current_out_arg_size);
// 15. Process pending exceptions from JNI call or monitor exit.
__ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0);
@@ -422,12 +433,14 @@
std::vector<uint8_t> managed_code(cs);
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
+ jni_asm->FinalizeFrameDescriptionEntry();
return new CompiledMethod(driver,
instruction_set,
managed_code,
frame_size,
main_jni_conv->CoreSpillMask(),
- main_jni_conv->FpSpillMask());
+ main_jni_conv->FpSpillMask(),
+ jni_asm->GetFrameDescriptionEntry());
}
// Copy a single parameter from the managed to the JNI calling convention
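A short sketch (names hypothetical, illustrative only) of the offset-rebasing rule the hunk above applies when the frame is later grown for the end call's out args: after IncreaseFrameSize(delta), SP has moved down by delta bytes, so every SP-relative FrameOffset recorded earlier must be shifted by the same delta.

    #include <cstddef>

    // Hypothetical stand-in for art::FrameOffset.
    struct FrameOffsetSketch {
      size_t value;
    };

    // Mirrors what the patch does for saved_cookie_offset,
    // locked_object_handle_scope_offset and return_save_location.
    void RebaseAfterFrameGrowth(FrameOffsetSketch* offsets, size_t count, size_t delta) {
      for (size_t i = 0; i < count; ++i) {
        offsets[i].value += delta;  // keep pointing at the same stack slot after SP moved down
      }
    }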
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 1444ca0..11d1728 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -20,7 +20,6 @@
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick_compiler_callbacks.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "implicit_check_options.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
@@ -118,11 +117,10 @@
ScratchFile tmp;
SafeMap<std::string, std::string> key_value_store;
key_value_store.Put(OatHeader::kImageLocationKey, "lue.art");
- key_value_store.Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey,
- ImplicitCheckOptions::Serialize(true, true, true));
OatWriter oat_writer(class_linker->GetBootClassPath(),
42U,
4096U,
+ 0,
compiler_driver_.get(),
&timings,
&key_value_store);
@@ -186,10 +184,10 @@
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(80U, sizeof(OatHeader));
+ EXPECT_EQ(84U, sizeof(OatHeader));
EXPECT_EQ(8U, sizeof(OatMethodOffsets));
EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(77 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(79 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 63a3c8c..1ba5d32 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -50,6 +50,7 @@
OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
uint32_t image_file_location_oat_checksum,
uintptr_t image_file_location_oat_begin,
+ int32_t image_patch_delta,
const CompilerDriver* compiler,
TimingLogger* timings,
SafeMap<std::string, std::string>* key_value_store)
@@ -57,6 +58,7 @@
dex_files_(&dex_files),
image_file_location_oat_checksum_(image_file_location_oat_checksum),
image_file_location_oat_begin_(image_file_location_oat_begin),
+ image_patch_delta_(image_patch_delta),
key_value_store_(key_value_store),
oat_header_(NULL),
size_dex_file_alignment_(0),
@@ -126,6 +128,7 @@
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
CHECK_EQ(compiler->IsImage(),
key_value_store_->find(OatHeader::kImageLocationKey) == key_value_store_->end());
+ CHECK_ALIGNED(image_patch_delta_, kPageSize);
}
OatWriter::~OatWriter() {
@@ -354,7 +357,6 @@
uint32_t thumb_offset = compiled_method->CodeDelta();
quick_code_offset = offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
- bool force_debug_capture = false;
bool deduped = false;
// Deduplicate code arrays.
@@ -397,39 +399,22 @@
offset_ += code_size;
}
- uint32_t quick_code_start = quick_code_offset - writer_->oat_header_->GetExecutableOffset();
- std::vector<uint8_t>* cfi_info = writer_->compiler_driver_->GetCallFrameInformation();
- if (cfi_info != nullptr) {
- // Copy in the FDE, if present
- const std::vector<uint8_t>* fde = compiled_method->GetCFIInfo();
- if (fde != nullptr) {
- // Copy the information into cfi_info and then fix the address in the new copy.
- int cur_offset = cfi_info->size();
- cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
+ if (writer_->compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ // Record debug information for this function when debug symbols are being included.
- // Set the 'initial_location' field to address the start of the method.
- uint32_t offset_to_update = cur_offset + 2*sizeof(uint32_t);
- (*cfi_info)[offset_to_update+0] = quick_code_start;
- (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
- (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
- (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
- force_debug_capture = true;
- }
- }
-
-
- if (writer_->compiler_driver_->DidIncludeDebugSymbols() || force_debug_capture) {
- // Record debug information for this function if we are doing that or
- // we have CFI and so need it.
std::string name = PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
if (deduped) {
- // TODO We should place the DEDUPED tag on the first instance of a
- // deduplicated symbol so that it will show up in a debuggerd crash
- // report.
+ // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
+ // so that it will show up in a debuggerd crash report.
name += " [ DEDUPED ]";
}
- writer_->method_info_.push_back(DebugInfo(name, quick_code_start,
- quick_code_start + code_size));
+
+ const uint32_t quick_code_start = quick_code_offset -
+ writer_->oat_header_->GetExecutableOffset();
+ writer_->method_info_.push_back(DebugInfo(name,
+ quick_code_start,
+ quick_code_start + code_size,
+ compiled_method));
}
}
@@ -808,6 +793,7 @@
oat_header_->SetExecutableOffset(offset);
size_executable_offset_alignment_ = offset - old_offset;
if (compiler_driver_->IsImage()) {
+ CHECK_EQ(image_patch_delta_, 0);
InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
#define DO_TRAMPOLINE(field, fn_name) \
@@ -840,6 +826,7 @@
oat_header_->SetQuickImtConflictTrampolineOffset(0);
oat_header_->SetQuickResolutionTrampolineOffset(0);
oat_header_->SetQuickToInterpreterBridgeOffset(0);
+ oat_header_->SetImagePatchDelta(image_patch_delta_);
}
return offset;
}
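The two CHECKs added above pin down the invariants on image_patch_delta_; a small sketch of the same checks in plain C++ (a 4 KiB page size is an assumption here, the patch uses kPageSize):

    #include <cassert>
    #include <cstdint>

    void ValidateImagePatchDelta(int32_t image_patch_delta, bool compiling_image) {
      const int32_t kAssumedPageSize = 4096;               // assumption; ART uses kPageSize
      assert(image_patch_delta % kAssumedPageSize == 0);   // CHECK_ALIGNED(image_patch_delta_, kPageSize)
      if (compiling_image) {
        assert(image_patch_delta == 0);                    // CHECK_EQ(image_patch_delta_, 0) when IsImage()
      }
    }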
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 3d34956..ef5fd6b 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -30,6 +30,7 @@
namespace art {
class BitVector;
+class CompiledMethod;
class OutputStream;
// OatHeader variable length with count of D OatDexFiles
@@ -79,6 +80,7 @@
OatWriter(const std::vector<const DexFile*>& dex_files,
uint32_t image_file_location_oat_checksum,
uintptr_t image_file_location_oat_begin,
+ int32_t image_patch_delta,
const CompilerDriver* compiler,
TimingLogger* timings,
SafeMap<std::string, std::string>* key_value_store);
@@ -96,22 +98,21 @@
~OatWriter();
struct DebugInfo {
- DebugInfo(const std::string& method_name, uint32_t low_pc, uint32_t high_pc)
- : method_name_(method_name), low_pc_(low_pc), high_pc_(high_pc) {
+ DebugInfo(const std::string& method_name, uint32_t low_pc, uint32_t high_pc,
+ CompiledMethod* compiled_method)
+ : method_name_(method_name), low_pc_(low_pc), high_pc_(high_pc),
+ compiled_method_(compiled_method) {
}
- std::string method_name_;
+ std::string method_name_; // Note: this name is a pretty-printed name.
uint32_t low_pc_;
uint32_t high_pc_;
+ CompiledMethod* compiled_method_;
};
const std::vector<DebugInfo>& GetCFIMethodInfo() const {
return method_info_;
}
- bool DidAddSymbols() const {
- return compiler_driver_->DidIncludeDebugSymbols();
- }
-
private:
// The DataAccess classes are helper classes that provide access to members related to
// a given map, i.e. GC map, mapping table or vmap table. By abstracting these away
@@ -253,6 +254,7 @@
// dependencies on the image.
uint32_t image_file_location_oat_checksum_;
uintptr_t image_file_location_oat_begin_;
+ int32_t image_patch_delta_;
// data to write
SafeMap<std::string, std::string>* key_value_store_;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index f594129..43e6b83 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -34,6 +34,37 @@
namespace art {
+/**
+ * Helper class to add HTemporary instructions. This class is used when
+ * converting a DEX instruction to multiple HInstructions, where those
+ * instructions do not die at the following instruction but instead span
+ * multiple instructions.
+ */
+class Temporaries : public ValueObject {
+ public:
+ Temporaries(HGraph* graph, size_t count) : graph_(graph), count_(count), index_(0) {
+ graph_->UpdateNumberOfTemporaries(count_);
+ }
+
+ void Add(HInstruction* instruction) {
+ // We currently only support vreg size temps.
+ DCHECK(instruction->GetType() != Primitive::kPrimLong
+ && instruction->GetType() != Primitive::kPrimDouble);
+ HInstruction* temp = new (graph_->GetArena()) HTemporary(index_++);
+ instruction->GetBlock()->AddInstruction(temp);
+ DCHECK(temp->GetPrevious() == instruction);
+ }
+
+ private:
+ HGraph* const graph_;
+
+ // The total number of temporaries that will be used.
+ const size_t count_;
+
+ // Current index in the temporary stack, updated by `Add`.
+ size_t index_;
+};
+
static bool IsTypeSupported(Primitive::Type type) {
return type != Primitive::kPrimFloat && type != Primitive::kPrimDouble;
}
@@ -308,9 +339,13 @@
arena_, number_of_arguments, return_type, dex_offset, method_idx);
size_t start_index = 0;
+ Temporaries temps(graph_, is_instance_call ? 1 : 0);
if (is_instance_call) {
HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot);
- invoke->SetArgumentAt(0, arg);
+ HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_offset);
+ current_block_->AddInstruction(null_check);
+ temps.Add(null_check);
+ invoke->SetArgumentAt(0, null_check);
start_index = 1;
}
@@ -343,37 +378,6 @@
return true;
}
-/**
- * Helper class to add HTemporary instructions. This class is used when
- * converting a DEX instruction to multiple HInstruction, and where those
- * instructions do not die at the following instruction, but instead spans
- * multiple instructions.
- */
-class Temporaries : public ValueObject {
- public:
- Temporaries(HGraph* graph, size_t count) : graph_(graph), count_(count), index_(0) {
- graph_->UpdateNumberOfTemporaries(count_);
- }
-
- void Add(HInstruction* instruction) {
- // We currently only support vreg size temps.
- DCHECK(instruction->GetType() != Primitive::kPrimLong
- && instruction->GetType() != Primitive::kPrimDouble);
- HInstruction* temp = new (graph_->GetArena()) HTemporary(index_++);
- instruction->GetBlock()->AddInstruction(temp);
- DCHECK(temp->GetPrevious() == instruction);
- }
-
- private:
- HGraph* const graph_;
-
- // The total number of temporaries that will be used.
- const size_t count_;
-
- // Current index in the temporary stack, updated by `Add`.
- size_t index_;
-};
-
bool HGraphBuilder::BuildFieldAccess(const Instruction& instruction,
uint32_t dex_offset,
bool is_put) {
@@ -421,6 +425,41 @@
return true;
}
+void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
+ uint32_t dex_offset,
+ bool is_put,
+ Primitive::Type anticipated_type) {
+ uint8_t source_or_dest_reg = instruction.VRegA_23x();
+ uint8_t array_reg = instruction.VRegB_23x();
+ uint8_t index_reg = instruction.VRegC_23x();
+
+ DCHECK(IsTypeSupported(anticipated_type));
+
+ // We need one temporary for the null check, one for the index, and one for the length.
+ Temporaries temps(graph_, 3);
+
+ HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot);
+ object = new (arena_) HNullCheck(object, dex_offset);
+ current_block_->AddInstruction(object);
+ temps.Add(object);
+
+ HInstruction* length = new (arena_) HArrayLength(object);
+ current_block_->AddInstruction(length);
+ temps.Add(length);
+ HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt);
+ index = new (arena_) HBoundsCheck(index, length, dex_offset);
+ current_block_->AddInstruction(index);
+ temps.Add(index);
+ if (is_put) {
+ HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
+ // TODO: Insert a type check node if the type is Object.
+ current_block_->AddInstruction(new (arena_) HArraySet(object, index, value, dex_offset));
+ } else {
+ current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
+ }
+}
+
bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset) {
if (current_block_ == nullptr) {
return true; // Dead code
@@ -693,6 +732,24 @@
break;
}
+#define ARRAY_XX(kind, anticipated_type) \
+ case Instruction::AGET##kind: { \
+ BuildArrayAccess(instruction, dex_offset, false, anticipated_type); \
+ break; \
+ } \
+ case Instruction::APUT##kind: { \
+ BuildArrayAccess(instruction, dex_offset, true, anticipated_type); \
+ break; \
+ }
+
+ ARRAY_XX(, Primitive::kPrimInt);
+ ARRAY_XX(_WIDE, Primitive::kPrimLong);
+ ARRAY_XX(_OBJECT, Primitive::kPrimNot);
+ ARRAY_XX(_BOOLEAN, Primitive::kPrimBoolean);
+ ARRAY_XX(_BYTE, Primitive::kPrimByte);
+ ARRAY_XX(_CHAR, Primitive::kPrimChar);
+ ARRAY_XX(_SHORT, Primitive::kPrimShort);
+
default:
return false;
}
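The ARRAY_XX macro above is purely mechanical; for example, ARRAY_XX(, Primitive::kPrimInt) expands to the two switch cases below (AGET builds a load, APUT a store):

    case Instruction::AGET: {
      BuildArrayAccess(instruction, dex_offset, false, Primitive::kPrimInt);
      break;
    }
    case Instruction::APUT: {
      BuildArrayAccess(instruction, dex_offset, true, Primitive::kPrimInt);
      break;
    }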
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index f94b8e8..170c427 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -93,6 +93,10 @@
void BuildReturn(const Instruction& instruction, Primitive::Type type);
bool BuildFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_get);
+ void BuildArrayAccess(const Instruction& instruction,
+ uint32_t dex_offset,
+ bool is_get,
+ Primitive::Type anticipated_type);
// Builds an invocation node and returns whether the instruction is supported.
bool BuildInvoke(const Instruction& instruction,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e0db0f1..bd8c27e 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -30,13 +30,16 @@
namespace art {
-void CodeGenerator::CompileBaseline(CodeAllocator* allocator) {
+void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
block_labels_.SetSize(blocks.Size());
DCHECK_EQ(frame_size_, kUninitializedFrameSize);
+ if (!is_leaf) {
+ MarkNotLeaf();
+ }
ComputeFrameSize(GetGraph()->GetMaximumNumberOfOutVRegs()
+ GetGraph()->GetNumberOfLocalVRegs()
+ GetGraph()->GetNumberOfTemporaries()
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 18e3e5a..b31c3a3 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -70,7 +70,7 @@
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
- void CompileBaseline(CodeAllocator* allocator);
+ void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
void CompileOptimized(CodeAllocator* allocator);
static CodeGenerator* Create(ArenaAllocator* allocator,
HGraph* graph,
@@ -131,6 +131,14 @@
void BuildNativeGCMap(
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
+ bool IsLeafMethod() const {
+ return is_leaf_;
+ }
+
+ void MarkNotLeaf() {
+ is_leaf_ = false;
+ }
+
protected:
CodeGenerator(HGraph* graph, size_t number_of_registers)
: frame_size_(kUninitializedFrameSize),
@@ -138,7 +146,8 @@
block_labels_(graph->GetArena(), 0),
pc_infos_(graph->GetArena(), 32),
slow_paths_(graph->GetArena(), 8),
- blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)) {}
+ blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)),
+ is_leaf_(true) {}
~CodeGenerator() {}
// Register allocation logic.
@@ -171,6 +180,8 @@
// Temporary data structure used when doing register allocation.
bool* const blocked_registers_;
+ bool is_leaf_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
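A tiny sketch of how the new is_leaf_ flag is consumed by the ARM and x86 frame entries below: only leaf methods with small frames may skip the stack overflow check. Function and parameter names here are illustrative, not ART's.

    // Mirrors: bool skip_overflow_check = IsLeafMethod() && !IsLargeFrame(GetFrameSize(), isa);
    bool ShouldSkipStackOverflowCheck(bool is_leaf_method, bool is_large_frame) {
      return is_leaf_method && !is_large_frame;
    }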
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 73c2d48..eccc970 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -17,12 +17,14 @@
#include "code_generator_arm.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/stack_checks.h"
namespace art {
@@ -32,6 +34,29 @@
namespace arm {
+static constexpr bool kExplicitStackOverflowCheck = false;
+
+static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
+static constexpr int kCurrentMethodStackOffset = 0;
+
+static Location ArmCoreLocation(Register reg) {
+ return Location::RegisterLocation(ArmManagedRegister::FromCoreRegister(reg));
+}
+
+static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
#define __ reinterpret_cast<ArmAssembler*>(codegen->GetAssembler())->
class NullCheckSlowPathARM : public SlowPathCode {
@@ -51,6 +76,47 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
+class StackOverflowCheckSlowPathARM : public SlowPathCode {
+ public:
+ StackOverflowCheckSlowPathARM() {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ LoadFromOffset(kLoadWord, PC, TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowStackOverflow).Int32Value());
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM);
+};
+
+class BoundsCheckSlowPathARM : public SlowPathCode {
+ public:
+ explicit BoundsCheckSlowPathARM(uint32_t dex_pc,
+ Location index_location,
+ Location length_location)
+ : dex_pc_(dex_pc), index_location_(index_location), length_location_(length_location) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = reinterpret_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ arm_codegen->Move32(ArmCoreLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ arm_codegen->Move32(ArmCoreLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowArrayBounds).Int32Value();
+ __ ldr(LR, Address(TR, offset));
+ __ blx(LR);
+ codegen->RecordPcInfo(dex_pc_);
+ }
+
+ private:
+ const uint32_t dex_pc_;
+ const Location index_location_;
+ const Location length_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
+};
+
#undef __
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
@@ -82,9 +148,6 @@
return EQ; // Unreachable.
}
-static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
-static constexpr int kCurrentMethodStackOffset = 0;
-
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
stream << ArmManagedRegister::FromCoreRegister(Register(reg));
}
@@ -97,7 +160,8 @@
: CodeGenerator(graph, kNumberOfRegIds),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this) {}
+ move_resolver_(graph->GetArena(), this),
+ assembler_(true) {}
size_t CodeGeneratorARM::FrameEntrySpillSize() const {
return kNumberOfPushedRegistersAtEntry * kArmWordSize;
@@ -195,16 +259,28 @@
return kNumberOfRegIds;
}
-static Location ArmCoreLocation(Register reg) {
- return Location::RegisterLocation(ArmManagedRegister::FromCoreRegister(reg));
-}
-
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
: HGraphVisitor(graph),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
void CodeGeneratorARM::GenerateFrameEntry() {
+ bool skip_overflow_check = IsLeafMethod() && !IsLargeFrame(GetFrameSize(), InstructionSet::kArm);
+ if (!skip_overflow_check) {
+ if (kExplicitStackOverflowCheck) {
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathARM();
+ AddSlowPath(slow_path);
+
+ __ LoadFromOffset(kLoadWord, IP, TR, Thread::StackEndOffset<kArmWordSize>().Int32Value());
+ __ cmp(SP, ShifterOperand(IP));
+ __ b(slow_path->GetEntryLabel(), CC);
+ } else {
+ __ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
+ __ ldr(IP, Address(IP, 0));
+ RecordPcInfo(0);
+ }
+ }
+
core_spill_mask_ |= (1 << LR | 1 << R6 | 1 << R7);
__ PushList(1 << LR | 1 << R6 | 1 << R7);
@@ -377,11 +453,17 @@
}
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
if (instruction->AsIntConstant() != nullptr) {
int32_t value = instruction->AsIntConstant()->GetValue();
if (location.IsRegister()) {
__ LoadImmediate(location.AsArm().AsCoreRegister(), value);
} else {
+ DCHECK(location.IsStackSlot());
__ LoadImmediate(IP, value);
__ str(IP, Address(SP, location.GetStackIndex()));
}
@@ -391,6 +473,7 @@
__ LoadImmediate(location.AsArm().AsRegisterPairLow(), Low32Bits(value));
__ LoadImmediate(location.AsArm().AsRegisterPairHigh(), High32Bits(value));
} else {
+ DCHECK(location.IsDoubleStackSlot());
__ LoadImmediate(IP, Low32Bits(value));
__ str(IP, Address(SP, location.GetStackIndex()));
__ LoadImmediate(IP, High32Bits(value));
@@ -424,11 +507,11 @@
case Primitive::kPrimShort:
case Primitive::kPrimNot:
case Primitive::kPrimInt:
- Move32(location, instruction->GetLocations()->Out());
+ Move32(location, locations->Out());
break;
case Primitive::kPrimLong:
- Move64(location, instruction->GetLocations()->Out());
+ Move64(location, locations->Out());
break;
default:
@@ -478,20 +561,33 @@
HCondition* condition = cond->AsCondition();
if (condition->NeedsMaterialization()) {
// Condition has been materialized, compare the output to 0
- if (!if_instr->GetLocations()->InAt(0).IsRegister()) {
- LOG(FATAL) << "Materialized condition is not in an ARM register";
- }
+ DCHECK(if_instr->GetLocations()->InAt(0).IsRegister());
__ cmp(if_instr->GetLocations()->InAt(0).AsArm().AsCoreRegister(),
ShifterOperand(0));
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), EQ);
} else {
// Condition has not been materialized, use its inputs as the comparison and its
// condition as the branch condition.
- __ cmp(condition->GetLocations()->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(condition->GetLocations()->InAt(1).AsArm().AsCoreRegister()));
+ LocationSummary* locations = condition->GetLocations();
+ if (locations->InAt(1).IsRegister()) {
+ __ cmp(locations->InAt(0).AsArm().AsCoreRegister(),
+ ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ } else {
+ DCHECK(locations->InAt(1).IsConstant());
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ ShifterOperand operand;
+ if (ShifterOperand::CanHoldArm(value, &operand)) {
+ __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(value));
+ } else {
+ Register temp = IP;
+ __ LoadImmediate(temp, value);
+ __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(temp));
+ }
+ }
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()),
ARMCondition(condition->GetCondition()));
}
+
if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
__ b(codegen_->GetLabelOf(if_instr->IfFalseSuccessor()));
}
@@ -501,7 +597,7 @@
void LocationsBuilderARM::VisitCondition(HCondition* comp) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(comp);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(comp->InputAt(1)));
if (comp->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister());
}
@@ -509,16 +605,29 @@
}
void InstructionCodeGeneratorARM::VisitCondition(HCondition* comp) {
- if (comp->NeedsMaterialization()) {
- LocationSummary* locations = comp->GetLocations();
+ if (!comp->NeedsMaterialization()) return;
+
+ LocationSummary* locations = comp->GetLocations();
+ if (locations->InAt(1).IsRegister()) {
__ cmp(locations->InAt(0).AsArm().AsCoreRegister(),
ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
- __ it(ARMCondition(comp->GetCondition()), kItElse);
- __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(1),
- ARMCondition(comp->GetCondition()));
- __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(0),
- ARMOppositeCondition(comp->GetCondition()));
+ } else {
+ DCHECK(locations->InAt(1).IsConstant());
+ int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
+ ShifterOperand operand;
+ if (ShifterOperand::CanHoldArm(value, &operand)) {
+ __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(value));
+ } else {
+ Register temp = IP;
+ __ LoadImmediate(temp, value);
+ __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(temp));
+ }
}
+ __ it(ARMCondition(comp->GetCondition()), kItElse);
+ __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(1),
+ ARMCondition(comp->GetCondition()));
+ __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(0),
+ ARMOppositeCondition(comp->GetCondition()));
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
@@ -611,20 +720,17 @@
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
- // TODO: Support constant locations.
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::ConstantLocation(constant));
constant->SetLocations(locations);
}
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
- codegen_->Move(constant, constant->GetLocations()->Out(), nullptr);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
- // TODO: Support constant locations.
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::ConstantLocation(constant));
constant->SetLocations(locations);
}
@@ -688,6 +794,7 @@
}
void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ codegen_->MarkNotLeaf();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
locations->AddTemp(ArmCoreLocation(R0));
@@ -753,6 +860,7 @@
__ blx(LR);
codegen_->RecordPcInfo(invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderARM::VisitAdd(HAdd* add) {
@@ -761,7 +869,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
locations->SetOut(Location::RequiresRegister());
break;
}
@@ -783,9 +891,15 @@
LocationSummary* locations = add->GetLocations();
switch (add->GetResultType()) {
case Primitive::kPrimInt:
- __ add(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ if (locations->InAt(1).IsRegister()) {
+ __ add(locations->Out().AsArm().AsCoreRegister(),
+ locations->InAt(0).AsArm().AsCoreRegister(),
+ ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ } else {
+ __ AddConstant(locations->Out().AsArm().AsCoreRegister(),
+ locations->InAt(0).AsArm().AsCoreRegister(),
+ locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
+ }
break;
case Primitive::kPrimLong:
@@ -815,7 +929,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
locations->SetOut(Location::RequiresRegister());
break;
}
@@ -836,11 +950,18 @@
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
switch (sub->GetResultType()) {
- case Primitive::kPrimInt:
- __ sub(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ case Primitive::kPrimInt: {
+ if (locations->InAt(1).IsRegister()) {
+ __ sub(locations->Out().AsArm().AsCoreRegister(),
+ locations->InAt(0).AsArm().AsCoreRegister(),
+ ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ } else {
+ __ AddConstant(locations->Out().AsArm().AsCoreRegister(),
+ locations->InAt(0).AsArm().AsCoreRegister(),
+ -locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
+ }
break;
+ }
case Primitive::kPrimLong:
__ subs(locations->Out().AsArm().AsRegisterPairLow(),
@@ -863,21 +984,8 @@
}
}
-static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1 };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
-
-class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
- public:
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
+ codegen_->MarkNotLeaf();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
InvokeRuntimeCallingConvention calling_convention;
locations->AddTemp(ArmCoreLocation(calling_convention.GetRegisterAt(0)));
@@ -896,6 +1004,7 @@
__ blx(LR);
codegen_->RecordPcInfo(instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
@@ -948,9 +1057,11 @@
ShifterOperand(right.AsRegisterPairHigh())); // Signed compare.
__ b(&less, LT);
__ b(&greater, GT);
+ // Do LoadImmediate before any `cmp`, as LoadImmediate might affect
+ // the status flags.
+ __ LoadImmediate(output, 0);
__ cmp(left.AsRegisterPairLow(),
ShifterOperand(right.AsRegisterPairLow())); // Unsigned compare.
- __ LoadImmediate(output, 0);
__ b(&done, EQ);
__ b(&less, CC);
@@ -986,6 +1097,11 @@
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
+ // Temporary registers for the write barrier.
+ if (instruction->InputAt(1)->GetType() == Primitive::kPrimNot) {
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
instruction->SetLocations(locations);
}
@@ -1014,6 +1130,11 @@
case Primitive::kPrimNot: {
Register value = locations->InAt(1).AsArm().AsCoreRegister();
__ StoreToOffset(kStoreWord, value, obj, offset);
+ if (field_type == Primitive::kPrimNot) {
+ Register temp = locations->GetTemp(0).AsArm().AsCoreRegister();
+ Register card = locations->GetTemp(1).AsArm().AsCoreRegister();
+ codegen_->MarkGCCard(temp, card, obj, value);
+ }
break;
}
@@ -1115,6 +1236,251 @@
__ b(slow_path->GetEntryLabel(), EQ);
}
+void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Location index = locations->InAt(1);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register out = locations->Out().AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+ Register out = locations->Out().AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+ Register out = locations->Out().AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register out = locations->Out().AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register out = locations->Out().AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_4));
+ __ LoadFromOffset(kLoadWord, out, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ ArmManagedRegister out = locations->Out().AsArm();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_8));
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ }
+}
+
+void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type value_type = instruction->InputAt(2)->GetType();
+ if (value_type == Primitive::kPrimNot) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, ArmCoreLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, ArmCoreLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, ArmCoreLocation(calling_convention.GetRegisterAt(2)));
+ codegen_->MarkNotLeaf();
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Location index = locations->InAt(1);
+ Primitive::Type value_type = instruction->InputAt(2)->GetType();
+
+ switch (value_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ StoreToOffset(kStoreByte, value, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ StoreToOffset(kStoreByte, value, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_4));
+ __ StoreToOffset(kStoreWord, value, IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimNot: {
+ int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAputObject).Int32Value();
+ __ ldr(LR, Address(TR, offset));
+ __ blx(LR);
+ codegen_->RecordPcInfo(instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ ArmManagedRegister value = locations->InAt(2).AsArm();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), obj, offset);
+ } else {
+ __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_8));
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), IP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ }
+}
+
+void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register out = locations->Out().AsArm().AsCoreRegister();
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+}
+
+void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // TODO: Have a normalization phase that makes this instruction never used.
+ locations->SetOut(Location::SameAsFirstInput());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
+ instruction->GetDexPc(), locations->InAt(0), locations->InAt(1));
+ codegen_->AddSlowPath(slow_path);
+
+ Register index = locations->InAt(0).AsArm().AsCoreRegister();
+ Register length = locations->InAt(1).AsArm().AsCoreRegister();
+
+ __ cmp(index, ShifterOperand(length));
+ __ b(slow_path->GetEntryLabel(), CS);
+}
+
+void CodeGeneratorARM::MarkGCCard(Register temp, Register card, Register object, Register value) {
+ Label is_null;
+ __ CompareAndBranchIfZero(value, &is_null);
+ __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
+ __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+ __ strb(card, Address(card, temp));
+ __ Bind(&is_null);
+}
+
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
@@ -1158,7 +1524,16 @@
__ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
} else {
- LOG(FATAL) << "Unimplemented";
+ DCHECK(source.IsConstant());
+ DCHECK(source.GetConstant()->AsIntConstant() != nullptr);
+ int32_t value = source.GetConstant()->AsIntConstant()->GetValue();
+ if (destination.IsRegister()) {
+ __ LoadImmediate(destination.AsArm().AsCoreRegister(), value);
+ } else {
+ DCHECK(destination.IsStackSlot());
+ __ LoadImmediate(IP, value);
+ __ str(IP, Address(SP, destination.GetStackIndex()));
+ }
}
}
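MarkGCCard above is the write barrier emitted after reference stores; a compact C++ sketch of the same logic (names hypothetical, card_shift stands for gc::accounting::CardTable::kCardShift with no specific value assumed):

    #include <cstdint>

    void MarkGCCardSketch(uint8_t* card_table_base, uintptr_t object_address,
                          uintptr_t stored_value, unsigned card_shift) {
      if (stored_value == 0) {
        return;  // null stores skip the barrier (the CompareAndBranchIfZero fast path)
      }
      // Dirty the card covering the object: index by (object >> kCardShift); the byte written
      // is the low byte of the table base itself, matching `strb card, [card, temp]` above.
      card_table_base[object_address >> card_shift] =
          static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_table_base));
    }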
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 1b5974f..610625c 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -20,7 +20,7 @@
#include "code_generator.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
-#include "utils/arm/assembler_arm32.h"
+#include "utils/arm/assembler_thumb2.h"
namespace art {
namespace arm {
@@ -89,7 +89,7 @@
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
- FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -107,7 +107,7 @@
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
- FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -172,19 +172,22 @@
}
virtual InstructionSet GetInstructionSet() const OVERRIDE {
- return InstructionSet::kArm;
+ return InstructionSet::kThumb2;
}
- private:
// Helper method to move a 32bits value between two locations.
void Move32(Location destination, Location source);
// Helper method to move a 64bits value between two locations.
void Move64(Location destination, Location source);
+ // Emit a write barrier.
+ void MarkGCCard(Register temp, Register card, Register object, Register value);
+
+ private:
LocationsBuilderARM location_builder_;
InstructionCodeGeneratorARM instruction_visitor_;
ParallelMoveResolverARM move_resolver_;
- Arm32Assembler assembler_;
+ Thumb2Assembler assembler_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM);
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4e69a0c..ab53b17 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -15,14 +15,16 @@
*/
#include "code_generator_x86.h"
-#include "utils/assembler.h"
-#include "utils/x86/assembler_x86.h"
-#include "utils/x86/managed_register_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
#include "thread.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+#include "utils/x86/assembler_x86.h"
+#include "utils/x86/managed_register_x86.h"
namespace art {
@@ -32,6 +34,29 @@
namespace x86 {
+static constexpr bool kExplicitStackOverflowCheck = false;
+
+static constexpr int kNumberOfPushedRegistersAtEntry = 1;
+static constexpr int kCurrentMethodStackOffset = 0;
+
+static Location X86CpuLocation(Register reg) {
+ return Location::RegisterLocation(X86ManagedRegister::FromCpuRegister(reg));
+}
+
+static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->
class NullCheckSlowPathX86 : public SlowPathCode {
@@ -49,6 +74,46 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
};
+class StackOverflowCheckSlowPathX86 : public SlowPathCode {
+ public:
+ StackOverflowCheckSlowPathX86() {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ addl(ESP,
+ Immediate(codegen->GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
+ __ fs()->jmp(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowStackOverflow)));
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86);
+};
+
+class BoundsCheckSlowPathX86 : public SlowPathCode {
+ public:
+ explicit BoundsCheckSlowPathX86(uint32_t dex_pc,
+ Location index_location,
+ Location length_location)
+ : dex_pc_(dex_pc), index_location_(index_location), length_location_(length_location) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86* x86_codegen = reinterpret_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ x86_codegen->Move32(X86CpuLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ x86_codegen->Move32(X86CpuLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
+ codegen->RecordPcInfo(dex_pc_);
+ }
+
+ private:
+ const uint32_t dex_pc_;
+ const Location index_location_;
+ const Location length_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
+};
+
#undef __
#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
@@ -66,9 +131,6 @@
return kEqual;
}
-static constexpr int kNumberOfPushedRegistersAtEntry = 1;
-static constexpr int kCurrentMethodStackOffset = 0;
-
void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const {
stream << X86ManagedRegister::FromCpuRegister(Register(reg));
}
@@ -169,10 +231,6 @@
return kNumberOfRegIds;
}
-static Location X86CpuLocation(Register reg) {
- return Location::RegisterLocation(X86ManagedRegister::FromCpuRegister(reg));
-}
-
InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen)
: HGraphVisitor(graph),
assembler_(codegen->GetAssembler()),
@@ -183,8 +241,23 @@
static const int kFakeReturnRegister = 8;
core_spill_mask_ |= (1 << kFakeReturnRegister);
+ bool skip_overflow_check = IsLeafMethod() && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86);
+ if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
+ __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
+ RecordPcInfo(0);
+ }
+
// The return PC has already been pushed on the stack.
__ subl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
+
+ if (!skip_overflow_check && kExplicitStackOverflowCheck) {
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86();
+ AddSlowPath(slow_path);
+
+ __ fs()->cmpl(ESP, Address::Absolute(Thread::StackEndOffset<kX86WordSize>()));
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+
__ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
}
@@ -226,20 +299,6 @@
return Location();
}
-static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
-
-class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
- public:
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
@@ -473,6 +532,10 @@
// LHS is guaranteed to be in a register (see LocationsBuilderX86::VisitCondition).
if (rhs.IsRegister()) {
__ cmpl(lhs.AsX86().AsCpuRegister(), rhs.AsX86().AsCpuRegister());
+ } else if (rhs.IsConstant()) {
+ HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ __ cmpl(lhs.AsX86().AsCpuRegister(), imm);
} else {
__ cmpl(lhs.AsX86().AsCpuRegister(), Address(ESP, rhs.GetStackIndex()));
}
@@ -530,7 +593,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
if (comp->NeedsMaterialization()) {
- locations->SetOut(Location::SameAsFirstInput());
+ locations->SetOut(Location::RequiresRegister());
}
comp->SetLocations(locations);
}
@@ -541,6 +604,10 @@
if (locations->InAt(1).IsRegister()) {
__ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
locations->InAt(1).AsX86().AsCpuRegister());
+ } else if (locations->InAt(1).IsConstant()) {
+ HConstant* instruction = locations->InAt(1).GetConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(), imm);
} else {
__ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
Address(ESP, locations->InAt(1).GetStackIndex()));
@@ -598,20 +665,17 @@
}
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
- // TODO: Support constant locations.
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::ConstantLocation(constant));
constant->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
- codegen_->Move(constant, constant->GetLocations()->Out(), nullptr);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
- // TODO: Support constant locations.
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::ConstantLocation(constant));
constant->SetLocations(locations);
}
@@ -676,6 +740,7 @@
}
void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ codegen_->MarkNotLeaf();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
locations->AddTemp(X86CpuLocation(EAX));
@@ -733,6 +798,7 @@
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke->GetDexPc());
}
@@ -769,6 +835,10 @@
if (locations->InAt(1).IsRegister()) {
__ addl(locations->InAt(0).AsX86().AsCpuRegister(),
locations->InAt(1).AsX86().AsCpuRegister());
+ } else if (locations->InAt(1).IsConstant()) {
+ HConstant* instruction = locations->InAt(1).GetConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ __ addl(locations->InAt(0).AsX86().AsCpuRegister(), imm);
} else {
__ addl(locations->InAt(0).AsX86().AsCpuRegister(),
Address(ESP, locations->InAt(1).GetStackIndex()));
@@ -838,6 +908,10 @@
if (locations->InAt(1).IsRegister()) {
__ subl(locations->InAt(0).AsX86().AsCpuRegister(),
locations->InAt(1).AsX86().AsCpuRegister());
+ } else if (locations->InAt(1).IsConstant()) {
+ HConstant* instruction = locations->InAt(1).GetConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ __ subl(locations->InAt(0).AsX86().AsCpuRegister(), imm);
} else {
__ subl(locations->InAt(0).AsX86().AsCpuRegister(),
Address(ESP, locations->InAt(1).GetStackIndex()));
@@ -875,6 +949,7 @@
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
+ codegen_->MarkNotLeaf();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetOut(X86CpuLocation(EAX));
InvokeRuntimeCallingConvention calling_convention;
@@ -892,6 +967,7 @@
Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocObjectWithAccessCheck)));
codegen_->RecordPcInfo(instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
}
void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
@@ -996,6 +1072,12 @@
} else {
locations->SetInAt(1, Location::RequiresRegister());
}
+ // Temporary registers for the write barrier.
+ if (field_type == Primitive::kPrimNot) {
+ locations->AddTemp(Location::RequiresRegister());
+ // Ensure the card is in a byte register.
+ locations->AddTemp(X86CpuLocation(ECX));
+ }
instruction->SetLocations(locations);
}
@@ -1024,6 +1106,12 @@
case Primitive::kPrimNot: {
Register value = locations->InAt(1).AsX86().AsCpuRegister();
__ movl(Address(obj, offset), value);
+
+ if (field_type == Primitive::kPrimNot) {
+ Register temp = locations->GetTemp(0).AsX86().AsCpuRegister();
+ Register card = locations->GetTemp(1).AsX86().AsCpuRegister();
+ codegen_->MarkGCCard(temp, card, obj, value);
+ }
break;
}
@@ -1043,6 +1131,18 @@
}
}
+void CodeGeneratorX86::MarkGCCard(Register temp, Register card, Register object, Register value) {
+ Label is_null;
+ __ testl(value, value);
+ __ j(kEqual, &is_null);
+ __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86WordSize>().Int32Value()));
+ __ movl(temp, object);
+ __ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift));
+ __ movb(Address(temp, card, TIMES_1, 0),
+ X86ManagedRegister::FromCpuRegister(card).AsByteRegister());
+ __ Bind(&is_null);
+}
+
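
MarkGCCard implements the generational write barrier: after a reference store, the card-table byte covering the holding object is dirtied so the GC can find cross-generation pointers without scanning the whole heap. The assembly stores the low byte of the card-table base register itself as the dirty value, which presumably only works because the runtime biases that base so its low byte equals the dirty marker; that arrangement is an assumption here, as is the 128-byte card size (the real constant is gc::accounting::CardTable::kCardShift). A standalone model of the address arithmetic, storing an explicit marker instead:

// Minimal model of the card-marking arithmetic emitted by MarkGCCard above.
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uintptr_t kCardShift = 7;  // assumption: one card byte covers 128 heap bytes

int main() {
  const uintptr_t heap_begin = 0x10000;
  std::vector<uint8_t> card_table(1u << 10, 0);   // covers a 128 KiB toy heap

  uintptr_t object = heap_begin + 0x2A40;                       // object that received a reference
  uintptr_t card_index = (object - heap_begin) >> kCardShift;   // which card it lives on
  card_table[card_index] = 0x70;                                // any non-zero byte means "dirty"

  std::printf("object %#lx dirtied card %lu\n",
              static_cast<unsigned long>(object),
              static_cast<unsigned long>(card_index));
  return 0;
}
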
void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
@@ -1130,6 +1230,243 @@
__ j(kEqual, slow_path->GetEntryLabel());
}
+void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Location index = locations->InAt(1);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register out = locations->Out().AsX86().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movzxb(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
+ } else {
+ __ movzxb(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+ Register out = locations->Out().AsX86().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movsxb(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
+ } else {
+ __ movsxb(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+ Register out = locations->Out().AsX86().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movsxw(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
+ } else {
+ __ movsxw(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register out = locations->Out().AsX86().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movzxw(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
+ } else {
+ __ movzxw(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register out = locations->Out().AsX86().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movl(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
+ } else {
+ __ movl(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ X86ManagedRegister out = locations->Out().AsX86();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ movl(out.AsRegisterPairLow(), Address(obj, offset));
+ __ movl(out.AsRegisterPairHigh(), Address(obj, offset + kX86WordSize));
+ } else {
+ __ movl(out.AsRegisterPairLow(),
+ Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset));
+ __ movl(out.AsRegisterPairHigh(),
+ Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize));
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ }
+}
+
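
Every case above computes the same effective address, obj + DataOffset(element_size) + (index << scale), either folding a constant index into the displacement or using a scaled-index addressing mode (TIMES_1/2/4/8). A quick standalone restatement; the header offsets 12 and 16 are placeholders, the real values come from mirror::Array::DataOffset:

// Effective-address math behind the array loads above.
#include <cstdint>
#include <cstdio>

uintptr_t ElementAddress(uintptr_t obj, uint32_t data_offset, int32_t index, int log2_size) {
  return obj + data_offset + (static_cast<uintptr_t>(index) << log2_size);
}

int main() {
  uintptr_t obj = 0x7000;
  // int[3]:  scale TIMES_4, assumed 12-byte header before the data.
  std::printf("int[3]  -> %#lx\n", static_cast<unsigned long>(ElementAddress(obj, 12, 3, 2)));
  // long[3]: scale TIMES_8, assumed 16-byte header before the data.
  std::printf("long[3] -> %#lx\n", static_cast<unsigned long>(ElementAddress(obj, 16, 3, 3)));
  return 0;
}
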
+void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type value_type = instruction->InputAt(2)->GetType();
+ if (value_type == Primitive::kPrimNot) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, X86CpuLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, X86CpuLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, X86CpuLocation(calling_convention.GetRegisterAt(2)));
+ codegen_->MarkNotLeaf();
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (value_type == Primitive::kPrimBoolean || value_type == Primitive::kPrimByte) {
+ // Ensure the value is in a byte register.
+ locations->SetInAt(2, X86CpuLocation(EAX));
+ } else {
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+ }
+
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Location index = locations->InAt(1);
+ Primitive::Type value_type = instruction->InputAt(2)->GetType();
+
+ switch (value_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ ByteRegister value = locations->InAt(2).AsX86().AsByteRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ movb(Address(obj, offset), value);
+ } else {
+ __ movb(Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset), value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register value = locations->InAt(2).AsX86().AsCpuRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ movw(Address(obj, offset), value);
+ } else {
+ __ movw(Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset), value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).AsX86().AsCpuRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ movl(Address(obj, offset), value);
+ } else {
+ __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset), value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimNot: {
+ DCHECK(!codegen_->IsLeafMethod());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject)));
+ codegen_->RecordPcInfo(instruction->GetDexPc());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ X86ManagedRegister value = locations->InAt(2).AsX86();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ movl(Address(obj, offset), value.AsRegisterPairLow());
+ __ movl(Address(obj, offset + kX86WordSize), value.AsRegisterPairHigh());
+ } else {
+ __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset),
+ value.AsRegisterPairLow());
+ __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize),
+ value.AsRegisterPairHigh());
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ }
+}
+
+void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register out = locations->Out().AsX86().AsCpuRegister();
+ __ movl(out, Address(obj, offset));
+}
+
+void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // TODO: Have a normalization phase that makes this instruction never used.
+ locations->SetOut(Location::SameAsFirstInput());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
+ instruction->GetDexPc(), locations->InAt(0), locations->InAt(1));
+ codegen_->AddSlowPath(slow_path);
+
+ Register index = locations->InAt(0).AsX86().AsCpuRegister();
+ Register length = locations->InAt(1).AsX86().AsCpuRegister();
+
+ __ cmpl(index, length);
+ __ j(kAboveEqual, slow_path->GetEntryLabel());
+}
+
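
A single cmpl plus j kAboveEqual suffices because the branch condition is unsigned: a negative index reinterpreted as a 32-bit unsigned value is at least 0x80000000 and is therefore always "above or equal" to any non-negative length, so both index < 0 and index >= length reach the slow path. Equivalent C++:

// Both out-of-range cases collapse into one unsigned comparison.
#include <cstdint>
#include <cstdio>

bool OutOfBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
}

int main() {
  std::printf("%d %d %d\n", OutOfBounds(-1, 10), OutOfBounds(3, 10), OutOfBounds(10, 10));
  // prints: 1 0 1
  return 0;
}
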
void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
@@ -1178,6 +1515,14 @@
MoveMemoryToMemory(destination.GetStackIndex(),
source.GetStackIndex());
}
+ } else if (source.IsConstant()) {
+ HIntConstant* instruction = source.GetConstant()->AsIntConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ if (destination.IsRegister()) {
+ __ movl(destination.AsX86().AsCpuRegister(), imm);
+ } else {
+ __ movl(Address(ESP, destination.GetStackIndex()), imm);
+ }
} else {
LOG(FATAL) << "Unimplemented";
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index d622d2a..7c50204 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -90,7 +90,7 @@
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
- FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -108,7 +108,7 @@
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
- FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -126,7 +126,7 @@
class CodeGeneratorX86 : public CodeGenerator {
public:
explicit CodeGeneratorX86(HGraph* graph);
- virtual ~CodeGeneratorX86() { }
+ virtual ~CodeGeneratorX86() {}
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
@@ -177,12 +177,15 @@
return InstructionSet::kX86;
}
- private:
// Helper method to move a 32bits value between two locations.
void Move32(Location destination, Location source);
// Helper method to move a 64bits value between two locations.
void Move64(Location destination, Location source);
+ // Emit a write barrier.
+ void MarkGCCard(Register temp, Register card, Register object, Register value);
+
+ private:
LocationsBuilderX86 location_builder_;
InstructionCodeGeneratorX86 instruction_visitor_;
ParallelMoveResolverX86 move_resolver_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e3ce5ce..e4259f5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -17,11 +17,13 @@
#include "code_generator_x86_64.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
#include "mirror/object_reference.h"
#include "thread.h"
#include "utils/assembler.h"
+#include "utils/stack_checks.h"
#include "utils/x86_64/assembler_x86_64.h"
#include "utils/x86_64/managed_register_x86_64.h"
@@ -33,6 +35,32 @@
namespace x86_64 {
+static constexpr bool kExplicitStackOverflowCheck = false;
+
+// Some x86_64 instructions require a register to be available as temp.
+static constexpr Register TMP = R11;
+
+static constexpr int kNumberOfPushedRegistersAtEntry = 1;
+static constexpr int kCurrentMethodStackOffset = 0;
+
+static Location X86_64CpuLocation(Register reg) {
+ return Location::RegisterLocation(X86_64ManagedRegister::FromCpuRegister(reg));
+}
+
+static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->
class NullCheckSlowPathX86_64 : public SlowPathCode {
@@ -41,7 +69,8 @@
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
codegen->RecordPcInfo(dex_pc_);
}
@@ -50,6 +79,48 @@
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
};
+class StackOverflowCheckSlowPathX86_64 : public SlowPathCode {
+ public:
+ StackOverflowCheckSlowPathX86_64() {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ addq(CpuRegister(RSP),
+ Immediate(codegen->GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
+ __ gs()->jmp(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowStackOverflow), true));
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86_64);
+};
+
+class BoundsCheckSlowPathX86_64 : public SlowPathCode {
+ public:
+ explicit BoundsCheckSlowPathX86_64(uint32_t dex_pc,
+ Location index_location,
+ Location length_location)
+ : dex_pc_(dex_pc), index_location_(index_location), length_location_(length_location) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorX86_64* x64_codegen = reinterpret_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ x64_codegen->Move(X86_64CpuLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ x64_codegen->Move(X86_64CpuLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
+ codegen->RecordPcInfo(dex_pc_);
+ }
+
+ private:
+ const uint32_t dex_pc_;
+ const Location index_location_;
+ const Location length_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
+};
+
#undef __
#define __ reinterpret_cast<X86_64Assembler*>(GetAssembler())->
@@ -67,12 +138,6 @@
return kEqual;
}
-// Some x86_64 instructions require a register to be available as temp.
-static constexpr Register TMP = R11;
-
-static constexpr int kNumberOfPushedRegistersAtEntry = 1;
-static constexpr int kCurrentMethodStackOffset = 0;
-
void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
stream << X86_64ManagedRegister::FromCpuRegister(Register(reg));
}
@@ -81,10 +146,6 @@
stream << X86_64ManagedRegister::FromXmmRegister(FloatRegister(reg));
}
-static Location X86_64CpuLocation(Register reg) {
- return Location::RegisterLocation(X86_64ManagedRegister::FromCpuRegister(reg));
-}
-
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph)
: CodeGenerator(graph, kNumberOfRegIds),
location_builder_(graph, this),
@@ -95,7 +156,8 @@
return kNumberOfPushedRegistersAtEntry * kX86_64WordSize;
}
-InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen)
+InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph,
+ CodeGeneratorX86_64* codegen)
: HGraphVisitor(graph),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -146,8 +208,28 @@
static const int kFakeReturnRegister = 16;
core_spill_mask_ |= (1 << kFakeReturnRegister);
+ bool skip_overflow_check = IsLeafMethod()
+ && !IsLargeFrame(GetFrameSize(), InstructionSet::kX86_64);
+
+ if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
+ __ testq(CpuRegister(RAX), Address(
+ CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+ RecordPcInfo(0);
+ }
+
// The return PC has already been pushed on the stack.
- __ subq(CpuRegister(RSP), Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
+ __ subq(CpuRegister(RSP),
+ Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
+
+ if (!skip_overflow_check && kExplicitStackOverflowCheck) {
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
+ AddSlowPath(slow_path);
+
+ __ gs()->cmpq(CpuRegister(RSP),
+ Address::Absolute(Thread::StackEndOffset<kX86_64WordSize>(), true));
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+
__ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
}
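
Two strategies coexist in this frame entry, selected by kExplicitStackOverflowCheck: the implicit form probes an address below RSP with a throwaway testq so that an exhausted stack faults on the guard page, while the explicit form compares RSP against the thread's stack end and branches to StackOverflowCheckSlowPathX86_64. Leaf methods with small frames skip the check entirely. A sketch of that decision only; the threshold constant is an assumption (the real logic lives in utils/stack_checks.h):

// Illustrative-only model of the skip/implicit/explicit choice made above.
#include <cstdio>

constexpr bool kExplicitStackOverflowCheck = false;
constexpr int kLargeFrameBytes = 2048;   // assumption: "large frame" threshold

enum class OverflowCheck { kNone, kImplicitProbe, kExplicitCompare };

OverflowCheck ChooseCheck(bool is_leaf, int frame_size) {
  if (is_leaf && frame_size < kLargeFrameBytes) return OverflowCheck::kNone;
  return kExplicitStackOverflowCheck ? OverflowCheck::kExplicitCompare
                                     : OverflowCheck::kImplicitProbe;
}

int main() {
  std::printf("leaf/small frame: %d\n", static_cast<int>(ChooseCheck(true, 64)));    // kNone
  std::printf("non-leaf:         %d\n", static_cast<int>(ChooseCheck(false, 64)));   // kImplicitProbe
  return 0;
}
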
@@ -329,7 +411,14 @@
} else {
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), rhs.AsX86_64().AsCpuRegister());
+ if (rhs.IsRegister()) {
+ __ cmpl(lhs.AsX86_64().AsCpuRegister(), rhs.AsX86_64().AsCpuRegister());
+ } else if (rhs.IsConstant()) {
+ __ cmpl(lhs.AsX86_64().AsCpuRegister(),
+ Immediate(rhs.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(lhs.AsX86_64().AsCpuRegister(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ }
__ j(X86_64Condition(condition->GetCondition()),
codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
@@ -382,7 +471,7 @@
void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(comp);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
if (comp->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister());
}
@@ -391,8 +480,17 @@
void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
- __ cmpq(comp->GetLocations()->InAt(0).AsX86_64().AsCpuRegister(),
- comp->GetLocations()->InAt(1).AsX86_64().AsCpuRegister());
+ LocationSummary* locations = comp->GetLocations();
+ if (locations->InAt(1).IsRegister()) {
+ __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ locations->InAt(1).AsX86_64().AsCpuRegister());
+ } else if (locations->InAt(1).IsConstant()) {
+ __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ Immediate(locations->InAt(1).GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
+ }
__ setcc(X86_64Condition(comp->GetCondition()),
comp->GetLocations()->Out().AsX86_64().AsCpuRegister());
}
@@ -480,25 +578,21 @@
}
void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
- // TODO: Support constant locations.
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::ConstantLocation(constant));
constant->SetLocations(locations);
}
void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
- codegen_->Move(constant, constant->GetLocations()->Out(), nullptr);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
- // TODO: Support constant locations.
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::ConstantLocation(constant));
constant->SetLocations(locations);
}
void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
- codegen_->Move(constant, constant->GetLocations()->Out(), nullptr);
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -550,20 +644,6 @@
__ ret();
}
-static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
- arraysize(kRuntimeParameterCoreRegisters);
-
-class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
- public:
- InvokeRuntimeCallingConvention()
- : CallingConvention(kRuntimeParameterCoreRegisters,
- kRuntimeParameterCoreRegistersLength) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
@@ -606,6 +686,7 @@
}
void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ codegen_->MarkNotLeaf();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
locations->AddTemp(X86_64CpuLocation(RDI));
@@ -660,13 +741,19 @@
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke->GetDexPc());
}
void LocationsBuilderX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add);
switch (add->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ }
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -693,8 +780,17 @@
locations->Out().AsX86_64().AsCpuRegister().AsRegister());
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- __ addl(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ if (locations->InAt(1).IsRegister()) {
+ __ addl(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ locations->InAt(1).AsX86_64().AsCpuRegister());
+ } else if (locations->InAt(1).IsConstant()) {
+ HConstant* instruction = locations->InAt(1).GetConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ __ addl(locations->InAt(0).AsX86_64().AsCpuRegister(), imm);
+ } else {
+ __ addl(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
+ }
break;
}
case Primitive::kPrimLong: {
@@ -718,7 +814,12 @@
void LocationsBuilderX86_64::VisitSub(HSub* sub) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub);
switch (sub->GetResultType()) {
- case Primitive::kPrimInt:
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ }
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -745,8 +846,17 @@
locations->Out().AsX86_64().AsCpuRegister().AsRegister());
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ if (locations->InAt(1).IsRegister()) {
+ __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ locations->InAt(1).AsX86_64().AsCpuRegister());
+ } else if (locations->InAt(1).IsConstant()) {
+ HConstant* instruction = locations->InAt(1).GetConstant();
+ Immediate imm(instruction->AsIntConstant()->GetValue());
+ __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(), imm);
+ } else {
+ __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
+ }
break;
}
case Primitive::kPrimLong: {
@@ -768,6 +878,7 @@
}
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
+ codegen_->MarkNotLeaf();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetOut(X86_64CpuLocation(RAX));
instruction->SetLocations(locations);
@@ -781,6 +892,7 @@
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocObjectWithAccessCheck), true));
+ DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(instruction->GetDexPc());
}
@@ -831,6 +943,11 @@
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
+ // Temporary registers for the write barrier.
+ if (instruction->InputAt(1)->GetType() == Primitive::kPrimNot) {
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
instruction->SetLocations(locations);
}
@@ -857,6 +974,11 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
__ movl(Address(obj, offset), value);
+ if (field_type == Primitive::kPrimNot) {
+ CpuRegister temp = locations->GetTemp(0).AsX86_64().AsCpuRegister();
+ CpuRegister card = locations->GetTemp(1).AsX86_64().AsCpuRegister();
+ codegen_->MarkGCCard(temp, card, obj, value);
+ }
break;
}
@@ -954,6 +1076,245 @@
__ j(kEqual, slow_path->GetEntryLabel());
}
+void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
+ Location index = locations->InAt(1);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movzxb(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
+ } else {
+ __ movzxb(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+ CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movsxb(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
+ } else {
+ __ movsxb(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+ CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movsxw(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
+ } else {
+ __ movsxw(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movzxw(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
+ } else {
+ __ movzxw(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movl(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
+ } else {
+ __ movl(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ __ movq(out, Address(obj,
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
+ } else {
+ __ movq(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_8, data_offset));
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ }
+}
+
+void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type value_type = instruction->InputAt(2)->GetType();
+ if (value_type == Primitive::kPrimNot) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, X86_64CpuLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, X86_64CpuLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, X86_64CpuLocation(calling_convention.GetRegisterAt(2)));
+ codegen_->MarkNotLeaf();
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
+ Location index = locations->InAt(1);
+ Primitive::Type value_type = instruction->InputAt(2)->GetType();
+
+ switch (value_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ CpuRegister value = locations->InAt(2).AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ movb(Address(obj, offset), value);
+ } else {
+ __ movb(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset), value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ CpuRegister value = locations->InAt(2).AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ movw(Address(obj, offset), value);
+ } else {
+ __ movw(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset), value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ CpuRegister value = locations->InAt(2).AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ movl(Address(obj, offset), value);
+ } else {
+ __ movl(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset), value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimNot: {
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject), true));
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(instruction->GetDexPc());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ CpuRegister value = locations->InAt(2).AsX86_64().AsCpuRegister();
+ if (index.IsConstant()) {
+ size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ movq(Address(obj, offset), value);
+ } else {
+ __ movq(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_8, data_offset), value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ }
+}
+
+void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ __ movl(out, Address(obj, offset));
+}
+
+void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // TODO: Have a normalization phase that makes this instruction never used.
+ locations->SetOut(Location::SameAsFirstInput());
+ instruction->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(
+ instruction->GetDexPc(), locations->InAt(0), locations->InAt(1));
+ codegen_->AddSlowPath(slow_path);
+
+ CpuRegister index = locations->InAt(0).AsX86_64().AsCpuRegister();
+ CpuRegister length = locations->InAt(1).AsX86_64().AsCpuRegister();
+
+ __ cmpl(index, length);
+ __ j(kAboveEqual, slow_path->GetEntryLabel());
+}
+
+void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
+ CpuRegister card,
+ CpuRegister object,
+ CpuRegister value) {
+ Label is_null;
+ __ testl(value, value);
+ __ j(kEqual, &is_null);
+ __ gs()->movq(card, Address::Absolute(
+ Thread::CardTableOffset<kX86_64WordSize>().Int32Value(), true));
+ __ movq(temp, object);
+ __ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
+ __ movb(Address(temp, card, TIMES_1, 0), card);
+ __ Bind(&is_null);
+}
+
void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
@@ -1008,6 +1369,26 @@
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
+ } else if (source.IsConstant()) {
+ HConstant* constant = source.GetConstant();
+ if (constant->IsIntConstant()) {
+ Immediate imm(constant->AsIntConstant()->GetValue());
+ if (destination.IsRegister()) {
+ __ movl(destination.AsX86_64().AsCpuRegister(), imm);
+ } else {
+ __ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
+ }
+ } else if (constant->IsLongConstant()) {
+ int64_t value = constant->AsLongConstant()->GetValue();
+ if (destination.IsRegister()) {
+ __ movq(destination.AsX86_64().AsCpuRegister(), Immediate(value));
+ } else {
+ __ movq(CpuRegister(TMP), Immediate(value));
+ __ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
+ }
+ } else {
+ LOG(FATAL) << "Unimplemented constant type";
+ }
} else {
LOG(FATAL) << "Unimplemented";
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 8283dda..44552ea 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -87,7 +87,7 @@
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
- FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -105,7 +105,7 @@
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
- FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -176,10 +176,13 @@
return InstructionSet::kX86_64;
}
- private:
+ // Emit a write barrier.
+ void MarkGCCard(CpuRegister temp, CpuRegister card, CpuRegister object, CpuRegister value);
+
// Helper method to move a value between two locations.
void Move(Location destination, Location source);
+ private:
LocationsBuilderX86_64 location_builder_;
InstructionCodeGeneratorX86_64 instruction_visitor_;
ParallelMoveResolverX86_64 move_resolver_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 7ec0c84..d7ac10d 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -48,10 +48,17 @@
};
#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
-static void Run(const InternalCodeAllocator& allocator, bool has_result, int32_t expected) {
+static void Run(const InternalCodeAllocator& allocator,
+ const CodeGenerator& codegen,
+ bool has_result,
+ int32_t expected) {
typedef int32_t (*fptr)();
CommonCompilerTest::MakeExecutable(allocator.GetMemory(), allocator.GetSize());
fptr f = reinterpret_cast<fptr>(allocator.GetMemory());
+ if (codegen.GetInstructionSet() == kThumb2) {
+ // For thumb we need the bottom bit set.
+ f = reinterpret_cast<fptr>(reinterpret_cast<uintptr_t>(f) + 1);
+ }
int32_t result = f();
if (has_result) {
CHECK_EQ(result, expected);
@@ -69,21 +76,23 @@
InternalCodeAllocator allocator;
CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, kX86);
- codegen->CompileBaseline(&allocator);
+ // We avoid doing a stack overflow check, which would require the runtime to be set up,
+ // by making sure the compiler knows the methods we are running are leaf methods.

+ codegen->CompileBaseline(&allocator, true);
#if defined(__i386__)
- Run(allocator, has_result, expected);
+ Run(allocator, *codegen, has_result, expected);
#endif
codegen = CodeGenerator::Create(&arena, graph, kArm);
- codegen->CompileBaseline(&allocator);
+ codegen->CompileBaseline(&allocator, true);
#if defined(__arm__)
- Run(allocator, has_result, expected);
+ Run(allocator, *codegen, has_result, expected);
#endif
codegen = CodeGenerator::Create(&arena, graph, kX86_64);
- codegen->CompileBaseline(&allocator);
+ codegen->CompileBaseline(&allocator, true);
#if defined(__x86_64__)
- Run(allocator, has_result, expected);
+ Run(allocator, *codegen, has_result, expected);
#endif
}
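
The +1 applied to the function pointer above sets bit 0 of the branch target: on ARM, branching to an address with the low bit set switches the core into Thumb state, which is required now that the optimizing compiler emits Thumb2 code. A standalone illustration of the address adjustment (the code address below is a dummy value, and nothing is actually called):

// Entry points into Thumb2 code must carry bit 0.
#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t generated_code = 0x40001000;         // dummy address of emitted code
  uintptr_t entry_point = generated_code | 1u;   // set the Thumb bit
  std::printf("branch target: %#lx\n", static_cast<unsigned long>(entry_point));
  return 0;
}
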
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index f033e2e..f011e85 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -108,6 +108,10 @@
} else {
codegen_.DumpCoreRegister(output_, location.reg().RegId());
}
+ } else if (location.IsConstant()) {
+ output_ << "constant";
+ } else if (location.IsInvalid()) {
+ output_ << "invalid";
} else if (location.IsStackSlot()) {
output_ << location.GetStackIndex() << "(sp)";
} else {
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 98766d2..468cfb7 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -29,4 +29,11 @@
}
}
+
+Location Location::RegisterOrConstant(HInstruction* instruction) {
+ return instruction->IsConstant()
+ ? Location::ConstantLocation(instruction->AsConstant())
+ : Location::RequiresRegister();
+}
+
} // namespace art
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 40a39ad..aaddb09 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -24,6 +24,7 @@
namespace art {
+class HConstant;
class HInstruction;
/**
@@ -34,23 +35,33 @@
public:
enum Kind {
kInvalid = 0,
- kStackSlot = 1, // Word size slot.
- kDoubleStackSlot = 2, // 64bit stack slot.
- kRegister = 3,
+ kConstant = 1,
+ kStackSlot = 2, // Word size slot.
+ kDoubleStackSlot = 3, // 64bit stack slot.
+ kRegister = 4,
// On 32bits architectures, quick can pass a long where the
// low bits are in the last parameter register, and the high
// bits are in a stack slot. The kQuickParameter kind is for
// handling this special case.
- kQuickParameter = 4,
+ kQuickParameter = 5,
// Unallocated location represents a location that is not fixed and can be
// allocated by a register allocator. Each unallocated location has
// a policy that specifies what kind of location is suitable. Payload
// contains register allocation policy.
- kUnallocated = 5,
+ kUnallocated = 6,
};
Location() : value_(kInvalid) {
+ // Verify that non-tagged location kinds do not interfere with kConstantTag.
+ COMPILE_ASSERT((kInvalid & kLocationTagMask) != kConstant, TagError);
+ COMPILE_ASSERT((kUnallocated & kLocationTagMask) != kConstant, TagError);
+ COMPILE_ASSERT((kStackSlot & kLocationTagMask) != kConstant, TagError);
+ COMPILE_ASSERT((kDoubleStackSlot & kLocationTagMask) != kConstant, TagError);
+ COMPILE_ASSERT((kRegister & kLocationTagMask) != kConstant, TagError);
+ COMPILE_ASSERT((kConstant & kLocationTagMask) == kConstant, TagError);
+ COMPILE_ASSERT((kQuickParameter & kLocationTagMask) == kConstant, TagError);
+
DCHECK(!IsValid());
}
@@ -61,6 +72,20 @@
return *this;
}
+ bool IsConstant() const {
+ return (value_ & kLocationTagMask) == kConstant;
+ }
+
+ static Location ConstantLocation(HConstant* constant) {
+ DCHECK(constant != nullptr);
+ return Location(kConstant | reinterpret_cast<uword>(constant));
+ }
+
+ HConstant* GetConstant() const {
+ DCHECK(IsConstant());
+ return reinterpret_cast<HConstant*>(value_ & ~kLocationTagMask);
+ }
+
bool IsValid() const {
return value_ != kInvalid;
}
@@ -69,11 +94,6 @@
return !IsValid();
}
- bool IsConstant() const {
- // TODO: support constants.
- return false;
- }
-
// Empty location. Used if the location should be ignored.
static Location NoLocation() {
return Location();
@@ -162,12 +182,13 @@
const char* DebugString() const {
switch (GetKind()) {
- case kInvalid: return "?";
+ case kInvalid: return "I";
case kRegister: return "R";
case kStackSlot: return "S";
case kDoubleStackSlot: return "DS";
case kQuickParameter: return "Q";
case kUnallocated: return "U";
+ case kConstant: return "C";
}
return "?";
}
@@ -196,6 +217,8 @@
return UnallocatedLocation(kRequiresRegister);
}
+ static Location RegisterOrConstant(HInstruction* instruction);
+
// The location of the first input to the instruction will be
// used to replace this unallocated location.
static Location SameAsFirstInput() {
@@ -215,6 +238,7 @@
// Number of bits required to encode Kind value.
static constexpr uint32_t kBitsForKind = 4;
static constexpr uint32_t kBitsForPayload = kWordSize * kBitsPerByte - kBitsForKind;
+ static constexpr uword kLocationTagMask = 0x3;
explicit Location(uword value) : value_(value) {}
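
ConstantLocation and GetConstant rely on pointer tagging: HConstant objects come out of the arena with at least 4-byte alignment, so the low two bits of the pointer are free to carry the kConstant tag, and masking with kLocationTagMask recovers the pointer. A minimal sketch of the scheme; the names below are illustrative, not ART's:

// Pointer tagging as used by ConstantLocation/GetConstant above.
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kTagMask = 0x3;
constexpr uintptr_t kConstantTag = 1;

struct FakeConstant { int32_t value; };

uintptr_t Encode(FakeConstant* c) {
  assert((reinterpret_cast<uintptr_t>(c) & kTagMask) == 0);  // alignment leaves the low bits free
  return reinterpret_cast<uintptr_t>(c) | kConstantTag;
}

FakeConstant* Decode(uintptr_t location) {
  assert((location & kTagMask) == kConstantTag);
  return reinterpret_cast<FakeConstant*>(location & ~kTagMask);
}

int main() {
  alignas(4) FakeConstant c{42};
  uintptr_t loc = Encode(&c);
  std::printf("decoded value: %d\n", Decode(loc)->value);  // 42
  return 0;
}
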
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e87b044..bb699e4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -408,7 +408,7 @@
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
-#define FOR_EACH_INSTRUCTION(M) \
+#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Add) \
M(Condition) \
M(Equal) \
@@ -437,9 +437,16 @@
M(Compare) \
M(InstanceFieldGet) \
M(InstanceFieldSet) \
+ M(ArrayGet) \
+ M(ArraySet) \
+ M(ArrayLength) \
+ M(BoundsCheck) \
M(NullCheck) \
M(Temporary) \
+#define FOR_EACH_INSTRUCTION(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION(M) \
+ M(Constant)
#define FORWARD_DECLARATION(type) class H##type;
FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
@@ -494,6 +501,7 @@
void SetBlock(HBasicBlock* block) { block_ = block; }
bool IsInBlock() const { return block_ != nullptr; }
bool IsInLoop() const { return block_->IsInLoop(); }
+ bool IsLoopHeaderPhi() { return IsPhi() && block_->IsLoopHeader(); }
virtual size_t InputCount() const = 0;
virtual HInstruction* InputAt(size_t i) const = 0;
@@ -1078,11 +1086,21 @@
DISALLOW_COPY_AND_ASSIGN(HStoreLocal);
};
+class HConstant : public HExpression<0> {
+ public:
+ explicit HConstant(Primitive::Type type) : HExpression(type) {}
+
+ DECLARE_INSTRUCTION(Constant);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HConstant);
+};
+
// Constants of the type int. Those can be from Dex instructions, or
// synthesized (for example with the if-eqz instruction).
-class HIntConstant : public HExpression<0> {
+class HIntConstant : public HConstant {
public:
- explicit HIntConstant(int32_t value) : HExpression(Primitive::kPrimInt), value_(value) {}
+ explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {}
int32_t GetValue() const { return value_; }
@@ -1094,14 +1112,12 @@
DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
-class HLongConstant : public HExpression<0> {
+class HLongConstant : public HConstant {
public:
- explicit HLongConstant(int64_t value) : HExpression(Primitive::kPrimLong), value_(value) {}
+ explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {}
int64_t GetValue() const { return value_; }
- virtual Primitive::Type GetType() const { return Primitive::kPrimLong; }
-
DECLARE_INSTRUCTION(LongConstant);
private:
@@ -1278,13 +1294,12 @@
DECLARE_INSTRUCTION(Phi);
- protected:
+ private:
GrowableArray<HInstruction*> inputs_;
const uint32_t reg_number_;
Primitive::Type type_;
bool is_live_;
- private:
DISALLOW_COPY_AND_ASSIGN(HPhi);
};
@@ -1357,6 +1372,80 @@
DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet);
};
+class HArrayGet : public HExpression<2> {
+ public:
+ HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type)
+ : HExpression(type) {
+ SetRawInputAt(0, array);
+ SetRawInputAt(1, index);
+ }
+
+ DECLARE_INSTRUCTION(ArrayGet);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HArrayGet);
+};
+
+class HArraySet : public HTemplateInstruction<3> {
+ public:
+ HArraySet(HInstruction* array,
+ HInstruction* index,
+ HInstruction* value,
+ uint32_t dex_pc) : dex_pc_(dex_pc) {
+ SetRawInputAt(0, array);
+ SetRawInputAt(1, index);
+ SetRawInputAt(2, value);
+ }
+
+ virtual bool NeedsEnvironment() const {
+ // We currently always call a runtime method to catch array store
+ // exceptions.
+ return InputAt(2)->GetType() == Primitive::kPrimNot;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(ArraySet);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HArraySet);
+};
+
+class HArrayLength : public HExpression<1> {
+ public:
+ explicit HArrayLength(HInstruction* array) : HExpression(Primitive::kPrimInt) {
+ SetRawInputAt(0, array);
+ }
+
+ DECLARE_INSTRUCTION(ArrayLength);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HArrayLength);
+};
+
+class HBoundsCheck : public HExpression<2> {
+ public:
+ HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc)
+ : HExpression(index->GetType()), dex_pc_(dex_pc) {
+ DCHECK(index->GetType() == Primitive::kPrimInt);
+ SetRawInputAt(0, index);
+ SetRawInputAt(1, length);
+ }
+
+ virtual bool NeedsEnvironment() const { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(BoundsCheck);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
+};
+
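
Taken together, the new nodes let the builder expand an aget/aput bytecode into ArrayLength, BoundsCheck, and ArrayGet/ArraySet, with BoundsCheck guarding the access and needing an environment because it can throw. The plain C++ below mirrors that ordering as a semantic model, not builder code:

// Semantic model of the HIR sequence ArrayLength -> BoundsCheck -> ArrayGet.
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

int32_t AGet(const std::vector<int32_t>& array, int32_t index) {
  int32_t length = static_cast<int32_t>(array.size());   // HArrayLength
  if (index < 0 || index >= length) {                     // HBoundsCheck (throwing path)
    throw std::out_of_range("ArrayIndexOutOfBoundsException");
  }
  return array[index];                                    // HArrayGet
}

int main() {
  std::vector<int32_t> a{1, 2, 3};
  std::printf("%d\n", AGet(a, 2));                        // prints 3
  try {
    AGet(a, 5);
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());                        // out-of-bounds path
  }
  return 0;
}
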
/**
* Some DEX instructions are folded into multiple HInstructions that need
* to stay live until the last HInstruction. This class
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b621e51..8a5077b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -79,13 +79,14 @@
jobject class_loader,
const DexFile& dex_file) const {
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
- // The optimizing compiler currently does not have a Thumb2 assembler.
- if (instruction_set == kThumb2) {
- instruction_set = kArm;
+ // Always use the thumb2 assembler: some runtime functionality (like implicit stack
+ // overflow checks) assumes thumb2.
+ if (instruction_set == kArm) {
+ instruction_set = kThumb2;
}
// Do not attempt to compile on architectures we do not support.
- if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kArm) {
+ if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
return nullptr;
}
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 68130dd..bd3a7d9 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -153,13 +153,13 @@
if (current->HasRegister()) {
DCHECK(instruction->IsParameterValue());
inactive_.Add(current);
- } else if (current->HasSpillSlot()) {
- DCHECK(instruction->IsParameterValue());
+ } else if (current->HasSpillSlot() || instruction->IsConstant()) {
// Split before first register use.
size_t first_register_use = current->FirstRegisterUse();
if (first_register_use != kNoLifetime) {
LiveInterval* split = Split(current, first_register_use - 1);
- // The new interval may start at a late
+ // Don't add directly to `unhandled_`: it needs to stay sorted, and the start
+ // of this new interval might be after intervals already in the list.
AddToUnhandled(split);
} else {
// Nothing to do, we won't allocate a register for this value.
@@ -579,6 +579,11 @@
return;
}
+ if (defined_by->IsConstant()) {
+ // Constants don't need a spill slot.
+ return;
+ }
+
LiveInterval* last_sibling = interval;
while (last_sibling->GetNextSibling() != nullptr) {
last_sibling = last_sibling->GetNextSibling();
@@ -644,11 +649,16 @@
if (interval->HasRegister()) {
return Location::RegisterLocation(ManagedRegister(interval->GetRegister()));
} else {
- DCHECK(interval->GetParent()->HasSpillSlot());
- if (NeedTwoSpillSlot(interval->GetType())) {
- return Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot());
+ HInstruction* defined_by = interval->GetParent()->GetDefinedBy();
+ if (defined_by->IsConstant()) {
+ return defined_by->GetLocations()->Out();
} else {
- return Location::StackSlot(interval->GetParent()->GetSpillSlot());
+ DCHECK(interval->GetParent()->HasSpillSlot());
+ if (NeedTwoSpillSlot(interval->GetType())) {
+ return Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot());
+ } else {
+ return Location::StackSlot(interval->GetParent()->GetSpillSlot());
+ }
}
}
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index e35ff56..be1c7ec 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -66,7 +66,10 @@
static bool CanAllocateRegistersFor(const HGraph& graph, InstructionSet instruction_set);
static bool Supports(InstructionSet instruction_set) {
- return instruction_set == kX86 || instruction_set == kArm || instruction_set == kX86_64;
+ return instruction_set == kX86
+ || instruction_set == kArm
+ || instruction_set == kX86_64
+ || instruction_set == kThumb2;
}
size_t GetNumberOfSpillSlots() const {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index a7283ab..bafe577 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -22,6 +22,7 @@
#include "optimizing_unit_test.h"
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
+#include "ssa_phi_elimination.h"
#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
@@ -356,4 +357,38 @@
ASSERT_EQ(new_interval->FirstRegisterUse(), last_add->GetLifetimePosition() + 1);
}
+TEST(RegisterAllocatorTest, DeadPhi) {
+ /* Test for a dead loop phi taking as back-edge input a phi that also has
+ * this loop phi as input. Walking backwards in SsaDeadPhiElimination
+ * does not solve the problem because the loop phi will be visited last.
+ *
+ * Test the following snippet:
+ * int a = 0
+ * do {
+ * if (true) {
+ * a = 2;
+ * }
+ * } while (true);
+ */
+
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 1 << 8 | 0,
+ Instruction::IF_NE | 1 << 8 | 1 << 12, 3,
+ Instruction::CONST_4 | 2 << 12 | 0 << 8,
+ Instruction::GOTO | 0xFD00,
+ Instruction::RETURN_VOID);
+
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = BuildSSAGraph(data, &allocator);
+ SsaDeadPhiElimination(graph).Run();
+ CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+ SsaLivenessAnalysis liveness(*graph, codegen);
+ liveness.Analyze();
+ RegisterAllocator register_allocator(&allocator, codegen, liveness);
+ register_allocator.AllocateRegisters();
+ ASSERT_TRUE(register_allocator.Validate(false));
+}
+
} // namespace art
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 13fa03f..a079954 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -53,8 +53,9 @@
}
}
- // Remove phis that are not live. Visit in post order to ensure
- // we only remove phis with no users (dead phis might use dead phis).
+ // Remove phis that are not live. Visit in post order so that phis
+ // that are not inputs of loop phis can be removed when they have
+ // no users left (dead phis might use dead phis).
for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
HInstruction* current = block->GetFirstPhi();
@@ -62,6 +63,17 @@
while (current != nullptr) {
next = current->GetNext();
if (current->AsPhi()->IsDead()) {
+ if (current->HasUses()) {
+ for (HUseIterator<HInstruction> it(current->GetUses()); !it.Done(); it.Advance()) {
+ HUseListNode<HInstruction>* user_node = it.Current();
+ HInstruction* user = user_node->GetUser();
+ DCHECK(user->IsLoopHeaderPhi());
+ DCHECK(user->AsPhi()->IsDead());
+ // Make the user phi its own input; being dead itself, it will be removed by this loop anyway.
+ user->SetRawInputAt(user_node->GetIndex(), user);
+ current->RemoveUser(user, user_node->GetIndex());
+ }
+ }
block->RemovePhi(current->AsPhi());
}
current = next;
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
new file mode 100644
index 0000000..5e1329e
--- /dev/null
+++ b/compiler/optimizing/stack_map_stream.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
+#define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
+
+#include "base/bit_vector.h"
+#include "memory_region.h"
+#include "stack_map.h"
+#include "utils/allocation.h"
+#include "utils/growable_array.h"
+
+namespace art {
+
+/**
+ * Collects and builds a CodeInfo for a method.
+ */
+template<typename T>
+class StackMapStream : public ValueObject {
+ public:
+ explicit StackMapStream(ArenaAllocator* allocator)
+ : stack_maps_(allocator, 10),
+ dex_register_maps_(allocator, 10 * 4),
+ inline_infos_(allocator, 2),
+ stack_mask_max_(-1),
+ number_of_stack_maps_with_inline_info_(0) {}
+
+ // Compute bytes needed to encode a mask with the given maximum element.
+ static uint32_t StackMaskEncodingSize(int max_element) {
+ int number_of_bits = max_element + 1; // Need room for max element too.
+ return RoundUp(number_of_bits, kBitsPerByte) / kBitsPerByte;
+ }
+
+ // See runtime/stack_map.h to know what these fields contain.
+ struct StackMapEntry {
+ uint32_t dex_pc;
+ T native_pc;
+ uint32_t register_mask;
+ BitVector* sp_mask;
+ uint32_t num_dex_registers;
+ uint8_t inlining_depth;
+ size_t dex_register_maps_start_index;
+ size_t inline_infos_start_index;
+ };
+
+ struct DexRegisterEntry {
+ DexRegisterMap::LocationKind kind;
+ int32_t value;
+ };
+
+ struct InlineInfoEntry {
+ uint32_t method_index;
+ };
+
+ void AddStackMapEntry(uint32_t dex_pc,
+ T native_pc,
+ uint32_t register_mask,
+ BitVector* sp_mask,
+ uint32_t num_dex_registers,
+ uint8_t inlining_depth) {
+ StackMapEntry entry;
+ entry.dex_pc = dex_pc;
+ entry.native_pc = native_pc;
+ entry.register_mask = register_mask;
+ entry.sp_mask = sp_mask;
+ entry.num_dex_registers = num_dex_registers;
+ entry.inlining_depth = inlining_depth;
+ entry.dex_register_maps_start_index = dex_register_maps_.Size();
+ entry.inline_infos_start_index = inline_infos_.Size();
+ stack_maps_.Add(entry);
+
+ stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
+ if (inlining_depth > 0) {
+ number_of_stack_maps_with_inline_info_++;
+ }
+ }
+
+ void AddDexRegisterEntry(DexRegisterMap::LocationKind kind, int32_t value) {
+ DexRegisterEntry entry;
+ entry.kind = kind;
+ entry.value = value;
+ dex_register_maps_.Add(entry);
+ }
+
+ void AddInlineInfoEntry(uint32_t method_index) {
+ InlineInfoEntry entry;
+ entry.method_index = method_index;
+ inline_infos_.Add(entry);
+ }
+
+ size_t ComputeNeededSize() const {
+ return CodeInfo<T>::kFixedSize
+ + ComputeStackMapSize()
+ + ComputeDexRegisterMapSize()
+ + ComputeInlineInfoSize();
+ }
+
+ size_t ComputeStackMapSize() const {
+ return stack_maps_.Size() * (StackMap<T>::kFixedSize + StackMaskEncodingSize(stack_mask_max_));
+ }
+
+ size_t ComputeDexRegisterMapSize() const {
+ // We currently encode all dex register information per stack map.
+ return stack_maps_.Size() * DexRegisterMap::kFixedSize
+ // For each dex register entry.
+ + (dex_register_maps_.Size() * DexRegisterMap::SingleEntrySize());
+ }
+
+ size_t ComputeInlineInfoSize() const {
+ return inline_infos_.Size() * InlineInfo::SingleEntrySize()
+ // For encoding the depth.
+ + (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
+ }
+
+ size_t ComputeInlineInfoStart() const {
+ return ComputeDexRegisterMapStart() + ComputeDexRegisterMapSize();
+ }
+
+ size_t ComputeDexRegisterMapStart() const {
+ return CodeInfo<T>::kFixedSize + ComputeStackMapSize();
+ }
+
+ void FillIn(MemoryRegion region) {
+ CodeInfo<T> code_info(region);
+
+ size_t stack_mask_size = StackMaskEncodingSize(stack_mask_max_);
+ uint8_t* memory_start = region.start();
+
+ MemoryRegion dex_register_maps_region = region.Subregion(
+ ComputeDexRegisterMapStart(),
+ ComputeDexRegisterMapSize());
+
+ MemoryRegion inline_infos_region = region.Subregion(
+ ComputeInlineInfoStart(),
+ ComputeInlineInfoSize());
+
+ code_info.SetNumberOfStackMaps(stack_maps_.Size());
+ code_info.SetStackMaskSize(stack_mask_size);
+
+ uintptr_t next_dex_register_map_offset = 0;
+ uintptr_t next_inline_info_offset = 0;
+ for (size_t i = 0, e = stack_maps_.Size(); i < e; ++i) {
+ StackMap<T> stack_map = code_info.GetStackMapAt(i);
+ StackMapEntry entry = stack_maps_.Get(i);
+
+ stack_map.SetDexPc(entry.dex_pc);
+ stack_map.SetNativePc(entry.native_pc);
+ stack_map.SetRegisterMask(entry.register_mask);
+ stack_map.SetStackMask(*entry.sp_mask);
+
+ // Set the register map.
+ MemoryRegion region = dex_register_maps_region.Subregion(
+ next_dex_register_map_offset,
+ DexRegisterMap::kFixedSize + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
+ next_dex_register_map_offset += region.size();
+ DexRegisterMap dex_register_map(region);
+ stack_map.SetDexRegisterMapOffset(region.start() - memory_start);
+
+ for (size_t i = 0; i < entry.num_dex_registers; ++i) {
+ DexRegisterEntry register_entry =
+ dex_register_maps_.Get(i + entry.dex_register_maps_start_index);
+ dex_register_map.SetRegisterInfo(i, register_entry.kind, register_entry.value);
+ }
+
+ // Set the inlining info.
+ if (entry.inlining_depth != 0) {
+ MemoryRegion region = inline_infos_region.Subregion(
+ next_inline_info_offset,
+ InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
+ next_inline_info_offset += region.size();
+ InlineInfo inline_info(region);
+
+ stack_map.SetInlineDescriptorOffset(region.start() - memory_start);
+
+ inline_info.SetDepth(entry.inlining_depth);
+ for (size_t i = 0; i < entry.inlining_depth; ++i) {
+ InlineInfoEntry inline_entry = inline_infos_.Get(i + entry.inline_infos_start_index);
+ inline_info.SetMethodReferenceIndexAtDepth(i, inline_entry.method_index);
+ }
+ } else {
+ stack_map.SetInlineDescriptorOffset(InlineInfo::kNoInlineInfo);
+ }
+ }
+ }
+
+ private:
+ GrowableArray<StackMapEntry> stack_maps_;
+ GrowableArray<DexRegisterEntry> dex_register_maps_;
+ GrowableArray<InlineInfoEntry> inline_infos_;
+ int stack_mask_max_;
+ size_t number_of_stack_maps_with_inline_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(StackMapStream);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
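
As a quick sanity check on the size arithmetic above: the stack mask is sized from the highest set bit, rounded up to whole bytes, and the blob is laid out as stack maps, then dex register maps, then inline infos (see ComputeDexRegisterMapStart() and ComputeInlineInfoStart()). A standalone sketch of the mask formula, with hypothetical names so it does not collide with the real helpers:

#include <cstdint>

constexpr uint32_t kBitsPerByteSketch = 8;

// Same computation as StackMapStream::StackMaskEncodingSize().
constexpr uint32_t StackMaskEncodingSizeSketch(int max_element) {
  // Need room for the max element too, then round up to whole bytes.
  return (static_cast<uint32_t>(max_element + 1) + kBitsPerByteSketch - 1) / kBitsPerByteSketch;
}

static_assert(StackMaskEncodingSizeSketch(-1) == 0, "empty mask needs no bytes");
static_assert(StackMaskEncodingSizeSketch(4) == 1, "bits 0..4 fit in one byte");
static_assert(StackMaskEncodingSizeSketch(8) == 2, "bit 8 forces a second byte");
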
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
new file mode 100644
index 0000000..a70259e
--- /dev/null
+++ b/compiler/optimizing/stack_map_test.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stack_map.h"
+#include "stack_map_stream.h"
+#include "utils/arena_bit_vector.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+bool SameBits(MemoryRegion region, const BitVector& bit_vector) {
+ for (size_t i = 0; i < region.size_in_bits(); ++i) {
+ if (region.LoadBit(i) != bit_vector.IsBitSet(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+TEST(StackMapTest, Test1) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream<size_t> stream(&arena);
+
+ ArenaBitVector sp_mask(&arena, 0, false);
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, 2, 0);
+ stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
+ stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
+
+ size_t size = stream.ComputeNeededSize();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo<size_t> code_info(region);
+ ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+
+ StackMap<size_t> stack_map = code_info.GetStackMapAt(0);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePc(64)));
+ ASSERT_EQ(0u, stack_map.GetDexPc());
+ ASSERT_EQ(64u, stack_map.GetNativePc());
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
+ ASSERT_FALSE(stack_map.HasInlineInfo());
+
+ MemoryRegion stack_mask = stack_map.GetStackMask();
+ ASSERT_TRUE(SameBits(stack_mask, sp_mask));
+
+ DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+ ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
+ ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
+ ASSERT_EQ(0, dex_registers.GetValue(0));
+ ASSERT_EQ(-2, dex_registers.GetValue(1));
+}
+
+TEST(StackMapTest, Test2) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream<size_t> stream(&arena);
+
+ ArenaBitVector sp_mask1(&arena, 0, true);
+ sp_mask1.SetBit(2);
+ sp_mask1.SetBit(4);
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, 2, 2);
+ stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
+ stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
+ stream.AddInlineInfoEntry(42);
+ stream.AddInlineInfoEntry(82);
+
+ ArenaBitVector sp_mask2(&arena, 0, true);
+ sp_mask2.SetBit(3);
+ sp_mask1.SetBit(8);
+ stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, 1, 0);
+ stream.AddDexRegisterEntry(DexRegisterMap::kInRegister, 0);
+
+ size_t size = stream.ComputeNeededSize();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo<size_t> code_info(region);
+ ASSERT_EQ(1u, code_info.GetStackMaskSize());
+ ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
+
+ StackMap<size_t> stack_map = code_info.GetStackMapAt(0);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePc(64)));
+ ASSERT_EQ(0u, stack_map.GetDexPc());
+ ASSERT_EQ(64u, stack_map.GetNativePc());
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
+
+ MemoryRegion stack_mask = stack_map.GetStackMask();
+ ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
+
+ DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+ ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
+ ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
+ ASSERT_EQ(0, dex_registers.GetValue(0));
+ ASSERT_EQ(-2, dex_registers.GetValue(1));
+
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ ASSERT_EQ(2u, inline_info.GetDepth());
+ ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
+ ASSERT_EQ(82u, inline_info.GetMethodReferenceIndexAtDepth(1));
+
+ stack_map = code_info.GetStackMapAt(1);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePc(128u)));
+ ASSERT_EQ(1u, stack_map.GetDexPc());
+ ASSERT_EQ(128u, stack_map.GetNativePc());
+ ASSERT_EQ(0xFFu, stack_map.GetRegisterMask());
+
+ stack_mask = stack_map.GetStackMask();
+ ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
+
+ ASSERT_FALSE(stack_map.HasInlineInfo());
+}
+
+} // namespace art
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index d5225c1..6da375a 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -16,7 +16,7 @@
#include "trampoline_compiler.h"
-#include "jni_internal.h"
+#include "jni_env_ext.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/mips/assembler_mips.h"
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index f4bcb1d..7bfbb6f 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -24,6 +24,7 @@
#include "base/mutex.h"
#include "mem_map.h"
#include "utils.h"
+#include "utils/debug_stack.h"
namespace art {
@@ -34,6 +35,9 @@
class ScopedArenaAllocator;
class MemStats;
+template <typename T>
+class ArenaAllocatorAdapter;
+
static constexpr bool kArenaAllocatorCountAllocations = false;
// Type of allocation for memory tuning.
@@ -147,11 +151,14 @@
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
-class ArenaAllocator : private ArenaAllocatorStats {
+class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats {
public:
explicit ArenaAllocator(ArenaPool* pool);
~ArenaAllocator();
+ // Get adapter for use in STL containers. See arena_containers.h .
+ ArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
+
// Returns zeroed memory.
void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
if (UNLIKELY(running_on_valgrind_)) {
@@ -190,6 +197,9 @@
Arena* arena_head_;
bool running_on_valgrind_;
+ template <typename U>
+ friend class ArenaAllocatorAdapter;
+
DISALLOW_COPY_AND_ASSIGN(ArenaAllocator);
}; // ArenaAllocator
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
new file mode 100644
index 0000000..c48b0c8
--- /dev/null
+++ b/compiler/utils/arena_containers.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+#define ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+
+#include <deque>
+#include <queue>
+#include <set>
+#include <vector>
+
+#include "utils/arena_allocator.h"
+#include "safe_map.h"
+
+namespace art {
+
+// Adapter for use of ArenaAllocator in STL containers.
+// Use ArenaAllocator::Adapter() to create an adapter to pass to container constructors.
+// For example,
+// struct Foo {
+// explicit Foo(ArenaAllocator* allocator)
+// : foo_vector(allocator->Adapter(kArenaAllocMisc)),
+// foo_map(std::less<int>(), allocator->Adapter()) {
+// }
+// ArenaVector<int> foo_vector;
+// ArenaSafeMap<int, int> foo_map;
+// };
+template <typename T>
+class ArenaAllocatorAdapter;
+
+template <typename T>
+using ArenaDeque = std::deque<T, ArenaAllocatorAdapter<T>>;
+
+template <typename T>
+using ArenaQueue = std::queue<T, ArenaDeque<T>>;
+
+template <typename T>
+using ArenaVector = std::vector<T, ArenaAllocatorAdapter<T>>;
+
+template <typename T, typename Comparator = std::less<T>>
+using ArenaSet = std::set<T, Comparator, ArenaAllocatorAdapter<T>>;
+
+template <typename K, typename V, typename Comparator = std::less<K>>
+using ArenaSafeMap =
+ SafeMap<K, V, Comparator, ArenaAllocatorAdapter<std::pair<const K, V>>>;
+
+// Implementation details below.
+
+template <bool kCount>
+class ArenaAllocatorAdapterKindImpl;
+
+template <>
+class ArenaAllocatorAdapterKindImpl<false> {
+ public:
+ // Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
+ explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+ ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+ ArenaAllocKind Kind() { return kArenaAllocSTL; }
+};
+
+template <bool kCount>
+class ArenaAllocatorAdapterKindImpl {
+ public:
+ explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) : kind_(kind) { }
+ ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+ ArenaAllocKind Kind() { return kind_; }
+
+ private:
+ ArenaAllocKind kind_;
+};
+
+typedef ArenaAllocatorAdapterKindImpl<kArenaAllocatorCountAllocations> ArenaAllocatorAdapterKind;
+
+template <>
+class ArenaAllocatorAdapter<void>
+ : private DebugStackReference, private ArenaAllocatorAdapterKind {
+ public:
+ typedef void value_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+
+ template <typename U>
+ struct rebind {
+ typedef ArenaAllocatorAdapter<U> other;
+ };
+
+ explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator,
+ ArenaAllocKind kind = kArenaAllocSTL)
+ : DebugStackReference(arena_allocator),
+ ArenaAllocatorAdapterKind(kind),
+ arena_allocator_(arena_allocator) {
+ }
+ template <typename U>
+ ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
+ : DebugStackReference(other),
+ ArenaAllocatorAdapterKind(other),
+ arena_allocator_(other.arena_allocator_) {
+ }
+ ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
+ ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+ ~ArenaAllocatorAdapter() = default;
+
+ private:
+ ArenaAllocator* arena_allocator_;
+
+ template <typename U>
+ friend class ArenaAllocatorAdapter;
+};
+
+template <typename T>
+class ArenaAllocatorAdapter : private DebugStackReference, private ArenaAllocatorAdapterKind {
+ public:
+ typedef T value_type;
+ typedef T* pointer;
+ typedef T& reference;
+ typedef const T* const_pointer;
+ typedef const T& const_reference;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ template <typename U>
+ struct rebind {
+ typedef ArenaAllocatorAdapter<U> other;
+ };
+
+ explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind)
+ : DebugStackReference(arena_allocator),
+ ArenaAllocatorAdapterKind(kind),
+ arena_allocator_(arena_allocator) {
+ }
+ template <typename U>
+ ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
+ : DebugStackReference(other),
+ ArenaAllocatorAdapterKind(other),
+ arena_allocator_(other.arena_allocator_) {
+ }
+ ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
+ ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+ ~ArenaAllocatorAdapter() = default;
+
+ size_type max_size() const {
+ return static_cast<size_type>(-1) / sizeof(T);
+ }
+
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+
+ pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ DCHECK_LE(n, max_size());
+ return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
+ ArenaAllocatorAdapterKind::Kind()));
+ }
+ void deallocate(pointer p, size_type n) {
+ }
+
+ void construct(pointer p, const_reference val) {
+ new (static_cast<void*>(p)) value_type(val);
+ }
+ void destroy(pointer p) {
+ p->~value_type();
+ }
+
+ private:
+ ArenaAllocator* arena_allocator_;
+
+ template <typename U>
+ friend class ArenaAllocatorAdapter;
+
+ template <typename U>
+ friend bool operator==(const ArenaAllocatorAdapter<U>& lhs,
+ const ArenaAllocatorAdapter<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const ArenaAllocatorAdapter<T>& lhs,
+ const ArenaAllocatorAdapter<T>& rhs) {
+ return lhs.arena_allocator_ == rhs.arena_allocator_;
+}
+
+template <typename T>
+inline bool operator!=(const ArenaAllocatorAdapter<T>& lhs,
+ const ArenaAllocatorAdapter<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+inline ArenaAllocatorAdapter<void> ArenaAllocator::Adapter(ArenaAllocKind kind) {
+ return ArenaAllocatorAdapter<void>(this, kind);
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
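
Besides the member-initializer example in the header comment above, the adapter also works for short-lived locals. A hedged sketch (the function and values are invented for illustration, and it assumes SafeMap's usual Put() interface):

#include <functional>

#include "utils/arena_containers.h"

namespace art {

void CountOddValues(ArenaAllocator* allocator) {
  // All backing memory comes from the arena; deallocate() is a no-op.
  ArenaVector<int> values(allocator->Adapter(kArenaAllocMisc));
  values.push_back(1);
  values.push_back(4);
  values.push_back(7);

  ArenaSafeMap<int, bool> is_odd(std::less<int>(), allocator->Adapter());
  for (int v : values) {
    is_odd.Put(v, (v & 1) != 0);
  }
}

}  // namespace art
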
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 8a34928..671ccb6 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -73,6 +73,11 @@
return os;
}
+ShifterOperand::ShifterOperand(uint32_t immed)
+ : type_(kImmediate), rm_(kNoRegister), rs_(kNoRegister),
+ is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(immed) {
+ CHECK(immed < (1u << 12) || ArmAssembler::ModifiedImmediate(immed) != kInvalidModifiedImmediate);
+}
uint32_t ShifterOperand::encodingArm() const {
@@ -169,9 +174,7 @@
return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
case MOV:
- if (immediate < (1 << 12)) { // Less than (or equal to) 12 bits can always be done.
- return true;
- }
+ // TODO: Support less than or equal to 12 bits.
return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
case MVN:
default:
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index be19174..54965f6 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -35,9 +35,7 @@
is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(0) {
}
- explicit ShifterOperand(uint32_t immed) : type_(kImmediate), rm_(kNoRegister), rs_(kNoRegister),
- is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(immed) {
- }
+ explicit ShifterOperand(uint32_t immed);
// Data-processing operands - Register
explicit ShifterOperand(Register rm) : type_(kRegister), rm_(rm), rs_(kNoRegister),
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 604f59e..4904428 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -619,7 +619,8 @@
return true;
}
- bool can_contain_high_register = opcode == MOV || opcode == ADD || opcode == SUB;
+ bool can_contain_high_register = (opcode == MOV)
+ || ((opcode == ADD || opcode == SUB) && (rn == rd));
if (IsHighRegister(rd) || IsHighRegister(rn)) {
if (can_contain_high_register) {
@@ -658,6 +659,10 @@
if (so.IsImmediate()) {
return true;
}
+
+ if (!can_contain_high_register) {
+ return true;
+ }
}
if (so.IsRegister() && IsHighRegister(so.GetRegister()) && !can_contain_high_register) {
@@ -757,23 +762,21 @@
int32_t encoding = 0;
if (so.IsImmediate()) {
// Check special cases.
- if ((opcode == SUB || opcode == ADD) && rn == SP) {
- // There are special ADD/SUB rd, SP, #imm12 instructions.
+ if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12))) {
if (opcode == SUB) {
thumb_opcode = 0b0101;
} else {
thumb_opcode = 0;
}
uint32_t imm = so.GetImmediate();
- CHECK_LT(imm, (1u << 12));
uint32_t i = (imm >> 11) & 1;
uint32_t imm3 = (imm >> 8) & 0b111;
uint32_t imm8 = imm & 0xff;
encoding = B31 | B30 | B29 | B28 | B25 |
- B19 | B18 | B16 |
thumb_opcode << 21 |
+ rn << 16 |
rd << 8 |
i << 26 |
imm3 << 12 |
@@ -877,11 +880,17 @@
rn_shift = 8;
} else {
thumb_opcode = 0b1010;
+ rd = rn;
rn = so.GetRegister();
}
break;
- case CMN: thumb_opcode = 0b1011; rn = so.GetRegister(); break;
+ case CMN: {
+ thumb_opcode = 0b1011;
+ rd = rn;
+ rn = so.GetRegister();
+ break;
+ }
case ORR: thumb_opcode = 0b1100; break;
case MOV:
dp_opcode = 0;
@@ -1371,13 +1380,23 @@
}
if (must_be_32bit) {
- int32_t encoding = 0x1f << 27 | B22 | (load ? B20 : 0) | static_cast<uint32_t>(rd) << 12 |
+ int32_t encoding = 0x1f << 27 | (load ? B20 : 0) | static_cast<uint32_t>(rd) << 12 |
ad.encodingThumb(true);
+ if (half) {
+ encoding |= B21;
+ } else if (!byte) {
+ encoding |= B22;
+ }
Emit32(encoding);
} else {
// 16 bit register offset.
int32_t encoding = B14 | B12 | (load ? B11 : 0) | static_cast<uint32_t>(rd) |
ad.encodingThumb(false);
+ if (byte) {
+ encoding |= B10;
+ } else if (half) {
+ encoding |= B9;
+ }
Emit16(encoding);
}
}
@@ -1470,6 +1489,7 @@
// branch the size may change if it so happens that other branches change size that change
// the distance to the target and that distance puts this branch over the limit for 16 bits.
if (size == Branch::k16Bit) {
+ DCHECK(!force_32bit_branches_);
Emit16(0); // Space for a 16 bit branch.
} else {
Emit32(0); // Space for a 32 bit branch.
@@ -1477,7 +1497,7 @@
} else {
// Branch is to an unbound label. Emit space for it.
uint16_t branch_id = AddBranch(branch_type, pc, cond); // Unresolved branch.
- if (force_32bit_) {
+ if (force_32bit_branches_ || force_32bit_) {
Emit16(static_cast<uint16_t>(label->position_)); // Emit current label link.
Emit16(0); // another 16 bits.
} else {
@@ -2073,6 +2093,7 @@
uint32_t branch_location = branch->GetLocation();
uint16_t next = buffer_.Load<uint16_t>(branch_location); // Get next in chain.
if (changed) {
+ DCHECK(!force_32bit_branches_);
MakeHoleForBranch(branch->GetLocation(), 2);
if (branch->IsCompareAndBranch()) {
// A cbz/cbnz instruction has changed size. There is no valid encoding for
@@ -2506,12 +2527,22 @@
void Thumb2Assembler::CompareAndBranchIfZero(Register r, Label* label) {
- cbz(r, label);
+ if (force_32bit_branches_) {
+ cmp(r, ShifterOperand(0));
+ b(label, EQ);
+ } else {
+ cbz(r, label);
+ }
}
void Thumb2Assembler::CompareAndBranchIfNonZero(Register r, Label* label) {
- cbnz(r, label);
+ if (force_32bit_branches_) {
+ cmp(r, ShifterOperand(0));
+ b(label, NE);
+ } else {
+ cbnz(r, label);
+ }
}
} // namespace arm
} // namespace art
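
Note on the fallback above: cbz/cbnz only exist as 16 bit Thumb2 encodings, so when 32 bit branches are forced the assembler expands the compare-and-branch into an explicit cmp against zero followed by a conditional b, which does have a 32 bit form.
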
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 5f24e4e..ee33bf2 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -29,10 +29,13 @@
namespace art {
namespace arm {
-
class Thumb2Assembler FINAL : public ArmAssembler {
public:
- Thumb2Assembler() : force_32bit_(false), it_cond_index_(kNoItCondition), next_condition_(AL) {
+ explicit Thumb2Assembler(bool force_32bit_branches = false)
+ : force_32bit_branches_(force_32bit_branches),
+ force_32bit_(false),
+ it_cond_index_(kNoItCondition),
+ next_condition_(AL) {
}
virtual ~Thumb2Assembler() {
@@ -49,6 +52,10 @@
return force_32bit_;
}
+ bool IsForced32BitBranches() const {
+ return force_32bit_branches_;
+ }
+
void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
EmitBranches();
Assembler::FinalizeInstructions(region);
@@ -412,7 +419,8 @@
void EmitShift(Register rd, Register rm, Shift shift, uint8_t amount, bool setcc = false);
void EmitShift(Register rd, Register rn, Shift shift, Register rm, bool setcc = false);
- bool force_32bit_; // Force the assembler to use 32 bit thumb2 instructions.
+ bool force_32bit_branches_; // Force the assembler to use 32 bit branch instructions.
+ bool force_32bit_; // Force the assembler to use 32 bit thumb2 instructions.
// IfThen conditions. Used to check that conditional instructions match the preceding IT.
Condition it_conditions_[4];
@@ -605,6 +613,9 @@
private:
// Calculate the size of the branch instruction based on its type and offset.
Size CalculateSize() const {
+ if (assembler_->IsForced32BitBranches()) {
+ return k32Bit;
+ }
if (target_ == kUnresolved) {
if (assembler_->IsForced32Bit() && (type_ == kUnconditional || type_ == kConditional)) {
return k32Bit;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 5b97ba0..3f90f21 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -626,7 +626,7 @@
// Move ETR(Callee saved) back to TR(Caller saved) reg. We use ETR on calls
// to external functions that might trash TR. We do not need the original
- // X19 saved in BuildFrame().
+ // ETR(X21) saved in BuildFrame().
___ Mov(reg_x(TR), reg_x(ETR));
___ Blr(temp);
@@ -644,20 +644,43 @@
// TODO: *create APCS FP - end of FP chain;
// *add support for saving a different set of callee regs.
- // For now we check that the size of callee regs vector is 20
- // equivalent to the APCS callee saved regs [X19, x30] [D8, D15].
- CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
- ___ PushCalleeSavedRegisters();
-
- // Move TR(Caller saved) to ETR(Callee saved). The original X19 has been
- // saved by PushCalleeSavedRegisters(). This way we make sure that TR is not
- // trashed by native code.
- ___ Mov(reg_x(ETR), reg_x(TR));
-
+ // For now we check that the size of callee regs vector is 11.
+ CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
// Increase frame to required size - must be at least space to push StackReference<Method>.
- CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);
- size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
- IncreaseFrameSize(adjust);
+ CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);
+ IncreaseFrameSize(frame_size);
+
+ // TODO: Ugly hard code...
+ // Should generate these according to the spill mask automatically.
+ // TUNING: Use stp.
+ // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
+ size_t reg_offset = frame_size;
+ reg_offset -= 8;
+ StoreToOffset(LR, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X29, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X28, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X27, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X26, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X25, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X24, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X23, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X22, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X21, SP, reg_offset);
+ reg_offset -= 8;
+ StoreToOffset(X20, SP, reg_offset);
+
+ // Move TR(Caller saved) to ETR(Callee saved). The original (ETR)X21 has been saved on stack.
+ // This way we make sure that TR is not trashed by native code.
+ ___ Mov(reg_x(ETR), reg_x(TR));
// Write StackReference<Method>.
DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
@@ -690,22 +713,46 @@
void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs) {
CHECK_ALIGNED(frame_size, kStackAlignment);
- // For now we only check that the size of the frame is greater than the
- // no of APCS callee saved regs [X19, X30] [D8, D15].
- CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
- CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);
+ // For now we only check that the size of the frame is greater than the spill size.
+ CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
+ CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);
- // Decrease frame size to start of callee saved regs.
- size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
- DecreaseFrameSize(adjust);
-
- // We move ETR (Callee Saved) back to TR (Caller Saved) which might have
- // been trashed in the native call. The original X19 (ETR) is restored as
- // part of PopCalleeSavedRegisters().
+ // We move ETR(aapcs64 callee saved) back to TR(aapcs64 caller saved) which might have
+ // been trashed in the native call. The original ETR(X21) is restored from stack.
___ Mov(reg_x(TR), reg_x(ETR));
+ // TODO: Ugly hard code...
+ // Should generate these according to the spill mask automatically.
+ // TUNING: Use ldp.
+ // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
+ size_t reg_offset = frame_size;
+ reg_offset -= 8;
+ LoadFromOffset(LR, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X29, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X28, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X27, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X26, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X25, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X24, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X23, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X22, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X21, SP, reg_offset);
+ reg_offset -= 8;
+ LoadFromOffset(X20, SP, reg_offset);
+
+ // Decrease frame size to start of callee saved regs.
+ DecreaseFrameSize(frame_size);
+
// Pop callee saved and return to LR.
- ___ PopCalleeSavedRegisters();
___ Ret();
}
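
For reference, the hand-unrolled spill and fill sequences above touch exactly 11 registers (LR, X29, then X28 down to X20) at consecutive 8 byte slots from the top of the frame, i.e. 11 * 8 = 88 bytes of spill area; this is the count that kJniRefSpillRegsSize is updated to match in constants_arm64.h below.
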
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
index 2a08c95..0cbbb1e 100644
--- a/compiler/utils/arm64/constants_arm64.h
+++ b/compiler/utils/arm64/constants_arm64.h
@@ -29,12 +29,12 @@
namespace art {
namespace arm64 {
-constexpr unsigned int kCalleeSavedRegsSize = 20;
+constexpr unsigned int kJniRefSpillRegsSize = 11;
// Vixl buffer size.
constexpr size_t kBufferSizeArm64 = 4096*2;
-} // arm64
-} // art
+} // namespace arm64
+} // namespace art
#endif // ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index f72f5e5..4addfa0 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -499,6 +499,10 @@
// and branch to a ExceptionSlowPath if it is.
virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+ virtual void InitializeFrameDescriptionEntry() {}
+ virtual void FinalizeFrameDescriptionEntry() {}
+ virtual std::vector<uint8_t>* GetFrameDescriptionEntry() { return nullptr; }
+
virtual ~Assembler() {}
protected:
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 754496b..3742913 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -208,12 +208,17 @@
assembler_.reset(new Ass());
// Fake a runtime test for ScratchFile
- std::string android_data;
- CommonRuntimeTest::SetEnvironmentVariables(android_data);
+ CommonRuntimeTest::SetUpAndroidData(android_data_);
SetUpHelpers();
}
+ void TearDown() OVERRIDE {
+ // We leave temporaries in case this failed so we can debug issues.
+ CommonRuntimeTest::TearDownAndroidData(android_data_, false);
+ tmpnam_ = "";
+ }
+
// Override this to set up any architecture-specific things, e.g., register vectors.
virtual void SetUpHelpers() {}
@@ -356,12 +361,15 @@
} else {
if (DisassembleBinaries(*data, *res.code, test_name)) {
if (data->size() > res.code->size()) {
- LOG(WARNING) << "Assembly code is not identical, but disassembly of machine code is "
- "equal: this implies sub-optimal encoding! Our code size=" << data->size() <<
+ // Fail this test with a fancy colored warning being printed.
+ EXPECT_TRUE(false) << "Assembly code is not identical, but disassembly of machine code "
+ "is equal: this implies sub-optimal encoding! Our code size=" << data->size() <<
", gcc size=" << res.code->size();
} else {
+ // Otherwise just print an info message and clean up.
LOG(INFO) << "GCC chose a different encoding than ours, but the overall length is the "
"same.";
+ Clean(&res);
}
} else {
// This will output the assembly.
@@ -687,6 +695,8 @@
std::string resolved_objdump_cmd_;
std::string resolved_disassemble_cmd_;
+ std::string android_data_;
+
static constexpr size_t OBJDUMP_SECTION_LINE_MIN_TOKENS = 6;
};
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 68cb656..891a287 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -14,7 +14,10 @@
* limitations under the License.
*/
+#include <dirent.h>
#include <fstream>
+#include <sys/types.h>
+#include <map>
#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
@@ -40,6 +43,8 @@
static constexpr bool kPrintResults = false;
#endif
+static const char* TOOL_PREFIX = "arm-linux-androideabi-";
+
void SetAndroidData() {
const char* data = getenv("ANDROID_DATA");
if (data == nullptr) {
@@ -109,9 +114,9 @@
// Suffix on toolsdir will be something like "arm-eabi-4.8"
while ((entry = readdir(dir)) != nullptr) {
std::string subdir = toolsdir + std::string("/") + std::string(entry->d_name);
- size_t eabi = subdir.find("arm-eabi-");
+ size_t eabi = subdir.find(TOOL_PREFIX);
if (eabi != std::string::npos) {
- std::string suffix = subdir.substr(eabi + sizeof("arm-eabi-"));
+ std::string suffix = subdir.substr(eabi + strlen(TOOL_PREFIX));
double version = strtod(suffix.c_str(), nullptr);
if (version > maxversion) {
maxversion = version;
@@ -166,22 +171,22 @@
}
out.close();
- char cmd[256];
+ char cmd[1024];
// Assemble the .S
- snprintf(cmd, sizeof(cmd), "%sarm-eabi-as %s -o %s.o", toolsdir.c_str(), filename, filename);
+ snprintf(cmd, sizeof(cmd), "%s%sas %s -o %s.o", toolsdir.c_str(), TOOL_PREFIX, filename, filename);
system(cmd);
// Remove the $d symbols to prevent the disassembler dumping the instructions
// as .word
- snprintf(cmd, sizeof(cmd), "%sarm-eabi-objcopy -N '$d' %s.o %s.oo", toolsdir.c_str(),
+ snprintf(cmd, sizeof(cmd), "%s%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), TOOL_PREFIX,
filename, filename);
system(cmd);
// Disassemble.
- snprintf(cmd, sizeof(cmd), "%sarm-eabi-objdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
- toolsdir.c_str(), filename);
+ snprintf(cmd, sizeof(cmd), "%s%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
+ toolsdir.c_str(), TOOL_PREFIX, filename);
if (kPrintResults) {
// Print the results only, don't check. This is used to generate new output for inserting
// into the .inc file.
@@ -307,6 +312,9 @@
__ movs(R0, ShifterOperand(R1));
__ mvns(R0, ShifterOperand(R1));
+ // 32 bit variants.
+ __ add(R12, R1, ShifterOperand(R0));
+
size_t cs = __ CodeSize();
std::vector<uint8_t> managed_code(cs);
MemoryRegion code(&managed_code[0], managed_code.size());
@@ -863,6 +871,9 @@
__ StoreToOffset(kStoreWord, R2, R4, 12); // Simple
__ StoreToOffset(kStoreWord, R2, R4, 0x2000); // Offset too big.
+ __ StoreToOffset(kStoreWord, R0, R12, 12);
+ __ StoreToOffset(kStoreHalfword, R0, R12, 12);
+ __ StoreToOffset(kStoreByte, R2, R12, 12);
size_t cs = __ CodeSize();
std::vector<uint8_t> managed_code(cs);
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 3943e37..3f2641c 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -43,13 +43,14 @@
" 3e: 42c8 cmn r0, r1\n",
" 40: 0008 movs r0, r1\n",
" 42: 43c8 mvns r0, r1\n",
+ " 44: eb01 0c00 add.w ip, r1, r0\n",
nullptr
};
const char* DataProcessingImmediateResults[] = {
" 0: 2055 movs r0, #85 ; 0x55\n",
" 2: f06f 0055 mvn.w r0, #85 ; 0x55\n",
- " 6: f101 0055 add.w r0, r1, #85 ; 0x55\n",
- " a: f1a1 0055 sub.w r0, r1, #85 ; 0x55\n",
+ " 6: f201 0055 addw r0, r1, #85 ; 0x55\n",
+ " a: f2a1 0055 subw r0, r1, #85 ; 0x55\n",
" e: f001 0055 and.w r0, r1, #85 ; 0x55\n",
" 12: f041 0055 orr.w r0, r1, #85 ; 0x55\n",
" 16: f081 0055 eor.w r0, r1, #85 ; 0x55\n",
@@ -355,6 +356,9 @@
" 2: f44f 5c00 mov.w ip, #8192 ; 0x2000\n",
" 6: 44a4 add ip, r4\n",
" 8: f8cc 2000 str.w r2, [ip]\n",
+ " c: f8cc 000c str.w r0, [ip, #12]\n",
+ " 10: f8ac 000c strh.w r0, [ip, #12]\n",
+ " 14: f88c 200c strb.w r2, [ip, #12]\n",
nullptr
};
const char* IfThenResults[] = {
diff --git a/compiler/utils/dwarf_cfi.cc b/compiler/utils/dwarf_cfi.cc
new file mode 100644
index 0000000..b3d1a47
--- /dev/null
+++ b/compiler/utils/dwarf_cfi.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "leb128.h"
+#include "utils.h"
+
+#include "dwarf_cfi.h"
+
+namespace art {
+
+void DW_CFA_advance_loc(std::vector<uint8_t>* buf, uint32_t increment) {
+ if (increment < 64) {
+ // Encoding in opcode.
+ buf->push_back(0x1 << 6 | increment);
+ } else if (increment < 256) {
+ // Single byte delta.
+ buf->push_back(0x02);
+ buf->push_back(increment);
+ } else if (increment < 256 * 256) {
+ // Two byte delta.
+ buf->push_back(0x03);
+ buf->push_back(increment & 0xff);
+ buf->push_back((increment >> 8) & 0xff);
+ } else {
+ // Four byte delta.
+ buf->push_back(0x04);
+ PushWord(buf, increment);
+ }
+}
+
+void DW_CFA_offset_extended_sf(std::vector<uint8_t>* buf, int reg, int32_t offset) {
+ buf->push_back(0x11);
+ EncodeUnsignedLeb128(reg, buf);
+ EncodeSignedLeb128(offset, buf);
+}
+
+void DW_CFA_offset(std::vector<uint8_t>* buf, int reg, uint32_t offset) {
+ buf->push_back((0x2 << 6) | reg);
+ EncodeUnsignedLeb128(offset, buf);
+}
+
+void DW_CFA_def_cfa_offset(std::vector<uint8_t>* buf, int32_t offset) {
+ buf->push_back(0x0e);
+ EncodeUnsignedLeb128(offset, buf);
+}
+
+void DW_CFA_remember_state(std::vector<uint8_t>* buf) {
+ buf->push_back(0x0a);
+}
+
+void DW_CFA_restore_state(std::vector<uint8_t>* buf) {
+ buf->push_back(0x0b);
+}
+
+void WriteFDEHeader(std::vector<uint8_t>* buf) {
+ // 'length' (filled in by other functions).
+ PushWord(buf, 0);
+
+ // 'CIE_pointer' (filled in by linker).
+ PushWord(buf, 0);
+
+ // 'initial_location' (filled in by linker).
+ PushWord(buf, 0);
+
+ // 'address_range' (filled in by other functions).
+ PushWord(buf, 0);
+
+ // Augmentation length: 0
+ buf->push_back(0);
+}
+
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data) {
+ const int kOffsetOfAddressRange = 12;
+ CHECK(buf->size() >= kOffsetOfAddressRange + sizeof(uint32_t));
+
+ uint8_t *p = buf->data() + kOffsetOfAddressRange;
+ p[0] = data;
+ p[1] = data >> 8;
+ p[2] = data >> 16;
+ p[3] = data >> 24;
+}
+
+void WriteCFILength(std::vector<uint8_t>* buf) {
+ uint32_t length = buf->size() - 4;
+ DCHECK_EQ((length & 0x3), 0U);
+ DCHECK_GT(length, 4U);
+
+ uint8_t *p = buf->data();
+ p[0] = length;
+ p[1] = length >> 8;
+ p[2] = length >> 16;
+ p[3] = length >> 24;
+}
+
+void PadCFI(std::vector<uint8_t>* buf) {
+ while (buf->size() & 0x3) {
+ buf->push_back(0);
+ }
+}
+
+} // namespace art
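
To make the variable-length encoding above concrete: an increment of 5 fits the in-opcode form and emits the single byte 0x45 (0x40 | 5), while an increment of 300 (0x12C) takes the two byte delta form and emits 0x03 0x2C 0x01, low byte first.
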
diff --git a/compiler/utils/dwarf_cfi.h b/compiler/utils/dwarf_cfi.h
new file mode 100644
index 0000000..e5acc0e
--- /dev/null
+++ b/compiler/utils/dwarf_cfi.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_DWARF_CFI_H_
+#define ART_COMPILER_UTILS_DWARF_CFI_H_
+
+#include <vector>
+
+namespace art {
+
+/**
+ * @brief Enter a 'DW_CFA_advance_loc' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param increment Amount by which to increase the current location.
+ */
+void DW_CFA_advance_loc(std::vector<uint8_t>* buf, uint32_t increment);
+
+/**
+ * @brief Enter a 'DW_CFA_offset_extended_sf' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param reg Register number.
+ * @param offset Offset of register address from CFA.
+ */
+void DW_CFA_offset_extended_sf(std::vector<uint8_t>* buf, int reg, int32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_offset' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param reg Register number.
+ * @param offset Offset of register address from CFA.
+ */
+void DW_CFA_offset(std::vector<uint8_t>* buf, int reg, uint32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_def_cfa_offset' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param offset New offset of CFA.
+ */
+void DW_CFA_def_cfa_offset(std::vector<uint8_t>* buf, int32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_remember_state' into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void DW_CFA_remember_state(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Enter a 'DW_CFA_restore_state' into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void DW_CFA_restore_state(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Write FDE header into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void WriteFDEHeader(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Set 'address_range' field of an FDE buffer
+ * @param buf FDE buffer.
+ */
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data);
+
+/**
+ * @brief Set 'length' field of an FDE buffer
+ * @param buf FDE buffer.
+ */
+void WriteCFILength(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Pad an FDE buffer with 0 until its size is a multiple of 4
+ * @param buf FDE buffer.
+ */
+void PadCFI(std::vector<uint8_t>* buf);
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_DWARF_CFI_H_
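
Putting the API together, the intended call order (mirrored by the x86 assembler changes further down) is: write the header first, append CFA opcodes as code is emitted, then patch the address range and length once the final code size is known. A minimal sketch with invented names and example values:

#include <cstdint>
#include <vector>

#include "utils/dwarf_cfi.h"

std::vector<uint8_t> BuildExampleFde(uint32_t code_size_in_bytes) {
  std::vector<uint8_t> fde;
  art::WriteFDEHeader(&fde);                       // length/CIE/location placeholders
  art::DW_CFA_advance_loc(&fde, 4);                // e.g. past a 4 byte push
  art::DW_CFA_def_cfa_offset(&fde, 8);             // CFA offset becomes 8
  art::WriteFDEAddressRange(&fde, code_size_in_bytes);
  art::PadCFI(&fde);                               // pad to a 4 byte multiple
  art::WriteCFILength(&fde);                       // patch 'length' last
  return fde;
}
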
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 37799cb..62ea330 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -120,8 +120,8 @@
return arena_stack_->Alloc(bytes, kind);
}
- // ScopedArenaAllocatorAdapter is incomplete here, we need to define this later.
- ScopedArenaAllocatorAdapter<void> Adapter();
+ // Get adapter for use in STL containers. See scoped_arena_containers.h .
+ ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
// Allow a delete-expression to destroy but not deallocate allocators created by Create().
static void operator delete(void* ptr) { UNUSED(ptr); }
@@ -138,125 +138,6 @@
DISALLOW_COPY_AND_ASSIGN(ScopedArenaAllocator);
};
-template <>
-class ScopedArenaAllocatorAdapter<void>
- : private DebugStackReference, private DebugStackIndirectTopRef {
- public:
- typedef void value_type;
- typedef void* pointer;
- typedef const void* const_pointer;
-
- template <typename U>
- struct rebind {
- typedef ScopedArenaAllocatorAdapter<U> other;
- };
-
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
- arena_stack_(arena_allocator->arena_stack_) {
- }
- template <typename U>
- ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
- : DebugStackReference(other),
- DebugStackIndirectTopRef(other),
- arena_stack_(other.arena_stack_) {
- }
- ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
- ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
- ~ScopedArenaAllocatorAdapter() = default;
-
- private:
- ArenaStack* arena_stack_;
-
- template <typename U>
- friend class ScopedArenaAllocatorAdapter;
-};
-
-// Adapter for use of ScopedArenaAllocator in STL containers.
-template <typename T>
-class ScopedArenaAllocatorAdapter : private DebugStackReference, private DebugStackIndirectTopRef {
- public:
- typedef T value_type;
- typedef T* pointer;
- typedef T& reference;
- typedef const T* const_pointer;
- typedef const T& const_reference;
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
-
- template <typename U>
- struct rebind {
- typedef ScopedArenaAllocatorAdapter<U> other;
- };
-
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
- arena_stack_(arena_allocator->arena_stack_) {
- }
- template <typename U>
- ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
- : DebugStackReference(other),
- DebugStackIndirectTopRef(other),
- arena_stack_(other.arena_stack_) {
- }
- ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
- ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
- ~ScopedArenaAllocatorAdapter() = default;
-
- size_type max_size() const {
- return static_cast<size_type>(-1) / sizeof(T);
- }
-
- pointer address(reference x) const { return &x; }
- const_pointer address(const_reference x) const { return &x; }
-
- pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- DCHECK_LE(n, max_size());
- DebugStackIndirectTopRef::CheckTop();
- return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T), kArenaAllocSTL));
- }
- void deallocate(pointer p, size_type n) {
- DebugStackIndirectTopRef::CheckTop();
- }
-
- void construct(pointer p, const_reference val) {
- DebugStackIndirectTopRef::CheckTop();
- new (static_cast<void*>(p)) value_type(val);
- }
- void destroy(pointer p) {
- DebugStackIndirectTopRef::CheckTop();
- p->~value_type();
- }
-
- private:
- ArenaStack* arena_stack_;
-
- template <typename U>
- friend class ScopedArenaAllocatorAdapter;
-
- template <typename U>
- friend bool operator==(const ScopedArenaAllocatorAdapter<U>& lhs,
- const ScopedArenaAllocatorAdapter<U>& rhs);
-};
-
-template <typename T>
-inline bool operator==(const ScopedArenaAllocatorAdapter<T>& lhs,
- const ScopedArenaAllocatorAdapter<T>& rhs) {
- return lhs.arena_stack_ == rhs.arena_stack_;
-}
-
-template <typename T>
-inline bool operator!=(const ScopedArenaAllocatorAdapter<T>& lhs,
- const ScopedArenaAllocatorAdapter<T>& rhs) {
- return !(lhs == rhs);
-}
-
-inline ScopedArenaAllocatorAdapter<void> ScopedArenaAllocator::Adapter() {
- return ScopedArenaAllocatorAdapter<void>(this);
-}
-
} // namespace art
#endif // ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 6728565..0de7403 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -22,11 +22,23 @@
#include <set>
#include <vector>
+#include "utils/arena_containers.h" // For ArenaAllocatorAdapterKind.
#include "utils/scoped_arena_allocator.h"
#include "safe_map.h"
namespace art {
+// Adapter for use of ScopedArenaAllocator in STL containers.
+// Use ScopedArenaAllocator::Adapter() to create an adapter to pass to container constructors.
+// For example,
+// void foo(ScopedArenaAllocator* allocator) {
+// ScopedArenaVector<int> foo_vector(allocator->Adapter(kArenaAllocMisc));
+// ScopedArenaSafeMap<int, int> foo_map(std::less<int>(), allocator->Adapter());
+// // Use foo_vector and foo_map...
+// }
+template <typename T>
+class ScopedArenaAllocatorAdapter;
+
template <typename T>
using ScopedArenaDeque = std::deque<T, ScopedArenaAllocatorAdapter<T>>;
@@ -43,6 +55,136 @@
using ScopedArenaSafeMap =
SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
+// Implementation details below.
+
+template <>
+class ScopedArenaAllocatorAdapter<void>
+ : private DebugStackReference, private DebugStackIndirectTopRef,
+ private ArenaAllocatorAdapterKind {
+ public:
+ typedef void value_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+
+ template <typename U>
+ struct rebind {
+ typedef ScopedArenaAllocatorAdapter<U> other;
+ };
+
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ ArenaAllocKind kind = kArenaAllocSTL)
+ : DebugStackReference(arena_allocator),
+ DebugStackIndirectTopRef(arena_allocator),
+ ArenaAllocatorAdapterKind(kind),
+ arena_stack_(arena_allocator->arena_stack_) {
+ }
+ template <typename U>
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+ : DebugStackReference(other),
+ DebugStackIndirectTopRef(other),
+ ArenaAllocatorAdapterKind(other),
+ arena_stack_(other.arena_stack_) {
+ }
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+ ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+ ~ScopedArenaAllocatorAdapter() = default;
+
+ private:
+ ArenaStack* arena_stack_;
+
+ template <typename U>
+ friend class ScopedArenaAllocatorAdapter;
+};
+
+template <typename T>
+class ScopedArenaAllocatorAdapter
+ : private DebugStackReference, private DebugStackIndirectTopRef,
+ private ArenaAllocatorAdapterKind {
+ public:
+ typedef T value_type;
+ typedef T* pointer;
+ typedef T& reference;
+ typedef const T* const_pointer;
+ typedef const T& const_reference;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ template <typename U>
+ struct rebind {
+ typedef ScopedArenaAllocatorAdapter<U> other;
+ };
+
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ ArenaAllocKind kind = kArenaAllocSTL)
+ : DebugStackReference(arena_allocator),
+ DebugStackIndirectTopRef(arena_allocator),
+ ArenaAllocatorAdapterKind(kind),
+ arena_stack_(arena_allocator->arena_stack_) {
+ }
+ template <typename U>
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+ : DebugStackReference(other),
+ DebugStackIndirectTopRef(other),
+ ArenaAllocatorAdapterKind(other),
+ arena_stack_(other.arena_stack_) {
+ }
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+ ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+ ~ScopedArenaAllocatorAdapter() = default;
+
+ size_type max_size() const {
+ return static_cast<size_type>(-1) / sizeof(T);
+ }
+
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+
+ pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ DCHECK_LE(n, max_size());
+ DebugStackIndirectTopRef::CheckTop();
+ return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
+ ArenaAllocatorAdapterKind::Kind()));
+ }
+ void deallocate(pointer p, size_type n) {
+ DebugStackIndirectTopRef::CheckTop();
+ }
+
+ void construct(pointer p, const_reference val) {
+ // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
+ new (static_cast<void*>(p)) value_type(val);
+ }
+ void destroy(pointer p) {
+ // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
+ p->~value_type();
+ }
+
+ private:
+ ArenaStack* arena_stack_;
+
+ template <typename U>
+ friend class ScopedArenaAllocatorAdapter;
+
+ template <typename U>
+ friend bool operator==(const ScopedArenaAllocatorAdapter<U>& lhs,
+ const ScopedArenaAllocatorAdapter<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const ScopedArenaAllocatorAdapter<T>& lhs,
+ const ScopedArenaAllocatorAdapter<T>& rhs) {
+ return lhs.arena_stack_ == rhs.arena_stack_;
+}
+
+template <typename T>
+inline bool operator!=(const ScopedArenaAllocatorAdapter<T>& lhs,
+ const ScopedArenaAllocatorAdapter<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+inline ScopedArenaAllocatorAdapter<void> ScopedArenaAllocator::Adapter(ArenaAllocKind kind) {
+ return ScopedArenaAllocatorAdapter<void>(this, kind);
+}
+
} // namespace art
#endif // ART_COMPILER_UTILS_SCOPED_ARENA_CONTAINERS_H_
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
new file mode 100644
index 0000000..63adbc2
--- /dev/null
+++ b/compiler/utils/stack_checks.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_STACK_CHECKS_H_
+#define ART_COMPILER_UTILS_STACK_CHECKS_H_
+
+#include "instruction_set.h"
+
+namespace art {
+
+// Size of a frame that we definitely consider large. Anything larger than this should
+// definitely get a stack overflow check.
+static constexpr size_t kLargeFrameSize = 2 * KB;
+
+// Size of a frame that should be small. Any leaf method with a frame smaller than this should
+// run without a stack overflow check.
+// The constant is from experience with frameworks code.
+static constexpr size_t kSmallFrameSize = 1 * KB;
+
+// Determine whether a frame is small or large, used in the decision on whether to elide a
+// stack overflow check on method entry.
+//
+// A frame is considered large when it is at least kLargeFrameSize, or at least a quarter of
+// the overflow-usable stack space.
+static inline bool IsLargeFrame(size_t size, InstructionSet isa) {
+ return size >= kLargeFrameSize || size >= GetStackOverflowReservedBytes(isa) / 4;
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_STACK_CHECKS_H_
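
A worked example of the predicate above, using a hypothetical 16 KB overflow-reserved region (the real value is per-ISA and comes from GetStackOverflowReservedBytes()); the *Sketch names are invented so the snippet stands alone:

#include <cstddef>

constexpr size_t kLargeFrameSizeSketch = 2 * 1024;
constexpr size_t kHypotheticalReservedBytes = 16 * 1024;  // illustrative, not a real per-ISA value

constexpr bool IsLargeFrameSketch(size_t size) {
  return size >= kLargeFrameSizeSketch || size >= kHypotheticalReservedBytes / 4;
}

static_assert(!IsLargeFrameSketch(1024), "a 1 KB leaf frame may elide the check");
static_assert(IsLargeFrameSketch(2 * 1024), "the 2 KB threshold itself counts as large");
static_assert(IsLargeFrameSketch(3 * 1024), "a 3 KB frame always gets the check");
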
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 56c6536..48edb15 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -20,6 +20,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"
+#include "utils/dwarf_cfi.h"
namespace art {
namespace x86 {
@@ -806,6 +807,13 @@
}
+void X86Assembler::testl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x85);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::testl(Register reg, const Immediate& immediate) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
// For registers that have a byte variant (EAX, EBX, ECX, and EDX)
@@ -1400,20 +1408,61 @@
EmitOperand(reg_or_opcode, Operand(operand));
}
+void X86Assembler::InitializeFrameDescriptionEntry() {
+ WriteFDEHeader(&cfi_info_);
+}
+
+void X86Assembler::FinalizeFrameDescriptionEntry() {
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ PadCFI(&cfi_info_);
+ WriteCFILength(&cfi_info_);
+}
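+// The FDE is assembled incrementally: InitializeFrameDescriptionEntry() emits the header,
+// DW_CFA_* records are appended as the prologue instructions are emitted, and
+// FinalizeFrameDescriptionEntry() fills in the code address range, padding and length once
+// the method size is known.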
+
constexpr size_t kFramePointerSize = 4;
void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& spill_regs,
const ManagedRegisterEntrySpills& entry_spills) {
+ cfi_cfa_offset_ = kFramePointerSize; // Only return address on stack
+ cfi_pc_ = buffer_.Size(); // Nothing emitted yet
+ DCHECK_EQ(cfi_pc_, 0U);
+
+ uint32_t reg_offset = 1;
CHECK_ALIGNED(frame_size, kStackAlignment);
for (int i = spill_regs.size() - 1; i >= 0; --i) {
pushl(spill_regs.at(i).AsX86().AsCpuRegister());
+
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+ // DW_CFA_offset reg offset
+ reg_offset++;
+ DW_CFA_offset(&cfi_info_, spill_regs.at(i).AsX86().DWARFRegId(), reg_offset);
}
+
// return address then method on stack
- addl(ESP, Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
- sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
- kFramePointerSize /*return address*/));
+ int32_t adjust = frame_size - (spill_regs.size() * kFramePointerSize) -
+ sizeof(StackReference<mirror::ArtMethod>) /*method*/ -
+ kFramePointerSize /*return address*/;
+ addl(ESP, Immediate(-adjust));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
pushl(method_reg.AsX86().AsCpuRegister());
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
for (size_t i = 0; i < entry_spills.size(); ++i) {
movl(Address(ESP, frame_size + sizeof(StackReference<mirror::ArtMethod>) +
(i * kFramePointerSize)),
@@ -1435,6 +1484,12 @@
void X86Assembler::IncreaseFrameSize(size_t adjust) {
CHECK_ALIGNED(adjust, kStackAlignment);
addl(ESP, Immediate(-adjust));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
}
void X86Assembler::DecreaseFrameSize(size_t adjust) {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 2fc6049..5c4e34f 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -346,6 +346,7 @@
void testl(Register reg1, Register reg2);
void testl(Register reg, const Immediate& imm);
+ void testl(Register reg1, const Address& address);
void andl(Register dst, const Immediate& imm);
void andl(Register dst, Register src);
@@ -570,6 +571,12 @@
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void InitializeFrameDescriptionEntry() OVERRIDE;
+ void FinalizeFrameDescriptionEntry() OVERRIDE;
+ std::vector<uint8_t>* GetFrameDescriptionEntry() OVERRIDE {
+ return &cfi_info_;
+ }
+
private:
inline void EmitUint8(uint8_t value);
inline void EmitInt32(int32_t value);
@@ -588,6 +595,9 @@
void EmitGenericShift(int rm, Register reg, const Immediate& imm);
void EmitGenericShift(int rm, Register operand, Register shifter);
+ std::vector<uint8_t> cfi_info_;
+ uint32_t cfi_cfa_offset_, cfi_pc_;
+
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
};
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index 09d2b49..5d46ee2 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -88,6 +88,14 @@
// There is a one-to-one mapping between ManagedRegister and register id.
class X86ManagedRegister : public ManagedRegister {
public:
+ int DWARFRegId() const {
+ CHECK(IsCpuRegister());
+ // For all the X86 registers we care about:
+ // EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
+ // DWARF register id is the same as id_.
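+ // For example, EAX (id 0) maps to DWARF register 0 and EDI (id 7) to DWARF register 7,
+ // assuming the standard x86 register encoding order listed above.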
+ return static_cast<int>(id_);
+ }
+
ByteRegister AsByteRegister() const {
CHECK(IsCpuRegister());
CHECK_LT(AsCpuRegister(), ESP); // ESP, EBP, ESI and EDI cannot be encoded as byte registers.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 78738d8..62b72c2 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -20,6 +20,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"
+#include "utils/dwarf_cfi.h"
namespace art {
namespace x86_64 {
@@ -283,8 +284,8 @@
void X86_64Assembler::movw(const Address& dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(src, dst);
EmitOperandSizeOverride();
+ EmitOptionalRex32(src, dst);
EmitUint8(0x89);
EmitOperand(src.LowBits(), dst);
}
@@ -869,6 +870,22 @@
}
+void X86_64Assembler::cmpq(CpuRegister reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32()); // cmpq only supports 32b immediate.
+ EmitRex64(reg);
+ EmitComplex(7, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::cmpq(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0x3B);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::addl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -934,6 +951,14 @@
}
+void X86_64Assembler::testq(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0x85);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::andl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1063,6 +1088,14 @@
}
+void X86_64Assembler::addq(CpuRegister dst, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0x03);
+ EmitOperand(dst.LowBits(), address);
+}
+
+
void X86_64Assembler::addq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
// 0x01 is addq r/m64 <- r/m64 + r64, with op1 in r/m and op2 in reg: so reverse EmitRex64
@@ -1118,6 +1151,14 @@
}
+void X86_64Assembler::subq(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0x2B);
+ EmitOperand(reg.LowBits() & 7, address);
+}
+
+
void X86_64Assembler::subl(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg, address);
@@ -1201,7 +1242,7 @@
void X86_64Assembler::shll(CpuRegister reg, const Immediate& imm) {
- EmitGenericShift(4, reg, imm);
+ EmitGenericShift(false, 4, reg, imm);
}
@@ -1211,7 +1252,12 @@
void X86_64Assembler::shrl(CpuRegister reg, const Immediate& imm) {
- EmitGenericShift(5, reg, imm);
+ EmitGenericShift(false, 5, reg, imm);
+}
+
+
+void X86_64Assembler::shrq(CpuRegister reg, const Immediate& imm) {
+ EmitGenericShift(true, 5, reg, imm);
}
@@ -1221,7 +1267,7 @@
void X86_64Assembler::sarl(CpuRegister reg, const Immediate& imm) {
- EmitGenericShift(7, reg, imm);
+ EmitGenericShift(false, 7, reg, imm);
}
@@ -1537,11 +1583,15 @@
}
-void X86_64Assembler::EmitGenericShift(int reg_or_opcode,
- CpuRegister reg,
- const Immediate& imm) {
+void X86_64Assembler::EmitGenericShift(bool wide,
+ int reg_or_opcode,
+ CpuRegister reg,
+ const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int8());
+ if (wide) {
+ EmitRex64(reg);
+ }
if (imm.value() == 1) {
EmitUint8(0xD1);
EmitOperand(reg_or_opcode, Operand(reg));
@@ -1554,8 +1604,8 @@
void X86_64Assembler::EmitGenericShift(int reg_or_opcode,
- CpuRegister operand,
- CpuRegister shifter) {
+ CpuRegister operand,
+ CpuRegister shifter) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK_EQ(shifter.AsRegister(), RCX);
EmitUint8(0xD3);
@@ -1665,11 +1715,26 @@
}
}
+void X86_64Assembler::InitializeFrameDescriptionEntry() {
+ WriteFDEHeader(&cfi_info_);
+}
+
+void X86_64Assembler::FinalizeFrameDescriptionEntry() {
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ PadCFI(&cfi_info_);
+ WriteCFILength(&cfi_info_);
+}
+
constexpr size_t kFramePointerSize = 8;
void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& spill_regs,
const ManagedRegisterEntrySpills& entry_spills) {
+ cfi_cfa_offset_ = kFramePointerSize; // Only return address on stack
+ cfi_pc_ = buffer_.Size(); // Nothing emitted yet
+ DCHECK_EQ(cfi_pc_, 0U);
+
+ uint32_t reg_offset = 1;
CHECK_ALIGNED(frame_size, kStackAlignment);
int gpr_count = 0;
for (int i = spill_regs.size() - 1; i >= 0; --i) {
@@ -1677,6 +1742,16 @@
if (spill.IsCpuRegister()) {
pushq(spill.AsCpuRegister());
gpr_count++;
+
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += kFramePointerSize;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+ // DW_CFA_offset reg offset
+ reg_offset++;
+ DW_CFA_offset(&cfi_info_, spill.DWARFRegId(), reg_offset);
}
}
// return address then method on stack
@@ -1684,6 +1759,13 @@
- (gpr_count * kFramePointerSize)
- kFramePointerSize /*return address*/;
subq(CpuRegister(RSP), Immediate(rest_of_frame));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += rest_of_frame;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
// spill xmms
int64_t offset = rest_of_frame;
for (int i = spill_regs.size() - 1; i >= 0; --i) {
@@ -1747,6 +1829,12 @@
void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
CHECK_ALIGNED(adjust, kStackAlignment);
addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+ // DW_CFA_advance_loc
+ DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+ cfi_pc_ = buffer_.Size();
+ // DW_CFA_def_cfa_offset
+ cfi_cfa_offset_ += adjust;
+ DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
}
void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 7514854..ee11575 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -124,8 +124,8 @@
if (index.NeedsRex()) {
rex_ |= 0x42; // REX.00X0
}
- encoding_[1] = (scale << 6) | (static_cast<uint8_t>(index.AsRegister()) << 3) |
- static_cast<uint8_t>(base.AsRegister());
+ encoding_[1] = (scale << 6) | (static_cast<uint8_t>(index.LowBits()) << 3) |
+ static_cast<uint8_t>(base.LowBits());
length_ = 2;
}
@@ -385,10 +385,14 @@
void cmpl(const Address& address, const Immediate& imm);
void cmpq(CpuRegister reg0, CpuRegister reg1);
+ void cmpq(CpuRegister reg0, const Immediate& imm);
+ void cmpq(CpuRegister reg0, const Address& address);
void testl(CpuRegister reg1, CpuRegister reg2);
void testl(CpuRegister reg, const Immediate& imm);
+ void testq(CpuRegister reg, const Address& address);
+
void andl(CpuRegister dst, const Immediate& imm);
void andl(CpuRegister dst, CpuRegister src);
void andq(CpuRegister dst, const Immediate& imm);
@@ -408,6 +412,7 @@
void addq(CpuRegister reg, const Immediate& imm);
void addq(CpuRegister dst, CpuRegister src);
+ void addq(CpuRegister dst, const Address& address);
void subl(CpuRegister dst, CpuRegister src);
void subl(CpuRegister reg, const Immediate& imm);
@@ -415,6 +420,7 @@
void subq(CpuRegister reg, const Immediate& imm);
void subq(CpuRegister dst, CpuRegister src);
+ void subq(CpuRegister dst, const Address& address);
void cdq();
@@ -437,6 +443,8 @@
void sarl(CpuRegister reg, const Immediate& imm);
void sarl(CpuRegister operand, CpuRegister shifter);
+ void shrq(CpuRegister reg, const Immediate& imm);
+
void negl(CpuRegister reg);
void notl(CpuRegister reg);
@@ -606,6 +614,12 @@
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ void InitializeFrameDescriptionEntry() OVERRIDE;
+ void FinalizeFrameDescriptionEntry() OVERRIDE;
+ std::vector<uint8_t>* GetFrameDescriptionEntry() OVERRIDE {
+ return &cfi_info_;
+ }
+
private:
void EmitUint8(uint8_t value);
void EmitInt32(int32_t value);
@@ -622,7 +636,7 @@
void EmitLabelLink(Label* label);
void EmitNearLabelLink(Label* label);
- void EmitGenericShift(int rm, CpuRegister reg, const Immediate& imm);
+ void EmitGenericShift(bool wide, int rm, CpuRegister reg, const Immediate& imm);
void EmitGenericShift(int rm, CpuRegister operand, CpuRegister shifter);
// If any input is not false, output the necessary rex prefix.
@@ -647,6 +661,9 @@
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
+ std::vector<uint8_t> cfi_info_;
+ uint32_t cfi_cfa_offset_, cfi_pc_;
+
DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
};
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index dc1758f..4ed7b20 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -128,13 +128,29 @@
TEST_F(AssemblerX86_64Test, Movl) {
GetAssembler()->movl(x86_64::CpuRegister(x86_64::R8), x86_64::CpuRegister(x86_64::R11));
GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::CpuRegister(x86_64::R11));
+ GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
+ GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12));
+ GetAssembler()->movl(x86_64::CpuRegister(x86_64::R8), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12));
const char* expected =
"movl %R11d, %R8d\n"
- "movl %R11d, %EAX\n";
+ "movl %R11d, %EAX\n"
+ "movl 0xc(%RDI,%RBX,4), %EAX\n"
+ "movl 0xc(%RDI,%R9,4), %EAX\n"
+ "movl 0xc(%RDI,%R9,4), %R8d\n";
DriverStr(expected, "movl");
}
+TEST_F(AssemblerX86_64Test, Movw) {
+ GetAssembler()->movw(x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ x86_64::CpuRegister(x86_64::R9));
+ const char* expected = "movw %R9w, 0(%RAX)\n";
+ DriverStr(expected, "movw");
+}
+
std::string setcc_test_fn(x86_64::X86_64Assembler* assembler) {
// From Condition
diff --git a/compiler/utils/x86_64/managed_register_x86_64.h b/compiler/utils/x86_64/managed_register_x86_64.h
index 822659f..3a96ad0 100644
--- a/compiler/utils/x86_64/managed_register_x86_64.h
+++ b/compiler/utils/x86_64/managed_register_x86_64.h
@@ -87,6 +87,21 @@
// There is a one-to-one mapping between ManagedRegister and register id.
class X86_64ManagedRegister : public ManagedRegister {
public:
+ int DWARFRegId() const {
+ CHECK(IsCpuRegister());
+ switch (id_) {
+ case RAX: return 0;
+ case RDX: return 1;
+ case RCX: return 2;
+ case RBX: return 3;
+ case RSI: return 4;
+ case RDI: return 5;
+ case RBP: return 6;
+ case RSP: return 7;
+ default: return static_cast<int>(id_); // R8 ~ R15
+ }
+ }
+
CpuRegister AsCpuRegister() const {
CHECK(IsCpuRegister());
return CpuRegister(static_cast<Register>(id_));
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index 28db711..2ef826b 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -37,9 +37,9 @@
endif
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug))
endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug))
endif
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index eceebaf..403cb80 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -45,11 +45,11 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "elf_fixup.h"
+#include "elf_patcher.h"
#include "elf_stripper.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image_writer.h"
-#include "implicit_check_options.h"
#include "leb128.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -325,11 +325,28 @@
return ReadImageClasses(image_classes_stream);
}
+ bool PatchOatCode(const CompilerDriver* compiler_driver, File* oat_file,
+ const std::string& oat_location, std::string* error_msg) {
+ // We asked to include patch information but we are not making an image. We need to fix
+ // everything up manually.
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(oat_file, PROT_READ|PROT_WRITE,
+ MAP_SHARED, error_msg));
+ if (elf_file.get() == NULL) {
+ LOG(ERROR) << error_msg;
+ return false;
+ }
+ {
+ ReaderMutexLock mu(Thread::Current(), *Locks::mutator_lock_);
+ return ElfPatcher::Patch(compiler_driver, elf_file.get(), oat_location, error_msg);
+ }
+ }
+
const CompilerDriver* CreateOatFile(const std::string& boot_image_option,
const std::string& android_root,
bool is_host,
const std::vector<const DexFile*>& dex_files,
File* oat_file,
+ const std::string& oat_location,
const std::string& bitcode_filename,
bool image,
std::unique_ptr<CompilerDriver::DescriptorSet>& image_classes,
@@ -381,6 +398,7 @@
std::string image_file_location;
uint32_t image_file_location_oat_checksum = 0;
uintptr_t image_file_location_oat_data_begin = 0;
+ int32_t image_patch_delta = 0;
if (!driver->IsImage()) {
TimingLogger::ScopedTiming t3("Loading image checksum", &timings);
gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
@@ -388,6 +406,7 @@
image_file_location_oat_data_begin =
reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatDataBegin());
image_file_location = image_space->GetImageFilename();
+ image_patch_delta = image_space->GetImageHeader().GetPatchDelta();
}
if (!image_file_location.empty()) {
@@ -396,6 +415,7 @@
OatWriter oat_writer(dex_files, image_file_location_oat_checksum,
image_file_location_oat_data_begin,
+ image_patch_delta,
driver.get(),
&timings,
key_value_store);
@@ -406,6 +426,15 @@
return nullptr;
}
+ if (!driver->IsImage() && driver->GetCompilerOptions().GetIncludePatchInformation()) {
+ t2.NewTiming("Patching ELF");
+ std::string error_msg;
+ if (!PatchOatCode(driver.get(), oat_file, oat_location, &error_msg)) {
+ LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath() << ": " << error_msg;
+ return nullptr;
+ }
+ }
+
return driver.release();
}
@@ -742,20 +771,6 @@
*parsed_value = value;
}
-void CheckExplicitCheckOptions(InstructionSet isa, bool* explicit_null_checks,
- bool* explicit_so_checks, bool* explicit_suspend_checks) {
- switch (isa) {
- case kArm:
- case kThumb2:
- break; // All checks implemented, leave as is.
-
- default: // No checks implemented, reset all to explicit checks.
- *explicit_null_checks = true;
- *explicit_so_checks = true;
- *explicit_suspend_checks = true;
- }
-}
-
static int dex2oat(int argc, char** argv) {
#if defined(__linux__) && defined(__arm__)
int major, minor;
@@ -833,16 +848,15 @@
bool dump_timing = false;
bool dump_passes = false;
bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
- bool explicit_include_patch_information = false;
bool include_debug_symbols = kIsDebugBuild;
bool dump_slow_timing = kIsDebugBuild;
bool watch_dog_enabled = true;
bool generate_gdb_information = kIsDebugBuild;
- bool explicit_null_checks = true;
- bool explicit_so_checks = true;
- bool explicit_suspend_checks = true;
- bool has_explicit_checks_options = false;
+ // Checks are all explicit until we know the architecture.
+ bool implicit_null_checks = false;
+ bool implicit_so_checks = false;
+ bool implicit_suspend_checks = false;
for (int i = 0; i < argc; i++) {
const StringPiece option(argv[i]);
@@ -1019,37 +1033,10 @@
} else if (option.starts_with("--dump-cfg-passes=")) {
std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
PassDriverMEOpts::SetDumpPassList(dump_passes);
- } else if (option.starts_with("--implicit-checks=")) {
- std::string checks = option.substr(strlen("--implicit-checks=")).data();
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_null_checks = true;
- explicit_so_checks = true;
- explicit_suspend_checks = true;
- } else if (val == "null") {
- explicit_null_checks = false;
- } else if (val == "suspend") {
- explicit_suspend_checks = false;
- } else if (val == "stack") {
- explicit_so_checks = false;
- } else if (val == "all") {
- explicit_null_checks = false;
- explicit_so_checks = false;
- explicit_suspend_checks = false;
- } else {
- Usage("--implicit-checks passed non-recognized value %s", val.c_str());
- }
- }
- has_explicit_checks_options = true;
} else if (option == "--include-patch-information") {
include_patch_information = true;
- explicit_include_patch_information = true;
} else if (option == "--no-include-patch-information") {
include_patch_information = false;
- explicit_include_patch_information = true;
} else {
Usage("Unknown argument %s", option.data());
}
@@ -1176,12 +1163,20 @@
Usage("Unknown --compiler-filter value %s", compiler_filter_string);
}
- ImplicitCheckOptions::CheckISASupport(instruction_set, &explicit_null_checks, &explicit_so_checks,
- &explicit_suspend_checks);
+ // Set the compilation target's implicit checks options.
+ switch (instruction_set) {
+ case kArm:
+ case kThumb2:
+ case kArm64:
+ case kX86:
+ case kX86_64:
+ implicit_null_checks = true;
+ implicit_so_checks = true;
+ break;
- if (!explicit_include_patch_information) {
- include_patch_information =
- (compiler_kind == Compiler::kQuick && CompilerOptions::kDefaultIncludePatchInformation);
+ default:
+ // Defaults are correct.
+ break;
}
std::unique_ptr<CompilerOptions> compiler_options(new CompilerOptions(compiler_filter,
@@ -1194,9 +1189,9 @@
include_patch_information,
top_k_profile_threshold,
include_debug_symbols,
- explicit_null_checks,
- explicit_so_checks,
- explicit_suspend_checks
+ implicit_null_checks,
+ implicit_so_checks,
+ implicit_suspend_checks
#ifdef ART_SEA_IR_MODE
, compiler_options.sea_ir_ =
true;
@@ -1247,7 +1242,7 @@
}
std::unique_ptr<VerificationResults> verification_results(new VerificationResults(
- compiler_options.get()));
+ compiler_options.get()));
DexFileToMethodInlinerMap method_inliner_map;
QuickCompilerCallbacks callbacks(verification_results.get(), &method_inliner_map);
runtime_options.push_back(std::make_pair("compilercallbacks", &callbacks));
@@ -1270,18 +1265,6 @@
}
std::unique_ptr<Dex2Oat> dex2oat(p_dex2oat);
- // TODO: Not sure whether it's a good idea to allow anything else but the runtime option in
- // this case at all, as we'll have to throw away produced code for a mismatch.
- if (!has_explicit_checks_options) {
- if (ImplicitCheckOptions::CheckForCompiling(kRuntimeISA, instruction_set, &explicit_null_checks,
- &explicit_so_checks, &explicit_suspend_checks)) {
- compiler_options->SetExplicitNullChecks(explicit_null_checks);
- compiler_options->SetExplicitStackOverflowChecks(explicit_so_checks);
- compiler_options->SetExplicitSuspendChecks(explicit_suspend_checks);
- }
- }
-
-
// Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
// give it away now so that we don't starve GC.
Thread* self = Thread::Current();
@@ -1384,14 +1367,6 @@
std::unique_ptr<SafeMap<std::string, std::string> > key_value_store(
new SafeMap<std::string, std::string>());
- // Insert implicit check options.
- key_value_store->Put(ImplicitCheckOptions::kImplicitChecksOatHeaderKey,
- ImplicitCheckOptions::Serialize(compiler_options->GetExplicitNullChecks(),
- compiler_options->
- GetExplicitStackOverflowChecks(),
- compiler_options->
- GetExplicitSuspendChecks()));
-
// Insert some compiler things.
std::ostringstream oss;
for (int i = 0; i < argc; ++i) {
@@ -1410,6 +1385,7 @@
is_host,
dex_files,
oat_file.get(),
+ oat_location,
bitcode_filename,
image,
image_classes,
@@ -1419,7 +1395,6 @@
compiler_phases_timings,
profile_file,
key_value_store.get()));
-
if (compiler.get() == nullptr) {
LOG(ERROR) << "Failed to create oat file: " << oat_location;
return EXIT_FAILURE;
@@ -1469,9 +1444,9 @@
// memory mapped so we could predict where its contents were based
// on the file size. Now that it is an ELF file, we need to inspect
// the ELF file to understand the in memory segment layout including
- // where the oat header is located within. ImageWriter's
- // PatchOatCodeAndMethods uses the PatchInformation from the
- // Compiler to touch up absolute references in the oat file.
+ // where the oat header is located within. ElfPatcher's Patch method
+ // uses the PatchInformation from the Compiler to touch up absolute
+ // references in the oat file.
//
// 3. We fixup the ELF program headers so that dlopen will try to
// load the .so at the desired location at runtime by offsetting the
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index 7547ab7..183e692 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -43,6 +43,10 @@
DISALLOW_COPY_AND_ASSIGN(Disassembler);
};
+static inline bool HasBitSet(uint32_t value, uint32_t bit) {
+ return (value & (1 << bit)) != 0;
+}
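+// For example, the ARM disassembler uses HasBitSet(instr, 20) to tell loads from stores,
+// since bit 20 is the L (load) bit: HasBitSet(0x00100000, 20) is true.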
+
} // namespace art
#endif // ART_DISASSEMBLER_DISASSEMBLER_H_
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 1f565e5..56023c1 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -1262,7 +1262,7 @@
case 3:
switch (op2) {
case 0x00: case 0x02: case 0x04: case 0x06: // 000xxx0
- case 0x08: case 0x0A: case 0x0C: case 0x0E: {
+ case 0x08: case 0x09: case 0x0A: case 0x0C: case 0x0E: {
// Store single data item
// |111|11|100|000|0|0000|1111|110000|000000|
// |5 3|21|098|765|4|3 0|5 2|10 6|5 0|
@@ -1275,12 +1275,40 @@
// uint32_t op4 = (instr >> 6) & 0x3F;
switch (op3) {
case 0x0: case 0x4: {
- // STRB Rt,[Rn,#+/-imm8] - 111 11 00 0 0 00 0 nnnn tttt 1 PUWii ii iiii
- // STRB Rt,[Rn,Rm,lsl #imm2] - 111 11 00 0 0 00 0 nnnn tttt 0 00000 ii mmmm
+ // {ST,LD}RB Rt,[Rn,#+/-imm12] - 111 11 00 0 1 00 0 nnnn tttt 1 PUWii ii iiii
+ // {ST,LD}RB Rt,[Rn,#+/-imm8] - 111 11 00 0 0 00 0 nnnn tttt 1 PUWii ii iiii
+ // {ST,LD}RB Rt,[Rn,Rm,lsl #imm2] - 111 11 00 0 0 00 0 nnnn tttt 0 00000 ii mmmm
ArmRegister Rn(instr, 16);
ArmRegister Rt(instr, 12);
- opcode << "strb";
- if ((instr & 0x800) != 0) {
+ opcode << (HasBitSet(instr, 20) ? "ldrb" : "strb");
+ if (HasBitSet(instr, 23)) {
+ uint32_t imm12 = instr & 0xFFF;
+ args << Rt << ", [" << Rn << ",#" << imm12 << "]";
+ } else if ((instr & 0x800) != 0) {
+ uint32_t imm8 = instr & 0xFF;
+ args << Rt << ", [" << Rn << ",#" << imm8 << "]";
+ } else {
+ uint32_t imm2 = (instr >> 4) & 3;
+ ArmRegister Rm(instr, 0);
+ args << Rt << ", [" << Rn << ", " << Rm;
+ if (imm2 != 0) {
+ args << ", " << "lsl #" << imm2;
+ }
+ args << "]";
+ }
+ break;
+ }
+ case 0x1: case 0x5: {
+ // STRH Rt,[Rn,#+/-imm12] - 111 11 00 0 1 01 0 nnnn tttt 1 PUWii ii iiii
+ // STRH Rt,[Rn,#+/-imm8] - 111 11 00 0 0 01 0 nnnn tttt 1 PUWii ii iiii
+ // STRH Rt,[Rn,Rm,lsl #imm2] - 111 11 00 0 0 01 0 nnnn tttt 0 00000 ii mmmm
+ ArmRegister Rn(instr, 16);
+ ArmRegister Rt(instr, 12);
+ opcode << "strh";
+ if (HasBitSet(instr, 23)) {
+ uint32_t imm12 = instr & 0xFFF;
+ args << Rt << ", [" << Rn << ",#" << imm12 << "]";
+ } else if ((instr & 0x800) != 0) {
uint32_t imm8 = instr & 0xFF;
args << Rt << ", [" << Rn << ",#" << imm8 << "]";
} else {
@@ -1351,8 +1379,8 @@
break;
}
- case 0x03: case 0x0B: case 0x13: case 0x1B: { // 00xx011
- // Load halfword
+ case 0x03: case 0x0B: case 0x11: case 0x13: case 0x19: case 0x1B: { // 00xx011
+ // Load byte/halfword
// |111|11|10|0 0|00|0|0000|1111|110000|000000|
// |5 3|21|09|8 7|65|4|3 0|5 2|10 6|5 0|
// |---|--|--|---|--|-|----|----|------|------|
@@ -1380,8 +1408,9 @@
}
} else if (op3 == 3) {
// LDRSH.W Rt, [Rn, #imm12] - 111 11 00 11 011 nnnn tttt iiiiiiiiiiii
+ // LDRSB.W Rt, [Rn, #imm12] - 111 11 00 11 001 nnnn tttt iiiiiiiiiiii
uint32_t imm12 = instr & 0xFFF;
- opcode << "ldrsh.w";
+ opcode << (HasBitSet(instr, 20) ? "ldrsb.w" : "ldrsh.w");
args << Rt << ", [" << Rn << ", #" << imm12 << "]";
if (Rn.r == 9) {
args << " ; ";
@@ -1575,6 +1604,9 @@
opcode << it_conditions_.back();
it_conditions_.pop_back();
}
+ if (opcode.str().size() == 0) {
+ opcode << "UNKNOWN " << op2;
+ }
os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instr, opcode.str().c_str()) << args.str() << '\n';
return 4;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 101a55d..0ca8962 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -268,7 +268,7 @@
target_specific = true;
break;
case 0x63:
- if (rex == 0x48) {
+ if ((rex & REX_W) != 0) {
opcode << "movsxd";
has_modrm = true;
load = true;
@@ -959,7 +959,7 @@
byte_operand = true;
break;
case 0xB8: case 0xB9: case 0xBA: case 0xBB: case 0xBC: case 0xBD: case 0xBE: case 0xBF:
- if (rex == 0x48) {
+ if ((rex & REX_W) != 0) {
opcode << "movabsq";
immediate_bytes = 8;
reg_in_opcode = true;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index b8f20f3..068a450 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -165,6 +165,8 @@
GetQuickToInterpreterBridgeOffset);
#undef DUMP_OAT_HEADER_OFFSET
+ os << "IMAGE PATCH DELTA:\n" << oat_header.GetImagePatchDelta();
+
os << "IMAGE FILE LOCATION OAT CHECKSUM:\n";
os << StringPrintf("0x%08x\n\n", oat_header.GetImageFileLocationOatChecksum());
@@ -771,6 +773,8 @@
os << "OAT FILE END:" << reinterpret_cast<void*>(image_header_.GetOatFileEnd()) << "\n\n";
+ os << "PATCH DELTA:" << image_header_.GetPatchDelta() << "\n\n";
+
{
os << "ROOTS: " << reinterpret_cast<void*>(image_header_.GetImageRoots()) << "\n";
Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
@@ -819,10 +823,13 @@
os << "OAT LOCATION: " << oat_location;
os << "\n";
std::string error_msg;
- const OatFile* oat_file = class_linker->FindOatFileFromOatLocation(oat_location, &error_msg);
- if (oat_file == NULL) {
- os << "NOT FOUND: " << error_msg << "\n";
- return;
+ const OatFile* oat_file = class_linker->FindOpenedOatFileFromOatLocation(oat_location);
+ if (oat_file == nullptr) {
+ oat_file = OatFile::Open(oat_location, oat_location, NULL, false, &error_msg);
+ if (oat_file == nullptr) {
+ os << "NOT FOUND: " << error_msg << "\n";
+ return;
+ }
}
os << "\n";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 85b4e6d..eed20da 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -17,11 +17,14 @@
#include <stdio.h>
#include <stdlib.h>
+#include <sys/file.h>
#include <sys/stat.h>
+#include <unistd.h>
#include <string>
#include <vector>
+#include "base/scoped_flock.h"
#include "base/stringpiece.h"
#include "base/stringprintf.h"
#include "elf_utils.h"
@@ -63,6 +66,47 @@
}
}
+static bool LocationToFilename(const std::string& location, InstructionSet isa,
+ std::string* filename) {
+ bool has_system = false;
+ bool has_cache = false;
+ // image_location = /system/framework/boot.art
+ // system_image_location = /system/framework/<image_isa>/boot.art
+ std::string system_filename(GetSystemImageFilename(location.c_str(), isa));
+ if (OS::FileExists(system_filename.c_str())) {
+ has_system = true;
+ }
+
+ bool have_android_data = false;
+ bool dalvik_cache_exists = false;
+ std::string dalvik_cache;
+ GetDalvikCache(GetInstructionSetString(isa), false, &dalvik_cache,
+ &have_android_data, &dalvik_cache_exists);
+
+ std::string cache_filename;
+ if (have_android_data && dalvik_cache_exists) {
+ // Always set output location even if it does not exist,
+ // so that the caller knows where to create the image.
+ //
+ // image_location = /system/framework/boot.art
+ // *image_filename = /data/dalvik-cache/<image_isa>/boot.art
+ std::string error_msg;
+ if (GetDalvikCacheFilename(location.c_str(), dalvik_cache.c_str(),
+ &cache_filename, &error_msg)) {
+ has_cache = true;
+ }
+ }
+ if (has_system) {
+ *filename = system_filename;
+ return true;
+ } else if (has_cache) {
+ *filename = cache_filename;
+ return true;
+ } else {
+ return false;
+ }
+}
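+// For example (hypothetical location): LocationToFilename("/system/framework/boot.art", kArm,
+// &filename) prefers /system/framework/arm/boot.art and only falls back to the dalvik-cache
+// copy when the /system file does not exist.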
+
bool PatchOat::Patch(const std::string& image_location, off_t delta,
File* output_image, InstructionSet isa,
TimingLogger* timings) {
@@ -74,10 +118,15 @@
TimingLogger::ScopedTiming t("Runtime Setup", timings);
const char *isa_name = GetInstructionSetString(isa);
- std::string image_filename(GetSystemImageFilename(image_location.c_str(), isa));
+ std::string image_filename;
+ if (!LocationToFilename(image_location, isa, &image_filename)) {
+ LOG(ERROR) << "Unable to find image at location " << image_location;
+ return false;
+ }
std::unique_ptr<File> input_image(OS::OpenFileForReading(image_filename.c_str()));
if (input_image.get() == nullptr) {
- LOG(ERROR) << "unable to open input image file.";
+ LOG(ERROR) << "unable to open input image file at " << image_filename
+ << " for location " << image_location;
return false;
}
int64_t image_len = input_image->GetLength();
@@ -125,7 +174,7 @@
delta, timings);
t.NewTiming("Patching files");
if (!p.PatchImage()) {
- LOG(INFO) << "Failed to patch image file " << input_image->GetPath();
+ LOG(ERROR) << "Failed to patch image file " << input_image->GetPath();
return false;
}
@@ -159,10 +208,15 @@
isa = ElfISAToInstructionSet(elf_hdr.e_machine);
}
const char* isa_name = GetInstructionSetString(isa);
- std::string image_filename(GetSystemImageFilename(image_location.c_str(), isa));
+ std::string image_filename;
+ if (!LocationToFilename(image_location, isa, &image_filename)) {
+ LOG(ERROR) << "Unable to find image at location " << image_location;
+ return false;
+ }
std::unique_ptr<File> input_image(OS::OpenFileForReading(image_filename.c_str()));
if (input_image.get() == nullptr) {
- LOG(ERROR) << "unable to open input image file.";
+ LOG(ERROR) << "unable to open input image file at " << image_filename
+ << " for location " << image_location;
return false;
}
int64_t image_len = input_image->GetLength();
@@ -216,11 +270,11 @@
delta, timings);
t.NewTiming("Patching files");
if (!p.PatchElf()) {
- LOG(INFO) << "Failed to patch oat file " << input_oat->GetPath();
+ LOG(ERROR) << "Failed to patch oat file " << input_oat->GetPath();
return false;
}
if (!p.PatchImage()) {
- LOG(INFO) << "Failed to patch image file " << input_image->GetPath();
+ LOG(ERROR) << "Failed to patch image file " << input_image->GetPath();
return false;
}
@@ -236,6 +290,7 @@
bool PatchOat::WriteElf(File* out) {
TimingLogger::ScopedTiming t("Writing Elf File", timings_);
+
CHECK(oat_file_.get() != nullptr);
CHECK(out != nullptr);
size_t expect = oat_file_->Size();
@@ -250,6 +305,11 @@
bool PatchOat::WriteImage(File* out) {
TimingLogger::ScopedTiming t("Writing image File", timings_);
+ std::string error_msg;
+
+ ScopedFlock img_flock;
+ img_flock.Init(out, &error_msg);
+
CHECK(image_ != nullptr);
CHECK(out != nullptr);
size_t expect = image_->Size();
@@ -406,16 +466,13 @@
return true;
}
-bool PatchOat::CheckOatFile() {
- Elf32_Shdr* patches_sec = oat_file_->FindSectionByName(".oat_patches");
- if (patches_sec == nullptr) {
+template <typename ptr_t>
+bool PatchOat::CheckOatFile(const Elf32_Shdr& patches_sec) {
+ if (patches_sec.sh_type != SHT_OAT_PATCH) {
return false;
}
- if (patches_sec->sh_type != SHT_OAT_PATCH) {
- return false;
- }
- uintptr_t* patches = reinterpret_cast<uintptr_t*>(oat_file_->Begin() + patches_sec->sh_offset);
- uintptr_t* patches_end = patches + (patches_sec->sh_size/sizeof(uintptr_t));
+ ptr_t* patches = reinterpret_cast<ptr_t*>(oat_file_->Begin() + patches_sec.sh_offset);
+ ptr_t* patches_end = patches + (patches_sec.sh_size / sizeof(ptr_t));
Elf32_Shdr* oat_data_sec = oat_file_->FindSectionByName(".rodata");
Elf32_Shdr* oat_text_sec = oat_file_->FindSectionByName(".text");
if (oat_data_sec == nullptr) {
@@ -437,19 +494,50 @@
return true;
}
+bool PatchOat::PatchOatHeader() {
+ Elf32_Shdr *rodata_sec = oat_file_->FindSectionByName(".rodata");
+ if (rodata_sec == nullptr) {
+ return false;
+ }
+ OatHeader* oat_header = reinterpret_cast<OatHeader*>(oat_file_->Begin() + rodata_sec->sh_offset);
+ if (!oat_header->IsValid()) {
+ LOG(ERROR) << "Elf file " << oat_file_->GetFile().GetPath() << " has an invalid oat header";
+ return false;
+ }
+ oat_header->RelocateOat(delta_);
+ return true;
+}
+
bool PatchOat::PatchElf() {
- TimingLogger::ScopedTiming t("Fixup Elf Headers", timings_);
+ TimingLogger::ScopedTiming t("Fixup Elf Text Section", timings_);
+ if (!PatchTextSection()) {
+ return false;
+ }
+
+ if (!PatchOatHeader()) {
+ return false;
+ }
+
+ bool need_fixup = false;
+ t.NewTiming("Fixup Elf Headers");
// Fixup Phdr's
for (unsigned int i = 0; i < oat_file_->GetProgramHeaderNum(); i++) {
Elf32_Phdr& hdr = oat_file_->GetProgramHeader(i);
- if (hdr.p_vaddr != 0) {
+ if (hdr.p_vaddr != 0 && hdr.p_vaddr != hdr.p_offset) {
+ need_fixup = true;
hdr.p_vaddr += delta_;
}
- if (hdr.p_paddr != 0) {
+ if (hdr.p_paddr != 0 && hdr.p_paddr != hdr.p_offset) {
+ need_fixup = true;
hdr.p_paddr += delta_;
}
}
- // Fixup Shdr's
+ if (!need_fixup) {
+ // This was never passed through ElfFixup, so all headers/symbols still have their offset as
+ // their addr. Therefore we do not need to update these parts.
+ return true;
+ }
+ t.NewTiming("Fixup Section Headers");
for (unsigned int i = 0; i < oat_file_->GetSectionHeaderNum(); i++) {
Elf32_Shdr& hdr = oat_file_->GetSectionHeader(i);
if (hdr.sh_addr != 0) {
@@ -457,7 +545,7 @@
}
}
- // Fixup Dynamics.
+ t.NewTiming("Fixup Dynamics");
for (Elf32_Word i = 0; i < oat_file_->GetDynamicNum(); i++) {
Elf32_Dyn& dyn = oat_file_->GetDynamic(i);
if (IsDynamicSectionPointer(dyn.d_tag, oat_file_->GetHeader().e_machine)) {
@@ -481,12 +569,6 @@
}
}
- t.NewTiming("Fixup Elf Text Section");
- // Fixup text
- if (!PatchTextSection()) {
- return false;
- }
-
return true;
}
@@ -511,13 +593,31 @@
bool PatchOat::PatchTextSection() {
Elf32_Shdr* patches_sec = oat_file_->FindSectionByName(".oat_patches");
if (patches_sec == nullptr) {
- LOG(INFO) << ".oat_patches section not found. Aborting patch";
+ LOG(ERROR) << ".oat_patches section not found. Aborting patch";
return false;
}
- DCHECK(CheckOatFile()) << "Oat file invalid";
- CHECK_EQ(patches_sec->sh_type, SHT_OAT_PATCH) << "Unexpected type of .oat_patches";
- uintptr_t* patches = reinterpret_cast<uintptr_t*>(oat_file_->Begin() + patches_sec->sh_offset);
- uintptr_t* patches_end = patches + (patches_sec->sh_size/sizeof(uintptr_t));
+ if (patches_sec->sh_type != SHT_OAT_PATCH) {
+ LOG(ERROR) << "Unexpected type of .oat_patches";
+ return false;
+ }
+
+ switch (patches_sec->sh_entsize) {
+ case sizeof(uint32_t):
+ return PatchTextSection<uint32_t>(*patches_sec);
+ case sizeof(uint64_t):
+ return PatchTextSection<uint64_t>(*patches_sec);
+ default:
+ LOG(ERROR) << ".oat_patches Entsize of " << patches_sec->sh_entsize << "bits "
+ << "is not valid";
+ return false;
+ }
+}
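+// The entry size recorded for .oat_patches tells us whether the patch offsets were written as
+// 32-bit or 64-bit values, letting the same code path patch oat files for either pointer size.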
+
+template <typename ptr_t>
+bool PatchOat::PatchTextSection(const Elf32_Shdr& patches_sec) {
+ DCHECK(CheckOatFile<ptr_t>(patches_sec)) << "Oat file invalid";
+ ptr_t* patches = reinterpret_cast<ptr_t*>(oat_file_->Begin() + patches_sec.sh_offset);
+ ptr_t* patches_end = patches + (patches_sec.sh_size / sizeof(ptr_t));
Elf32_Shdr* oat_text_sec = oat_file_->FindSectionByName(".text");
CHECK(oat_text_sec != nullptr);
byte* to_patch = oat_file_->Begin() + oat_text_sec->sh_offset;
@@ -529,7 +629,6 @@
CHECK_LT(reinterpret_cast<uintptr_t>(patch_loc), to_patch_end);
*patch_loc += delta_;
}
-
return true;
}
@@ -585,9 +684,6 @@
UsageError(" --output-oat-file=<file.oat>: Specifies the exact file to write the patched oat");
UsageError(" file to.");
UsageError("");
- UsageError(" --output-oat-location=<file.oat>: Specifies the 'location' to write the patched");
- UsageError(" oat file to. If used one must also specify the --instruction-set");
- UsageError("");
UsageError(" --output-oat-fd=<file-descriptor>: Specifies the file-descriptor to write the");
UsageError(" the patched oat file to.");
UsageError("");
@@ -597,9 +693,6 @@
UsageError(" --output-image-fd=<file-descriptor>: Specifies the file-descriptor to write the");
UsageError(" the patched image file to.");
UsageError("");
- UsageError(" --output-image-location=<file.art>: Specifies the 'location' to write the patched");
- UsageError(" image file to. If used one must also specify the --instruction-set");
- UsageError("");
UsageError(" --orig-base-offset=<original-base-offset>: Specify the base offset the input file");
UsageError(" was compiled with. This is needed if one is specifying a --base-offset");
UsageError("");
@@ -614,7 +707,12 @@
UsageError("");
UsageError(" --patched-image-location=<file.art>: Use the same patch delta as was used to");
UsageError(" patch the given image location. If used one must also specify the");
- UsageError(" --instruction-set flag.");
+ UsageError(" --instruction-set flag. It will search for this image in the same way that");
+ UsageError(" is done when loading one.");
+ UsageError("");
+ UsageError(" --lock-output: Obtain a flock on output oat file before starting.");
+ UsageError("");
+ UsageError(" --no-lock-output: Do not attempt to obtain a flock on output oat file.");
UsageError("");
UsageError(" --dump-timings: dump out patch timing information");
UsageError("");
@@ -658,7 +756,15 @@
return OS::OpenFileReadWrite(name);
} else {
*created = true;
- return OS::CreateEmptyFile(name);
+ std::unique_ptr<File> f(OS::CreateEmptyFile(name));
+ if (f.get() != nullptr) {
+ if (fchmod(f->Fd(), 0644) != 0) {
+ PLOG(ERROR) << "Unable to make " << name << " world readable";
+ unlink(name);
+ return nullptr;
+ }
+ }
+ return f.release();
}
}
@@ -690,11 +796,9 @@
bool have_input_oat = false;
std::string input_image_location;
std::string output_oat_filename;
- std::string output_oat_location;
int output_oat_fd = -1;
bool have_output_oat = false;
std::string output_image_filename;
- std::string output_image_location;
int output_image_fd = -1;
bool have_output_image = false;
uintptr_t base_offset = 0;
@@ -706,6 +810,7 @@
std::string patched_image_filename;
std::string patched_image_location;
bool dump_timings = kIsDebugBuild;
+ bool lock_output = true;
for (int i = 0; i < argc; i++) {
const StringPiece option(argv[i]);
@@ -756,24 +861,15 @@
}
} else if (option.starts_with("--input-image-location=")) {
input_image_location = option.substr(strlen("--input-image-location=")).data();
- } else if (option.starts_with("--output-oat-location=")) {
- if (have_output_oat) {
- Usage("Only one of --output-oat-file, --output-oat-location and --output-oat-fd may "
- "be used.");
- }
- have_output_oat = true;
- output_oat_location = option.substr(strlen("--output-oat-location=")).data();
} else if (option.starts_with("--output-oat-file=")) {
if (have_output_oat) {
- Usage("Only one of --output-oat-file, --output-oat-location and --output-oat-fd may "
- "be used.");
+ Usage("Only one of --output-oat-file, and --output-oat-fd may be used.");
}
have_output_oat = true;
output_oat_filename = option.substr(strlen("--output-oat-file=")).data();
} else if (option.starts_with("--output-oat-fd=")) {
if (have_output_oat) {
- Usage("Only one of --output-oat-file, --output-oat-location and --output-oat-fd may "
- "be used.");
+ Usage("Only one of --output-oat-file, --output-oat-fd may be used.");
}
have_output_oat = true;
const char* oat_fd_str = option.substr(strlen("--output-oat-fd=")).data();
@@ -783,24 +879,15 @@
if (output_oat_fd < 0) {
Usage("--output-oat-fd pass a negative value %d", output_oat_fd);
}
- } else if (option.starts_with("--output-image-location=")) {
- if (have_output_image) {
- Usage("Only one of --output-image-file, --output-image-location and --output-image-fd may "
- "be used.");
- }
- have_output_image = true;
- output_image_location= option.substr(strlen("--output-image-location=")).data();
} else if (option.starts_with("--output-image-file=")) {
if (have_output_image) {
- Usage("Only one of --output-image-file, --output-image-location and --output-image-fd may "
- "be used.");
+ Usage("Only one of --output-image-file, and --output-image-fd may be used.");
}
have_output_image = true;
output_image_filename = option.substr(strlen("--output-image-file=")).data();
} else if (option.starts_with("--output-image-fd=")) {
if (have_output_image) {
- Usage("Only one of --output-image-file, --output-image-location and --output-image-fd "
- "may be used.");
+ Usage("Only one of --output-image-file, and --output-image-fd may be used.");
}
have_output_image = true;
const char* image_fd_str = option.substr(strlen("--output-image-fd=")).data();
@@ -833,6 +920,10 @@
patched_image_location = option.substr(strlen("--patched-image-location=")).data();
} else if (option.starts_with("--patched-image-file=")) {
patched_image_filename = option.substr(strlen("--patched-image-file=")).data();
+ } else if (option == "--lock-output") {
+ lock_output = true;
+ } else if (option == "--no-lock-output") {
+ lock_output = false;
} else if (option == "--dump-timings") {
dump_timings = true;
} else if (option == "--no-dump-timings") {
@@ -882,34 +973,36 @@
if (!isa_set) {
Usage("specifying a location requires specifying an instruction set");
}
- input_oat_filename = GetSystemImageFilename(input_oat_location.c_str(), isa);
+ if (!LocationToFilename(input_oat_location, isa, &input_oat_filename)) {
+ Usage("Unable to find filename for input oat location %s", input_oat_location.c_str());
+ }
if (debug) {
LOG(INFO) << "Using input-oat-file " << input_oat_filename;
}
}
- if (!output_oat_location.empty()) {
- if (!isa_set) {
- Usage("specifying a location requires specifying an instruction set");
- }
- output_oat_filename = GetSystemImageFilename(output_oat_location.c_str(), isa);
- if (debug) {
- LOG(INFO) << "Using output-oat-file " << output_oat_filename;
- }
- }
- if (!output_image_location.empty()) {
- if (!isa_set) {
- Usage("specifying a location requires specifying an instruction set");
- }
- output_image_filename = GetSystemImageFilename(output_image_location.c_str(), isa);
- if (debug) {
- LOG(INFO) << "Using output-image-file " << output_image_filename;
- }
- }
if (!patched_image_location.empty()) {
if (!isa_set) {
Usage("specifying a location requires specifying an instruction set");
}
- patched_image_filename = GetSystemImageFilename(patched_image_location.c_str(), isa);
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename;
+ bool has_cache = false;
+ bool has_android_data_unused = false;
+ if (!gc::space::ImageSpace::FindImageFilename(patched_image_location.c_str(), isa,
+ &system_filename, &has_system, &cache_filename,
+ &has_android_data_unused, &has_cache)) {
+ Usage("Unable to determine image file for location %s", patched_image_location.c_str());
+ }
+ if (has_cache) {
+ patched_image_filename = cache_filename;
+ } else if (has_system) {
+ LOG(WARNING) << "Only image file found was in /system for image location "
+ << patched_image_location;
+ patched_image_filename = system_filename;
+ } else {
+ Usage("Unable to determine image file for location %s", patched_image_location.c_str());
+ }
if (debug) {
LOG(INFO) << "Using patched-image-file " << patched_image_filename;
}
@@ -950,6 +1043,9 @@
CHECK(!input_image_location.empty());
if (output_image_fd != -1) {
+ if (output_image_filename.empty()) {
+ output_image_filename = "output-image-file";
+ }
output_image.reset(new File(output_image_fd, output_image_filename));
} else {
CHECK(!output_image_filename.empty());
@@ -961,13 +1057,22 @@
if (have_oat_files) {
if (input_oat_fd != -1) {
+ if (input_oat_filename.empty()) {
+ input_oat_filename = "input-oat-file";
+ }
input_oat.reset(new File(input_oat_fd, input_oat_filename));
} else {
CHECK(!input_oat_filename.empty());
input_oat.reset(OS::OpenFileForReading(input_oat_filename.c_str()));
+ if (input_oat.get() == nullptr) {
+ LOG(ERROR) << "Could not open input oat file: " << strerror(errno);
+ }
}
if (output_oat_fd != -1) {
+ if (output_oat_filename.empty()) {
+ output_oat_filename = "output-oat-file";
+ }
output_oat.reset(new File(output_oat_fd, output_oat_filename));
} else {
CHECK(!output_oat_filename.empty());
@@ -993,8 +1098,26 @@
}
};
+ if ((have_oat_files && (input_oat.get() == nullptr || output_oat.get() == nullptr)) ||
+ (have_image_files && output_image.get() == nullptr)) {
+ cleanup(false);
+ return EXIT_FAILURE;
+ }
+
+ ScopedFlock output_oat_lock;
+ if (lock_output) {
+ std::string error_msg;
+ if (have_oat_files && !output_oat_lock.Init(output_oat.get(), &error_msg)) {
+ LOG(ERROR) << "Unable to lock output oat " << output_image->GetPath() << ": " << error_msg;
+ cleanup(false);
+ return EXIT_FAILURE;
+ }
+ }
+
if (debug) {
- LOG(INFO) << "moving offset by " << base_delta << " (0x" << std::hex << base_delta << ") bytes";
+ LOG(INFO) << "moving offset by " << base_delta
+ << " (0x" << std::hex << base_delta << ") bytes or "
+ << std::dec << (base_delta/kPageSize) << " pages.";
}
bool ret;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index a63e6f4..326333e 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -74,11 +74,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool InHeap(mirror::Object*);
- bool CheckOatFile();
-
// Patches oat in place, modifying the oat_file given to the constructor.
bool PatchElf();
bool PatchTextSection();
+ // Templatized version to actually do the patching with the right sized offsets.
+ template <typename ptr_t> bool PatchTextSection(const Elf32_Shdr& patches_sec);
+ template <typename ptr_t> bool CheckOatFile(const Elf32_Shdr& patches_sec);
+ bool PatchOatHeader();
bool PatchSymbols(Elf32_Shdr* section);
bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/Android.mk b/runtime/Android.mk
index f2d3c8e..1e037f5 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -81,6 +81,7 @@
interpreter/interpreter.cc \
interpreter/interpreter_common.cc \
interpreter/interpreter_switch_impl.cc \
+ java_vm_ext.cc \
jdwp/jdwp_event.cc \
jdwp/jdwp_expand_buf.cc \
jdwp/jdwp_handler.cc \
@@ -88,6 +89,7 @@
jdwp/jdwp_request.cc \
jdwp/jdwp_socket.cc \
jdwp/object_registry.cc \
+ jni_env_ext.cc \
jni_internal.cc \
jobject_comparator.cc \
mem_map.cc \
@@ -104,6 +106,7 @@
mirror/string.cc \
mirror/throwable.cc \
monitor.cc \
+ native_bridge.cc \
native/dalvik_system_DexFile.cc \
native/dalvik_system_VMDebug.cc \
native/dalvik_system_VMRuntime.cc \
@@ -238,6 +241,7 @@
arch/x86/context_x86.cc \
arch/x86/entrypoints_init_x86.cc \
arch/x86/jni_entrypoints_x86.S \
+ arch/x86/memcmp16_x86.S \
arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
arch/x86/thread_x86.cc \
@@ -246,15 +250,18 @@
LIBART_TARGET_SRC_FILES_x86 := \
$(LIBART_SRC_FILES_x86)
+# Note that the use of fault_handler_x86.cc here is not a mistake. This file is
+# shared between the x86 and x86_64 architectures.
LIBART_SRC_FILES_x86_64 := \
arch/x86_64/context_x86_64.cc \
arch/x86_64/entrypoints_init_x86_64.cc \
arch/x86_64/jni_entrypoints_x86_64.S \
+ arch/x86_64/memcmp16_x86_64.S \
arch/x86_64/portable_entrypoints_x86_64.S \
arch/x86_64/quick_entrypoints_x86_64.S \
arch/x86_64/thread_x86_64.cc \
monitor_pool.cc \
- arch/x86_64/fault_handler_x86_64.cc
+ arch/x86/fault_handler_x86.cc
LIBART_TARGET_SRC_FILES_x86_64 := \
$(LIBART_SRC_FILES_x86_64) \
@@ -292,6 +299,7 @@
dex_file.h \
dex_instruction.h \
gc/collector/gc_type.h \
+ gc/collector_type.h \
gc/space/space.h \
gc/heap.h \
indirect_reference_table.h \
@@ -340,6 +348,7 @@
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart
+ LOCAL_FDO_SUPPORT := true
else # debug
LOCAL_MODULE := libartd
endif
@@ -370,7 +379,9 @@
LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
LOCAL_CFLAGS := $$(LIBART_CFLAGS)
- LOCAL_LDFLAGS := $$(LIBART_LDFLAGS)
+ # TODO(danalbert): Work around the test failures caused by removing -Bsymbolic
+ # by turning it back on for libart until I get a chance to look at them.
+ LOCAL_LDFLAGS := $$(LIBART_LDFLAGS) -Wl,-Bsymbolic
ifeq ($$(art_target_or_host),target)
LOCAL_LDFLAGS += $$(LIBART_TARGET_LDFLAGS)
else
@@ -416,6 +427,7 @@
LOCAL_STATIC_LIBRARIES := libziparchive libz
else # host
LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
+ LOCAL_SHARED_LIBRARIES += libsigchain
LOCAL_LDLIBS += -ldl -lpthread
ifeq ($$(HOST_OS),linux)
LOCAL_LDLIBS += -lrt
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 2a82129..be28544 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -46,9 +46,10 @@
return instr_size;
}
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->arm_sp);
VLOG(signals) << "sp: " << *out_sp;
@@ -60,7 +61,7 @@
// get the method from the top of the stack. However it's in r0.
uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - kArmStackOverflowReservedBytes);
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<mirror::ArtMethod*>(sc->arm_r0);
} else {
@@ -114,7 +115,7 @@
uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
uint16_t checkinst2 = 0x6800;
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->arm_pc);
uint8_t* ptr1 = ptr2 - 4;
@@ -178,7 +179,7 @@
// to the overflow region below the protected region.
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
VLOG(signals) << "sigcontext: " << std::hex << sc;
@@ -191,7 +192,7 @@
VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
", fault_addr: " << fault_addr;
- uintptr_t overflow_addr = sp - kArmStackOverflowReservedBytes;
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm);
Thread* self = reinterpret_cast<Thread*>(sc->arm_r9);
CHECK_EQ(self, Thread::Current());
@@ -205,7 +206,7 @@
}
// We know this is a stack overflow. We need to move the sp to the overflow region
- // the exists below the protected region. Determine the address of the next
+ // that exists below the protected region. Determine the address of the next
// available valid address below the protected region.
uintptr_t prevsp = sp;
sp = pregion;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 4939610..86cb16a 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -365,8 +365,9 @@
ARM_ENTRY art_quick_do_long_jump
vldm r1, {s0-s31} @ load all fprs from argument fprs_
ldr r2, [r0, #60] @ r2 = r15 (PC from gprs_ 60=4*15)
+ ldr r14, [r0, #56] @ (LR from gprs_ 56=4*14)
add r0, r0, #12 @ increment r0 to skip gprs_[0..2] 12=4*3
- ldm r0, {r3-r14} @ load remaining gprs from argument gprs_
+ ldm r0, {r3-r13} @ load remaining gprs from argument gprs_
mov r0, #0 @ clear result registers r0 and r1
mov r1, #0
bx r2 @ do long jump
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 83cacac..7595e94 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -63,6 +63,22 @@
ArmCalleeSaveFpSpills(type));
}
+constexpr size_t ArmCalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
+ return ArmCalleeSaveFrameSize(type) -
+ (POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
+ POPCOUNT(ArmCalleeSaveFpSpills(type))) * kArmPointerSize;
+}
+
+constexpr size_t ArmCalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
+ return ArmCalleeSaveFrameSize(type) -
+ POPCOUNT(ArmCalleeSaveCoreSpills(type)) * kArmPointerSize;
+}
+
+constexpr size_t ArmCalleeSaveLrOffset(Runtime::CalleeSaveType type) {
+ return ArmCalleeSaveFrameSize(type) -
+ POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * kArmPointerSize;
+}
+
} // namespace arm
} // namespace art
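
The three offset helpers added above are pure bit arithmetic: the FP spills sit at the bottom of the spill area, the core spills sit above them, and LR is the highest-numbered core spill, so masking the core spill set with -(1 << LR) keeps only LR and anything numbered above it. The standalone sketch below illustrates that arithmetic; the spill mask, slot size, and frame rounding are made-up assumptions for illustration, not ART's real values.

    // Standalone sketch of the *Offset helpers above, with a hypothetical
    // core spill mask (r5-r8, r10, r11, LR), no FP spills, 4-byte slots,
    // and a frame that holds one method slot at the bottom, rounded to 16.
    #include <cstddef>
    #include <cstdint>

    constexpr int kLr = 14;
    constexpr uint32_t kCoreSpills =
        (1u << 5) | (1u << 6) | (1u << 7) | (1u << 8) |
        (1u << 10) | (1u << 11) | (1u << kLr);
    constexpr uint32_t kFpSpills = 0u;
    constexpr size_t kPointerSize = 4;

    constexpr int PopCount(uint32_t x) {
      return x == 0u ? 0 : static_cast<int>(x & 1u) + PopCount(x >> 1);
    }

    constexpr size_t FrameSize() {
      // Spill slots plus the method slot, rounded up to 16-byte alignment.
      return ((PopCount(kCoreSpills) + PopCount(kFpSpills) + 1) * kPointerSize + 15u) & ~size_t(15);
    }

    constexpr size_t Fpr1Offset() {  // lowest spill slot: every spill lives at or above it
      return FrameSize() - (PopCount(kCoreSpills) + PopCount(kFpSpills)) * kPointerSize;
    }
    constexpr size_t Gpr1Offset() {  // core spills sit above the FP spills
      return FrameSize() - PopCount(kCoreSpills) * kPointerSize;
    }
    constexpr size_t LrOffset() {    // -(1 << LR) keeps LR and any higher-numbered spill
      return FrameSize() - PopCount(kCoreSpills & static_cast<uint32_t>(-(1 << kLr))) * kPointerSize;
    }

    static_assert(FrameSize() == 32, "7 core spills + method slot -> 32 bytes");
    static_assert(Gpr1Offset() == 4, "core spills start right above the method slot");
    static_assert(LrOffset() == 28, "LR is the topmost slot in the frame");
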
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index 55de1ec..be167fa 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -24,15 +24,22 @@
// Register holding suspend check count down.
// 32-bit is enough for the suspend register.
#define wSUSPEND w19
+// xSUSPEND is 64-bit view of wSUSPEND.
+// Used to save/restore the register scratched by managed code.
+#define xSUSPEND x19
// Register holding Thread::Current().
#define xSELF x18
+// x18 is not preserved by aapcs64; save it in xETR (External Thread Reg) so it can be restored and used later.
+#define xETR x21
// Frame Pointer
#define xFP x29
// Link Register
#define xLR x30
// Define the intraprocedural linkage temporary registers.
#define xIP0 x16
+#define wIP0 w16
#define xIP1 x17
+#define wIP1 w17
.macro ENTRY name
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index f353408..7f0f56f 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -19,28 +19,26 @@
#include "asm_support.h"
-// TODO Thread offsets need to be checked when on Aarch64.
-
// Note: these callee save methods loads require read barriers.
-// Offset of field Runtime::callee_save_methods_[kSaveAll]
+// Offset of field Runtime::callee_save_methods_[kSaveAll] verified in InitCpu
#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
-// Offset of field Runtime::callee_save_methods_[kRefsOnly]
+// Offset of field Runtime::callee_save_methods_[kRefsOnly] verified in InitCpu
#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
-// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
+// Offset of field Runtime::callee_save_methods_[kRefsAndArgs] verified in InitCpu
#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
-// Offset of field Thread::suspend_count_ verified in InitCpu
+// Offset of field Thread::suspend_count_
#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::card_table_ verified in InitCpu
+// Offset of field Thread::card_table_
#define THREAD_CARD_TABLE_OFFSET 112
-// Offset of field Thread::exception_ verified in InitCpu
+// Offset of field Thread::exception_
#define THREAD_EXCEPTION_OFFSET 120
-// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
+// Offset of field Thread::thin_lock_thread_id_
#define THREAD_ID_OFFSET 12
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 368
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 176
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 304
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 96
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 224
// Expected size of a heap reference
#define HEAP_REFERENCE_SIZE 4
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 74c3023..3a7e689 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -21,7 +21,15 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "registers_arm64.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "thread.h"
+#include "thread-inl.h"
+extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_implicit_suspend();
//
// ARM64 specific fault handler functions.
@@ -29,19 +37,163 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ *out_sp = static_cast<uintptr_t>(sc->sp);
+ VLOG(signals) << "sp: " << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However it's in x0.
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm64));
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(sc->regs[0]);
+ } else {
+ // The method is at the top of the stack.
+ *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr();
+ }
+
+ // Work out the return PC. This will be the address of the instruction
+ // following the faulting ldr/str instruction.
+ VLOG(signals) << "pc: " << std::hex
+ << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->pc));
+
+ *out_return_pc = sc->pc + 4;
}
bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ // The code that looks for the catch location needs to know the value of the
+ // PC at the point of call. For Null checks we insert a GC map that is immediately after
+ // the load/store instruction that might cause the fault.
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+
+ sc->regs[30] = sc->pc + 4; // LR needs to point to gc map location
+
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
}
+// A suspend check is done using the following instruction sequence:
+// 0xf7223228: f9405640 ldr x0, [x18, #168]
+// .. some intervening instructions
+// 0xf7223230: f9400000 ldr x0, [x0]
+
+// The offset from x18 is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault (at PC-4 and PC).
bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the ldr x0,[x18,#xxx]
+ // where xxx is the offset of the suspend trigger.
+ uint32_t checkinst1 = 0xf9400240 | (Thread::ThreadSuspendTriggerOffset<8>().Int32Value() << 7);
+ uint32_t checkinst2 = 0xf9400000;
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->pc);
+ uint8_t* ptr1 = ptr2 - 4;
+ VLOG(signals) << "checking suspend";
+
+ uint32_t inst2 = *reinterpret_cast<uint32_t*>(ptr2);
+ VLOG(signals) << "inst2: " << std::hex << inst2 << " checkinst2: " << checkinst2;
+ if (inst2 != checkinst2) {
+ // Second instruction is not good, not ours.
+ return false;
+ }
+
+ // The first instruction can be a little way up the stream due to load hoisting
+ // in the compiler.
+ uint8_t* limit = ptr1 - 80; // Compiler will hoist to a max of 20 instructions.
+ bool found = false;
+ while (ptr1 > limit) {
+ uint32_t inst1 = *reinterpret_cast<uint32_t*>(ptr1);
+ VLOG(signals) << "inst1: " << std::hex << inst1 << " checkinst1: " << checkinst1;
+ if (inst1 == checkinst1) {
+ found = true;
+ break;
+ }
+ ptr1 -= 4;
+ }
+ if (found) {
+ VLOG(signals) << "suspend check match";
+ // This is a suspend check. Arrange for the signal handler to return to
+ // art_quick_implicit_suspend. Also set LR so that after the suspend check it
+ // will resume at the instruction after this one (current PC + 4). PC points to the
+ // ldr x0,[x0,#0] instruction (x0 will be 0, set by the trigger).
+
+ sc->regs[30] = sc->pc + 4;
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_implicit_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ VLOG(signals) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
return false;
}
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
+ VLOG(signals) << "sigcontext: " << std::hex << sc;
+
+ uintptr_t sp = sc->sp;
+ VLOG(signals) << "sp: " << std::hex << sp;
+
+ uintptr_t fault_addr = sc->fault_address;
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm64);
+
+ Thread* self = reinterpret_cast<Thread*>(sc->regs[art::arm64::TR]);
+ CHECK_EQ(self, Thread::Current());
+ uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
+ Thread::kStackOverflowProtectedSize;
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ // We know this is a stack overflow. We need to move the sp to the overflow region
+ // that exists below the protected region. Determine the address of the next
+ // available valid address below the protected region.
+ uintptr_t prevsp = sp;
+ sp = pregion;
+ VLOG(signals) << "setting sp to overflow region at " << std::hex << sp;
+
+ // Since the compiler puts the implicit overflow
+ // check before the callee save instructions, the SP is already pointing to
+ // the previous frame.
+ VLOG(signals) << "previous frame: " << std::hex << prevsp;
+
+ // Now establish the stack pointer for the signal return.
+ sc->sp = prevsp;
+
+ // Tell the stack overflow code where the new stack pointer should be.
+ sc->regs[art::arm64::IP0] = sp; // aka x16
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ // The value of LR must be the same as it was when we entered the code that
+ // caused this fault. This will be inserted into a callee save frame by
+ // the function to which this handler returns (art_quick_throw_stack_overflow_from_signal).
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+
+ // The kernel will now return to the address in sc->pc.
+ return true;
}
} // namespace art
+
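
A note on the magic constants in SuspensionHandler::Action above: 0xf9400240 is the A64 encoding of ldr x0, [x18] (LDR immediate, unsigned offset, 64-bit), whose imm12 field sits at bits 10-21 and holds the byte offset divided by 8. ORing in offset << 7 is therefore the same as placing offset / 8 into that field. The small standalone check below restates that arithmetic for illustration; it is not ART code.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // A64 "ldr x0, [x18, #imm]" with imm = 0: Rt = 0, Rn = 18 (18 << 5 = 0x240).
    constexpr uint32_t kLdrX0FromX18 = 0xf9400240;

    uint32_t EncodeSuspendCheckLoad(uint32_t byte_offset) {
      assert(byte_offset % 8 == 0 && byte_offset < 8 * 4096);
      // (byte_offset / 8) << 10 == byte_offset << 7 when the offset is 8-aligned.
      return kLdrX0FromX18 | (byte_offset << 7);
    }

    uint32_t DecodeByteOffset(uint32_t insn) {
      return ((insn >> 10) & 0xfffu) * 8;  // pull imm12 back out and rescale
    }

    int main() {
      // The example from the comment above: a 168-byte suspend-trigger offset
      // assembles to 0xf9405640.
      uint32_t insn = EncodeSuspendCheckLoad(168);
      printf("insn = 0x%08x, offset = %u\n", insn, DecodeByteOffset(insn));
      return 0;
    }
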
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 2201b55..04be4a2 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -24,101 +24,22 @@
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- adrp x9, :got:_ZN3art7Runtime9instance_E
- ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr x9,[x9] // x9 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
- // x9 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
+ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kSaveAll] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr x9, [x9, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
-
- sub sp, sp, #368
- .cfi_adjust_cfa_offset 368
-
- // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 368)
-#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
-#endif
-
- // FP args
- stp d0, d1, [sp, #8]
- stp d2, d3, [sp, #24]
- stp d4, d5, [sp, #40]
- stp d6, d7, [sp, #56]
-
- // FP callee-saves
- stp d8, d9, [sp, #72]
- stp d10, d11, [sp, #88]
- stp d12, d13, [sp, #104]
- stp d14, d15, [sp, #120]
-
- stp d16, d17, [sp, #136]
- stp d18, d19, [sp, #152]
- stp d20, d21, [sp, #168]
- stp d22, d23, [sp, #184]
- stp d24, d25, [sp, #200]
- stp d26, d27, [sp, #216]
- stp d28, d29, [sp, #232]
- stp d30, d31, [sp, #248]
-
-
- // Callee saved.
- stp xSELF, x19, [sp, #264]
- .cfi_rel_offset x18, 264
- .cfi_rel_offset x19, 272
-
- stp x20, x21, [sp, #280]
- .cfi_rel_offset x20, 280
- .cfi_rel_offset x21, 288
-
- stp x22, x23, [sp, #296]
- .cfi_rel_offset x22, 296
- .cfi_rel_offset x23, 304
-
- stp x24, x25, [sp, #312]
- .cfi_rel_offset x24, 312
- .cfi_rel_offset x25, 320
-
- stp x26, x27, [sp, #328]
- .cfi_rel_offset x26, 328
- .cfi_rel_offset x27, 336
-
- stp x28, xFP, [sp, #344] // Save FP.
- .cfi_rel_offset x28, 344
- .cfi_rel_offset x29, 352
-
- str xLR, [sp, #360]
- .cfi_rel_offset x30, 360
-
- // Loads appropriate callee-save-method
- str x9, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
-
-.endm
-
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly).
- */
-// WIP.
-.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- adrp x9, :got:_ZN3art7Runtime9instance_E
- ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
-
- // Our registers aren't intermixed - just spill in order.
- ldr x9,[x9] // x9 = & (art::Runtime * art::Runtime.instance_) .
-
- // x9 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
- THIS_LOAD_REQUIRES_READ_BARRIER
- ldr x9, [x9, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
sub sp, sp, #176
.cfi_adjust_cfa_offset 176
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 176)
-#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
+#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
// FP callee-saves
@@ -127,11 +48,12 @@
stp d12, d13, [sp, #40]
stp d14, d15, [sp, #56]
- // Callee saved.
- stp xSELF, x19, [sp, #72]
+ // Reserved registers
+ stp xSELF, xSUSPEND, [sp, #72]
.cfi_rel_offset x18, 72
.cfi_rel_offset x19, 80
+ // callee-saves
stp x20, x21, [sp, #88]
.cfi_rel_offset x20, 88
.cfi_rel_offset x21, 96
@@ -148,7 +70,7 @@
.cfi_rel_offset x26, 136
.cfi_rel_offset x27, 144
- stp x28, xFP, [sp, #152] // Save FP.
+ stp x28, x29, [sp, #152]
.cfi_rel_offset x28, 152
.cfi_rel_offset x29, 160
@@ -156,51 +78,107 @@
.cfi_rel_offset x30, 168
// Loads appropriate callee-save-method
- str x9, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kSaveAll]
.endm
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsOnly).
+ */
+.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
+
+ // Our registers aren't intermixed - just spill in order.
+ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
+
+ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsOnly] .
+ THIS_LOAD_REQUIRES_READ_BARRIER
+ ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
+
+ sub sp, sp, #96
+ .cfi_adjust_cfa_offset 96
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
+#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#endif
+
+ // Callee-saves
+ stp x20, x21, [sp, #8]
+ .cfi_rel_offset x20, 8
+ .cfi_rel_offset x21, 16
+
+ stp x22, x23, [sp, #24]
+ .cfi_rel_offset x22, 24
+ .cfi_rel_offset x23, 32
+
+ stp x24, x25, [sp, #40]
+ .cfi_rel_offset x24, 40
+ .cfi_rel_offset x25, 48
+
+ stp x26, x27, [sp, #56]
+ .cfi_rel_offset x26, 56
+ .cfi_rel_offset x27, 64
+
+ stp x28, x29, [sp, #72]
+ .cfi_rel_offset x28, 72
+ .cfi_rel_offset x29, 80
+
+ // LR
+ str xLR, [sp, #88]
+ .cfi_rel_offset x30, 88
+
+ // Save xSELF to xETR.
+ mov xETR, xSELF
+
+ // Loads appropriate callee-save-method
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly]
+.endm
+
+// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- // FP callee saves
- ldp d8, d9, [sp, #8]
- ldp d10, d11, [sp, #24]
- ldp d12, d13, [sp, #40]
- ldp d14, d15, [sp, #56]
+ // Restore xSELF.
+ mov xSELF, xETR
- // Callee saved.
- ldp xSELF, x19, [sp, #72]
- .cfi_restore x18
- .cfi_restore x19
-
- ldp x20, x21, [sp, #88]
+ // Callee-saves
+ ldp x20, x21, [sp, #8]
.cfi_restore x20
.cfi_restore x21
- ldp x22, x23, [sp, #104]
+ ldp x22, x23, [sp, #24]
.cfi_restore x22
.cfi_restore x23
- ldp x24, x25, [sp, #120]
+ ldp x24, x25, [sp, #40]
.cfi_restore x24
.cfi_restore x25
- ldp x26, x27, [sp, #136]
+ ldp x26, x27, [sp, #56]
.cfi_restore x26
.cfi_restore x27
- ldp x28, xFP, [sp, #152] // Save FP.
+ ldp x28, x29, [sp, #72]
.cfi_restore x28
.cfi_restore x29
- ldr xLR, [sp, #168]
+ // LR
+ ldr xLR, [sp, #88]
.cfi_restore x30
- add sp, sp, #176
- .cfi_adjust_cfa_offset -176
+ add sp, sp, #96
+ .cfi_adjust_cfa_offset -96
.endm
.macro POP_REF_ONLY_CALLEE_SAVE_FRAME
- add sp, sp, #176
- .cfi_adjust_cfa_offset -176
+ // Restore xSELF as it might be scratched.
+ mov xSELF, xETR
+ // ETR
+ ldr xETR, [sp, #16]
+ .cfi_restore x21
+
+ add sp, sp, #96
+ .cfi_adjust_cfa_offset -96
.endm
.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
@@ -210,62 +188,61 @@
.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
- sub sp, sp, #304
- .cfi_adjust_cfa_offset 304
+ sub sp, sp, #224
+ .cfi_adjust_cfa_offset 224
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 304)
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
- stp d0, d1, [sp, #16]
- stp d2, d3, [sp, #32]
- stp d4, d5, [sp, #48]
- stp d6, d7, [sp, #64]
- stp d8, d9, [sp, #80]
- stp d10, d11, [sp, #96]
- stp d12, d13, [sp, #112]
- stp d14, d15, [sp, #128]
+ // FP args
+ stp d0, d1, [sp, #16]
+ stp d2, d3, [sp, #32]
+ stp d4, d5, [sp, #48]
+ stp d6, d7, [sp, #64]
- stp x1, x2, [sp, #144]
- .cfi_rel_offset x1, 144
- .cfi_rel_offset x2, 152
+ // args and x20(callee-save)
+ stp x1, x2, [sp, #80]
+ .cfi_rel_offset x1, 80
+ .cfi_rel_offset x2, 88
- stp x3, x4, [sp, #160]
- .cfi_rel_offset x3, 160
- .cfi_rel_offset x4, 168
+ stp x3, x4, [sp, #96]
+ .cfi_rel_offset x3, 96
+ .cfi_rel_offset x4, 104
- stp x5, x6, [sp, #176]
- .cfi_rel_offset x5, 176
- .cfi_rel_offset x6, 184
+ stp x5, x6, [sp, #112]
+ .cfi_rel_offset x5, 112
+ .cfi_rel_offset x6, 120
- stp x7, xSELF, [sp, #192]
- .cfi_rel_offset x7, 192
- .cfi_rel_offset x18, 200
+ stp x7, x20, [sp, #128]
+ .cfi_rel_offset x7, 128
+ .cfi_rel_offset x20, 136
- stp x19, x20, [sp, #208]
- .cfi_rel_offset x19, 208
- .cfi_rel_offset x20, 216
+ // Callee-saves.
+ stp x21, x22, [sp, #144]
+ .cfi_rel_offset x21, 144
+ .cfi_rel_offset x22, 152
- stp x21, x22, [sp, #224]
- .cfi_rel_offset x21, 224
- .cfi_rel_offset x22, 232
+ stp x23, x24, [sp, #160]
+ .cfi_rel_offset x23, 160
+ .cfi_rel_offset x24, 168
- stp x23, x24, [sp, #240]
- .cfi_rel_offset x23, 240
- .cfi_rel_offset x24, 248
+ stp x25, x26, [sp, #176]
+ .cfi_rel_offset x25, 176
+ .cfi_rel_offset x26, 184
- stp x25, x26, [sp, #256]
- .cfi_rel_offset x25, 256
- .cfi_rel_offset x26, 264
+ stp x27, x28, [sp, #192]
+ .cfi_rel_offset x27, 192
+ .cfi_rel_offset x28, 200
- stp x27, x28, [sp, #272]
- .cfi_rel_offset x27, 272
- .cfi_rel_offset x28, 280
+ // x29(callee-save) and LR
+ stp x29, xLR, [sp, #208]
+ .cfi_rel_offset x29, 208
+ .cfi_rel_offset x30, 216
- stp xFP, xLR, [sp, #288]
- .cfi_rel_offset x29, 288
- .cfi_rel_offset x30, 296
+ // Save xSELF to xETR.
+ mov xETR, xSELF
.endm
/*
@@ -275,75 +252,73 @@
* TODO This is probably too conservative - saving FP & LR.
*/
.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- adrp x9, :got:_ZN3art7Runtime9instance_E
- ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr x9,[x9] // x9 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
- // x9 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
+ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr x9, [x9, RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr xIP0, [xIP0, RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
- str x9, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
.endm
+// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ // Restore xSELF.
+ mov xSELF, xETR
- ldp d0, d1, [sp, #16]
- ldp d2, d3, [sp, #32]
- ldp d4, d5, [sp, #48]
- ldp d6, d7, [sp, #64]
- ldp d8, d9, [sp, #80]
- ldp d10, d11, [sp, #96]
- ldp d12, d13, [sp, #112]
- ldp d14, d15, [sp, #128]
+ // FP args
+ ldp d0, d1, [sp, #16]
+ ldp d2, d3, [sp, #32]
+ ldp d4, d5, [sp, #48]
+ ldp d6, d7, [sp, #64]
- // args.
- ldp x1, x2, [sp, #144]
+ // args and x20(callee-save)
+ ldp x1, x2, [sp, #80]
.cfi_restore x1
.cfi_restore x2
- ldp x3, x4, [sp, #160]
+ ldp x3, x4, [sp, #96]
.cfi_restore x3
.cfi_restore x4
- ldp x5, x6, [sp, #176]
+ ldp x5, x6, [sp, #112]
.cfi_restore x5
.cfi_restore x6
- ldp x7, xSELF, [sp, #192]
+ ldp x7, x20, [sp, #128]
.cfi_restore x7
- .cfi_restore x18
-
- ldp x19, x20, [sp, #208]
- .cfi_restore x19
.cfi_restore x20
- ldp x21, x22, [sp, #224]
+ // Callee-saves.
+ ldp x21, x22, [sp, #144]
.cfi_restore x21
.cfi_restore x22
- ldp x23, x24, [sp, #240]
+ ldp x23, x24, [sp, #160]
.cfi_restore x23
.cfi_restore x24
- ldp x25, x26, [sp, #256]
+ ldp x25, x26, [sp, #176]
.cfi_restore x25
.cfi_restore x26
- ldp x27, x28, [sp, #272]
+ ldp x27, x28, [sp, #192]
.cfi_restore x27
.cfi_restore x28
- ldp xFP, xLR, [sp, #288]
+ // x29(callee-save) and LR
+ ldp x29, xLR, [sp, #208]
.cfi_restore x29
.cfi_restore x30
- add sp, sp, #304
- .cfi_adjust_cfa_offset -304
+ add sp, sp, #224
+ .cfi_adjust_cfa_offset -224
.endm
.macro RETURN_IF_RESULT_IS_ZERO
@@ -381,7 +356,7 @@
.endm
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
- RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x9
+ RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm
// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
@@ -400,7 +375,7 @@
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov x0, xSELF // pass Thread::Current
+ mov x0, xSELF // pass Thread::Current
mov x1, sp // pass SP
b \cxx_name // \cxx_name(Thread*, SP)
END \c_name
@@ -410,7 +385,7 @@
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context.
- mov x1, xSELF // pass Thread::Current.
+ mov x1, xSELF // pass Thread::Current.
mov x2, sp // pass SP.
b \cxx_name // \cxx_name(arg, Thread*, SP).
brk 0
@@ -421,7 +396,7 @@
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov x2, xSELF // pass Thread::Current
+ mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
b \cxx_name // \cxx_name(arg1, arg2, Thread*, SP)
brk 0
@@ -460,6 +435,31 @@
*/
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
+ /*
+ * Invoke stack overflow exception from signal handler.
+ * On entry:
+ * xSELF: thread
+ * SP: address of last known frame
+ * IP0: address of next valid SP below protected region in stack
+ *
+ * This is deceptively simple but hides some complexity. It is called in the case of
+ * a stack overflow condition during implicit checks. The signal handler has been
+ * called by the kernel due to a load from the protected stack region. The handler
+ * works out the address of the previous frame and passes this in SP. However there
+ * is a piece of memory somewhere below the current SP that is not accessible (the
+ * memory that caused the signal). The signal handler works out the next
+ * accessible value of SP and passes this in x16/IP0. This code then sets up the SP
+ * to be this new value and calls the code to create and throw the stack overflow
+ * exception.
+ */
+ENTRY art_quick_throw_stack_overflow_from_signal
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov x0, xSELF // pass Thread::Current
+ mov x1, sp // pass SP
+ mov sp, xIP0 // move SP down to below protected region.
+ b artThrowStackOverflowFromCode // artThrowStackOverflowFromCode(Thread*, SP)
+END art_quick_throw_stack_overflow_from_signal
+
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
@@ -478,7 +478,7 @@
*
* Adapted from ARM32 code.
*
- * Clobbers x12.
+ * Clobbers xIP0.
*/
.macro INVOKE_TRAMPOLINE c_name, cxx_name
.extern \cxx_name
@@ -491,10 +491,10 @@
mov x3, xSELF // pass Thread::Current
mov x4, sp
bl \cxx_name // (method_idx, this, caller, Thread*, SP)
- mov x12, x1 // save Method*->code_
+ mov xIP0, x1 // save Method*->code_
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
cbz x0, 1f // did we find the target? if not go to exception delivery
- br x12 // tail call to target
+ br xIP0 // tail call to target
1:
DELIVER_PENDING_EXCEPTION
END \c_name
@@ -511,7 +511,7 @@
.macro INVOKE_STUB_CREATE_FRAME
-SAVE_SIZE=6*8 // x4, x5, x19(wSUSPEND), SP, LR & FP saved.
+SAVE_SIZE=6*8 // x4, x5, xSUSPEND, SP, LR & FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
@@ -527,7 +527,7 @@
.cfi_def_cfa_register x10 // before this.
.cfi_adjust_cfa_offset SAVE_SIZE
- stp x9, x19, [x10, #32] // Save old stack pointer and x19(wSUSPEND)
+ stp x9, xSUSPEND, [x10, #32] // Save old stack pointer and xSUSPEND
.cfi_rel_offset sp, 32
.cfi_rel_offset x19, 40
@@ -608,7 +608,7 @@
str x0, [x4]
.Lexit_art_quick_invoke_stub\@:
- ldp x2, x19, [xFP, #32] // Restore stack pointer and x19.
+ ldp x2, xSUSPEND, [xFP, #32] // Restore stack pointer and xSUSPEND.
.cfi_restore x19
mov sp, x2
.cfi_restore sp
@@ -636,6 +636,7 @@
* | FP'' | <- SP'
* +----------------------+
* +----------------------+
+ * | x19 | <- Used as wSUSPEND, won't be restored by managed code.
* | SP' |
* | X5 |
* | X4 | Saved registers
@@ -1241,8 +1242,6 @@
.endm
// Macros taking opportunity of code similarities for downcalls with referrer.
-
-// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -1256,7 +1255,6 @@
END \name
.endm
-// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -1270,7 +1268,6 @@
END \name
.endm
-// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -1351,6 +1348,14 @@
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
+ENTRY art_quick_implicit_suspend
+ mov x0, xSELF
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
+ mov x1, sp
+ bl artTestSuspendFromCode // (Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_implicit_suspend
+
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* x0 holds the proxy method and x1 holds the receiver; The frame size of the invoked proxy
@@ -1363,8 +1368,8 @@
mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
- ldr xSELF, [sp, #200] // Restore self pointer.
- ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
+ // Use xETR as xSELF might be scratched by native function above.
+ ldr x2, [xETR, THREAD_EXCEPTION_OFFSET]
cbnz x2, .Lexception_in_proxy // success if no exception is pending
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
fmov d0, x0 // Store result in d0 in case it was float or double
@@ -1375,14 +1380,14 @@
END art_quick_proxy_invoke_handler
/*
- * Called to resolve an imt conflict. x12 is a hidden argument that holds the target method's
+ * Called to resolve an imt conflict. xIP1 is a hidden argument that holds the target method's
* dex method index.
*/
ENTRY art_quick_imt_conflict_trampoline
ldr w0, [sp, #0] // load caller Method*
ldr w0, [x0, #METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
add x0, x0, #OBJECT_ARRAY_DATA_OFFSET // get starting address of data
- ldr w0, [x0, x12, lsl 2] // load the target method
+ ldr w0, [x0, xIP1, lsl 2] // load the target method
b art_quick_invoke_interface_trampoline
END art_quick_imt_conflict_trampoline
@@ -1392,10 +1397,10 @@
mov x3, sp
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f
- mov x9, x0 // Remember returned code pointer in x9.
+ mov xIP0, x0 // Remember returned code pointer in xIP0.
ldr w0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
- br x9
+ br xIP0
1:
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
@@ -1419,7 +1424,6 @@
* | X22 | callee save
* | X21 | callee save
* | X20 | callee save
- * | X19 | callee save
* | X7 | arg7
* | X6 | arg6
* | X5 | arg5
@@ -1427,14 +1431,6 @@
* | X3 | arg3
* | X2 | arg2
* | X1 | arg1
- * | D15 | float arg 8
- * | D14 | float arg 8
- * | D13 | float arg 8
- * | D12 | callee save
- * | D11 | callee save
- * | D10 | callee save
- * | D9 | callee save
- * | D8 | callee save
* | D7 | float arg 8
* | D6 | float arg 7
* | D5 | float arg 6
@@ -1476,8 +1472,8 @@
// of the frame when the handle scope is inserted.
mov xFP, sp
- mov x8, #5120
- sub sp, sp, x8
+ mov xIP0, #5120
+ sub sp, sp, xIP0
// prepare for artQuickGenericJniTrampoline call
// (Thread*, SP)
@@ -1517,17 +1513,14 @@
add sp, sp, #128
- blr xIP0 // native call.
-
- // Restore self pointer.
- ldr xSELF, [x28, #200]
+ blr xIP0 // native call.
// result sign extension is handled in C code
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
// x0 x1 x2 <= C calling convention
mov x1, x0 // Result (from saved)
- mov x0, xSELF // Thread register
+ mov x0, xETR // Thread register, original xSELF might be scratched by native code.
fmov x2, d0 // d0 will contain floating point result, but needs to go into x2
bl artQuickGenericJniEndTrampoline
@@ -1536,11 +1529,9 @@
mov sp, x28
.cfi_def_cfa_register sp
- // Restore self pointer.
- ldr xSELF, [x28, #200]
-
// Pending exceptions possible.
- ldr x1, [xSELF, THREAD_EXCEPTION_OFFSET]
+ // Use xETR as xSELF might be scratched by native code
+ ldr x1, [xETR, THREAD_EXCEPTION_OFFSET]
cbnz x1, .Lexception_in_native
// Tear down the callee-save frame.
@@ -1553,7 +1544,6 @@
.Lentry_error:
mov sp, x28
.cfi_def_cfa_register sp
- ldr xSELF, [x28, #200]
.Lexception_in_native:
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
@@ -1592,19 +1582,19 @@
ENTRY art_quick_instrumentation_entry
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- mov x19, x0 // Preserve method reference in a callee-save.
+ mov x20, x0 // Preserve method reference in a callee-save.
mov x2, xSELF
mov x3, sp
mov x4, xLR
bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, SP, LR)
- mov x9, x0 // x0 = result of call.
- mov x0, x19 // Reload method reference.
+ mov xIP0, x0 // x0 = result of call.
+ mov x0, x20 // Reload method reference.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // Note: will restore xSELF
adr xLR, art_quick_instrumentation_exit
- br x9 // Tail-call method with lr set to art_quick_instrumentation_exit.
+ br xIP0 // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry
.extern artInstrumentationMethodExitFromCode
@@ -1627,18 +1617,16 @@
mov x0, xSELF // Pass Thread.
bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res, fpr_res)
- mov x9, x0 // Return address from instrumentation call.
+ mov xIP0, x0 // Return address from instrumentation call.
mov xLR, x1 // r1 is holding link register if we're to bounce to deoptimize
ldr d0, [sp, #8] // Restore floating-point result.
ldr x0, [sp], 16 // Restore integer result, and drop stack area.
.cfi_adjust_cfa_offset 16
- // Need to restore x18.
- ldr xSELF, [sp, #72]
POP_REF_ONLY_CALLEE_SAVE_FRAME
- br x9 // Tail-call out.
+ br xIP0 // Tail-call out.
END art_quick_instrumentation_exit
/*
@@ -1703,15 +1691,15 @@
.Lindexof_loop4:
ldrh w6, [x0, #2]!
ldrh w7, [x0, #2]!
- ldrh w8, [x0, #2]!
- ldrh w9, [x0, #2]!
+ ldrh wIP0, [x0, #2]!
+ ldrh wIP1, [x0, #2]!
cmp w6, w1
b.eq .Lmatch_0
cmp w7, w1
b.eq .Lmatch_1
- cmp w8, w1
+ cmp wIP0, w1
b.eq .Lmatch_2
- cmp w9, w1
+ cmp wIP1, w1
b.eq .Lmatch_3
subs w2, w2, #4
b.ge .Lindexof_loop4
@@ -1855,17 +1843,17 @@
ret
.Ldo_memcmp16:
- mov x14, x0 // Save x0 and LR. __memcmp16 does not use these temps.
- mov x15, xLR // TODO: Codify and check that?
+ mov xIP0, x0 // Save x0 and LR. __memcmp16 does not use these temps.
+ mov xIP1, xLR // TODO: Codify and check that?
mov x0, x2
uxtw x2, w3
bl __memcmp16
- mov xLR, x15 // Restore LR.
+ mov xLR, xIP1 // Restore LR.
cmp x0, #0 // Check the memcmp difference.
- csel x0, x0, x14, ne // x0 := x0 != 0 ? x14(prev x0=length diff) : x1.
+ csel x0, x0, xIP0, ne // x0 := x0 != 0 ? xIP0(prev x0=length diff) : x1.
ret
END art_quick_string_compareto
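
For orientation, the 96-byte kRefsOnly frame built by SETUP_REF_ONLY_CALLEE_SAVE_FRAME above is just the ArtMethod* slot, x20-x29, and LR; xSELF is no longer stored in the frame but parked in xETR (x21), which is why POP_REF_ONLY_CALLEE_SAVE_FRAME reloads xETR from [sp, #16], the x21 slot. A C++ sketch of that layout follows; it is illustrative only, and the method slot is modelled here as a full 8-byte slot.

    #include <cstddef>
    #include <cstdint>

    // Mirrors the stp/str offsets in SETUP_REF_ONLY_CALLEE_SAVE_FRAME.
    struct RefsOnlyCalleeSaveFrame {
      uint64_t art_method;   // [sp, #0]  ArtMethod* slot (plus padding)
      uint64_t x20, x21;     // [sp, #8]  x21 doubles as xETR
      uint64_t x22, x23;     // [sp, #24]
      uint64_t x24, x25;     // [sp, #40]
      uint64_t x26, x27;     // [sp, #56]
      uint64_t x28, x29;     // [sp, #72]
      uint64_t lr;           // [sp, #88] always the topmost slot
    };

    static_assert(sizeof(RefsOnlyCalleeSaveFrame) == 96,
                  "matches FRAME_SIZE_REFS_ONLY_CALLEE_SAVE");
    static_assert(offsetof(RefsOnlyCalleeSaveFrame, x21) == 16,
                  "POP_REF_ONLY_CALLEE_SAVE_FRAME reloads xETR from here");
    static_assert(offsetof(RefsOnlyCalleeSaveFrame, lr) == 88,
                  "LR sits at frame size - pointer size");
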
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index cb830ac..15c6c07 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -20,53 +20,53 @@
#include "quick/quick_method_frame_info.h"
#include "registers_arm64.h"
#include "runtime.h" // for Runtime::CalleeSaveType.
+#include "utils.h" // for POPCOUNT
namespace art {
namespace arm64 {
+// Registers that always need to be saved and restored here, even though aapcs64 does not require callees to preserve them.
+static constexpr uint32_t kArm64CalleeSaveAlwaysSpills =
+ // Note: ArtMethod::GetReturnPcOffsetInBytes() relies on the assumption that
+ // LR is always saved on the top of the frame for all targets.
+ // That is, lr = *(sp + framesize - pointer_size).
+ (1 << art::arm64::LR);
// Callee saved registers
static constexpr uint32_t kArm64CalleeSaveRefSpills =
- (1 << art::arm64::X19) | (1 << art::arm64::X20) | (1 << art::arm64::X21) |
- (1 << art::arm64::X22) | (1 << art::arm64::X23) | (1 << art::arm64::X24) |
- (1 << art::arm64::X25) | (1 << art::arm64::X26) | (1 << art::arm64::X27) |
- (1 << art::arm64::X28);
+ (1 << art::arm64::X20) | (1 << art::arm64::X21) | (1 << art::arm64::X22) |
+ (1 << art::arm64::X23) | (1 << art::arm64::X24) | (1 << art::arm64::X25) |
+ (1 << art::arm64::X26) | (1 << art::arm64::X27) | (1 << art::arm64::X28) |
+ (1 << art::arm64::X29);
// X0 is the method pointer. Not saved.
static constexpr uint32_t kArm64CalleeSaveArgSpills =
(1 << art::arm64::X1) | (1 << art::arm64::X2) | (1 << art::arm64::X3) |
(1 << art::arm64::X4) | (1 << art::arm64::X5) | (1 << art::arm64::X6) |
(1 << art::arm64::X7);
-// TODO This is conservative. Only ALL should include the thread register.
-// The thread register is not preserved by the aapcs64.
-// LR is always saved.
-static constexpr uint32_t kArm64CalleeSaveAllSpills = 0; // (1 << art::arm64::LR);
+static constexpr uint32_t kArm64CalleeSaveAllSpills =
+ // Thread register.
+ (1 << art::arm64::X18) |
+ // Suspend register.
+ (1 << art::arm64::X19);
-// Save callee-saved floating point registers. Rest are scratch/parameters.
+static constexpr uint32_t kArm64CalleeSaveFpAlwaysSpills = 0;
+static constexpr uint32_t kArm64CalleeSaveFpRefSpills = 0;
static constexpr uint32_t kArm64CalleeSaveFpArgSpills =
(1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
(1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
(1 << art::arm64::D6) | (1 << art::arm64::D7);
-static constexpr uint32_t kArm64CalleeSaveFpRefSpills =
+static constexpr uint32_t kArm64FpAllSpills =
(1 << art::arm64::D8) | (1 << art::arm64::D9) | (1 << art::arm64::D10) |
(1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
(1 << art::arm64::D14) | (1 << art::arm64::D15);
-static constexpr uint32_t kArm64FpAllSpills =
- kArm64CalleeSaveFpArgSpills |
- (1 << art::arm64::D16) | (1 << art::arm64::D17) | (1 << art::arm64::D18) |
- (1 << art::arm64::D19) | (1 << art::arm64::D20) | (1 << art::arm64::D21) |
- (1 << art::arm64::D22) | (1 << art::arm64::D23) | (1 << art::arm64::D24) |
- (1 << art::arm64::D25) | (1 << art::arm64::D26) | (1 << art::arm64::D27) |
- (1 << art::arm64::D28) | (1 << art::arm64::D29) | (1 << art::arm64::D30) |
- (1 << art::arm64::D31);
constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kArm64CalleeSaveRefSpills |
+ return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0) | (1 << art::arm64::FP) |
- (1 << art::arm64::X18) | (1 << art::arm64::LR);
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
- return kArm64CalleeSaveFpRefSpills |
+ return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
(type == Runtime::kSaveAll ? kArm64FpAllSpills : 0);
}
@@ -83,6 +83,22 @@
Arm64CalleeSaveFpSpills(type));
}
+constexpr size_t Arm64CalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
+ return Arm64CalleeSaveFrameSize(type) -
+ (POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
+ POPCOUNT(Arm64CalleeSaveFpSpills(type))) * kArm64PointerSize;
+}
+
+constexpr size_t Arm64CalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
+ return Arm64CalleeSaveFrameSize(type) -
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * kArm64PointerSize;
+}
+
+constexpr size_t Arm64CalleeSaveLrOffset(Runtime::CalleeSaveType type) {
+ return Arm64CalleeSaveFrameSize(type) -
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) * kArm64PointerSize;
+}
+
} // namespace arm64
} // namespace art
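
These spill masks are also what the new FRAME_SIZE_* constants in asm_support_arm64.h fall out of: count the core and FP spills for each CalleeSaveType, give each an 8-byte slot, add the method slot and round up to the 16-byte stack alignment. The quick standalone check below states the 8-byte method slot and the rounding as assumptions for illustration; it is not ART's actual frame-size helper.

    #include <cstddef>

    // Assumed model: one 8-byte method slot, 8 bytes per spilled register,
    // frame rounded up to 16-byte stack alignment.
    constexpr size_t FrameSize(int core_spills, int fp_spills) {
      return (static_cast<size_t>(core_spills + fp_spills) * 8 + 8 + 15) & ~size_t(15);
    }

    // Spill counts per CalleeSaveType, reading the masks above:
    //   always: LR                 refs:   x20..x29 (10)
    //   args:   x1..x7 (7)         all:    x18, x19 (2)
    //   fp-args: d0..d7 (8)        fp-all: d8..d15 (8)
    static_assert(FrameSize(1 + 10 + 2, 8) == 176, "FRAME_SIZE_SAVE_ALL_CALLEE_SAVE");
    static_assert(FrameSize(1 + 10,     0) == 96,  "FRAME_SIZE_REFS_ONLY_CALLEE_SAVE");
    static_assert(FrameSize(1 + 10 + 7, 8) == 224, "FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE");
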
diff --git a/runtime/arch/arm64/registers_arm64.h b/runtime/arch/arm64/registers_arm64.h
index ea346e0..9ccab70 100644
--- a/runtime/arch/arm64/registers_arm64.h
+++ b/runtime/arch/arm64/registers_arm64.h
@@ -57,7 +57,7 @@
X30 = 30,
X31 = 31,
TR = 18, // ART Thread Register - Managed Runtime (Caller Saved Reg)
- ETR = 19, // ART Thread Register - External Calls (Callee Saved Reg)
+ ETR = 21, // ART Thread Register - External Calls (Callee Saved Reg)
IP0 = 16, // Used as scratch by VIXL.
IP1 = 17, // Used as scratch by ART JNI Assembler.
FP = 29,
diff --git a/runtime/arch/memcmp16.h b/runtime/arch/memcmp16.h
index 1144c8c..14dc1e3 100644
--- a/runtime/arch/memcmp16.h
+++ b/runtime/arch/memcmp16.h
@@ -30,7 +30,7 @@
//
// In both cases, MemCmp16 is declared.
-#if defined(__aarch64__) || defined(__arm__) || defined(__mips)
+#if defined(__aarch64__) || defined(__arm__) || defined(__mips) || defined(__i386__) || defined(__x86_64__)
extern "C" uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count);
#define MemCmp16 __memcmp16
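
__memcmp16 is the UTF-16 comparison primitive behind art_quick_string_compareto: it walks two uint16_t arrays (count is in 16-bit units, not bytes) and reports how the first differing elements compare. The portable sketch below states that contract as an assumption for illustration; the per-arch assembly versions, including the x86 one added later in this series, are faster implementations of the same behaviour.

    #include <cstddef>
    #include <cstdint>

    // Reference behaviour assumed for __memcmp16 / MemCmp16: zero when the first
    // `count` 16-bit elements are equal, otherwise the signed difference of the
    // first pair that differs.
    static inline int32_t MemCmp16Reference(const uint16_t* s0, const uint16_t* s1, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        if (s0[i] != s1[i]) {
          return static_cast<int32_t>(s0[i]) - static_cast<int32_t>(s1[i]);
        }
      }
      return 0;
    }
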
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 7a2e961..d3e7d5e 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -21,6 +21,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
+#include "atomic.h"
namespace art {
@@ -196,11 +197,11 @@
qpoints->pCmplDouble = CmplDouble;
qpoints->pCmplFloat = CmplFloat;
qpoints->pFmod = fmod;
- qpoints->pL2d = __floatdidf;
+ qpoints->pL2d = art_l2d;
qpoints->pFmodf = fmodf;
- qpoints->pL2f = __floatdisf;
- qpoints->pD2iz = __fixdfsi;
- qpoints->pF2iz = __fixsfsi;
+ qpoints->pL2f = art_l2f;
+ qpoints->pD2iz = art_d2i;
+ qpoints->pF2iz = art_f2i;
qpoints->pIdivmod = NULL;
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
@@ -236,6 +237,10 @@
qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+
+ // Atomic 64-bit load/store
+ qpoints->pA64Load = QuasiAtomic::Read64;
+ qpoints->pA64Store = QuasiAtomic::Write64;
};
} // namespace art
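
Switching pL2d/pL2f/pD2iz/pF2iz from the libgcc __float*/__fix* helpers to ART's art_l2d/art_l2f/art_d2i/art_f2i matters because Java defines its own conversion semantics, most visibly for double/float-to-int: NaN converts to 0 and out-of-range values saturate, which the plain C runtime helpers do not guarantee. The sketch below shows the d2i contract as an assumption for illustration; it is not the actual art_d2i source.

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Java-style double -> int conversion: NaN -> 0, out-of-range values saturate.
    static int32_t JavaDoubleToInt(double value) {
      if (std::isnan(value)) {
        return 0;
      }
      if (value >= static_cast<double>(std::numeric_limits<int32_t>::max())) {
        return std::numeric_limits<int32_t>::max();
      }
      if (value <= static_cast<double>(std::numeric_limits<int32_t>::min())) {
        return std::numeric_limits<int32_t>::min();
      }
      return static_cast<int32_t>(value);
    }
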
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 1ecd7d9..0e76aab 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ada1523..8786222 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1076,14 +1076,15 @@
.cfi_startproc
addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp
GENERATE_GLOBAL_POINTER
- move $t0, $sp # remember bottom of caller's frame
+ move $ra, $zero # link register points here, so clobber it with 0 for later checks
SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ move $t0, $sp # remember bottom of caller's frame
addiu $sp, $sp, -48 # save return values and set up args
.cfi_adjust_cfa_offset 48
sw $v0, 32($sp)
- .cfi_rel_offset 2, 0
+ .cfi_rel_offset 2, 32
sw $v1, 36($sp)
- .cfi_rel_offset 3, 4
+ .cfi_rel_offset 3, 36
s.s $f0, 40($sp)
s.s $f1, 44($sp)
s.s $f0, 16($sp) # pass fpr result
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index ae39be1..e468c2a 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -81,6 +81,8 @@
#define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
#define CFI_RESTORE(reg) .cfi_restore reg
#define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
+ #define CFI_RESTORE_STATE .cfi_restore_state
+ #define CFI_REMEMBER_STATE .cfi_remember_state
#else
// Mac OS' doesn't like cfi_* directives.
#define CFI_STARTPROC
@@ -90,6 +92,8 @@
#define CFI_DEF_CFA_REGISTER(reg)
#define CFI_RESTORE(reg)
#define CFI_REL_OFFSET(reg,size)
+ #define CFI_RESTORE_STATE
+ #define CFI_REMEMBER_STATE
#endif
// Symbols.
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 7c1980e..8b6c9b1 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -21,27 +21,370 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "thread.h"
+#include "thread-inl.h"
+#if defined(__APPLE__)
+#define ucontext __darwin_ucontext
+#define CTX_ESP uc_mcontext->__ss.__esp
+#define CTX_EIP uc_mcontext->__ss.__eip
+#define CTX_EAX uc_mcontext->__ss.__eax
+#define CTX_METHOD uc_mcontext->__ss.__eax
+#elif defined(__x86_64__)
+#define CTX_ESP uc_mcontext.gregs[REG_RSP]
+#define CTX_EIP uc_mcontext.gregs[REG_RIP]
+#define CTX_EAX uc_mcontext.gregs[REG_RAX]
+#define CTX_METHOD uc_mcontext.gregs[REG_RDI]
+#else
+#define CTX_ESP uc_mcontext.gregs[REG_ESP]
+#define CTX_EIP uc_mcontext.gregs[REG_EIP]
+#define CTX_EAX uc_mcontext.gregs[REG_EAX]
+#define CTX_METHOD uc_mcontext.gregs[REG_EAX]
+#endif
//
-// X86 specific fault handler functions.
+// X86 (and X86_64) specific fault handler functions.
//
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_test_suspend();
+
+// Get the size of an instruction in bytes.
+// Return 0 if the instruction is not handled.
+static uint32_t GetInstructionSize(const uint8_t* pc) {
+#if defined(__x86_64)
+ const bool x86_64 = true;
+#else
+ const bool x86_64 = false;
+#endif
+
+ const uint8_t* startpc = pc;
+
+ uint8_t opcode = *pc++;
+ uint8_t modrm;
+ bool has_modrm = false;
+ bool two_byte = false;
+ uint32_t displacement_size = 0;
+ uint32_t immediate_size = 0;
+
+ // Prefixes.
+ while (true) {
+ bool prefix_present = false;
+ switch (opcode) {
+ // Group 1
+ case 0xf0:
+ case 0xf2:
+ case 0xf3:
+
+ // Group 2
+ case 0x2e:
+ case 0x36:
+ case 0x3e:
+ case 0x26:
+ case 0x64:
+ case 0x65:
+
+ // Group 3
+ case 0x66:
+
+ // Group 4
+ case 0x67:
+ opcode = *pc++;
+ prefix_present = true;
+ break;
+ }
+ if (!prefix_present) {
+ break;
+ }
+ }
+
+ if (x86_64 && opcode >= 0x40 && opcode <= 0x4f) {  // REX prefix.
+ opcode = *pc++;
+ }
+
+ if (opcode == 0x0f) {
+ // Two byte opcode
+ two_byte = true;
+ opcode = *pc++;
+ }
+
+ bool unhandled_instruction = false;
+
+ if (two_byte) {
+ switch (opcode) {
+ case 0x10: // vmovsd/ss
+ case 0x11: // vmovsd/ss
+ case 0xb6: // movzx
+ case 0xb7:
+ case 0xbe: // movsx
+ case 0xbf:
+ modrm = *pc++;
+ has_modrm = true;
+ break;
+ default:
+ unhandled_instruction = true;
+ break;
+ }
+ } else {
+ switch (opcode) {
+ case 0x89: // mov
+ case 0x8b:
+ case 0x38: // cmp with memory.
+ case 0x39:
+ case 0x3a:
+ case 0x3b:
+ case 0x3c:
+ case 0x3d:
+ case 0x85: // test.
+ modrm = *pc++;
+ has_modrm = true;
+ break;
+
+ case 0x80: // group 1, byte immediate.
+ case 0x83:
+ modrm = *pc++;
+ has_modrm = true;
+ immediate_size = 1;
+ break;
+
+ case 0x81: // group 1, word immediate.
+ modrm = *pc++;
+ has_modrm = true;
+ immediate_size = 4;
+ break;
+
+ default:
+ unhandled_instruction = true;
+ break;
+ }
+ }
+
+ if (unhandled_instruction) {
+ VLOG(signals) << "Unhandled x86 instruction with opcode " << static_cast<int>(opcode);
+ return 0;
+ }
+
+ if (has_modrm) {
+ uint8_t mod = (modrm >> 6) & 0b11;
+
+ // Check for SIB.
+ if (mod != 0b11 && (modrm & 0b111) == 4) {
+ ++pc; // SIB
+ }
+
+ switch (mod) {
+ case 0b00: break;
+ case 0b01: displacement_size = 1; break;
+ case 0b10: displacement_size = 4; break;
+ case 0b11:
+ break;
+ }
+ }
+
+ // Skip displacement and immediate.
+ pc += displacement_size + immediate_size;
+
+ VLOG(signals) << "x86 instruction length calculated as " << (pc - startpc);
+ return pc - startpc;
+}
+
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ *out_sp = static_cast<uintptr_t>(uc->CTX_ESP);
+ VLOG(signals) << "sp: " << std::hex << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However it's in EAX(x86)/RDI(x86_64).
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+#if defined(__x86_64__)
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86_64));
+#else
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86));
+#endif
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(uc->CTX_METHOD);
+ } else {
+ // The method is at the top of the stack.
+ *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr();
+ }
+
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ VLOG(signals) << HexDump(pc, 32, true, "PC ");
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ if (instr_size == 0) {
+ // Unknown instruction, tell caller it's not ours.
+ *out_method = nullptr;
+ return;
+ }
+ *out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ if (instr_size == 0) {
+ // Unknown instruction, can't really happen.
+ return false;
+ }
+
+ // We need to arrange for the signal handler to return to the null pointer
+ // exception generator. The return address must be the address of the
+ // next instruction (this instruction + instruction size). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uintptr_t retaddr = reinterpret_cast<uintptr_t>(pc + instr_size);
+ uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp - sizeof(uintptr_t));
+ *next_sp = retaddr;
+ uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
+
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
+}
+
+// A suspend check is done using the following instruction sequence:
+// (x86)
+// 0xf720f1df: 648B058C000000 mov eax, fs:[0x8c] ; suspend_trigger
+// .. some intervening instructions.
+// 0xf720f1e6: 8500 test eax, [eax]
+// (x86_64)
+// 0x7f579de45d9e: 65488B0425A8000000 movq rax, gs:[0xa8] ; suspend_trigger
+// .. some intervening instructions.
+// 0x7f579de45da7: 8500 test eax, [eax]
+
+// The offset from fs (x86) / gs (x86_64) is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault.
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the mov eax, fs:[xxx]
+ // where xxx is the offset of the suspend trigger.
+#if defined(__x86_64__)
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<8>().Int32Value();
+#else
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
+#endif
+
+ VLOG(signals) << "Checking for suspension point";
+#if defined(__x86_64__)
+ uint8_t checkinst1[] = {0x65, 0x48, 0x8b, 0x04, 0x25, static_cast<uint8_t>(trigger & 0xff),
+ static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
+#else
+ uint8_t checkinst1[] = {0x64, 0x8b, 0x05, static_cast<uint8_t>(trigger & 0xff),
+ static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
+#endif
+ uint8_t checkinst2[] = {0x85, 0x00};
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
+
+ if (pc[0] != checkinst2[0] || pc[1] != checkinst2[1]) {
+ // Second instruction is not correct (test eax,[eax]).
+ VLOG(signals) << "Not a suspension point";
+ return false;
+ }
+
+ // The first instruction can be a little way up the stream due to load hoisting
+ // in the compiler.
+ uint8_t* limit = pc - 100; // Compiler will hoist to a max of 20 instructions.
+ uint8_t* ptr = pc - sizeof(checkinst1);
+ bool found = false;
+ while (ptr > limit) {
+ if (memcmp(ptr, checkinst1, sizeof(checkinst1)) == 0) {
+ found = true;
+ break;
+ }
+ ptr -= 1;
+ }
+
+ if (found) {
+ VLOG(signals) << "suspend check match";
+
+ // We need to arrange for the signal handler to return to the test suspend
+ // entry point (art_quick_test_suspend). The return address must be the address
+ // of the next instruction (this instruction + 2). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uintptr_t retaddr = reinterpret_cast<uintptr_t>(pc + 2);
+ uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp - sizeof(uintptr_t));
+ *next_sp = retaddr;
+ uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
+
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ VLOG(signals) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
+ VLOG(signals) << "Not a suspend check match, first instruction mismatch";
return false;
}
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
+// The stack overflow check is done using the following instruction:
+// test eax, [esp+ -xxx]
+// where 'xxx' is the size of the overflow area.
+//
+// This is done before any frame is established in the method. The return
+// address for the previous method is on the stack at ESP.
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);
+
+ uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+#if defined(__x86_64__)
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86_64);
+#else
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86);
+#endif
+
+ Thread* self = Thread::Current();
+ uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
+ Thread::kStackOverflowProtectedSize;
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ // We know this is a stack overflow. We need to move the sp to the overflow region
+ // that exists below the protected region. Determine the address of the next
+ // available valid address below the protected region.
+ VLOG(signals) << "setting sp to overflow region at " << std::hex << pregion;
+
+ // Since the compiler puts the implicit overflow
+ // check before the callee save instructions, the SP is already pointing to
+ // the previous frame.
+
+ // Tell the stack overflow code where the new stack pointer should be.
+ uc->CTX_EAX = pregion;
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+
+ return true;
}
} // namespace art
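
GetInstructionSize above only has to be right for the few mov/test/cmp forms the compiler emits around implicit null and suspend checks; anything else returns 0 and the fault is passed on. The hand-decoded examples below restate the modrm/SIB/displacement rules for three such forms; the byte sequences are hypothetical, chosen to exercise each path, and the tiny decoder is a re-statement for illustration, not the function above.

    #include <cassert>
    #include <cstdint>

    // Length rules for a plain one-byte opcode followed by modrm, as in the
    // decoder above: optional SIB when rm == 100b and mod != 11b, then a
    // 1- or 4-byte displacement depending on mod.
    static uint32_t SimpleLoadStoreLength(const uint8_t* pc) {
      const uint8_t* start = pc;
      uint8_t opcode = *pc++;
      assert(opcode == 0x89 || opcode == 0x8b || opcode == 0x85);  // mov r/m,r ; mov r,r/m ; test
      uint8_t modrm = *pc++;
      uint8_t mod = (modrm >> 6) & 3;
      if (mod != 3 && (modrm & 7) == 4) {
        ++pc;  // SIB byte
      }
      if (mod == 1) {
        pc += 1;  // disp8
      } else if (mod == 2) {
        pc += 4;  // disp32
      }
      return static_cast<uint32_t>(pc - start);
    }

    int main() {
      const uint8_t test_eax[]  = {0x85, 0x00};              // test eax, [eax]      -> 2 bytes
      const uint8_t mov_load[]  = {0x8b, 0x41, 0x10};        // mov  eax, [ecx+0x10] -> 3 bytes
      const uint8_t mov_store[] = {0x89, 0x44, 0x24, 0x08};  // mov  [esp+8], eax    -> 4 bytes
      assert(SimpleLoadStoreLength(test_eax) == 2);
      assert(SimpleLoadStoreLength(mov_load) == 3);
      assert(SimpleLoadStoreLength(mov_store) == 4);
      return 0;
    }
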
diff --git a/runtime/arch/x86/memcmp16_x86.S b/runtime/arch/x86/memcmp16_x86.S
new file mode 100644
index 0000000..a315a37
--- /dev/null
+++ b/runtime/arch/x86/memcmp16_x86.S
@@ -0,0 +1,1038 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86.S"
+
+#define MEMCMP __memcmp16
+
+/* int32_t memcmp16_compare(const uint16_t* s0, const uint16_t* s1, size_t count); */
+
+#ifndef L
+# define L(label) .L##label
+#endif
+
+#define CFI_PUSH(REG) \
+ CFI_ADJUST_CFA_OFFSET(4); \
+ CFI_REL_OFFSET(REG, 0)
+
+#define CFI_POP(REG) \
+ CFI_ADJUST_CFA_OFFSET(-4); \
+ CFI_RESTORE(REG)
+
+#define PUSH(REG) pushl REG; CFI_PUSH (REG)
+#define POP(REG) popl REG; CFI_POP (REG)
+
+#define PARMS 4
+#define BLK1 PARMS
+#define BLK2 BLK1+4
+#define LEN BLK2+4
+#define RETURN_END POP (%edi); POP (%esi); POP (%ebx); ret
+#define RETURN RETURN_END; CFI_RESTORE_STATE; CFI_REMEMBER_STATE
+
+DEFINE_FUNCTION MEMCMP
+ movl LEN(%esp), %ecx
+
+ shl $1, %ecx
+ jz L(zero)
+
+ movl BLK1(%esp), %eax
+ cmp $48, %ecx
+ movl BLK2(%esp), %edx
+ jae L(48bytesormore)
+
+ PUSH (%ebx)
+ add %ecx, %edx
+ add %ecx, %eax
+ jmp L(less48bytes)
+
+ CFI_POP (%ebx)
+
+ .p2align 4
+L(zero):
+ xor %eax, %eax
+ ret
+
+ .p2align 4
+L(48bytesormore):
+ PUSH (%ebx)
+ PUSH (%esi)
+ PUSH (%edi)
+ CFI_REMEMBER_STATE
+ movdqu (%eax), %xmm3
+ movdqu (%edx), %xmm0
+ movl %eax, %edi
+ movl %edx, %esi
+ pcmpeqb %xmm0, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 16(%edi), %edi
+
+ sub $0xffff, %edx
+ lea 16(%esi), %esi
+ jnz L(less16bytes)
+ mov %edi, %edx
+ and $0xf, %edx
+ xor %edx, %edi
+ sub %edx, %esi
+ add %edx, %ecx
+ mov %esi, %edx
+ and $0xf, %edx
+ jz L(shr_0)
+ xor %edx, %esi
+
+ cmp $0, %edx
+ je L(shr_0)
+ cmp $2, %edx
+ je L(shr_2)
+ cmp $4, %edx
+ je L(shr_4)
+ cmp $6, %edx
+ je L(shr_6)
+ cmp $8, %edx
+ je L(shr_8)
+ cmp $10, %edx
+ je L(shr_10)
+ cmp $12, %edx
+ je L(shr_12)
+ jmp L(shr_14)
+
+ .p2align 4
+L(shr_0):
+ cmp $80, %ecx
+ jae L(shr_0_gobble)
+ lea -48(%ecx), %ecx
+ xor %eax, %eax
+ movaps (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+ movaps 16(%esi), %xmm2
+ pcmpeqb 16(%edi), %xmm2
+ pand %xmm1, %xmm2
+ pmovmskb %xmm2, %edx
+ add $32, %edi
+ add $32, %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea (%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_0_gobble):
+ lea -48(%ecx), %ecx
+ movdqa (%esi), %xmm0
+ xor %eax, %eax
+ pcmpeqb (%edi), %xmm0
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm2
+ pcmpeqb 16(%edi), %xmm2
+L(shr_0_gobble_loop):
+ pand %xmm0, %xmm2
+ sub $32, %ecx
+ pmovmskb %xmm2, %edx
+ movdqa %xmm0, %xmm1
+ movdqa 32(%esi), %xmm0
+ movdqa 48(%esi), %xmm2
+ sbb $0xffff, %edx
+ pcmpeqb 32(%edi), %xmm0
+ pcmpeqb 48(%edi), %xmm2
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ jz L(shr_0_gobble_loop)
+
+ pand %xmm0, %xmm2
+ cmp $0, %ecx
+ jge L(shr_0_gobble_loop_next)
+ inc %edx
+ add $32, %ecx
+L(shr_0_gobble_loop_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm2, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea (%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_2):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_2_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $2,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $2,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 2(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_2_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $2,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $2,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_2_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $2,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $2,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_2_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_2_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_2_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 2(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_4):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_4_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $4,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $4,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 4(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_4_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $4,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $4,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_4_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $4,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $4,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_4_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_4_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_4_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 4(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_6):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_6_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $6,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $6,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 6(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_6_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $6,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $6,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_6_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $6,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $6,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_6_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_6_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_6_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 6(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_8):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_8_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $8,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $8,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 8(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_8_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $8,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $8,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_8_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $8,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $8,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_8_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_8_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_8_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 8(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_10):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_10_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $10, (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $10,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 10(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_10_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $10, (%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $10, 16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_10_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $10,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $10,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_10_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_10_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_10_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 10(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_12):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_12_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $12, (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $12, %xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 12(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_12_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $12, (%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $12, 16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_12_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $12,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $12,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_12_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_12_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_12_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 12(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_14):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_14_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $14, (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $14, %xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 14(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_14_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $14, (%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $14, 16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_14_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $14,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $14,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_14_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_14_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_14_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 14(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(exit):
+ pmovmskb %xmm1, %ebx
+ sub $0xffff, %ebx
+ jz L(first16bytes)
+ lea -16(%esi), %esi
+ lea -16(%edi), %edi
+ mov %ebx, %edx
+
+L(first16bytes):
+ add %eax, %esi
+L(less16bytes):
+ test %dl, %dl
+ jz L(next_four_words)
+ test $15, %dl
+ jz L(second_two_words)
+ test $3, %dl
+ jz L(second_word)
+ movzwl -16(%edi), %eax
+ movzwl -16(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(second_word):
+ movzwl -14(%edi), %eax
+ movzwl -14(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(second_two_words):
+ test $63, %dl
+ jz L(fourth_word)
+ movzwl -12(%edi), %eax
+ movzwl -12(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(fourth_word):
+ movzwl -10(%edi), %eax
+ movzwl -10(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(next_four_words):
+ test $15, %dh
+ jz L(fourth_two_words)
+ test $3, %dh
+ jz L(sixth_word)
+ movzwl -8(%edi), %eax
+ movzwl -8(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(sixth_word):
+ movzwl -6(%edi), %eax
+ movzwl -6(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(fourth_two_words):
+ test $63, %dh
+ jz L(eighth_word)
+ movzwl -4(%edi), %eax
+ movzwl -4(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(eighth_word):
+ movzwl -2(%edi), %eax
+ movzwl -2(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+
+ CFI_PUSH (%ebx)
+
+ .p2align 4
+L(more8bytes):
+ cmp $16, %ecx
+ jae L(more16bytes)
+ cmp $8, %ecx
+ je L(8bytes)
+ cmp $10, %ecx
+ je L(10bytes)
+ cmp $12, %ecx
+ je L(12bytes)
+ jmp L(14bytes)
+
+ .p2align 4
+L(more16bytes):
+ cmp $24, %ecx
+ jae L(more24bytes)
+ cmp $16, %ecx
+ je L(16bytes)
+ cmp $18, %ecx
+ je L(18bytes)
+ cmp $20, %ecx
+ je L(20bytes)
+ jmp L(22bytes)
+
+ .p2align 4
+L(more24bytes):
+ cmp $32, %ecx
+ jae L(more32bytes)
+ cmp $24, %ecx
+ je L(24bytes)
+ cmp $26, %ecx
+ je L(26bytes)
+ cmp $28, %ecx
+ je L(28bytes)
+ jmp L(30bytes)
+
+ .p2align 4
+L(more32bytes):
+ cmp $40, %ecx
+ jae L(more40bytes)
+ cmp $32, %ecx
+ je L(32bytes)
+ cmp $34, %ecx
+ je L(34bytes)
+ cmp $36, %ecx
+ je L(36bytes)
+ jmp L(38bytes)
+
+ .p2align 4
+L(less48bytes):
+ cmp $8, %ecx
+ jae L(more8bytes)
+ cmp $2, %ecx
+ je L(2bytes)
+ cmp $4, %ecx
+ je L(4bytes)
+ jmp L(6bytes)
+
+ .p2align 4
+L(more40bytes):
+ cmp $40, %ecx
+ je L(40bytes)
+ cmp $42, %ecx
+ je L(42bytes)
+ cmp $44, %ecx
+ je L(44bytes)
+ jmp L(46bytes)
+
+ .p2align 4
+L(46bytes):
+ movzwl -46(%eax), %ecx
+ movzwl -46(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(44bytes):
+ movzwl -44(%eax), %ecx
+ movzwl -44(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(42bytes):
+ movzwl -42(%eax), %ecx
+ movzwl -42(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(40bytes):
+ movzwl -40(%eax), %ecx
+ movzwl -40(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(38bytes):
+ movzwl -38(%eax), %ecx
+ movzwl -38(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(36bytes):
+ movzwl -36(%eax), %ecx
+ movzwl -36(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(34bytes):
+ movzwl -34(%eax), %ecx
+ movzwl -34(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(32bytes):
+ movzwl -32(%eax), %ecx
+ movzwl -32(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(30bytes):
+ movzwl -30(%eax), %ecx
+ movzwl -30(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(28bytes):
+ movzwl -28(%eax), %ecx
+ movzwl -28(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(26bytes):
+ movzwl -26(%eax), %ecx
+ movzwl -26(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(24bytes):
+ movzwl -24(%eax), %ecx
+ movzwl -24(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(22bytes):
+ movzwl -22(%eax), %ecx
+ movzwl -22(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(20bytes):
+ movzwl -20(%eax), %ecx
+ movzwl -20(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(18bytes):
+ movzwl -18(%eax), %ecx
+ movzwl -18(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(16bytes):
+ movzwl -16(%eax), %ecx
+ movzwl -16(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(14bytes):
+ movzwl -14(%eax), %ecx
+ movzwl -14(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(12bytes):
+ movzwl -12(%eax), %ecx
+ movzwl -12(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(10bytes):
+ movzwl -10(%eax), %ecx
+ movzwl -10(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(8bytes):
+ movzwl -8(%eax), %ecx
+ movzwl -8(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(6bytes):
+ movzwl -6(%eax), %ecx
+ movzwl -6(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(4bytes):
+ movzwl -4(%eax), %ecx
+ movzwl -4(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(2bytes):
+ movzwl -2(%eax), %eax
+ movzwl -2(%edx), %ebx
+ subl %ebx, %eax
+ POP (%ebx)
+ ret
+ CFI_PUSH (%ebx)
+
+ .p2align 4
+L(memcmp16_exit):
+ POP (%ebx)
+ mov %ecx, %eax
+ ret
+END_FUNCTION MEMCMP
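
As a behavioral reference for the SSSE3 routine above, the following is a sketch of the contract implied by the prototype near the top of the file, not the optimized implementation; the name memcmp16_reference is made up for illustration.

#include <cstddef>
#include <cstdint>

// Compare 'count' uint16_t values; return the signed difference of the first pair
// that differs, or 0 if all are equal.
int32_t memcmp16_reference(const uint16_t* s0, const uint16_t* s1, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    if (s0[i] != s1[i]) {
      return static_cast<int32_t>(s0[i]) - static_cast<int32_t>(s1[i]);
    }
  }
  return 0;
}
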
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 24b9e46..6d74b83 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -173,6 +173,21 @@
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
+// On entry to this function, EAX contains the ESP value for the overflow region.
+DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
+ // Here, the ESP is above the protected region. We need to create a
+ // callee save frame and then move ESP down to the overflow region.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %esp, %ecx // get current stack pointer
+ mov %eax, %esp // move ESP to the overflow region.
+ PUSH ecx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
+ call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
+ int3 // unreached
+END_FUNCTION art_quick_throw_stack_overflow_from_signal
+
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
@@ -1098,7 +1113,8 @@
DEFINE_FUNCTION art_quick_resolution_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- PUSH esp // pass SP
+ movl %esp, %edi
+ PUSH EDI // pass SP. do not just PUSH ESP; that messes up unwinding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver
@@ -1262,14 +1278,12 @@
mov %esp, %ecx // Remember SP
subl LITERAL(8), %esp // Save float return value.
CFI_ADJUST_CFA_OFFSET(8)
- movd %xmm0, (%esp)
+ movq %xmm0, (%esp)
PUSH edx // Save gpr return value.
PUSH eax
- subl LITERAL(8), %esp // Align stack
- movd %xmm0, (%esp)
- subl LITERAL(8), %esp // Pass float return value.
- CFI_ADJUST_CFA_OFFSET(8)
- movd %xmm0, (%esp)
+ subl LITERAL(16), %esp // Align stack
+ CFI_ADJUST_CFA_OFFSET(16)
+ movq %xmm0, (%esp) // Pass float return value.
PUSH edx // Pass gpr return value.
PUSH eax
PUSH ecx // Pass SP.
@@ -1284,7 +1298,7 @@
// (ebx is pretending to be our LR).
POP eax // Restore gpr return value.
POP edx
- movd (%esp), %xmm0 // Restore fpr return value.
+ movq (%esp), %xmm0 // Restore fpr return value.
addl LITERAL(8), %esp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
diff --git a/runtime/arch/x86_64/fault_handler_x86_64.cc b/runtime/arch/x86_64/fault_handler_x86_64.cc
deleted file mode 100644
index 233d3c7..0000000
--- a/runtime/arch/x86_64/fault_handler_x86_64.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include "fault_handler.h"
-#include <sys/ucontext.h>
-#include "base/macros.h"
-#include "globals.h"
-#include "base/logging.h"
-#include "base/hex_dump.h"
-
-
-//
-// X86_64 specific fault handler functions.
-//
-
-namespace art {
-
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
-}
-
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
-
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
-
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
-} // namespace art
diff --git a/runtime/arch/x86_64/memcmp16_x86_64.S b/runtime/arch/x86_64/memcmp16_x86_64.S
new file mode 100755
index 0000000..46e4ba3
--- /dev/null
+++ b/runtime/arch/x86_64/memcmp16_x86_64.S
@@ -0,0 +1,1210 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86_64.S"
+
+#define MEMCMP __memcmp16
+
+/*
+ * Half of Silvermont L1 Data Cache size
+ *(see original file cache.h in bionic/libc/arch-x86_64/).
+ * This value is used for specific optimization on big lengths.
+ */
+#define DATA_CACHE_SIZE_HALF (12*1024)
+
+#ifndef L
+# define L(label) .L##label
+#endif
+
+#ifndef ALIGN
+# define ALIGN(n) .p2align n
+#endif
+
+#define JMPTBL(I, B) (I - B)
+
+#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
+ lea TABLE(%rip), %r11; \
+ movslq (%r11, INDEX, SCALE), %rcx; \
+ add %r11, %rcx; \
+ jmp *%rcx; \
+ ud2
+
+DEFINE_FUNCTION MEMCMP
+ pxor %xmm0, %xmm0
+ shl $1, %rdx
+ cmp $79, %rdx
+ ja L(79bytesormore)
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(79bytesormore):
+ movdqu (%rsi), %xmm1
+ movdqu (%rdi), %xmm2
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+ mov %rsi, %rcx
+ and $-16, %rsi
+ add $16, %rsi
+ sub %rsi, %rcx
+
+ sub %rcx, %rdi
+ add %rcx, %rdx
+ test $0xf, %rdi
+ jz L(2aligned)
+
+ cmp $128, %rdx
+ ja L(128bytesormore)
+L(less128bytes):
+ sub $64, %rdx
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqu 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqu 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+ cmp $32, %rdx
+ jb L(less32bytesin64)
+
+ movdqu 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqu 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin64):
+ add $64, %rdi
+ add $64, %rsi
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+L(128bytesormore):
+ cmp $512, %rdx
+ ja L(512bytesormore)
+ cmp $256, %rdx
+ ja L(less512bytes)
+L(less256bytes):
+ sub $128, %rdx
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqu 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqu 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqu 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqu 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqu 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqu 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ add $128, %rsi
+ add $128, %rdi
+
+ cmp $64, %rdx
+ jae L(less128bytes)
+
+ cmp $32, %rdx
+ jb L(less32bytesin128)
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin128):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+L(less512bytes):
+ sub $256, %rdx
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqu 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqu 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqu 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqu 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqu 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqu 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ movdqu 128(%rdi), %xmm2
+ pxor 128(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(144bytesin256)
+
+ movdqu 144(%rdi), %xmm2
+ pxor 144(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(160bytesin256)
+
+ movdqu 160(%rdi), %xmm2
+ pxor 160(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(176bytesin256)
+
+ movdqu 176(%rdi), %xmm2
+ pxor 176(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(192bytesin256)
+
+ movdqu 192(%rdi), %xmm2
+ pxor 192(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(208bytesin256)
+
+ movdqu 208(%rdi), %xmm2
+ pxor 208(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(224bytesin256)
+
+ movdqu 224(%rdi), %xmm2
+ pxor 224(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(240bytesin256)
+
+ movdqu 240(%rdi), %xmm2
+ pxor 240(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(256bytesin256)
+
+ add $256, %rsi
+ add $256, %rdi
+
+ cmp $128, %rdx
+ jae L(less256bytes)
+
+ cmp $64, %rdx
+ jae L(less128bytes)
+
+ cmp $32, %rdx
+ jb L(less32bytesin256)
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin256):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(512bytesormore):
+#ifdef DATA_CACHE_SIZE_HALF
+ mov $DATA_CACHE_SIZE_HALF, %r8
+#else
+ mov __x86_64_data_cache_size_half(%rip), %r8
+#endif
+ mov %r8, %r9
+ shr $1, %r8
+ add %r9, %r8
+ cmp %r8, %rdx
+ ja L(L2_L3_cache_unaglined)
+ sub $64, %rdx
+ ALIGN (4)
+L(64bytesormore_loop):
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqu 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqu 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqu 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(64bytesormore_loop)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+L(L2_L3_cache_unaglined):
+ sub $64, %rdx
+ ALIGN (4)
+L(L2_L3_unaligned_128bytes_loop):
+ prefetchnta 0x1c0(%rdi)
+ prefetchnta 0x1c0(%rsi)
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqu 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqu 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqu 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(L2_L3_unaligned_128bytes_loop)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+/*
+ * This case is for machines which are sensitive to unaligned instructions.
+ */
+ ALIGN (4)
+L(2aligned):
+ cmp $128, %rdx
+ ja L(128bytesormorein2aligned)
+L(less128bytesin2aligned):
+ sub $64, %rdx
+
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqa 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqa 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+ cmp $32, %rdx
+ jb L(less32bytesin64in2alinged)
+
+ movdqa 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqa 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin64in2alinged):
+ add $64, %rdi
+ add $64, %rsi
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(128bytesormorein2aligned):
+ cmp $512, %rdx
+ ja L(512bytesormorein2aligned)
+ cmp $256, %rdx
+ ja L(256bytesormorein2aligned)
+L(less256bytesin2alinged):
+ sub $128, %rdx
+
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqa 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqa 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqa 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqa 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqa 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqa 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ add $128, %rsi
+ add $128, %rdi
+
+ cmp $64, %rdx
+ jae L(less128bytesin2aligned)
+
+ cmp $32, %rdx
+ jb L(less32bytesin128in2aligned)
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin128in2aligned):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(256bytesormorein2aligned):
+
+ sub $256, %rdx
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqa 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqa 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqa 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqa 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqa 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqa 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ movdqa 128(%rdi), %xmm2
+ pxor 128(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(144bytesin256)
+
+ movdqa 144(%rdi), %xmm2
+ pxor 144(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(160bytesin256)
+
+ movdqa 160(%rdi), %xmm2
+ pxor 160(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(176bytesin256)
+
+ movdqa 176(%rdi), %xmm2
+ pxor 176(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(192bytesin256)
+
+ movdqa 192(%rdi), %xmm2
+ pxor 192(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(208bytesin256)
+
+ movdqa 208(%rdi), %xmm2
+ pxor 208(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(224bytesin256)
+
+ movdqa 224(%rdi), %xmm2
+ pxor 224(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(240bytesin256)
+
+ movdqa 240(%rdi), %xmm2
+ pxor 240(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(256bytesin256)
+
+ add $256, %rsi
+ add $256, %rdi
+
+ cmp $128, %rdx
+ jae L(less256bytesin2alinged)
+
+ cmp $64, %rdx
+ jae L(less128bytesin2aligned)
+
+ cmp $32, %rdx
+ jb L(less32bytesin256in2alinged)
+
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin256in2alinged):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(512bytesormorein2aligned):
+#ifdef DATA_CACHE_SIZE_HALF
+ mov $DATA_CACHE_SIZE_HALF, %r8
+#else
+ mov __x86_64_data_cache_size_half(%rip), %r8
+#endif
+ mov %r8, %r9
+ shr $1, %r8
+ add %r9, %r8
+ cmp %r8, %rdx
+ ja L(L2_L3_cache_aglined)
+
+ sub $64, %rdx
+ ALIGN (4)
+L(64bytesormore_loopin2aligned):
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqa 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqa 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqa 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(64bytesormore_loopin2aligned)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+L(L2_L3_cache_aglined):
+ sub $64, %rdx
+ ALIGN (4)
+L(L2_L3_aligned_128bytes_loop):
+ prefetchnta 0x1c0(%rdi)
+ prefetchnta 0x1c0(%rsi)
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqa 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqa 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqa 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(L2_L3_aligned_128bytes_loop)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+
+ ALIGN (4)
+L(64bytesormore_loop_end):
+ add $16, %rdi
+ add $16, %rsi
+ ptest %xmm2, %xmm0
+ jnc L(16bytes)
+
+ add $16, %rdi
+ add $16, %rsi
+ ptest %xmm3, %xmm0
+ jnc L(16bytes)
+
+ add $16, %rdi
+ add $16, %rsi
+ ptest %xmm4, %xmm0
+ jnc L(16bytes)
+
+ add $16, %rdi
+ add $16, %rsi
+ jmp L(16bytes)
+
+L(256bytesin256):
+ add $256, %rdi
+ add $256, %rsi
+ jmp L(16bytes)
+L(240bytesin256):
+ add $240, %rdi
+ add $240, %rsi
+ jmp L(16bytes)
+L(224bytesin256):
+ add $224, %rdi
+ add $224, %rsi
+ jmp L(16bytes)
+L(208bytesin256):
+ add $208, %rdi
+ add $208, %rsi
+ jmp L(16bytes)
+L(192bytesin256):
+ add $192, %rdi
+ add $192, %rsi
+ jmp L(16bytes)
+L(176bytesin256):
+ add $176, %rdi
+ add $176, %rsi
+ jmp L(16bytes)
+L(160bytesin256):
+ add $160, %rdi
+ add $160, %rsi
+ jmp L(16bytes)
+L(144bytesin256):
+ add $144, %rdi
+ add $144, %rsi
+ jmp L(16bytes)
+L(128bytesin256):
+ add $128, %rdi
+ add $128, %rsi
+ jmp L(16bytes)
+L(112bytesin256):
+ add $112, %rdi
+ add $112, %rsi
+ jmp L(16bytes)
+L(96bytesin256):
+ add $96, %rdi
+ add $96, %rsi
+ jmp L(16bytes)
+L(80bytesin256):
+ add $80, %rdi
+ add $80, %rsi
+ jmp L(16bytes)
+L(64bytesin256):
+ add $64, %rdi
+ add $64, %rsi
+ jmp L(16bytes)
+L(48bytesin256):
+ add $16, %rdi
+ add $16, %rsi
+L(32bytesin256):
+ add $16, %rdi
+ add $16, %rsi
+L(16bytesin256):
+ add $16, %rdi
+ add $16, %rsi
+L(16bytes):
+ mov -16(%rdi), %rax
+ mov -16(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+L(8bytes):
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(12bytes):
+ mov -12(%rdi), %rax
+ mov -12(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+L(4bytes):
+ mov -4(%rsi), %ecx
+ mov -4(%rdi), %eax
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+L(0bytes):
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(66bytes):
+ movdqu -66(%rdi), %xmm1
+ movdqu -66(%rsi), %xmm2
+ mov $-66, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(50bytes):
+ movdqu -50(%rdi), %xmm1
+ movdqu -50(%rsi), %xmm2
+ mov $-50, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(34bytes):
+ movdqu -34(%rdi), %xmm1
+ movdqu -34(%rsi), %xmm2
+ mov $-34, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(18bytes):
+ mov -18(%rdi), %rax
+ mov -18(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+L(10bytes):
+ mov -10(%rdi), %rax
+ mov -10(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ movzwl -2(%rdi), %eax
+ movzwl -2(%rsi), %ecx
+ cmp %cl, %al
+ jne L(end)
+ and $0xffff, %eax
+ and $0xffff, %ecx
+ sub %ecx, %eax
+ ret
+
+ ALIGN (4)
+L(14bytes):
+ mov -14(%rdi), %rax
+ mov -14(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(6bytes):
+ mov -6(%rdi), %eax
+ mov -6(%rsi), %ecx
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+L(2bytes):
+ movzwl -2(%rsi), %ecx
+ movzwl -2(%rdi), %eax
+ cmp %cl, %al
+ jne L(end)
+ and $0xffff, %eax
+ and $0xffff, %ecx
+ sub %ecx, %eax
+ ret
+
+ ALIGN (4)
+L(68bytes):
+ movdqu -68(%rdi), %xmm2
+ movdqu -68(%rsi), %xmm1
+ mov $-68, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(52bytes):
+ movdqu -52(%rdi), %xmm2
+ movdqu -52(%rsi), %xmm1
+ mov $-52, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(36bytes):
+ movdqu -36(%rdi), %xmm2
+ movdqu -36(%rsi), %xmm1
+ mov $-36, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(20bytes):
+ movdqu -20(%rdi), %xmm2
+ movdqu -20(%rsi), %xmm1
+ mov $-20, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -4(%rdi), %eax
+ mov -4(%rsi), %ecx
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(70bytes):
+ movdqu -70(%rsi), %xmm1
+ movdqu -70(%rdi), %xmm2
+ mov $-70, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(54bytes):
+ movdqu -54(%rsi), %xmm1
+ movdqu -54(%rdi), %xmm2
+ mov $-54, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(38bytes):
+ movdqu -38(%rsi), %xmm1
+ movdqu -38(%rdi), %xmm2
+ mov $-38, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(22bytes):
+ movdqu -22(%rsi), %xmm1
+ movdqu -22(%rdi), %xmm2
+ mov $-22, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(72bytes):
+ movdqu -72(%rsi), %xmm1
+ movdqu -72(%rdi), %xmm2
+ mov $-72, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(56bytes):
+ movdqu -56(%rdi), %xmm2
+ movdqu -56(%rsi), %xmm1
+ mov $-56, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(40bytes):
+ movdqu -40(%rdi), %xmm2
+ movdqu -40(%rsi), %xmm1
+ mov $-40, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(24bytes):
+ movdqu -24(%rdi), %xmm2
+ movdqu -24(%rsi), %xmm1
+ mov $-24, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(74bytes):
+ movdqu -74(%rsi), %xmm1
+ movdqu -74(%rdi), %xmm2
+ mov $-74, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(58bytes):
+ movdqu -58(%rdi), %xmm2
+ movdqu -58(%rsi), %xmm1
+ mov $-58, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(42bytes):
+ movdqu -42(%rdi), %xmm2
+ movdqu -42(%rsi), %xmm1
+ mov $-42, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(26bytes):
+ movdqu -26(%rdi), %xmm2
+ movdqu -26(%rsi), %xmm1
+ mov $-26, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -10(%rdi), %rax
+ mov -10(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ movzwl -2(%rdi), %eax
+ movzwl -2(%rsi), %ecx
+ jmp L(end)
+
+ ALIGN (4)
+L(76bytes):
+ movdqu -76(%rsi), %xmm1
+ movdqu -76(%rdi), %xmm2
+ mov $-76, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(60bytes):
+ movdqu -60(%rdi), %xmm2
+ movdqu -60(%rsi), %xmm1
+ mov $-60, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(44bytes):
+ movdqu -44(%rdi), %xmm2
+ movdqu -44(%rsi), %xmm1
+ mov $-44, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(28bytes):
+ movdqu -28(%rdi), %xmm2
+ movdqu -28(%rsi), %xmm1
+ mov $-28, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -12(%rdi), %rax
+ mov -12(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov -4(%rdi), %eax
+ mov -4(%rsi), %ecx
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(78bytes):
+ movdqu -78(%rsi), %xmm1
+ movdqu -78(%rdi), %xmm2
+ mov $-78, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(62bytes):
+ movdqu -62(%rdi), %xmm2
+ movdqu -62(%rsi), %xmm1
+ mov $-62, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(46bytes):
+ movdqu -46(%rdi), %xmm2
+ movdqu -46(%rsi), %xmm1
+ mov $-46, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(30bytes):
+ movdqu -30(%rdi), %xmm2
+ movdqu -30(%rsi), %xmm1
+ mov $-30, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -14(%rdi), %rax
+ mov -14(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(64bytes):
+ movdqu -64(%rdi), %xmm2
+ movdqu -64(%rsi), %xmm1
+ mov $-64, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(48bytes):
+ movdqu -48(%rdi), %xmm2
+ movdqu -48(%rsi), %xmm1
+ mov $-48, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(32bytes):
+ movdqu -32(%rdi), %xmm2
+ movdqu -32(%rsi), %xmm1
+ mov $-32, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+
+ mov -16(%rdi), %rax
+ mov -16(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+/*
+ * Align to 8 bytes to avoid two taken branches in one 16-byte aligned code block.
+ */
+ ALIGN (3)
+L(less16bytes):
+ movsbq %dl, %rdx
+ mov (%rsi, %rdx), %rcx
+ mov (%rdi, %rdx), %rax
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov 8(%rsi, %rdx), %rcx
+ mov 8(%rdi, %rdx), %rax
+L(diffin8bytes):
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+ shr $32, %rcx
+ shr $32, %rax
+L(diffin4bytes):
+ cmp %cx, %ax
+ jne L(end)
+ shr $16, %ecx
+ shr $16, %eax
+ jmp L(end)
+
+ ALIGN (4)
+L(end):
+ and $0xffff, %eax
+ and $0xffff, %ecx
+ sub %ecx, %eax
+ ret
+
+END_FUNCTION MEMCMP
+
+ ALIGN (3)
+L(table_64bytes):
+ .int JMPTBL (L(0bytes), L(table_64bytes))
+ .int JMPTBL (L(2bytes), L(table_64bytes))
+ .int JMPTBL (L(4bytes), L(table_64bytes))
+ .int JMPTBL (L(6bytes), L(table_64bytes))
+ .int JMPTBL (L(8bytes), L(table_64bytes))
+ .int JMPTBL (L(10bytes), L(table_64bytes))
+ .int JMPTBL (L(12bytes), L(table_64bytes))
+ .int JMPTBL (L(14bytes), L(table_64bytes))
+ .int JMPTBL (L(16bytes), L(table_64bytes))
+ .int JMPTBL (L(18bytes), L(table_64bytes))
+ .int JMPTBL (L(20bytes), L(table_64bytes))
+ .int JMPTBL (L(22bytes), L(table_64bytes))
+ .int JMPTBL (L(24bytes), L(table_64bytes))
+ .int JMPTBL (L(26bytes), L(table_64bytes))
+ .int JMPTBL (L(28bytes), L(table_64bytes))
+ .int JMPTBL (L(30bytes), L(table_64bytes))
+ .int JMPTBL (L(32bytes), L(table_64bytes))
+ .int JMPTBL (L(34bytes), L(table_64bytes))
+ .int JMPTBL (L(36bytes), L(table_64bytes))
+ .int JMPTBL (L(38bytes), L(table_64bytes))
+ .int JMPTBL (L(40bytes), L(table_64bytes))
+ .int JMPTBL (L(42bytes), L(table_64bytes))
+ .int JMPTBL (L(44bytes), L(table_64bytes))
+ .int JMPTBL (L(46bytes), L(table_64bytes))
+ .int JMPTBL (L(48bytes), L(table_64bytes))
+ .int JMPTBL (L(50bytes), L(table_64bytes))
+ .int JMPTBL (L(52bytes), L(table_64bytes))
+ .int JMPTBL (L(54bytes), L(table_64bytes))
+ .int JMPTBL (L(56bytes), L(table_64bytes))
+ .int JMPTBL (L(58bytes), L(table_64bytes))
+ .int JMPTBL (L(60bytes), L(table_64bytes))
+ .int JMPTBL (L(62bytes), L(table_64bytes))
+ .int JMPTBL (L(64bytes), L(table_64bytes))
+ .int JMPTBL (L(66bytes), L(table_64bytes))
+ .int JMPTBL (L(68bytes), L(table_64bytes))
+ .int JMPTBL (L(70bytes), L(table_64bytes))
+ .int JMPTBL (L(72bytes), L(table_64bytes))
+ .int JMPTBL (L(74bytes), L(table_64bytes))
+ .int JMPTBL (L(76bytes), L(table_64bytes))
+ .int JMPTBL (L(78bytes), L(table_64bytes))
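
The JMPTBL/BRANCH_TO_JMPTBL_ENTRY pair used above keeps the dispatch table position independent: each .int entry is a 32-bit offset relative to the table itself, and the dispatcher adds the table's runtime address back before jumping. A tiny sketch of the same idea with ordinary data (illustration only; the real code jumps to the computed address rather than reading from it):

#include <cstdint>
#include <cstdio>

int main() {
  static const char base[] = "0123456789";
  // Offsets stored relative to 'base', analogous to ".int JMPTBL(L(Nbytes), L(table_64bytes))".
  static const int32_t table[] = {0, 2, 4, 6, 8};
  int index = 3;
  const char* target = base + table[index];  // the "add %r11, %rcx" step in the assembly
  std::printf("dispatched to '%c'\n", *target);
  return 0;
}
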
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 50b2de4..f021ada 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -284,6 +284,18 @@
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
+// On entry to this function, RAX contains the RSP value for the overflow region.
+DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
+ // Here, the RSP is above the protected region. We need to create a
+ // callee save frame and then move RSP down to the overflow region.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %rsp, %rsi // get current stack pointer, pass SP as second arg
+ mov %rax, %rsp // move RSP to the overflow region.
+ mov %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current() as first arg
+ call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
+ int3 // unreached
+END_FUNCTION art_quick_throw_stack_overflow_from_signal
+
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
@@ -1495,7 +1507,7 @@
PUSH rax // Save integer result.
subq LITERAL(8), %rsp // Save floating-point result.
CFI_ADJUST_CFA_OFFSET(8)
- movd %xmm0, (%rsp)
+ movq %xmm0, (%rsp)
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
movq %rax, %rdx // Pass integer result.
@@ -1506,7 +1518,7 @@
movq %rax, %rdi // Store return PC
movq %rdx, %rsi // Store second return PC in hidden arg.
- movd (%rsp), %xmm0 // Restore floating-point result.
+ movq (%rsp), %xmm0 // Restore floating-point result.
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
POP rax // Restore integer result.
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index fe5a2ef..fae9271 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -176,6 +176,7 @@
#endif
#define PURE __attribute__ ((__pure__))
+#define WARN_UNUSED __attribute__((warn_unused_result))
template<typename T> void UNUSED(const T&) {}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 7779547..abe0aa0 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -35,12 +35,14 @@
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
+Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
@@ -149,7 +151,8 @@
for (int i = kLockLevelCount - 1; i >= 0; --i) {
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
+ // We expect waits to happen while holding the thread list suspend thread lock.
+ if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -161,16 +164,10 @@
}
}
-inline void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
+void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
if (kLogLockContentions) {
// Atomically add value to wait_time.
- uint64_t new_val, old_val;
- volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(&wait_time);
- volatile const int64_t* caddr = const_cast<volatile const int64_t*>(addr);
- do {
- old_val = static_cast<uint64_t>(QuasiAtomic::Read64(caddr));
- new_val = old_val + value;
- } while (!QuasiAtomic::Cas64(static_cast<int64_t>(old_val), static_cast<int64_t>(new_val), addr));
+ wait_time.FetchAndAddSequentiallyConsistent(value);
}
}
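
The hunk above replaces a hand-written compare-and-swap loop with a single sequentially consistent fetch-and-add on ART's Atomic<uint64_t>. A minimal sketch of why the two are equivalent, written with std::atomic rather than ART's wrapper:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> wait_time{0};

// What the old code did: retry a CAS until the addition lands.
void AddToWaitTimeCasLoop(uint64_t value) {
  uint64_t old_val = wait_time.load(std::memory_order_relaxed);
  while (!wait_time.compare_exchange_weak(old_val, old_val + value,
                                          std::memory_order_seq_cst)) {
    // On failure, old_val is reloaded with the current value; just retry.
  }
}

// What the new code does: a single atomic read-modify-write.
void AddToWaitTimeFetchAdd(uint64_t value) {
  wait_time.fetch_add(value, std::memory_order_seq_cst);
}
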
@@ -204,7 +201,7 @@
if (kLogLockContentions) {
const ContentionLogData* data = contention_log_data_;
const ContentionLogEntry* log = data->contention_log;
- uint64_t wait_time = data->wait_time;
+ uint64_t wait_time = data->wait_time.LoadRelaxed();
uint32_t contention_count = data->contention_count.LoadRelaxed();
if (contention_count == 0) {
os << "never contended";
@@ -838,9 +835,11 @@
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
DCHECK(mutator_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
+ DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(profiler_lock_ != nullptr);
@@ -848,13 +847,18 @@
DCHECK(intern_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
- LockLevel current_lock_level = kMutatorLock;
- DCHECK(mutator_lock_ == nullptr);
- mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
+ LockLevel current_lock_level = kThreadListSuspendThreadLock;
+ DCHECK(thread_list_suspend_thread_lock_ == nullptr);
+ thread_list_suspend_thread_lock_ =
+ new Mutex("thread list suspend thread by .. lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
- DCHECK_LT(new_level, current_lock_level); \
- current_lock_level = new_level;
+ DCHECK_LT(new_level, current_lock_level); \
+ current_lock_level = new_level;
+
+ UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
+ DCHECK(mutator_lock_ == nullptr);
+ mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
DCHECK(heap_bitmap_lock_ == nullptr);
@@ -876,6 +880,10 @@
DCHECK(thread_list_lock_ == nullptr);
thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
+ DCHECK(jni_libraries_lock_ == nullptr);
+ jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
DCHECK(breakpoint_lock_ == nullptr);
breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 8d2cd07..fd76629 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -70,10 +70,10 @@
kMarkSweepMarkStackLock,
kTransactionLogLock,
kInternTableLock,
+ kOatFileSecondaryLookupLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
kPinTableLock,
- kLoadLibraryLock,
kJdwpObjectRegistryLock,
kModifyLdtLock,
kAllocatedThreadIdsLock,
@@ -82,6 +82,7 @@
kBreakpointLock,
kMonitorLock,
kMonitorListLock,
+ kJniLoadLibraryLock,
kThreadListLock,
kBreakpointInvokeLock,
kDeoptimizationLock,
@@ -93,6 +94,7 @@
kRuntimeShutdownLock,
kHeapBitmapLock,
kMutatorLock,
+ kThreadListSuspendThreadLock,
kZygoteCreationLock,
kLockLevelCount // Must come last.
@@ -160,7 +162,7 @@
// Number of times the Mutex has been contended.
AtomicInteger contention_count;
// Sum of time waited by all contenders in ns.
- volatile uint64_t wait_time;
+ Atomic<uint64_t> wait_time;
void AddToWaitTime(uint64_t value);
ContentionLogData() : wait_time(0) {}
};
@@ -474,6 +476,15 @@
public:
static void Init();
+  // There's a potential race where two threads each try to suspend the other, both succeed,
+  // and both then block while becoming runnable. This lock ensures that only one thread is
+  // requesting suspension of another at any time. As the thread-list suspend-thread logic
+  // transitions to runnable, if the current thread were itself asked to suspend, it would
+  // block while holding this lock until it could safely request suspension of the other
+  // thread without that thread having a suspension request against this thread. This avoids
+  // a potential deadlock cycle.
+ static Mutex* thread_list_suspend_thread_lock_;
+
// The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
// mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
// a share on the mutator_lock_. The garbage collector may also execute with shared access but
@@ -532,7 +543,7 @@
// else | .. running ..
// Goto x | .. running ..
// .. running .. | .. running ..
- static ReaderWriterMutex* mutator_lock_;
+ static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);
// Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
@@ -550,8 +561,11 @@
// attaching and detaching.
static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
+ // Guards maintaining loading library data structures.
+ static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
// Guards breakpoints.
- static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
+ static Mutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
// Guards lists of classes within the class linker.
static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 351de3d..bf091d0 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -58,6 +58,22 @@
}
}
+bool ScopedFlock::Init(File* file, std::string* error_msg) {
+ file_.reset(new File(dup(file->Fd())));
+ if (file_->Fd() == -1) {
+ file_.reset();
+ *error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
+ file->GetPath().c_str(), strerror(errno));
+ return false;
+ }
+ if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
+ file_.reset();
+ *error_msg = StringPrintf("Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
+ return false;
+ }
+ return true;
+}
+
File* ScopedFlock::GetFile() {
CHECK(file_.get() != NULL);
return file_.get();
diff --git a/runtime/base/scoped_flock.h b/runtime/base/scoped_flock.h
index f8ed805..08612e3 100644
--- a/runtime/base/scoped_flock.h
+++ b/runtime/base/scoped_flock.h
@@ -37,6 +37,10 @@
// changed (usually due to a new file being created at the same path)
// between attempts to lock it.
bool Init(const char* filename, std::string* error_msg);
+ // Attempt to acquire an exclusive file lock (see flock(2)) on 'file'.
+ // Returns true if the lock could be acquired or false if an error
+  // occurred.
+ bool Init(File* file, std::string* error_msg);
// Returns the (locked) file associated with this instance.
File* GetFile();
@@ -45,6 +49,7 @@
bool HasFile();
~ScopedFlock();
+
private:
std::unique_ptr<File> file_;
DISALLOW_COPY_AND_ASSIGN(ScopedFlock);
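
A usage sketch of the new Init(File*, std::string*) overload (illustration only: it assumes ART's File type, LOG macro, and include paths, a hypothetical caller named UpdateFileUnderLock, and that ScopedFlock's destructor releases the lock, which this hunk does not show):

#include <string>

#include "base/logging.h"
#include "base/scoped_flock.h"
#include "os.h"  // for art::File (assumed)

bool UpdateFileUnderLock(art::File* file) {
  art::ScopedFlock lock;
  std::string error_msg;
  if (!lock.Init(file, &error_msg)) {  // dup()s the fd and takes flock(LOCK_EX) on it
    LOG(ERROR) << "Could not lock file: " << error_msg;
    return false;
  }
  art::File* locked = lock.GetFile();  // the duplicated, locked descriptor
  // ... read or rewrite the file through 'locked' ...
  (void)locked;
  return true;  // the lock is presumably dropped when 'lock' goes out of scope
}
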
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 33b3d3e..3481f2f 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -59,6 +59,9 @@
EXPECT_TRUE(file.Open(good_path, O_RDONLY));
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
+
+ file.Close();
+ ASSERT_EQ(unlink(good_path.c_str()), 0);
}
TEST_F(FdFileTest, ReadFullyEmptyFile) {
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
index 7e45321..59334d4 100644
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ b/runtime/base/unix_file/mapped_file_test.cc
@@ -30,7 +30,7 @@
}
void SetUp() {
- art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
+ RandomAccessFileTest::SetUp();
good_path_ = GetTmpPath("some-file.txt");
int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
@@ -42,6 +42,12 @@
ASSERT_TRUE(CopyFile(src, &dst));
}
+ void TearDown() {
+ ASSERT_EQ(unlink(good_path_.c_str()), 0);
+
+ RandomAccessFileTest::TearDown();
+ }
+
virtual RandomAccessFile* MakeTestFile() {
TEMP_FAILURE_RETRY(truncate(good_path_.c_str(), 0));
MappedFile* f = new MappedFile;
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 1d0b866..0002433 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -35,7 +35,11 @@
virtual RandomAccessFile* MakeTestFile() = 0;
virtual void SetUp() {
- art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
+ art::CommonRuntimeTest::SetUpAndroidData(android_data_);
+ }
+
+ virtual void TearDown() {
+ art::CommonRuntimeTest::TearDownAndroidData(android_data_, true);
}
std::string GetTmpPath(const std::string& name) {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 9ad8a07..99277a0 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -25,6 +25,7 @@
#include "dex_file-inl.h"
#include "field_helper.h"
#include "gc/space/space.h"
+#include "java_vm_ext.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -35,62 +36,16 @@
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
+#include "well_known_classes.h"
namespace art {
-static void JniAbort(const char* jni_function_name, const char* msg) {
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
- mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr);
-
- std::ostringstream os;
- os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
-
- if (jni_function_name != nullptr) {
- os << "\n in call to " << jni_function_name;
- }
- // TODO: is this useful given that we're about to dump the calling thread's stack?
- if (current_method != nullptr) {
- os << "\n from " << PrettyMethod(current_method);
- }
- os << "\n";
- self->Dump(os);
-
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- if (vm->check_jni_abort_hook != nullptr) {
- vm->check_jni_abort_hook(vm->check_jni_abort_hook_data, os.str());
- } else {
- // Ensure that we get a native stack trace for this thread.
- self->TransitionFromRunnableToSuspended(kNative);
- LOG(FATAL) << os.str();
- self->TransitionFromSuspendedToRunnable(); // Unreachable, keep annotalysis happy.
- }
-}
-
-static void JniAbortV(const char* jni_function_name, const char* fmt, va_list ap) {
- std::string msg;
- StringAppendV(&msg, fmt, ap);
- JniAbort(jni_function_name, msg.c_str());
-}
-
-void JniAbortF(const char* jni_function_name, const char* fmt, ...) {
- va_list args;
- va_start(args, fmt);
- JniAbortV(jni_function_name, fmt, args);
- va_end(args);
-}
-
/*
* ===========================================================================
* JNI function helpers
* ===========================================================================
*/
-static bool IsHandleScopeLocalRef(JNIEnv* env, jobject localRef) {
- return GetIndirectRefKind(localRef) == kHandleScopeOrInvalid &&
- reinterpret_cast<JNIEnvExt*>(env)->self->HandleScopeContains(localRef);
-}
-
// Flags passed into ScopedCheck.
#define kFlag_Default 0x0000
@@ -109,134 +64,88 @@
#define kFlag_Invocation 0x8000 // Part of the invocation interface (JavaVM*).
#define kFlag_ForceTrace 0x80000000 // Add this to a JNI function's flags if you want to trace every call.
-
-static const char* gBuiltInPrefixes[] = {
- "Landroid/",
- "Lcom/android/",
- "Lcom/google/android/",
- "Ldalvik/",
- "Ljava/",
- "Ljavax/",
- "Llibcore/",
- "Lorg/apache/harmony/",
- nullptr
+/*
+ * Java primitive types:
+ * B - jbyte
+ * C - jchar
+ * D - jdouble
+ * F - jfloat
+ * I - jint
+ * J - jlong
+ * S - jshort
+ * Z - jboolean (shown as true and false)
+ * V - void
+ *
+ * Java reference types:
+ * L - jobject
+ * a - jarray
+ * c - jclass
+ * s - jstring
+ * t - jthrowable
+ *
+ * JNI types:
+ * b - jboolean (shown as JNI_TRUE and JNI_FALSE)
+ * f - jfieldID
+ * i - JNI error value (JNI_OK, JNI_ERR, JNI_EDETACHED, JNI_EVERSION)
+ * m - jmethodID
+ * p - void*
+ * r - jint (for release mode arguments)
+ * u - const char* (Modified UTF-8)
+ * z - jsize (for lengths; use i if negative values are okay)
+ * v - JavaVM*
+ * w - jobjectRefType
+ * E - JNIEnv*
+ * . - no argument; just print "..." (used for varargs JNI calls)
+ *
+ */
+union JniValueType {
+ jarray a;
+ jboolean b;
+ jclass c;
+ jfieldID f;
+ jint i;
+ jmethodID m;
+ const void* p; // Pointer.
+ jint r; // Release mode.
+ jstring s;
+ jthrowable t;
+ const char* u; // Modified UTF-8.
+ JavaVM* v;
+ jobjectRefType w;
+ jsize z;
+ jbyte B;
+ jchar C;
+ jdouble D;
+ JNIEnv* E;
+ jfloat F;
+ jint I;
+ jlong J;
+ jobject L;
+ jshort S;
+ const void* V; // void
+ jboolean Z;
};
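To show how the legend above pairs a format string with JniValueType arguments (a sketch of the calling convention, not a verbatim excerpt from the patch; the variable names are made up):

// Hypothetical entry check for a JNI function taking (JNIEnv*, jclass):
// 'E' selects the .E member, 'c' selects the .c member, in order.
JniValueType args[2] = {{.E = env}, {.c = java_class}};
ScopedCheck sc(kFlag_Default, __FUNCTION__);
ScopedObjectAccess soa(env);
sc.Check(soa, true, "Ec", args);  // Validates the thread/JNIEnv and the class reference on entry.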
-static bool ShouldTrace(JavaVMExt* vm, mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages
- // when a native method that matches the -Xjnitrace argument calls a JNI function
- // such as NewByteArray.
- // If -verbose:third-party-jni is on, we want to log any JNI function calls
- // made by a third-party native method.
- std::string class_name(method->GetDeclaringClassDescriptor());
- if (!vm->trace.empty() && class_name.find(vm->trace) != std::string::npos) {
- return true;
- }
- if (VLOG_IS_ON(third_party_jni)) {
- // Return true if we're trying to log all third-party JNI activity and 'method' doesn't look
- // like part of Android.
- for (size_t i = 0; gBuiltInPrefixes[i] != nullptr; ++i) {
- if (StartsWith(class_name, gBuiltInPrefixes[i])) {
- return false;
- }
- }
- return true;
- }
- return false;
-}
-
class ScopedCheck {
public:
- // For JNIEnv* functions.
- explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : soa_(env) {
- Init(flags, functionName, true);
- CheckThread(flags);
+ explicit ScopedCheck(int flags, const char* functionName, bool has_method = true)
+ : function_name_(functionName), flags_(flags), indent_(0), has_method_(has_method) {
}
- // For JavaVM* functions.
- // TODO: it's not correct that this is a lock function, but making it so aids annotalysis.
- explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : soa_(vm) {
- Init(kFlag_Invocation, functionName, has_method);
- }
-
- ~ScopedCheck() UNLOCK_FUNCTION(Locks::mutator_lock_) {}
-
- const ScopedObjectAccess& soa() {
- return soa_;
- }
-
- bool ForceCopy() {
- return Runtime::Current()->GetJavaVM()->force_copy;
- }
+ ~ScopedCheck() {}
// Checks that 'class_name' is a valid "fully-qualified" JNI class name, like "java/lang/Thread"
// or "[Ljava/lang/Object;". A ClassLoader can actually normalize class names a couple of
// times, so using "java.lang.Thread" instead of "java/lang/Thread" might work in some
// circumstances, but this is incorrect.
- void CheckClassName(const char* class_name) {
+ bool CheckClassName(const char* class_name) {
if ((class_name == nullptr) || !IsValidJniClassName(class_name)) {
- JniAbortF(function_name_,
- "illegal class name '%s'\n"
- " (should be of the form 'package/Class', [Lpackage/Class;' or '[[B')",
- class_name);
+ AbortF("illegal class name '%s'\n"
+ " (should be of the form 'package/Class', [Lpackage/Class;' or '[[B')",
+ class_name);
+ return false;
}
- }
-
- /*
- * Verify that the field is of the appropriate type. If the field has an
- * object type, "java_object" is the object we're trying to assign into it.
- *
- * Works for both static and instance fields.
- */
- void CheckFieldType(jvalue value, jfieldID fid, char prim, bool isStatic)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::ArtField> f(hs.NewHandle(CheckFieldID(fid)));
- if (f.Get() == nullptr) {
- return;
- }
- mirror::Class* field_type = FieldHelper(f).GetType();
- if (!field_type->IsPrimitive()) {
- jobject java_object = value.l;
- if (java_object != nullptr) {
- mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
- // If java_object is a weak global ref whose referent has been cleared,
- // obj will be NULL. Otherwise, obj should always be non-NULL
- // and valid.
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "field operation on invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
- return;
- } else {
- if (!obj->InstanceOf(field_type)) {
- JniAbortF(function_name_, "attempt to set field %s with value of wrong type: %s",
- PrettyField(f.Get()).c_str(), PrettyTypeOf(obj).c_str());
- return;
- }
- }
- }
- } else if (field_type != Runtime::Current()->GetClassLinker()->FindPrimitiveClass(prim)) {
- JniAbortF(function_name_, "attempt to set field %s with value of wrong type: %c",
- PrettyField(f.Get()).c_str(), prim);
- return;
- }
-
- if (isStatic != f.Get()->IsStatic()) {
- if (isStatic) {
- JniAbortF(function_name_, "accessing non-static field %s as static",
- PrettyField(f.Get()).c_str());
- } else {
- JniAbortF(function_name_, "accessing static field %s as non-static",
- PrettyField(f.Get()).c_str());
- }
- return;
- }
+ return true;
}
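A quick illustration of the naming rule the comment above describes (hypothetical calls; with this patch, an invalid name raises the abort and now also returns false):

sc.CheckClassName("java/lang/Thread");     // valid fully-qualified JNI name.
sc.CheckClassName("[Ljava/lang/Object;");  // array descriptor form is also accepted.
sc.CheckClassName("java.lang.Thread");     // dotted form is flagged as an illegal class name.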
/*
@@ -244,59 +153,87 @@
*
* Assumes "jobj" has already been validated.
*/
- void CheckInstanceFieldID(jobject java_object, jfieldID fid)
+ bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
- if (o == nullptr || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "field operation on invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
- return;
+ mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
+ if (o == nullptr) {
+ AbortF("field operation on NULL object: %p", java_object);
+ return false;
+ }
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("field operation on invalid %s: %p",
+ ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+ java_object);
+ return false;
}
- mirror::ArtField* f = CheckFieldID(fid);
+ mirror::ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
- return;
+ return false;
}
mirror::Class* c = o->GetClass();
if (c->FindInstanceField(f->GetName(), f->GetTypeDescriptor()) == nullptr) {
- JniAbortF(function_name_, "jfieldID %s not valid for an object of class %s",
- PrettyField(f).c_str(), PrettyTypeOf(o).c_str());
+ AbortF("jfieldID %s not valid for an object of class %s",
+ PrettyField(f).c_str(), PrettyTypeOf(o).c_str());
+ return false;
}
+ return true;
}
/*
* Verify that the pointer value is non-NULL.
*/
- void CheckNonNull(const void* ptr) {
- if (ptr == nullptr) {
- JniAbortF(function_name_, "non-nullable argument was NULL");
+ bool CheckNonNull(const void* ptr) {
+ if (UNLIKELY(ptr == nullptr)) {
+ AbortF("non-nullable argument was NULL");
+ return false;
}
+ return true;
}
/*
* Verify that the method's return type matches the type of call.
* 'expectedType' will be "L" for all objects, including arrays.
*/
- void CheckSig(jmethodID mid, const char* expectedType, bool isStatic)
+ bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
+ jmethodID mid, Primitive::Type type, InvokeType invoke)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(mid);
+ mirror::ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
- return;
+ return false;
}
- if (*expectedType != m->GetShorty()[0]) {
- JniAbortF(function_name_, "the return type of %s does not match %s",
- function_name_, PrettyMethod(m).c_str());
+ if (type != Primitive::GetType(m->GetShorty()[0])) {
+ AbortF("the return type of %s does not match %s", function_name_, PrettyMethod(m).c_str());
+ return false;
}
- if (isStatic != m->IsStatic()) {
- if (isStatic) {
- JniAbortF(function_name_, "calling non-static method %s with %s",
- PrettyMethod(m).c_str(), function_name_);
+ bool is_static = (invoke == kStatic);
+ if (is_static != m->IsStatic()) {
+ if (is_static) {
+ AbortF("calling non-static method %s with %s",
+ PrettyMethod(m).c_str(), function_name_);
} else {
- JniAbortF(function_name_, "calling static method %s with %s",
- PrettyMethod(m).c_str(), function_name_);
+ AbortF("calling static method %s with %s",
+ PrettyMethod(m).c_str(), function_name_);
+ }
+ return false;
+ }
+ if (invoke != kVirtual) {
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
+ AbortF("can't call %s %s with class %s", invoke == kStatic ? "static" : "nonvirtual",
+ PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+ return false;
}
}
+ if (invoke != kStatic) {
+ mirror::Object* o = soa.Decode<mirror::Object*>(jobj);
+ if (!o->InstanceOf(m->GetDeclaringClass())) {
+ AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+ return false;
+ }
+ }
+ return true;
}
/*
@@ -304,17 +241,18 @@
*
* Assumes "java_class" has already been validated.
*/
- void CheckStaticFieldID(jclass java_class, jfieldID fid)
+ bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
- mirror::ArtField* f = CheckFieldID(fid);
+ mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
+ mirror::ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
- return;
+ return false;
}
if (f->GetDeclaringClass() != c) {
- JniAbortF(function_name_, "static jfieldID %p not valid for class %s",
- fid, PrettyClass(c).c_str());
+ AbortF("static jfieldID %p not valid for class %s", fid, PrettyClass(c).c_str());
+ return false;
}
+ return true;
}
/*
@@ -326,17 +264,18 @@
*
* Instances of "java_class" must be instances of the method's declaring class.
*/
- void CheckStaticMethod(jclass java_class, jmethodID mid)
+ bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(mid);
+ mirror::ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
- return;
+ return false;
}
- mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
+ mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
- JniAbortF(function_name_, "can't call static %s on class %s",
- PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+ AbortF("can't call static %s on class %s", PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+ return false;
}
+ return true;
}
/*
@@ -346,17 +285,18 @@
* (Note the mid might point to a declaration in an interface; this
* will be handled automatically by the instanceof check.)
*/
- void CheckVirtualMethod(jobject java_object, jmethodID mid)
+ bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(mid);
+ mirror::ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
- return;
+ return false;
}
- mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
+ mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
if (!o->InstanceOf(m->GetDeclaringClass())) {
- JniAbortF(function_name_, "can't call %s on instance of %s",
- PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+ AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+ return false;
}
+ return true;
}
/**
@@ -395,11 +335,10 @@
*
* Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
*/
- void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- va_list ap;
-
+ bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* traceMethod = nullptr;
- if (has_method_ && (!soa_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni))) {
+ if (has_method_ && soa.Vm()->IsTracingEnabled()) {
// We need to guard some of the invocation interface's calls: a bad caller might
// use DetachCurrentThread or GetEnv on a thread that's not yet attached.
Thread* self = Thread::Current();
@@ -409,124 +348,14 @@
}
if (((flags_ & kFlag_ForceTrace) != 0) ||
- (traceMethod != nullptr && ShouldTrace(soa_.Vm(), traceMethod))) {
- va_start(ap, fmt0);
+ (traceMethod != nullptr && soa.Vm()->ShouldTrace(traceMethod))) {
std::string msg;
- for (const char* fmt = fmt0; *fmt;) {
- char ch = *fmt++;
- if (ch == 'B') { // jbyte
- jbyte b = va_arg(ap, int);
- if (b >= 0 && b < 10) {
- StringAppendF(&msg, "%d", b);
- } else {
- StringAppendF(&msg, "%#x (%d)", b, b);
- }
- } else if (ch == 'C') { // jchar
- jchar c = va_arg(ap, int);
- if (c < 0x7f && c >= ' ') {
- StringAppendF(&msg, "U+%x ('%c')", c, c);
- } else {
- StringAppendF(&msg, "U+%x", c);
- }
- } else if (ch == 'F' || ch == 'D') { // jfloat, jdouble
- StringAppendF(&msg, "%g", va_arg(ap, double));
- } else if (ch == 'I' || ch == 'S') { // jint, jshort
- StringAppendF(&msg, "%d", va_arg(ap, int));
- } else if (ch == 'J') { // jlong
- StringAppendF(&msg, "%" PRId64, va_arg(ap, jlong));
- } else if (ch == 'Z') { // jboolean
- StringAppendF(&msg, "%s", va_arg(ap, int) ? "true" : "false");
- } else if (ch == 'V') { // void
- msg += "void";
- } else if (ch == 'v') { // JavaVM*
- JavaVM* vm = va_arg(ap, JavaVM*);
- StringAppendF(&msg, "(JavaVM*)%p", vm);
- } else if (ch == 'E') { // JNIEnv*
- JNIEnv* env = va_arg(ap, JNIEnv*);
- StringAppendF(&msg, "(JNIEnv*)%p", env);
- } else if (ch == 'L' || ch == 'a' || ch == 's') { // jobject, jarray, jstring
- // For logging purposes, these are identical.
- jobject o = va_arg(ap, jobject);
- if (o == nullptr) {
- msg += "NULL";
- } else {
- StringAppendF(&msg, "%p", o);
- }
- } else if (ch == 'b') { // jboolean (JNI-style)
- jboolean b = va_arg(ap, int);
- msg += (b ? "JNI_TRUE" : "JNI_FALSE");
- } else if (ch == 'c') { // jclass
- jclass jc = va_arg(ap, jclass);
- mirror::Class* c = reinterpret_cast<mirror::Class*>(Thread::Current()->DecodeJObject(jc));
- if (c == nullptr) {
- msg += "NULL";
- } else if (c == kInvalidIndirectRefObject ||
- !Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
- StringAppendF(&msg, "INVALID POINTER:%p", jc);
- } else if (!c->IsClass()) {
- msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
- } else {
- msg += PrettyClass(c);
- if (!entry) {
- StringAppendF(&msg, " (%p)", jc);
- }
- }
- } else if (ch == 'f') { // jfieldID
- jfieldID fid = va_arg(ap, jfieldID);
- mirror::ArtField* f = reinterpret_cast<mirror::ArtField*>(fid);
- msg += PrettyField(f);
- if (!entry) {
- StringAppendF(&msg, " (%p)", fid);
- }
- } else if (ch == 'z') { // non-negative jsize
- // You might expect jsize to be size_t, but it's not; it's the same as jint.
- // We only treat this specially so we can do the non-negative check.
- // TODO: maybe this wasn't worth it?
- jint i = va_arg(ap, jint);
- StringAppendF(&msg, "%d", i);
- } else if (ch == 'm') { // jmethodID
- jmethodID mid = va_arg(ap, jmethodID);
- mirror::ArtMethod* m = reinterpret_cast<mirror::ArtMethod*>(mid);
- msg += PrettyMethod(m);
- if (!entry) {
- StringAppendF(&msg, " (%p)", mid);
- }
- } else if (ch == 'p') { // void* ("pointer")
- void* p = va_arg(ap, void*);
- if (p == nullptr) {
- msg += "NULL";
- } else {
- StringAppendF(&msg, "(void*) %p", p);
- }
- } else if (ch == 'r') { // jint (release mode)
- jint releaseMode = va_arg(ap, jint);
- if (releaseMode == 0) {
- msg += "0";
- } else if (releaseMode == JNI_ABORT) {
- msg += "JNI_ABORT";
- } else if (releaseMode == JNI_COMMIT) {
- msg += "JNI_COMMIT";
- } else {
- StringAppendF(&msg, "invalid release mode %d", releaseMode);
- }
- } else if (ch == 'u') { // const char* (Modified UTF-8)
- const char* utf = va_arg(ap, const char*);
- if (utf == nullptr) {
- msg += "NULL";
- } else {
- StringAppendF(&msg, "\"%s\"", utf);
- }
- } else if (ch == '.') {
- msg += "...";
- } else {
- JniAbortF(function_name_, "unknown trace format specifier: %c", ch);
- return;
- }
- if (*fmt) {
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ TracePossibleHeapValue(soa, entry, fmt[i], args[i], &msg);
+ if (fmt[i + 1] != '\0') {
StringAppendF(&msg, ", ");
}
}
- va_end(ap);
if ((flags_ & kFlag_ForceTrace) != 0) {
LOG(INFO) << "JNI: call to " << function_name_ << "(" << msg << ")";
@@ -546,43 +375,227 @@
// We always do the thorough checks on entry, and never on exit...
if (entry) {
- va_start(ap, fmt0);
- for (const char* fmt = fmt0; *fmt; ++fmt) {
- char ch = *fmt;
- if (ch == 'a') {
- CheckArray(va_arg(ap, jarray));
- } else if (ch == 'c') {
- CheckInstance(kClass, va_arg(ap, jclass));
- } else if (ch == 'L') {
- CheckObject(va_arg(ap, jobject));
- } else if (ch == 'r') {
- CheckReleaseMode(va_arg(ap, jint));
- } else if (ch == 's') {
- CheckInstance(kString, va_arg(ap, jstring));
- } else if (ch == 'u') {
- if ((flags_ & kFlag_Release) != 0) {
- CheckNonNull(va_arg(ap, const char*));
- } else {
- bool nullable = ((flags_ & kFlag_NullableUtf) != 0);
- CheckUtfString(va_arg(ap, const char*), nullable);
- }
- } else if (ch == 'z') {
- CheckLengthPositive(va_arg(ap, jsize));
- } else if (strchr("BCISZbfmpEv", ch) != nullptr) {
- va_arg(ap, uint32_t); // Skip this argument.
- } else if (ch == 'D' || ch == 'F') {
- va_arg(ap, double); // Skip this argument.
- } else if (ch == 'J') {
- va_arg(ap, uint64_t); // Skip this argument.
- } else if (ch == '.') {
- } else {
- LOG(FATAL) << "Unknown check format specifier: " << ch;
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ if (!CheckPossibleHeapValue(soa, fmt[i], args[i])) {
+ return false;
}
}
- va_end(ap);
}
+ return true;
}
+ bool CheckNonHeap(JavaVMExt* vm, bool entry, const char* fmt, JniValueType* args) {
+ bool should_trace = (flags_ & kFlag_ForceTrace) != 0;
+ if (!should_trace && vm->IsTracingEnabled()) {
+ // We need to guard some of the invocation interface's calls: a bad caller might
+ // use DetachCurrentThread or GetEnv on a thread that's not yet attached.
+ Thread* self = Thread::Current();
+ if ((flags_ & kFlag_Invocation) == 0 || self != nullptr) {
+ ScopedObjectAccess soa(self);
+ mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
+ should_trace = (traceMethod != nullptr && vm->ShouldTrace(traceMethod));
+ }
+ }
+ if (should_trace) {
+ std::string msg;
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ TraceNonHeapValue(fmt[i], args[i], &msg);
+ if (fmt[i + 1] != '\0') {
+ StringAppendF(&msg, ", ");
+ }
+ }
+
+ if ((flags_ & kFlag_ForceTrace) != 0) {
+ LOG(INFO) << "JNI: call to " << function_name_ << "(" << msg << ")";
+ } else if (entry) {
+ if (has_method_) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
+ std::string methodName(PrettyMethod(traceMethod, false));
+ LOG(INFO) << "JNI: " << methodName << " -> " << function_name_ << "(" << msg << ")";
+ indent_ = methodName.size() + 1;
+ } else {
+ LOG(INFO) << "JNI: -> " << function_name_ << "(" << msg << ")";
+ indent_ = 0;
+ }
+ } else {
+ LOG(INFO) << StringPrintf("JNI: %*s<- %s returned %s", indent_, "", function_name_, msg.c_str());
+ }
+ }
+
+ // We always do the thorough checks on entry, and never on exit...
+ if (entry) {
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ if (!CheckNonHeapValue(fmt[i], args[i])) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* method = soa.Decode<mirror::Object*>(jmethod);
+ if (method == nullptr) {
+ AbortF("expected non-null method");
+ return false;
+ }
+ mirror::Class* c = method->GetClass();
+ if (soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Method) != c &&
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Constructor) != c) {
+ AbortF("expected java.lang.reflect.Method or "
+ "java.lang.reflect.Constructor but got object of type %s: %p",
+ PrettyTypeOf(method).c_str(), jmethod);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ if (method == nullptr) {
+ AbortF("expected non-null constructor");
+ return false;
+ }
+ if (!method->IsConstructor() || method->IsStatic()) {
+ AbortF("expected a constructor but %s: %p", PrettyTypeOf(method).c_str(), mid);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* field = soa.Decode<mirror::Object*>(jfield);
+ if (field == nullptr) {
+ AbortF("expected non-null java.lang.reflect.Field");
+ return false;
+ }
+ mirror::Class* c = field->GetClass();
+ if (soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Field) != c) {
+ AbortF("expected java.lang.reflect.Field but got object of type %s: %p",
+ PrettyTypeOf(field).c_str(), jfield);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = soa.Decode<mirror::Object*>(jobj);
+ if (!obj->GetClass()->IsThrowableClass()) {
+ AbortF("expected java.lang.Throwable but got object of type "
+ "%s: %p", PrettyTypeOf(obj).c_str(), obj);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (!c->IsThrowableClass()) {
+ AbortF("expected java.lang.Throwable class but got object of "
+ "type %s: %p", PrettyDescriptor(c).c_str(), c);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckReferenceKind(IndirectRefKind expected_kind, JavaVMExt* vm, Thread* self, jobject obj) {
+ IndirectRefKind found_kind;
+ if (expected_kind == kLocal) {
+ found_kind = GetIndirectRefKind(obj);
+ if (found_kind == kHandleScopeOrInvalid && self->HandleScopeContains(obj)) {
+ found_kind = kLocal;
+ }
+ } else {
+ found_kind = GetIndirectRefKind(obj);
+ }
+ if (obj != nullptr && found_kind != expected_kind) {
+ AbortF("expected reference of kind %s but found %s: %p",
+ ToStr<IndirectRefKind>(expected_kind).c_str(),
+ ToStr<IndirectRefKind>(GetIndirectRefKind(obj)).c_str(),
+ obj);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (!c->IsInstantiableNonArray()) {
+ AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!CheckArray(soa, array)) {
+ return false;
+ }
+ mirror::Array* a = soa.Decode<mirror::Array*>(array);
+ if (a->GetClass()->GetComponentType()->GetPrimitiveType() != type) {
+ AbortF("incompatible array type %s expected %s[]: %p",
+ PrettyDescriptor(a->GetClass()).c_str(), PrettyDescriptor(type).c_str(), array);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static,
+ Primitive::Type type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) {
+ return false;
+ }
+ if (!is_static && !CheckInstanceFieldID(soa, obj, fid)) {
+ return false;
+ }
+ mirror::ArtField* field = soa.DecodeField(fid);
+ DCHECK(field != nullptr); // Already checked by Check.
+ if (is_static != field->IsStatic()) {
+ AbortF("attempt to access %s field %s: %p",
+ field->IsStatic() ? "static" : "non-static", PrettyField(field).c_str(), fid);
+ return false;
+ }
+ if (type != field->GetTypeAsPrimitiveType()) {
+ AbortF("attempt to access field %s of type %s with the wrong type %s: %p",
+ PrettyField(field).c_str(), PrettyDescriptor(field->GetTypeDescriptor()).c_str(),
+ PrettyDescriptor(type).c_str(), fid);
+ return false;
+ }
+ if (is_static) {
+ mirror::Object* o = soa.Decode<mirror::Object*>(obj);
+ if (o == nullptr || !o->IsClass()) {
+ AbortF("attempt to access static field %s with a class argument of type %s: %p",
+ PrettyField(field).c_str(), PrettyTypeOf(o).c_str(), fid);
+ return false;
+ }
+ mirror::Class* c = o->AsClass();
+ if (field->GetDeclaringClass() != c) {
+ AbortF("attempt to access static field %s with an incompatible class argument of %s: %p",
+ PrettyField(field).c_str(), PrettyDescriptor(c).c_str(), fid);
+ return false;
+ }
+ } else {
+ mirror::Object* o = soa.Decode<mirror::Object*>(obj);
+ if (o == nullptr || !field->GetDeclaringClass()->IsAssignableFrom(o->GetClass())) {
+ AbortF("attempt to access field %s from an object argument of type %s: %p",
+ PrettyField(field).c_str(), PrettyTypeOf(o).c_str(), fid);
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
enum InstanceKind {
kClass,
kDirectByteBuffer,
@@ -598,7 +611,7 @@
* Because we're looking at an object on the GC heap, we have to switch
* to "running" mode before doing the checks.
*/
- bool CheckInstance(InstanceKind kind, jobject java_object)
+ bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const char* what = nullptr;
switch (kind) {
@@ -622,15 +635,20 @@
}
if (java_object == nullptr) {
- JniAbortF(function_name_, "%s received null %s", function_name_, what);
- return false;
+ if (null_ok) {
+ return true;
+ } else {
+ AbortF("%s received NULL %s", function_name_, what);
+ return false;
+ }
}
- mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
- what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("%s is an invalid %s: %p (%p)",
+ what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+ java_object, obj);
return false;
}
@@ -652,114 +670,333 @@
break;
}
if (!okay) {
- JniAbortF(function_name_, "%s has wrong type: %s", what, PrettyTypeOf(obj).c_str());
+ AbortF("%s has wrong type: %s", what, PrettyTypeOf(obj).c_str());
return false;
}
return true;
}
- private:
- // Set "has_method" to true if we have a valid thread with a method pointer.
- // We won't have one before attaching a thread, after detaching a thread, or
- // when shutting down the runtime.
- void Init(int flags, const char* functionName, bool has_method) {
- flags_ = flags;
- function_name_ = functionName;
- has_method_ = has_method;
+ /*
+ * Verify that the "mode" argument passed to a primitive array Release
+ * function is one of the valid values.
+ */
+ bool CheckReleaseMode(jint mode) {
+ if (mode != 0 && mode != JNI_COMMIT && mode != JNI_ABORT) {
+ AbortF("unknown value for release mode: %d", mode);
+ return false;
+ }
+ return true;
}
+ bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ switch (fmt) {
+ case 'a': // jarray
+ return CheckArray(soa, arg.a);
+ case 'c': // jclass
+ return CheckInstance(soa, kClass, arg.c, false);
+ case 'f': // jfieldID
+ return CheckFieldID(soa, arg.f) != nullptr;
+ case 'm': // jmethodID
+ return CheckMethodID(soa, arg.m) != nullptr;
+ case 'r': // release int
+ return CheckReleaseMode(arg.r);
+ case 's': // jstring
+ return CheckInstance(soa, kString, arg.s, false);
+ case 't': // jthrowable
+ return CheckInstance(soa, kThrowable, arg.t, false);
+ case 'E': // JNIEnv*
+ return CheckThread(arg.E);
+ case 'L': // jobject
+ return CheckInstance(soa, kObject, arg.L, true);
+ default:
+ return CheckNonHeapValue(fmt, arg);
+ }
+ }
+
+ bool CheckNonHeapValue(char fmt, JniValueType arg) {
+ switch (fmt) {
+ case '.': // ...
+ case 'p': // TODO: pointer - null or readable?
+ case 'v': // JavaVM*
+ case 'B': // jbyte
+ case 'C': // jchar
+ case 'D': // jdouble
+ case 'F': // jfloat
+ case 'I': // jint
+ case 'J': // jlong
+ case 'S': // jshort
+ break; // Ignored.
+ case 'b': // jboolean, why two? Fall-through.
+ case 'Z':
+ return CheckBoolean(arg.Z);
+ case 'u': // utf8
+ if ((flags_ & kFlag_Release) != 0) {
+ return CheckNonNull(arg.u);
+ } else {
+ bool nullable = ((flags_ & kFlag_NullableUtf) != 0);
+ return CheckUtfString(arg.u, nullable);
+ }
+ case 'w': // jobjectRefType
+ switch (arg.w) {
+ case JNIInvalidRefType:
+ case JNILocalRefType:
+ case JNIGlobalRefType:
+ case JNIWeakGlobalRefType:
+ break;
+ default:
+ AbortF("Unknown reference type");
+ return false;
+ }
+ break;
+ case 'z': // jsize
+ return CheckLengthPositive(arg.z);
+ default:
+ AbortF("unknown format specifier: '%c'", fmt);
+ return false;
+ }
+ return true;
+ }
+
+ void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg,
+ std::string* msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ switch (fmt) {
+ case 'L': // jobject fall-through.
+ case 'a': // jarray fall-through.
+ case 's': // jstring fall-through.
+ case 't': // jthrowable fall-through.
+ if (arg.L == nullptr) {
+ *msg += "NULL";
+ } else {
+ StringAppendF(msg, "%p", arg.L);
+ }
+ break;
+ case 'c': { // jclass
+ jclass jc = arg.c;
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (c == nullptr) {
+ *msg += "NULL";
+ } else if (c == kInvalidIndirectRefObject ||
+ !Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
+ StringAppendF(msg, "INVALID POINTER:%p", jc);
+ } else if (!c->IsClass()) {
+ *msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
+ } else {
+ *msg += PrettyClass(c);
+ if (!entry) {
+ StringAppendF(msg, " (%p)", jc);
+ }
+ }
+ break;
+ }
+ case 'f': { // jfieldID
+ jfieldID fid = arg.f;
+ mirror::ArtField* f = soa.DecodeField(fid);
+ *msg += PrettyField(f);
+ if (!entry) {
+ StringAppendF(msg, " (%p)", fid);
+ }
+ break;
+ }
+ case 'm': { // jmethodID
+ jmethodID mid = arg.m;
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ *msg += PrettyMethod(m);
+ if (!entry) {
+ StringAppendF(msg, " (%p)", mid);
+ }
+ break;
+ }
+ default:
+ TraceNonHeapValue(fmt, arg, msg);
+ break;
+ }
+ }
+
+ void TraceNonHeapValue(char fmt, JniValueType arg, std::string* msg) {
+ switch (fmt) {
+ case 'B': // jbyte
+ if (arg.B >= 0 && arg.B < 10) {
+ StringAppendF(msg, "%d", arg.B);
+ } else {
+ StringAppendF(msg, "%#x (%d)", arg.B, arg.B);
+ }
+ break;
+ case 'C': // jchar
+ if (arg.C < 0x7f && arg.C >= ' ') {
+ StringAppendF(msg, "U+%x ('%c')", arg.C, arg.C);
+ } else {
+ StringAppendF(msg, "U+%x", arg.C);
+ }
+ break;
+ case 'F': // jfloat
+ StringAppendF(msg, "%g", arg.F);
+ break;
+ case 'D': // jdouble
+ StringAppendF(msg, "%g", arg.D);
+ break;
+ case 'S': // jshort
+ StringAppendF(msg, "%d", arg.S);
+ break;
+ case 'i': // jint - fall-through.
+ case 'I': // jint
+ StringAppendF(msg, "%d", arg.I);
+ break;
+ case 'J': // jlong
+ StringAppendF(msg, "%" PRId64, arg.J);
+ break;
+ case 'Z': // jboolean
+ case 'b': // jboolean (JNI-style)
+ *msg += arg.b == JNI_TRUE ? "true" : "false";
+ break;
+ case 'V': // void
+ DCHECK(arg.V == nullptr);
+ *msg += "void";
+ break;
+ case 'v': // JavaVM*
+ StringAppendF(msg, "(JavaVM*)%p", arg.v);
+ break;
+ case 'E':
+ StringAppendF(msg, "(JNIEnv*)%p", arg.E);
+ break;
+ case 'z': // non-negative jsize
+ // You might expect jsize to be size_t, but it's not; it's the same as jint.
+ // We only treat this specially so we can do the non-negative check.
+ // TODO: maybe this wasn't worth it?
+ StringAppendF(msg, "%d", arg.z);
+ break;
+ case 'p': // void* ("pointer")
+ if (arg.p == nullptr) {
+ *msg += "NULL";
+ } else {
+ StringAppendF(msg, "(void*) %p", arg.p);
+ }
+ break;
+ case 'r': { // jint (release mode)
+ jint releaseMode = arg.r;
+ if (releaseMode == 0) {
+ *msg += "0";
+ } else if (releaseMode == JNI_ABORT) {
+ *msg += "JNI_ABORT";
+ } else if (releaseMode == JNI_COMMIT) {
+ *msg += "JNI_COMMIT";
+ } else {
+ StringAppendF(msg, "invalid release mode %d", releaseMode);
+ }
+ break;
+ }
+ case 'u': // const char* (Modified UTF-8)
+ if (arg.u == nullptr) {
+ *msg += "NULL";
+ } else {
+ StringAppendF(msg, "\"%s\"", arg.u);
+ }
+ break;
+ case 'w': // jobjectRefType
+ switch (arg.w) {
+ case JNIInvalidRefType:
+ *msg += "invalid reference type";
+ break;
+ case JNILocalRefType:
+ *msg += "local ref type";
+ break;
+ case JNIGlobalRefType:
+ *msg += "global ref type";
+ break;
+ case JNIWeakGlobalRefType:
+ *msg += "weak global ref type";
+ break;
+ default:
+ *msg += "unknown ref type";
+ break;
+ }
+ break;
+ case '.':
+ *msg += "...";
+ break;
+ default:
+ LOG(FATAL) << function_name_ << ": unknown trace format specifier: '" << fmt << "'";
+ }
+ }
/*
* Verify that "array" is non-NULL and points to an Array object.
*
* Since we're dealing with objects, switch to "running" mode.
*/
- void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (java_array == nullptr) {
- JniAbortF(function_name_, "jarray was NULL");
- return;
+ bool CheckArray(ScopedObjectAccess& soa, jarray java_array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(java_array == nullptr)) {
+ AbortF("jarray was NULL");
+ return false;
}
- mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+ if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a))) {
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("jarray is an invalid %s: %p (%p)",
+ ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
+ java_array, a);
+ return false;
} else if (!a->IsArrayInstance()) {
- JniAbortF(function_name_, "jarray argument has non-array type: %s", PrettyTypeOf(a).c_str());
+ AbortF("jarray argument has non-array type: %s", PrettyTypeOf(a).c_str());
+ return false;
}
+ return true;
}
- void CheckLengthPositive(jsize length) {
+ bool CheckBoolean(jboolean z) {
+ if (z != JNI_TRUE && z != JNI_FALSE) {
+ AbortF("unexpected jboolean value: %d", z);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckLengthPositive(jsize length) {
if (length < 0) {
- JniAbortF(function_name_, "negative jsize: %d", length);
+ AbortF("negative jsize: %d", length);
+ return false;
}
+ return true;
}
- mirror::ArtField* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (fid == nullptr) {
- JniAbortF(function_name_, "jfieldID was NULL");
+ AbortF("jfieldID was NULL");
return nullptr;
}
- mirror::ArtField* f = soa_.DecodeField(fid);
+ mirror::ArtField* f = soa.DecodeField(fid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "invalid jfieldID: %p", fid);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("invalid jfieldID: %p", fid);
return nullptr;
}
return f;
}
- mirror::ArtMethod* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (mid == nullptr) {
- JniAbortF(function_name_, "jmethodID was NULL");
+ AbortF("jmethodID was NULL");
return nullptr;
}
- mirror::ArtMethod* m = soa_.DecodeMethod(mid);
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "invalid jmethodID: %p", mid);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("invalid jmethodID: %p", mid);
return nullptr;
}
return m;
}
- /*
- * Verify that "jobj" is a valid object, and that it's an object that JNI
- * is allowed to know about. We allow NULL references.
- *
- * Switches to "running" mode before performing checks.
- */
- void CheckObject(jobject java_object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (java_object == nullptr) {
- return;
- }
-
- mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- // TODO: when we remove work_around_app_jni_bugs, this should be impossible.
- JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
- }
- }
-
- /*
- * Verify that the "mode" argument passed to a primitive array Release
- * function is one of the valid values.
- */
- void CheckReleaseMode(jint mode) {
- if (mode != 0 && mode != JNI_COMMIT && mode != JNI_ABORT) {
- JniAbortF(function_name_, "unknown value for release mode: %d", mode);
- }
- }
-
- void CheckThread(int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool CheckThread(JNIEnv* env) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
if (self == nullptr) {
- JniAbortF(function_name_, "a thread (tid %d) is making JNI calls without being attached", GetTid());
- return;
+ AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid());
+ return false;
}
// Get the *correct* JNIEnv by going through our TLS pointer.
@@ -767,21 +1004,22 @@
// Verify that the current thread is (a) attached and (b) associated with
// this particular instance of JNIEnv.
- if (soa_.Env() != threadEnv) {
- JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s",
- ToStr<Thread>(*self).c_str(), ToStr<Thread>(*soa_.Self()).c_str());
- return;
+ if (env != threadEnv) {
+ AbortF("thread %s using JNIEnv* from thread %s",
+ ToStr<Thread>(*self).c_str(), ToStr<Thread>(*self).c_str());
+ return false;
}
// Verify that, if this thread previously made a critical "get" call, we
// do the corresponding "release" call before we try anything else.
- switch (flags & kFlag_CritMask) {
+ switch (flags_ & kFlag_CritMask) {
case kFlag_CritOkay: // okay to call this method
break;
case kFlag_CritBad: // not okay to call
if (threadEnv->critical) {
- JniAbortF(function_name_, "thread %s using JNI after critical get", ToStr<Thread>(*self).c_str());
- return;
+ AbortF("thread %s using JNI after critical get",
+ ToStr<Thread>(*self).c_str());
+ return false;
}
break;
case kFlag_CritGet: // this is a "get" call
@@ -791,44 +1029,46 @@
case kFlag_CritRelease: // this is a "release" call
threadEnv->critical--;
if (threadEnv->critical < 0) {
- JniAbortF(function_name_, "thread %s called too many critical releases", ToStr<Thread>(*self).c_str());
- return;
+ AbortF("thread %s called too many critical releases",
+ ToStr<Thread>(*self).c_str());
+ return false;
}
break;
default:
- LOG(FATAL) << "Bad flags (internal error): " << flags;
+ LOG(FATAL) << "Bad flags (internal error): " << flags_;
}
// Verify that, if an exception has been raised, the native code doesn't
// make any JNI calls other than the Exception* methods.
- if ((flags & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
+ if ((flags_ & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
ThrowLocation throw_location;
mirror::Throwable* exception = self->GetException(&throw_location);
std::string type(PrettyTypeOf(exception));
- JniAbortF(function_name_, "JNI %s called with pending exception '%s' thrown in %s",
- function_name_, type.c_str(), throw_location.Dump().c_str());
- return;
+ AbortF("JNI %s called with pending exception '%s' thrown in %s",
+ function_name_, type.c_str(), throw_location.Dump().c_str());
+ return false;
}
+ return true;
}
// Verifies that "bytes" points to valid Modified UTF-8 data.
- void CheckUtfString(const char* bytes, bool nullable) {
+ bool CheckUtfString(const char* bytes, bool nullable) {
if (bytes == nullptr) {
if (!nullable) {
- JniAbortF(function_name_, "non-nullable const char* was NULL");
- return;
+ AbortF("non-nullable const char* was NULL");
+ return false;
}
- return;
+ return true;
}
const char* errorKind = nullptr;
uint8_t utf8 = CheckUtfBytes(bytes, &errorKind);
if (errorKind != nullptr) {
- JniAbortF(function_name_,
- "input is not valid Modified UTF-8: illegal %s byte %#x\n"
- " string: '%s'", errorKind, utf8, bytes);
- return;
+ AbortF("input is not valid Modified UTF-8: illegal %s byte %#x\n"
+ " string: '%s'", errorKind, utf8, bytes);
+ return false;
}
+ return true;
}
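For context on what CheckUtfBytes below guards against, a small sketch of the two ways Modified UTF-8 differs from standard UTF-8 (illustrative byte sequences, not part of the patch):

// U+0000 must use the two-byte form 0xC0 0x80 rather than a raw NUL byte.
const char embedded_nul[] = {'\xC0', '\x80', '\0'};                   // accepted.
// Four-byte UTF-8 sequences are not used; supplementary characters are encoded as a
// surrogate pair of three bytes each, so an 0xF0 start byte is reported as illegal.
const char four_byte_seq[] = {'\xF0', '\x9F', '\x98', '\x80', '\0'};  // rejected.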
static uint8_t CheckUtfBytes(const char* bytes, const char** errorKind) {
@@ -880,92 +1120,120 @@
return 0;
}
- const ScopedObjectAccess soa_;
- const char* function_name_;
- int flags_;
- bool has_method_;
+ void AbortF(const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) {
+ va_list args;
+ va_start(args, fmt);
+ Runtime::Current()->GetJavaVM()->JniAbortV(function_name_, fmt, args);
+ va_end(args);
+ }
+
+ // The name of the JNI function being checked.
+ const char* const function_name_;
+
+ const int flags_;
int indent_;
+ const bool has_method_;
+
DISALLOW_COPY_AND_ASSIGN(ScopedCheck);
};
-#define CHECK_JNI_ENTRY(flags, types, args...) \
- ScopedCheck sc(env, flags, __FUNCTION__); \
- sc.Check(true, types, ##args)
-
-#define CHECK_JNI_EXIT(type, exp) ({ \
- auto _rc = (exp); \
- sc.Check(false, type, _rc); \
- _rc; })
-#define CHECK_JNI_EXIT_VOID() \
- sc.Check(false, "V")
-
/*
* ===========================================================================
* Guarded arrays
* ===========================================================================
*/
-#define kGuardLen 512 /* must be multiple of 2 */
-#define kGuardPattern 0xd5e3 /* uncommon values; d5e3d5e3 invalid addr */
-#define kGuardMagic 0xffd5aa96
-
/* this gets tucked in at the start of the buffer; struct size must be even */
-struct GuardedCopy {
- uint32_t magic;
- uLong adler;
- size_t original_length;
- const void* original_ptr;
-
- /* find the GuardedCopy given the pointer into the "live" data */
- static inline const GuardedCopy* FromData(const void* dataBuf) {
- return reinterpret_cast<const GuardedCopy*>(ActualBuffer(dataBuf));
- }
-
+class GuardedCopy {
+ public:
/*
* Create an over-sized buffer to hold the contents of "buf". Copy it in,
* filling in the area around it with guard data.
- *
- * We use a 16-bit pattern to make a rogue memset less likely to elude us.
*/
- static void* Create(const void* buf, size_t len, bool modOkay) {
- size_t newLen = ActualLength(len);
- uint8_t* newBuf = DebugAlloc(newLen);
-
- // Fill it in with a pattern.
- uint16_t* pat = reinterpret_cast<uint16_t*>(newBuf);
- for (size_t i = 0; i < newLen / 2; i++) {
- *pat++ = kGuardPattern;
- }
-
- // Copy the data in; note "len" could be zero.
- memcpy(newBuf + kGuardLen / 2, buf, len);
+ static void* Create(const void* original_buf, size_t len, bool mod_okay) {
+ const size_t new_len = LengthIncludingRedZones(len);
+ uint8_t* const new_buf = DebugAlloc(new_len);
// If modification is not expected, grab a checksum.
uLong adler = 0;
- if (!modOkay) {
- adler = adler32(0L, Z_NULL, 0);
- adler = adler32(adler, reinterpret_cast<const Bytef*>(buf), len);
- *reinterpret_cast<uLong*>(newBuf) = adler;
+ if (!mod_okay) {
+ adler = adler32(adler32(0L, Z_NULL, 0), reinterpret_cast<const Bytef*>(original_buf), len);
}
- GuardedCopy* pExtra = reinterpret_cast<GuardedCopy*>(newBuf);
- pExtra->magic = kGuardMagic;
- pExtra->adler = adler;
- pExtra->original_ptr = buf;
- pExtra->original_length = len;
+ GuardedCopy* copy = new (new_buf) GuardedCopy(original_buf, len, adler);
- return newBuf + kGuardLen / 2;
+ // Fill begin region with canary pattern.
+ const size_t kStartCanaryLength = (GuardedCopy::kRedZoneSize / 2) - sizeof(GuardedCopy);
+ for (size_t i = 0, j = 0; i < kStartCanaryLength; ++i) {
+ const_cast<char*>(copy->StartRedZone())[i] = kCanary[j];
+ if (kCanary[j] == '\0') {
+ j = 0;
+ }
+ }
+
+ // Copy the data in; note "len" could be zero.
+ memcpy(const_cast<uint8_t*>(copy->BufferWithinRedZones()), original_buf, len);
+
+ // Fill end region with canary pattern.
+ for (size_t i = 0, j = 0; i < kEndCanaryLength; ++i) {
+ const_cast<char*>(copy->EndRedZone())[i] = kCanary[j];
+ if (kCanary[j] == '\0') {
+ j = 0;
+ }
+ }
+
+ return const_cast<uint8_t*>(copy->BufferWithinRedZones());
}
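For orientation, an informal diagram of the buffer that Create() returns (comment-only sketch; the sizes come from kRedZoneSize declared later in this class):

// mmap'd allocation of LengthIncludingRedZones(len) bytes:
//
//   [ GuardedCopy header | canary bytes ... ][ user data: len bytes ][ canary bytes: kRedZoneSize/2 ]
//   \__________ kRedZoneSize/2 ___________/  ^
//                                            pointer handed back to the caller (BufferWithinRedZones)
//
// CheckHeader() later verifies the magic value and, when the buffer was not supposed to change,
// the adler32 checksum; CheckRedZones() verifies both canary regions.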
/*
+ * Create a guarded copy of a primitive array. Modifications to the copied
+ * data are allowed. Returns a pointer to the copied data.
+ */
+ static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* is_copy) {
+ ScopedObjectAccess soa(env);
+
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+ size_t component_size = a->GetClass()->GetComponentSize();
+ size_t byte_count = a->GetLength() * component_size;
+ void* result = Create(a->GetRawData(component_size, 0), byte_count, true);
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ return result;
+ }
+
+ /*
+ * Perform the array "release" operation, which may or may not copy data
+ * back into the managed heap, and may or may not release the underlying storage.
+ */
+ static void* ReleaseGuardedPACopy(const char* function_name, JNIEnv* env, jarray java_array,
+ void* embedded_buf, int mode) {
+ ScopedObjectAccess soa(env);
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+
+ if (!GuardedCopy::Check(function_name, embedded_buf, true)) {
+ return nullptr;
+ }
+ if (mode != JNI_ABORT) {
+ size_t len = FromEmbedded(embedded_buf)->original_length_;
+ memcpy(a->GetRawData(a->GetClass()->GetComponentSize(), 0), embedded_buf, len);
+ }
+ if (mode != JNI_COMMIT) {
+ return Destroy(embedded_buf);
+ }
+ return embedded_buf;
+ }
+
+
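Summarising the mode handling implemented just above (derived from the code, shown here only for quick reference):

// mode == 0          : copy the data back into the managed array, then free the guarded copy.
// mode == JNI_COMMIT : copy the data back, keep the guarded copy (the caller may continue using it).
// mode == JNI_ABORT  : discard any changes and free the guarded copy.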
+ /*
* Free up the guard buffer, scrub it, and return the original pointer.
*/
- static void* Destroy(void* dataBuf) {
- const GuardedCopy* pExtra = GuardedCopy::FromData(dataBuf);
- void* original_ptr = const_cast<void*>(pExtra->original_ptr);
- size_t len = pExtra->original_length;
- DebugFree(dataBuf, len);
+ static void* Destroy(void* embedded_buf) {
+ GuardedCopy* copy = FromEmbedded(embedded_buf);
+ void* original_ptr = const_cast<void*>(copy->original_ptr_);
+ size_t len = LengthIncludingRedZones(copy->original_length_);
+ DebugFree(copy, len);
return original_ptr;
}
@@ -975,67 +1243,16 @@
*
* The caller has already checked that "dataBuf" is non-NULL.
*/
- static void Check(const char* functionName, const void* dataBuf, bool modOkay) {
- static const uint32_t kMagicCmp = kGuardMagic;
- const uint8_t* fullBuf = ActualBuffer(dataBuf);
- const GuardedCopy* pExtra = GuardedCopy::FromData(dataBuf);
-
- // Before we do anything with "pExtra", check the magic number. We
- // do the check with memcmp rather than "==" in case the pointer is
- // unaligned. If it points to completely bogus memory we're going
- // to crash, but there's no easy way around that.
- if (memcmp(&pExtra->magic, &kMagicCmp, 4) != 0) {
- uint8_t buf[4];
- memcpy(buf, &pExtra->magic, 4);
- JniAbortF(functionName,
- "guard magic does not match (found 0x%02x%02x%02x%02x) -- incorrect data pointer %p?",
- buf[3], buf[2], buf[1], buf[0], dataBuf); // Assumes little-endian.
- }
-
- size_t len = pExtra->original_length;
-
- // Check bottom half of guard; skip over optional checksum storage.
- const uint16_t* pat = reinterpret_cast<const uint16_t*>(fullBuf);
- for (size_t i = sizeof(GuardedCopy) / 2; i < (kGuardLen / 2 - sizeof(GuardedCopy)) / 2; i++) {
- if (pat[i] != kGuardPattern) {
- JniAbortF(functionName, "guard pattern(1) disturbed at %p +%zd", fullBuf, i*2);
- }
- }
-
- int offset = kGuardLen / 2 + len;
- if (offset & 0x01) {
- // Odd byte; expected value depends on endian.
- const uint16_t patSample = kGuardPattern;
- uint8_t expected_byte = reinterpret_cast<const uint8_t*>(&patSample)[1];
- if (fullBuf[offset] != expected_byte) {
- JniAbortF(functionName, "guard pattern disturbed in odd byte after %p +%d 0x%02x 0x%02x",
- fullBuf, offset, fullBuf[offset], expected_byte);
- }
- offset++;
- }
-
- // Check top half of guard.
- pat = reinterpret_cast<const uint16_t*>(fullBuf + offset);
- for (size_t i = 0; i < kGuardLen / 4; i++) {
- if (pat[i] != kGuardPattern) {
- JniAbortF(functionName, "guard pattern(2) disturbed at %p +%zd", fullBuf, offset + i*2);
- }
- }
-
- // If modification is not expected, verify checksum. Strictly speaking
- // this is wrong: if we told the client that we made a copy, there's no
- // reason they can't alter the buffer.
- if (!modOkay) {
- uLong adler = adler32(0L, Z_NULL, 0);
- adler = adler32(adler, (const Bytef*)dataBuf, len);
- if (pExtra->adler != adler) {
- JniAbortF(functionName, "buffer modified (0x%08lx vs 0x%08lx) at address %p",
- pExtra->adler, adler, dataBuf);
- }
- }
+ static bool Check(const char* function_name, const void* embedded_buf, bool mod_okay) {
+ const GuardedCopy* copy = FromEmbedded(embedded_buf);
+ return copy->CheckHeader(function_name, mod_okay) && copy->CheckRedZones(function_name);
}
private:
+ GuardedCopy(const void* original_buf, size_t len, uLong adler) :
+ magic_(kGuardMagic), adler_(adler), original_ptr_(original_buf), original_length_(len) {
+ }
+
static uint8_t* DebugAlloc(size_t len) {
void* result = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
if (result == MAP_FAILED) {
@@ -1044,68 +1261,126 @@
return reinterpret_cast<uint8_t*>(result);
}
- static void DebugFree(void* dataBuf, size_t len) {
- uint8_t* fullBuf = ActualBuffer(dataBuf);
- size_t totalByteCount = ActualLength(len);
- // TODO: we could mprotect instead, and keep the allocation around for a while.
- // This would be even more expensive, but it might catch more errors.
- // if (mprotect(fullBuf, totalByteCount, PROT_NONE) != 0) {
- // PLOG(WARNING) << "mprotect(PROT_NONE) failed";
- // }
- if (munmap(fullBuf, totalByteCount) != 0) {
- PLOG(FATAL) << "munmap(" << reinterpret_cast<void*>(fullBuf) << ", " << totalByteCount << ") failed";
+ static void DebugFree(void* buf, size_t len) {
+ if (munmap(buf, len) != 0) {
+ PLOG(FATAL) << "munmap(" << buf << ", " << len << ") failed";
}
}
- static const uint8_t* ActualBuffer(const void* dataBuf) {
- return reinterpret_cast<const uint8_t*>(dataBuf) - kGuardLen / 2;
+ static size_t LengthIncludingRedZones(size_t len) {
+ return len + kRedZoneSize;
}
- static uint8_t* ActualBuffer(void* dataBuf) {
- return reinterpret_cast<uint8_t*>(dataBuf) - kGuardLen / 2;
+ // Get the GuardedCopy from the interior pointer.
+ static GuardedCopy* FromEmbedded(void* embedded_buf) {
+ return reinterpret_cast<GuardedCopy*>(
+ reinterpret_cast<uint8_t*>(embedded_buf) - (kRedZoneSize / 2));
}
- // Underlying length of a user allocation of 'length' bytes.
- static size_t ActualLength(size_t length) {
- return (length + kGuardLen + 1) & ~0x01;
+ static const GuardedCopy* FromEmbedded(const void* embedded_buf) {
+ return reinterpret_cast<const GuardedCopy*>(
+ reinterpret_cast<const uint8_t*>(embedded_buf) - (kRedZoneSize / 2));
}
+
+ static void AbortF(const char* jni_function_name, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ Runtime::Current()->GetJavaVM()->JniAbortV(jni_function_name, fmt, args);
+ va_end(args);
+ }
+
+ bool CheckHeader(const char* function_name, bool mod_okay) const {
+ static const uint32_t kMagicCmp = kGuardMagic;
+
+ // Before we do anything with "pExtra", check the magic number. We
+ // do the check with memcmp rather than "==" in case the pointer is
+ // unaligned. If it points to completely bogus memory we're going
+ // to crash, but there's no easy way around that.
+ if (UNLIKELY(memcmp(&magic_, &kMagicCmp, 4) != 0)) {
+ uint8_t buf[4];
+ memcpy(buf, &magic_, 4);
+ AbortF(function_name,
+ "guard magic does not match (found 0x%02x%02x%02x%02x) -- incorrect data pointer %p?",
+ buf[3], buf[2], buf[1], buf[0], this); // Assumes little-endian.
+ return false;
+ }
+
+ // If modification is not expected, verify checksum. Strictly speaking this is wrong: if we
+ // told the client that we made a copy, there's no reason they can't alter the buffer.
+ if (!mod_okay) {
+ uLong computed_adler =
+ adler32(adler32(0L, Z_NULL, 0), BufferWithinRedZones(), original_length_);
+ if (computed_adler != adler_) {
+ AbortF(function_name, "buffer modified (0x%08lx vs 0x%08lx) at address %p",
+ computed_adler, adler_, this);
+ return false;
+ }
+ }
+ return true;
+ }
+
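+  // Verify that the canary bytes before and after the embedded buffer are undisturbed.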
+ bool CheckRedZones(const char* function_name) const {
+ // Check the begin red zone.
+ const size_t kStartCanaryLength = (GuardedCopy::kRedZoneSize / 2) - sizeof(GuardedCopy);
+ for (size_t i = 0, j = 0; i < kStartCanaryLength; ++i) {
+ if (UNLIKELY(StartRedZone()[i] != kCanary[j])) {
+ AbortF(function_name, "guard pattern before buffer disturbed at %p +%zd", this, i);
+ return false;
+ }
+ if (kCanary[j] == '\0') {
+ j = 0;
+ }
+ }
+
+ // Check end region.
+ for (size_t i = 0, j = 0; i < kEndCanaryLength; ++i) {
+ if (UNLIKELY(EndRedZone()[i] != kCanary[j])) {
+ size_t offset_from_buffer_start =
+ &(EndRedZone()[i]) - &(StartRedZone()[kStartCanaryLength]);
+ AbortF(function_name, "guard pattern after buffer disturbed at %p +%zd", this,
+ offset_from_buffer_start);
+ return false;
+ }
+ if (kCanary[j] == '\0') {
+ j = 0;
+ }
+ }
+ return true;
+ }
+
+ // Location that canary value will be written before the guarded region.
+ const char* StartRedZone() const {
+ const uint8_t* buf = reinterpret_cast<const uint8_t*>(this);
+ return reinterpret_cast<const char*>(buf + sizeof(GuardedCopy));
+ }
+
+ // Return the interior embedded buffer.
+ const uint8_t* BufferWithinRedZones() const {
+ const uint8_t* embedded_buf = reinterpret_cast<const uint8_t*>(this) + (kRedZoneSize / 2);
+ return embedded_buf;
+ }
+
+ // Location that canary value will be written after the guarded region.
+ const char* EndRedZone() const {
+ const uint8_t* buf = reinterpret_cast<const uint8_t*>(this);
+ size_t buf_len = LengthIncludingRedZones(original_length_);
+ return reinterpret_cast<const char*>(buf + (buf_len - (kRedZoneSize / 2)));
+ }
+
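+  // Layout of a guarded allocation: the GuardedCopy header plus the leading canary occupy the
+  // first kRedZoneSize / 2 bytes, followed by the copied user buffer, followed by another
+  // kRedZoneSize / 2 bytes of trailing canary.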
+ static constexpr size_t kRedZoneSize = 512;
+ static constexpr size_t kEndCanaryLength = kRedZoneSize / 2;
+
+ // Value written before and after the guarded array.
+ static const char* const kCanary;
+
+ static constexpr uint32_t kGuardMagic = 0xffd5aa96;
+
+ const uint32_t magic_;
+ const uLong adler_;
+ const void* const original_ptr_;
+ const size_t original_length_;
};
-
-/*
- * Create a guarded copy of a primitive array. Modifications to the copied
- * data are allowed. Returns a pointer to the copied data.
- */
-static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* isCopy) {
- ScopedObjectAccess soa(env);
-
- mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
- size_t component_size = a->GetClass()->GetComponentSize();
- size_t byte_count = a->GetLength() * component_size;
- void* result = GuardedCopy::Create(a->GetRawData(component_size, 0), byte_count, true);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
- }
- return result;
-}
-
-/*
- * Perform the array "release" operation, which may or may not copy data
- * back into the managed heap, and may or may not release the underlying storage.
- */
-static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf, int mode) {
- ScopedObjectAccess soa(env);
- mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
-
- GuardedCopy::Check(__FUNCTION__, dataBuf, true);
-
- if (mode != JNI_ABORT) {
- size_t len = GuardedCopy::FromData(dataBuf)->original_length;
- memcpy(a->GetRawData(a->GetClass()->GetComponentSize(), 0), dataBuf, len);
- }
- if (mode != JNI_COMMIT) {
- GuardedCopy::Destroy(dataBuf);
- }
-}
+const char* const GuardedCopy::kCanary = "JNI BUFFER RED ZONE";
/*
* ===========================================================================
@@ -1116,668 +1391,1954 @@
class CheckJNI {
public:
static jint GetVersion(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_Default, "E", env);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetVersion(env));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[1] = {{.E = env }};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ result.I = baseEnv(env)->GetVersion(env);
+ if (sc.Check(soa, false, "I", &result)) {
+ return result.I;
+ }
+ }
+ return JNI_ERR;
}
- static jclass DefineClass(JNIEnv* env, const char* name, jobject loader, const jbyte* buf, jsize bufLen) {
- CHECK_JNI_ENTRY(kFlag_Default, "EuLpz", env, name, loader, buf, bufLen);
- sc.CheckClassName(name);
- return CHECK_JNI_EXIT("c", baseEnv(env)->DefineClass(env, name, loader, buf, bufLen));
+ static jint GetJavaVM(JNIEnv *env, JavaVM **vm) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env }, {.p = vm}};
+ if (sc.Check(soa, true, "Ep", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->GetJavaVM(env, vm);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jint RegisterNatives(JNIEnv* env, jclass c, const JNINativeMethod* methods, jint nMethods) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] = {{.E = env }, {.c = c}, {.p = methods}, {.I = nMethods}};
+ if (sc.Check(soa, true, "EcpI", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->RegisterNatives(env, c, methods, nMethods);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jint UnregisterNatives(JNIEnv* env, jclass c) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env }, {.c = c}};
+ if (sc.Check(soa, true, "Ec", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->UnregisterNatives(env, c);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jobjectRefType GetObjectRefType(JNIEnv* env, jobject obj) {
+ // Note: we use "Ep" rather than "EL" because this is the one JNI function that it's okay to
+ // pass an invalid reference to.
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env }, {.p = obj}};
+ if (sc.Check(soa, true, "Ep", args)) {
+ JniValueType result;
+ result.w = baseEnv(env)->GetObjectRefType(env, obj);
+ if (sc.Check(soa, false, "w", &result)) {
+ return result.w;
+ }
+ }
+ return JNIInvalidRefType;
+ }
+
+ static jclass DefineClass(JNIEnv* env, const char* name, jobject loader, const jbyte* buf,
+ jsize bufLen) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[5] = {{.E = env}, {.u = name}, {.L = loader}, {.p = buf}, {.z = bufLen}};
+ if (sc.Check(soa, true, "EuLpz", args) && sc.CheckClassName(name)) {
+ JniValueType result;
+ result.c = baseEnv(env)->DefineClass(env, name, loader, buf, bufLen);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
}
static jclass FindClass(JNIEnv* env, const char* name) {
- CHECK_JNI_ENTRY(kFlag_Default, "Eu", env, name);
- sc.CheckClassName(name);
- return CHECK_JNI_EXIT("c", baseEnv(env)->FindClass(env, name));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.u = name}};
+ if (sc.Check(soa, true, "Eu", args) && sc.CheckClassName(name)) {
+ JniValueType result;
+ result.c = baseEnv(env)->FindClass(env, name);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
}
static jclass GetSuperclass(JNIEnv* env, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
- return CHECK_JNI_EXIT("c", baseEnv(env)->GetSuperclass(env, c));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.c = c}};
+ if (sc.Check(soa, true, "Ec", args)) {
+ JniValueType result;
+ result.c = baseEnv(env)->GetSuperclass(env, c);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
}
static jboolean IsAssignableFrom(JNIEnv* env, jclass c1, jclass c2) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecc", env, c1, c2);
- return CHECK_JNI_EXIT("b", baseEnv(env)->IsAssignableFrom(env, c1, c2));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.c = c1}, {.c = c2}};
+ if (sc.Check(soa, true, "Ecc", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->IsAssignableFrom(env, c1, c2);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
}
static jmethodID FromReflectedMethod(JNIEnv* env, jobject method) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, method);
- // TODO: check that 'field' is a java.lang.reflect.Method.
- return CHECK_JNI_EXIT("m", baseEnv(env)->FromReflectedMethod(env, method));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = method}};
+ if (sc.Check(soa, true, "EL", args) && sc.CheckReflectedMethod(soa, method)) {
+ JniValueType result;
+ result.m = baseEnv(env)->FromReflectedMethod(env, method);
+ if (sc.Check(soa, false, "m", &result)) {
+ return result.m;
+ }
+ }
+ return nullptr;
}
static jfieldID FromReflectedField(JNIEnv* env, jobject field) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, field);
- // TODO: check that 'field' is a java.lang.reflect.Field.
- return CHECK_JNI_EXIT("f", baseEnv(env)->FromReflectedField(env, field));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = field}};
+ if (sc.Check(soa, true, "EL", args) && sc.CheckReflectedField(soa, field)) {
+ JniValueType result;
+ result.f = baseEnv(env)->FromReflectedField(env, field);
+ if (sc.Check(soa, false, "f", &result)) {
+ return result.f;
+ }
+ }
+ return nullptr;
}
static jobject ToReflectedMethod(JNIEnv* env, jclass cls, jmethodID mid, jboolean isStatic) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecmb", env, cls, mid, isStatic);
- return CHECK_JNI_EXIT("L", baseEnv(env)->ToReflectedMethod(env, cls, mid, isStatic));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] = {{.E = env}, {.c = cls}, {.m = mid}, {.b = isStatic}};
+ if (sc.Check(soa, true, "Ecmb", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->ToReflectedMethod(env, cls, mid, isStatic);
+ if (sc.Check(soa, false, "L", &result) && (result.L != nullptr)) {
+ DCHECK(sc.CheckReflectedMethod(soa, result.L));
+ return result.L;
+ }
+ }
+ return nullptr;
}
static jobject ToReflectedField(JNIEnv* env, jclass cls, jfieldID fid, jboolean isStatic) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecfb", env, cls, fid, isStatic);
- return CHECK_JNI_EXIT("L", baseEnv(env)->ToReflectedField(env, cls, fid, isStatic));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] = {{.E = env}, {.c = cls}, {.f = fid}, {.b = isStatic}};
+ if (sc.Check(soa, true, "Ecfb", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->ToReflectedField(env, cls, fid, isStatic);
+ if (sc.Check(soa, false, "L", &result) && (result.L != nullptr)) {
+ DCHECK(sc.CheckReflectedField(soa, result.L));
+ return result.L;
+ }
+ }
+ return nullptr;
}
static jint Throw(JNIEnv* env, jthrowable obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- // TODO: check that 'obj' is a java.lang.Throwable.
- return CHECK_JNI_EXIT("I", baseEnv(env)->Throw(env, obj));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.t = obj}};
+ if (sc.Check(soa, true, "Et", args) && sc.CheckThrowable(soa, obj)) {
+ JniValueType result;
+ result.i = baseEnv(env)->Throw(env, obj);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
}
static jint ThrowNew(JNIEnv* env, jclass c, const char* message) {
- CHECK_JNI_ENTRY(kFlag_NullableUtf, "Ecu", env, c, message);
- return CHECK_JNI_EXIT("I", baseEnv(env)->ThrowNew(env, c, message));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_NullableUtf, __FUNCTION__);
+ JniValueType args[5] = {{.E = env}, {.c = c}, {.u = message}};
+ if (sc.Check(soa, true, "Ecu", args) && sc.CheckThrowableClass(soa, c)) {
+ JniValueType result;
+ result.i = baseEnv(env)->ThrowNew(env, c, message);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
}
static jthrowable ExceptionOccurred(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
- return CHECK_JNI_EXIT("L", baseEnv(env)->ExceptionOccurred(env));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ result.t = baseEnv(env)->ExceptionOccurred(env);
+ if (sc.Check(soa, false, "t", &result)) {
+ return result.t;
+ }
+ }
+ return nullptr;
}
static void ExceptionDescribe(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
- baseEnv(env)->ExceptionDescribe(env);
- CHECK_JNI_EXIT_VOID();
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ baseEnv(env)->ExceptionDescribe(env);
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
static void ExceptionClear(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
- baseEnv(env)->ExceptionClear(env);
- CHECK_JNI_EXIT_VOID();
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ baseEnv(env)->ExceptionClear(env);
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
+ static jboolean ExceptionCheck(JNIEnv* env) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay | kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->ExceptionCheck(env);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
}
static void FatalError(JNIEnv* env, const char* msg) {
// The JNI specification doesn't say it's okay to call FatalError with a pending exception,
// but you're about to abort anyway, and it's quite likely that you have a pending exception,
// and it's not unimaginable that you don't know that you do. So we allow it.
- CHECK_JNI_ENTRY(kFlag_ExcepOkay | kFlag_NullableUtf, "Eu", env, msg);
- baseEnv(env)->FatalError(env, msg);
- CHECK_JNI_EXIT_VOID();
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay | kFlag_NullableUtf, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.u = msg}};
+ if (sc.Check(soa, true, "Eu", args)) {
+ JniValueType result;
+ baseEnv(env)->FatalError(env, msg);
+ // Unreachable.
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
static jint PushLocalFrame(JNIEnv* env, jint capacity) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EI", env, capacity);
- return CHECK_JNI_EXIT("I", baseEnv(env)->PushLocalFrame(env, capacity));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.I = capacity}};
+ if (sc.Check(soa, true, "EI", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->PushLocalFrame(env, capacity);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
}
static jobject PopLocalFrame(JNIEnv* env, jobject res) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, res);
- return CHECK_JNI_EXIT("L", baseEnv(env)->PopLocalFrame(env, res));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = res}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->PopLocalFrame(env, res);
+ sc.Check(soa, false, "L", &result);
+ return result.L;
+ }
+ return nullptr;
}
static jobject NewGlobalRef(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewGlobalRef(env, obj));
+ return NewRef(__FUNCTION__, env, obj, kGlobal);
}
- static jobject NewLocalRef(JNIEnv* env, jobject ref) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, ref);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewLocalRef(env, ref));
- }
-
- static void DeleteGlobalRef(JNIEnv* env, jobject globalRef) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, globalRef);
- if (globalRef != nullptr && GetIndirectRefKind(globalRef) != kGlobal) {
- JniAbortF(__FUNCTION__, "DeleteGlobalRef on %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(globalRef)).c_str(), globalRef);
- } else {
- baseEnv(env)->DeleteGlobalRef(env, globalRef);
- CHECK_JNI_EXIT_VOID();
- }
- }
-
- static void DeleteWeakGlobalRef(JNIEnv* env, jweak weakGlobalRef) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, weakGlobalRef);
- if (weakGlobalRef != nullptr && GetIndirectRefKind(weakGlobalRef) != kWeakGlobal) {
- JniAbortF(__FUNCTION__, "DeleteWeakGlobalRef on %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(weakGlobalRef)).c_str(), weakGlobalRef);
- } else {
- baseEnv(env)->DeleteWeakGlobalRef(env, weakGlobalRef);
- CHECK_JNI_EXIT_VOID();
- }
- }
-
- static void DeleteLocalRef(JNIEnv* env, jobject localRef) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, localRef);
- if (localRef != nullptr && GetIndirectRefKind(localRef) != kLocal && !IsHandleScopeLocalRef(env, localRef)) {
- JniAbortF(__FUNCTION__, "DeleteLocalRef on %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(localRef)).c_str(), localRef);
- } else {
- baseEnv(env)->DeleteLocalRef(env, localRef);
- CHECK_JNI_EXIT_VOID();
- }
- }
-
- static jint EnsureLocalCapacity(JNIEnv *env, jint capacity) {
- CHECK_JNI_ENTRY(kFlag_Default, "EI", env, capacity);
- return CHECK_JNI_EXIT("I", baseEnv(env)->EnsureLocalCapacity(env, capacity));
- }
-
- static jboolean IsSameObject(JNIEnv* env, jobject ref1, jobject ref2) {
- CHECK_JNI_ENTRY(kFlag_Default, "ELL", env, ref1, ref2);
- return CHECK_JNI_EXIT("b", baseEnv(env)->IsSameObject(env, ref1, ref2));
- }
-
- static jobject AllocObject(JNIEnv* env, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
- return CHECK_JNI_EXIT("L", baseEnv(env)->AllocObject(env, c));
- }
-
- static jobject NewObject(JNIEnv* env, jclass c, jmethodID mid, ...) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
- va_list args;
- va_start(args, mid);
- jobject result = baseEnv(env)->NewObjectV(env, c, mid, args);
- va_end(args);
- return CHECK_JNI_EXIT("L", result);
- }
-
- static jobject NewObjectV(JNIEnv* env, jclass c, jmethodID mid, va_list args) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewObjectV(env, c, mid, args));
- }
-
- static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* args) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewObjectA(env, c, mid, args));
- }
-
- static jclass GetObjectClass(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- return CHECK_JNI_EXIT("c", baseEnv(env)->GetObjectClass(env, obj));
- }
-
- static jboolean IsInstanceOf(JNIEnv* env, jobject obj, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "ELc", env, obj, c);
- return CHECK_JNI_EXIT("b", baseEnv(env)->IsInstanceOf(env, obj, c));
- }
-
- static jmethodID GetMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("m", baseEnv(env)->GetMethodID(env, c, name, sig));
- }
-
- static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("f", baseEnv(env)->GetFieldID(env, c, name, sig));
- }
-
- static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("m", baseEnv(env)->GetStaticMethodID(env, c, name, sig));
- }
-
- static jfieldID GetStaticFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("f", baseEnv(env)->GetStaticFieldID(env, c, name, sig));
- }
-
-#define FIELD_ACCESSORS(_ctype, _jname, _jvalue_type, _type) \
- static _ctype GetStatic##_jname##Field(JNIEnv* env, jclass c, jfieldID fid) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecf", env, c, fid); \
- sc.CheckStaticFieldID(c, fid); \
- return CHECK_JNI_EXIT(_type, baseEnv(env)->GetStatic##_jname##Field(env, c, fid)); \
- } \
- static _ctype Get##_jname##Field(JNIEnv* env, jobject obj, jfieldID fid) { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELf", env, obj, fid); \
- sc.CheckInstanceFieldID(obj, fid); \
- return CHECK_JNI_EXIT(_type, baseEnv(env)->Get##_jname##Field(env, obj, fid)); \
- } \
- static void SetStatic##_jname##Field(JNIEnv* env, jclass c, jfieldID fid, _ctype value) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecf" _type, env, c, fid, value); \
- sc.CheckStaticFieldID(c, fid); \
- /* "value" arg only used when type == ref */ \
- jvalue java_type_value; \
- java_type_value._jvalue_type = value; \
- sc.CheckFieldType(java_type_value, fid, _type[0], true); \
- baseEnv(env)->SetStatic##_jname##Field(env, c, fid, value); \
- CHECK_JNI_EXIT_VOID(); \
- } \
- static void Set##_jname##Field(JNIEnv* env, jobject obj, jfieldID fid, _ctype value) { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELf" _type, env, obj, fid, value); \
- sc.CheckInstanceFieldID(obj, fid); \
- /* "value" arg only used when type == ref */ \
- jvalue java_type_value; \
- java_type_value._jvalue_type = value; \
- sc.CheckFieldType(java_type_value, fid, _type[0], false); \
- baseEnv(env)->Set##_jname##Field(env, obj, fid, value); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-FIELD_ACCESSORS(jobject, Object, l, "L");
-FIELD_ACCESSORS(jboolean, Boolean, z, "Z");
-FIELD_ACCESSORS(jbyte, Byte, b, "B");
-FIELD_ACCESSORS(jchar, Char, c, "C");
-FIELD_ACCESSORS(jshort, Short, s, "S");
-FIELD_ACCESSORS(jint, Int, i, "I");
-FIELD_ACCESSORS(jlong, Long, j, "J");
-FIELD_ACCESSORS(jfloat, Float, f, "F");
-FIELD_ACCESSORS(jdouble, Double, d, "D");
-
-#define CALL(_ctype, _jname, _retdecl, _retasgn, _retok, _retsig) \
- /* Virtual... */ \
- static _ctype Call##_jname##Method(JNIEnv* env, jobject obj, \
- jmethodID mid, ...) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- va_list args; \
- va_start(args, mid); \
- _retasgn(baseEnv(env)->Call##_jname##MethodV(env, obj, mid, args)); \
- va_end(args); \
- _retok; \
- } \
- static _ctype Call##_jname##MethodV(JNIEnv* env, jobject obj, \
- jmethodID mid, va_list args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->Call##_jname##MethodV(env, obj, mid, args)); \
- _retok; \
- } \
- static _ctype Call##_jname##MethodA(JNIEnv* env, jobject obj, \
- jmethodID mid, jvalue* args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->Call##_jname##MethodA(env, obj, mid, args)); \
- _retok; \
- } \
- /* Non-virtual... */ \
- static _ctype CallNonvirtual##_jname##Method(JNIEnv* env, \
- jobject obj, jclass c, jmethodID mid, ...) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- va_list args; \
- va_start(args, mid); \
- _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodV(env, obj, c, mid, args)); \
- va_end(args); \
- _retok; \
- } \
- static _ctype CallNonvirtual##_jname##MethodV(JNIEnv* env, \
- jobject obj, jclass c, jmethodID mid, va_list args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodV(env, obj, c, mid, args)); \
- _retok; \
- } \
- static _ctype CallNonvirtual##_jname##MethodA(JNIEnv* env, \
- jobject obj, jclass c, jmethodID mid, jvalue* args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodA(env, obj, c, mid, args)); \
- _retok; \
- } \
- /* Static... */ \
- static _ctype CallStatic##_jname##Method(JNIEnv* env, jclass c, jmethodID mid, ...) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, true); \
- sc.CheckStaticMethod(c, mid); \
- _retdecl; \
- va_list args; \
- va_start(args, mid); \
- _retasgn(baseEnv(env)->CallStatic##_jname##MethodV(env, c, mid, args)); \
- va_end(args); \
- _retok; \
- } \
- static _ctype CallStatic##_jname##MethodV(JNIEnv* env, jclass c, jmethodID mid, va_list args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, true); \
- sc.CheckStaticMethod(c, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallStatic##_jname##MethodV(env, c, mid, args)); \
- _retok; \
- } \
- static _ctype CallStatic##_jname##MethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, true); \
- sc.CheckStaticMethod(c, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallStatic##_jname##MethodA(env, c, mid, args)); \
- _retok; \
- }
-
-#define NON_VOID_RETURN(_retsig, _ctype) return CHECK_JNI_EXIT(_retsig, (_ctype) result)
-#define VOID_RETURN CHECK_JNI_EXIT_VOID()
-
-CALL(jobject, Object, mirror::Object* result, result = reinterpret_cast<mirror::Object*>, NON_VOID_RETURN("L", jobject), "L");
-CALL(jboolean, Boolean, jboolean result, result =, NON_VOID_RETURN("Z", jboolean), "Z");
-CALL(jbyte, Byte, jbyte result, result =, NON_VOID_RETURN("B", jbyte), "B");
-CALL(jchar, Char, jchar result, result =, NON_VOID_RETURN("C", jchar), "C");
-CALL(jshort, Short, jshort result, result =, NON_VOID_RETURN("S", jshort), "S");
-CALL(jint, Int, jint result, result =, NON_VOID_RETURN("I", jint), "I");
-CALL(jlong, Long, jlong result, result =, NON_VOID_RETURN("J", jlong), "J");
-CALL(jfloat, Float, jfloat result, result =, NON_VOID_RETURN("F", jfloat), "F");
-CALL(jdouble, Double, jdouble result, result =, NON_VOID_RETURN("D", jdouble), "D");
-CALL(void, Void, , , VOID_RETURN, "V");
-
- static jstring NewString(JNIEnv* env, const jchar* unicodeChars, jsize len) {
- CHECK_JNI_ENTRY(kFlag_Default, "Epz", env, unicodeChars, len);
- return CHECK_JNI_EXIT("s", baseEnv(env)->NewString(env, unicodeChars, len));
- }
-
- static jsize GetStringLength(JNIEnv* env, jstring string) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Es", env, string);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetStringLength(env, string));
- }
-
- static const jchar* GetStringChars(JNIEnv* env, jstring java_string, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, java_string, isCopy);
- const jchar* result = baseEnv(env)->GetStringChars(env, java_string, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
- int byteCount = s->GetLength() * 2;
- result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
- }
- }
- return CHECK_JNI_EXIT("p", result);
- }
-
- static void ReleaseStringChars(JNIEnv* env, jstring string, const jchar* chars) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "Esp", env, string, chars);
- sc.CheckNonNull(chars);
- if (sc.ForceCopy()) {
- GuardedCopy::Check(__FUNCTION__, chars, false);
- chars = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<jchar*>(chars)));
- }
- baseEnv(env)->ReleaseStringChars(env, string, chars);
- CHECK_JNI_EXIT_VOID();
- }
-
- static jstring NewStringUTF(JNIEnv* env, const char* bytes) {
- CHECK_JNI_ENTRY(kFlag_NullableUtf, "Eu", env, bytes); // TODO: show pointer and truncate string.
- return CHECK_JNI_EXIT("s", baseEnv(env)->NewStringUTF(env, bytes));
- }
-
- static jsize GetStringUTFLength(JNIEnv* env, jstring string) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Es", env, string);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetStringUTFLength(env, string));
- }
-
- static const char* GetStringUTFChars(JNIEnv* env, jstring string, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, string, isCopy);
- const char* result = baseEnv(env)->GetStringUTFChars(env, string, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- result = (const char*) GuardedCopy::Create(result, strlen(result) + 1, false);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
- }
- }
- return CHECK_JNI_EXIT("u", result); // TODO: show pointer and truncate string.
- }
-
- static void ReleaseStringUTFChars(JNIEnv* env, jstring string, const char* utf) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay | kFlag_Release, "Esu", env, string, utf); // TODO: show pointer and truncate string.
- if (sc.ForceCopy()) {
- GuardedCopy::Check(__FUNCTION__, utf, false);
- utf = reinterpret_cast<const char*>(GuardedCopy::Destroy(const_cast<char*>(utf)));
- }
- baseEnv(env)->ReleaseStringUTFChars(env, string, utf);
- CHECK_JNI_EXIT_VOID();
- }
-
- static jsize GetArrayLength(JNIEnv* env, jarray array) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Ea", env, array);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetArrayLength(env, array));
- }
-
- static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass elementClass, jobject initialElement) {
- CHECK_JNI_ENTRY(kFlag_Default, "EzcL", env, length, elementClass, initialElement);
- return CHECK_JNI_EXIT("a", baseEnv(env)->NewObjectArray(env, length, elementClass, initialElement));
- }
-
- static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index) {
- CHECK_JNI_ENTRY(kFlag_Default, "EaI", env, array, index);
- return CHECK_JNI_EXIT("L", baseEnv(env)->GetObjectArrayElement(env, array, index));
- }
-
- static void SetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index, jobject value) {
- CHECK_JNI_ENTRY(kFlag_Default, "EaIL", env, array, index, value);
- baseEnv(env)->SetObjectArrayElement(env, array, index, value);
- CHECK_JNI_EXIT_VOID();
- }
-
-#define NEW_PRIMITIVE_ARRAY(_artype, _jname) \
- static _artype New##_jname##Array(JNIEnv* env, jsize length) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ez", env, length); \
- return CHECK_JNI_EXIT("a", baseEnv(env)->New##_jname##Array(env, length)); \
- }
-NEW_PRIMITIVE_ARRAY(jbooleanArray, Boolean);
-NEW_PRIMITIVE_ARRAY(jbyteArray, Byte);
-NEW_PRIMITIVE_ARRAY(jcharArray, Char);
-NEW_PRIMITIVE_ARRAY(jshortArray, Short);
-NEW_PRIMITIVE_ARRAY(jintArray, Int);
-NEW_PRIMITIVE_ARRAY(jlongArray, Long);
-NEW_PRIMITIVE_ARRAY(jfloatArray, Float);
-NEW_PRIMITIVE_ARRAY(jdoubleArray, Double);
-
-struct ForceCopyGetChecker {
- public:
- ForceCopyGetChecker(ScopedCheck& sc, jboolean* isCopy) {
- force_copy = sc.ForceCopy();
- no_copy = 0;
- if (force_copy && isCopy != nullptr) {
- // Capture this before the base call tramples on it.
- no_copy = *reinterpret_cast<uint32_t*>(isCopy);
- }
- }
-
- template<typename ResultT>
- ResultT Check(JNIEnv* env, jarray array, jboolean* isCopy, ResultT result) {
- if (force_copy && result != nullptr) {
- result = reinterpret_cast<ResultT>(CreateGuardedPACopy(env, array, isCopy));
- }
- return result;
- }
-
- uint32_t no_copy;
- bool force_copy;
-};
-
-#define GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname) \
- static _ctype* Get##_jname##ArrayElements(JNIEnv* env, _ctype##Array array, jboolean* isCopy) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Eap", env, array, isCopy); \
- _ctype* result = ForceCopyGetChecker(sc, isCopy).Check(env, array, isCopy, baseEnv(env)->Get##_jname##ArrayElements(env, array, isCopy)); \
- return CHECK_JNI_EXIT("p", result); \
- }
-
-#define RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname) \
- static void Release##_jname##ArrayElements(JNIEnv* env, _ctype##Array array, _ctype* elems, jint mode) { \
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "Eapr", env, array, elems, mode); \
- sc.CheckNonNull(elems); \
- if (sc.ForceCopy()) { \
- ReleaseGuardedPACopy(env, array, elems, mode); \
- } \
- baseEnv(env)->Release##_jname##ArrayElements(env, array, elems, mode); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-#define GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname) \
- static void Get##_jname##ArrayRegion(JNIEnv* env, _ctype##Array array, jsize start, jsize len, _ctype* buf) { \
- CHECK_JNI_ENTRY(kFlag_Default, "EaIIp", env, array, start, len, buf); \
- baseEnv(env)->Get##_jname##ArrayRegion(env, array, start, len, buf); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-#define SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname) \
- static void Set##_jname##ArrayRegion(JNIEnv* env, _ctype##Array array, jsize start, jsize len, const _ctype* buf) { \
- CHECK_JNI_ENTRY(kFlag_Default, "EaIIp", env, array, start, len, buf); \
- baseEnv(env)->Set##_jname##ArrayRegion(env, array, start, len, buf); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-#define PRIMITIVE_ARRAY_FUNCTIONS(_ctype, _jname, _typechar) \
- GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname); \
- RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname); \
- GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname); \
- SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname);
-
-// TODO: verify primitive array type matches call type.
-PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean, 'Z');
-PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte, 'B');
-PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char, 'C');
-PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short, 'S');
-PRIMITIVE_ARRAY_FUNCTIONS(jint, Int, 'I');
-PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long, 'J');
-PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float, 'F');
-PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, 'D');
-
- static jint RegisterNatives(JNIEnv* env, jclass c, const JNINativeMethod* methods, jint nMethods) {
- CHECK_JNI_ENTRY(kFlag_Default, "EcpI", env, c, methods, nMethods);
- return CHECK_JNI_EXIT("I", baseEnv(env)->RegisterNatives(env, c, methods, nMethods));
- }
-
- static jint UnregisterNatives(JNIEnv* env, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
- return CHECK_JNI_EXIT("I", baseEnv(env)->UnregisterNatives(env, c));
- }
-
- static jint MonitorEnter(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- if (!sc.CheckInstance(ScopedCheck::kObject, obj)) {
- return JNI_ERR; // Only for jni_internal_test. Real code will have aborted already.
- }
- return CHECK_JNI_EXIT("I", baseEnv(env)->MonitorEnter(env, obj));
- }
-
- static jint MonitorExit(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, obj);
- if (!sc.CheckInstance(ScopedCheck::kObject, obj)) {
- return JNI_ERR; // Only for jni_internal_test. Real code will have aborted already.
- }
- return CHECK_JNI_EXIT("I", baseEnv(env)->MonitorExit(env, obj));
- }
-
- static jint GetJavaVM(JNIEnv *env, JavaVM **vm) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ep", env, vm);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetJavaVM(env, vm));
- }
-
- static void GetStringRegion(JNIEnv* env, jstring str, jsize start, jsize len, jchar* buf) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "EsIIp", env, str, start, len, buf);
- baseEnv(env)->GetStringRegion(env, str, start, len, buf);
- CHECK_JNI_EXIT_VOID();
- }
-
- static void GetStringUTFRegion(JNIEnv* env, jstring str, jsize start, jsize len, char* buf) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "EsIIp", env, str, start, len, buf);
- baseEnv(env)->GetStringUTFRegion(env, str, start, len, buf);
- CHECK_JNI_EXIT_VOID();
- }
-
- static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray array, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritGet, "Eap", env, array, isCopy);
- void* result = baseEnv(env)->GetPrimitiveArrayCritical(env, array, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- result = CreateGuardedPACopy(env, array, isCopy);
- }
- return CHECK_JNI_EXIT("p", result);
- }
-
- static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* carray, jint mode) {
- CHECK_JNI_ENTRY(kFlag_CritRelease | kFlag_ExcepOkay, "Eapr", env, array, carray, mode);
- sc.CheckNonNull(carray);
- if (sc.ForceCopy()) {
- ReleaseGuardedPACopy(env, array, carray, mode);
- }
- baseEnv(env)->ReleasePrimitiveArrayCritical(env, array, carray, mode);
- CHECK_JNI_EXIT_VOID();
- }
-
- static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritGet, "Esp", env, java_string, isCopy);
- const jchar* result = baseEnv(env)->GetStringCritical(env, java_string, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
- int byteCount = s->GetLength() * 2;
- result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
- }
- }
- return CHECK_JNI_EXIT("p", result);
- }
-
- static void ReleaseStringCritical(JNIEnv* env, jstring string, const jchar* carray) {
- CHECK_JNI_ENTRY(kFlag_CritRelease | kFlag_ExcepOkay, "Esp", env, string, carray);
- sc.CheckNonNull(carray);
- if (sc.ForceCopy()) {
- GuardedCopy::Check(__FUNCTION__, carray, false);
- carray = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<jchar*>(carray)));
- }
- baseEnv(env)->ReleaseStringCritical(env, string, carray);
- CHECK_JNI_EXIT_VOID();
+ static jobject NewLocalRef(JNIEnv* env, jobject obj) {
+ return NewRef(__FUNCTION__, env, obj, kLocal);
}
static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewWeakGlobalRef(env, obj));
+ return NewRef(__FUNCTION__, env, obj, kWeakGlobal);
}
- static jboolean ExceptionCheck(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_CritOkay | kFlag_ExcepOkay, "E", env);
- return CHECK_JNI_EXIT("b", baseEnv(env)->ExceptionCheck(env));
+ static void DeleteGlobalRef(JNIEnv* env, jobject obj) {
+ DeleteRef(__FUNCTION__, env, obj, kGlobal);
}
- static jobjectRefType GetObjectRefType(JNIEnv* env, jobject obj) {
- // Note: we use "Ep" rather than "EL" because this is the one JNI function
- // that it's okay to pass an invalid reference to.
- CHECK_JNI_ENTRY(kFlag_Default, "Ep", env, obj);
- // TODO: proper decoding of jobjectRefType!
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetObjectRefType(env, obj));
+ static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
+ DeleteRef(__FUNCTION__, env, obj, kWeakGlobal);
+ }
+
+ static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+ DeleteRef(__FUNCTION__, env, obj, kLocal);
+ }
+
+ static jint EnsureLocalCapacity(JNIEnv *env, jint capacity) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.I = capacity}};
+ if (sc.Check(soa, true, "EI", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->EnsureLocalCapacity(env, capacity);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jboolean IsSameObject(JNIEnv* env, jobject ref1, jobject ref2) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.L = ref1}, {.L = ref2}};
+ if (sc.Check(soa, true, "ELL", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->IsSameObject(env, ref1, ref2);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
+ }
+
+ static jobject AllocObject(JNIEnv* env, jclass c) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.c = c}};
+ if (sc.Check(soa, true, "Ec", args) && sc.CheckInstantiableNonArray(soa, c)) {
+ JniValueType result;
+ result.L = baseEnv(env)->AllocObject(env, c);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
+ }
+
+ static jobject NewObjectV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+ if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ sc.CheckConstructor(soa, mid)) {
+ JniValueType result;
+ result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
+ }
+
+ static jobject NewObject(JNIEnv* env, jclass c, jmethodID mid, ...) {
+ va_list args;
+ va_start(args, mid);
+ jobject result = NewObjectV(env, c, mid, args);
+ va_end(args);
+ return result;
+ }
+
+ static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+ if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ sc.CheckConstructor(soa, mid)) {
+ JniValueType result;
+ result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
+ }
+
+ static jclass GetObjectClass(JNIEnv* env, jobject obj) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.c = baseEnv(env)->GetObjectClass(env, obj);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
+ }
+
+ static jboolean IsInstanceOf(JNIEnv* env, jobject obj, jclass c) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.L = obj}, {.c = c}};
+ if (sc.Check(soa, true, "ELc", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->IsInstanceOf(env, obj, c);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
+ }
+
+ static jmethodID GetMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+ return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, false);
+ }
+
+ static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+ return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, true);
+ }
+
+ static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+ return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, false);
+ }
+
+ static jfieldID GetStaticFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+ return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, true);
+ }
+
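+// Expands to the Get/Set{Static}<name>Field wrappers for one field type, each delegating to the
+// shared GetField/SetField helpers with the matching Primitive type and JniValueType member.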
+#define FIELD_ACCESSORS(jtype, name, ptype, shorty) \
+ static jtype GetStatic##name##Field(JNIEnv* env, jclass c, jfieldID fid) { \
+ return GetField(__FUNCTION__, env, c, fid, true, ptype).shorty; \
+ } \
+ \
+ static jtype Get##name##Field(JNIEnv* env, jobject obj, jfieldID fid) { \
+ return GetField(__FUNCTION__, env, obj, fid, false, ptype).shorty; \
+ } \
+ \
+ static void SetStatic##name##Field(JNIEnv* env, jclass c, jfieldID fid, jtype v) { \
+ JniValueType value; \
+ value.shorty = v; \
+ SetField(__FUNCTION__, env, c, fid, true, ptype, value); \
+ } \
+ \
+ static void Set##name##Field(JNIEnv* env, jobject obj, jfieldID fid, jtype v) { \
+ JniValueType value; \
+ value.shorty = v; \
+ SetField(__FUNCTION__, env, obj, fid, false, ptype, value); \
+ }
+
+ FIELD_ACCESSORS(jobject, Object, Primitive::kPrimNot, L)
+ FIELD_ACCESSORS(jboolean, Boolean, Primitive::kPrimBoolean, Z)
+ FIELD_ACCESSORS(jbyte, Byte, Primitive::kPrimByte, B)
+ FIELD_ACCESSORS(jchar, Char, Primitive::kPrimChar, C)
+ FIELD_ACCESSORS(jshort, Short, Primitive::kPrimShort, S)
+ FIELD_ACCESSORS(jint, Int, Primitive::kPrimInt, I)
+ FIELD_ACCESSORS(jlong, Long, Primitive::kPrimLong, J)
+ FIELD_ACCESSORS(jfloat, Float, Primitive::kPrimFloat, F)
+ FIELD_ACCESSORS(jdouble, Double, Primitive::kPrimDouble, D)
+#undef FIELD_ACCESSORS
+
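+  // The void-returning call variants are written out by hand because they have no result value
+  // to extract from the JniValueType returned by CallMethodA/CallMethodV.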
+ static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) {
+ CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+ }
+
+ static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid,
+ jvalue* vargs) {
+ CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+ }
+
+ static void CallStaticVoidMethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+ CallMethodA(__FUNCTION__, env, c, nullptr, mid, vargs, Primitive::kPrimVoid, kStatic);
+ }
+
+ static void CallVoidMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list vargs) {
+ CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+ }
+
+ static void CallNonvirtualVoidMethodV(JNIEnv* env, jobject obj, jclass c, jmethodID mid,
+ va_list vargs) {
+ CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+ }
+
+ static void CallStaticVoidMethodV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) {
+ CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
+ }
+
+ static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
+ va_list vargs;
+ va_start(vargs, mid);
+ CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+ va_end(vargs);
+ }
+
+ static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass c, jmethodID mid, ...) {
+ va_list vargs;
+ va_start(vargs, mid);
+ CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+ va_end(vargs);
+ }
+
+ static void CallStaticVoidMethod(JNIEnv* env, jclass c, jmethodID mid, ...) {
+ va_list vargs;
+ va_start(vargs, mid);
+ CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
+ va_end(vargs);
+ }
+
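+// Expands to the nine Call{,Nonvirtual,Static}<name>Method{,V,A} wrappers for one return type,
+// each delegating to the shared CallMethodA/CallMethodV helpers.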
+#define CALL(rtype, name, ptype, shorty) \
+ static rtype Call##name##MethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) { \
+ return CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+ } \
+ \
+ static rtype CallNonvirtual##name##MethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+ jvalue* vargs) { \
+ return CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+ } \
+ \
+ static rtype CallStatic##name##MethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) { \
+ return CallMethodA(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+ } \
+ \
+ static rtype Call##name##MethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list vargs) { \
+ return CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+ } \
+ \
+ static rtype CallNonvirtual##name##MethodV(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+ va_list vargs) { \
+ return CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+ } \
+ \
+ static rtype CallStatic##name##MethodV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) { \
+ return CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+ } \
+ \
+ static rtype Call##name##Method(JNIEnv* env, jobject obj, jmethodID mid, ...) { \
+ va_list vargs; \
+ va_start(vargs, mid); \
+ rtype result = \
+ CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+ va_end(vargs); \
+ return result; \
+ } \
+ \
+ static rtype CallNonvirtual##name##Method(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+ ...) { \
+ va_list vargs; \
+ va_start(vargs, mid); \
+ rtype result = \
+ CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+ va_end(vargs); \
+ return result; \
+ } \
+ \
+ static rtype CallStatic##name##Method(JNIEnv* env, jclass c, jmethodID mid, ...) { \
+ va_list vargs; \
+ va_start(vargs, mid); \
+ rtype result = \
+ CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+ va_end(vargs); \
+ return result; \
+ }
+
+ CALL(jobject, Object, Primitive::kPrimNot, L)
+ CALL(jboolean, Boolean, Primitive::kPrimBoolean, Z)
+ CALL(jbyte, Byte, Primitive::kPrimByte, B)
+ CALL(jchar, Char, Primitive::kPrimChar, C)
+ CALL(jshort, Short, Primitive::kPrimShort, S)
+ CALL(jint, Int, Primitive::kPrimInt, I)
+ CALL(jlong, Long, Primitive::kPrimLong, J)
+ CALL(jfloat, Float, Primitive::kPrimFloat, F)
+ CALL(jdouble, Double, Primitive::kPrimDouble, D)
+#undef CALL
+
+ static jstring NewString(JNIEnv* env, const jchar* unicode_chars, jsize len) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.p = unicode_chars}, {.z = len}};
+ if (sc.Check(soa, true, "Epz", args)) {
+ JniValueType result;
+ result.s = baseEnv(env)->NewString(env, unicode_chars, len);
+ if (sc.Check(soa, false, "s", &result)) {
+ return result.s;
+ }
+ }
+ return nullptr;
+ }
+
+ static jstring NewStringUTF(JNIEnv* env, const char* chars) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_NullableUtf, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.u = chars}};
+ if (sc.Check(soa, true, "Eu", args)) {
+ JniValueType result;
+ // TODO: stale? show pointer and truncate string.
+ result.s = baseEnv(env)->NewStringUTF(env, chars);
+ if (sc.Check(soa, false, "s", &result)) {
+ return result.s;
+ }
+ }
+ return nullptr;
+ }
+
+ static jsize GetStringLength(JNIEnv* env, jstring string) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.s = string}};
+ if (sc.Check(soa, true, "Es", args)) {
+ JniValueType result;
+ result.z = baseEnv(env)->GetStringLength(env, string);
+ if (sc.Check(soa, false, "z", &result)) {
+ return result.z;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jsize GetStringUTFLength(JNIEnv* env, jstring string) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.s = string}};
+ if (sc.Check(soa, true, "Es", args)) {
+ JniValueType result;
+ result.z = baseEnv(env)->GetStringUTFLength(env, string);
+ if (sc.Check(soa, false, "z", &result)) {
+ return result.z;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static const jchar* GetStringChars(JNIEnv* env, jstring string, jboolean* is_copy) {
+ return reinterpret_cast<const jchar*>(GetStringCharsInternal(__FUNCTION__, env, string,
+ is_copy, false, false));
+ }
+
+ static const char* GetStringUTFChars(JNIEnv* env, jstring string, jboolean* is_copy) {
+ return reinterpret_cast<const char*>(GetStringCharsInternal(__FUNCTION__, env, string,
+ is_copy, true, false));
+ }
+
+ static const jchar* GetStringCritical(JNIEnv* env, jstring string, jboolean* is_copy) {
+ return reinterpret_cast<const jchar*>(GetStringCharsInternal(__FUNCTION__, env, string,
+ is_copy, false, true));
+ }
+
+ static void ReleaseStringChars(JNIEnv* env, jstring string, const jchar* chars) {
+ ReleaseStringCharsInternal(__FUNCTION__, env, string, chars, false, false);
+ }
+
+ static void ReleaseStringUTFChars(JNIEnv* env, jstring string, const char* utf) {
+ ReleaseStringCharsInternal(__FUNCTION__, env, string, utf, true, false);
+ }
+
+ static void ReleaseStringCritical(JNIEnv* env, jstring string, const jchar* chars) {
+ ReleaseStringCharsInternal(__FUNCTION__, env, string, chars, false, true);
+ }
+
+ static void GetStringRegion(JNIEnv* env, jstring string, jsize start, jsize len, jchar* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[5] = {{.E = env}, {.s = string}, {.z = start}, {.z = len}, {.p = buf}};
+ // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+ // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EsIIp", args)) {
+ baseEnv(env)->GetStringRegion(env, string, start, len, buf);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
+ static void GetStringUTFRegion(JNIEnv* env, jstring string, jsize start, jsize len, char* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[5] = {{.E = env}, {.s = string}, {.z = start}, {.z = len}, {.p = buf}};
+ // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+ // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EsIIp", args)) {
+ baseEnv(env)->GetStringUTFRegion(env, string, start, len, buf);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
+ static jsize GetArrayLength(JNIEnv* env, jarray array) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.a = array}};
+ if (sc.Check(soa, true, "Ea", args)) {
+ JniValueType result;
+ result.z = baseEnv(env)->GetArrayLength(env, array);
+ if (sc.Check(soa, false, "z", &result)) {
+ return result.z;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_class,
+ jobject initial_element) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] =
+ {{.E = env}, {.z = length}, {.c = element_class}, {.L = initial_element}};
+ if (sc.Check(soa, true, "EzcL", args)) {
+ JniValueType result;
+ // Note: assignability tests of initial_element are done in the base implementation.
+ result.a = baseEnv(env)->NewObjectArray(env, length, element_class, initial_element);
+ if (sc.Check(soa, false, "a", &result)) {
+ return down_cast<jobjectArray>(result.a);
+ }
+ }
+ return nullptr;
+ }
+
+ static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.a = array}, {.z = index}};
+ if (sc.Check(soa, true, "Eaz", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->GetObjectArrayElement(env, array, index);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
+ }
+
+ static void SetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index, jobject value) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] = {{.E = env}, {.a = array}, {.z = index}, {.L = value}};
+    // Note: the index argument is checked as 'I' rather than 'z' as invalid indices result in
+    // ArrayIndexOutOfBoundsExceptions in the base implementation. Similarly, invalid stores
+    // result in ArrayStoreExceptions.
+ if (sc.Check(soa, true, "EaIL", args)) {
+ baseEnv(env)->SetObjectArrayElement(env, array, index, value);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
+ static jbooleanArray NewBooleanArray(JNIEnv* env, jsize length) {
+ return down_cast<jbooleanArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimBoolean));
+ }
+
+ static jbyteArray NewByteArray(JNIEnv* env, jsize length) {
+ return down_cast<jbyteArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimByte));
+ }
+
+ static jcharArray NewCharArray(JNIEnv* env, jsize length) {
+ return down_cast<jcharArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimChar));
+ }
+
+ static jshortArray NewShortArray(JNIEnv* env, jsize length) {
+ return down_cast<jshortArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimShort));
+ }
+
+ static jintArray NewIntArray(JNIEnv* env, jsize length) {
+ return down_cast<jintArray>(NewPrimitiveArray(__FUNCTION__, env, length, Primitive::kPrimInt));
+ }
+
+ static jlongArray NewLongArray(JNIEnv* env, jsize length) {
+ return down_cast<jlongArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimLong));
+ }
+
+ static jfloatArray NewFloatArray(JNIEnv* env, jsize length) {
+ return down_cast<jfloatArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimFloat));
+ }
+
+ static jdoubleArray NewDoubleArray(JNIEnv* env, jsize length) {
+ return down_cast<jdoubleArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimDouble));
+ }
+
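+  // Stamps out the checked Get/Release<Type>ArrayElements and Get/Set<Type>ArrayRegion wrappers
+  // for every primitive type; each one delegates to the shared helpers defined further down.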
+#define PRIMITIVE_ARRAY_FUNCTIONS(ctype, name, ptype) \
+ static ctype* Get##name##ArrayElements(JNIEnv* env, ctype##Array array, jboolean* is_copy) { \
+ return reinterpret_cast<ctype*>( \
+ GetPrimitiveArrayElements(__FUNCTION__, ptype, env, array, is_copy)); \
+ } \
+ \
+ static void Release##name##ArrayElements(JNIEnv* env, ctype##Array array, ctype* elems, \
+ jint mode) { \
+ ReleasePrimitiveArrayElements(__FUNCTION__, ptype, env, array, elems, mode); \
+ } \
+ \
+ static void Get##name##ArrayRegion(JNIEnv* env, ctype##Array array, jsize start, jsize len, \
+ ctype* buf) { \
+ GetPrimitiveArrayRegion(__FUNCTION__, ptype, env, array, start, len, buf); \
+ } \
+ \
+ static void Set##name##ArrayRegion(JNIEnv* env, ctype##Array array, jsize start, jsize len, \
+ const ctype* buf) { \
+ SetPrimitiveArrayRegion(__FUNCTION__, ptype, env, array, start, len, buf); \
+ }
+
+ PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean, Primitive::kPrimBoolean)
+ PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte, Primitive::kPrimByte)
+ PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char, Primitive::kPrimChar)
+ PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short, Primitive::kPrimShort)
+ PRIMITIVE_ARRAY_FUNCTIONS(jint, Int, Primitive::kPrimInt)
+ PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long, Primitive::kPrimLong)
+ PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float, Primitive::kPrimFloat)
+ PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, Primitive::kPrimDouble)
+#undef PRIMITIVE_ARRAY_FUNCTIONS
+
+ static jint MonitorEnter(JNIEnv* env, jobject obj) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->MonitorEnter(env, obj);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jint MonitorExit(JNIEnv* env, jobject obj) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->MonitorExit(env, obj);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
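+  // In force-copy mode, hand out a guarded copy of the array contents rather than the raw
+  // pointer, so that buffer overruns can be detected when ReleasePrimitiveArrayCritical runs.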
+ static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray array, jboolean* is_copy) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritGet, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.a = array}, {.p = is_copy}};
+ if (sc.Check(soa, true, "Eap", args)) {
+ JniValueType result;
+ result.p = baseEnv(env)->GetPrimitiveArrayCritical(env, array, is_copy);
+ if (result.p != nullptr && soa.ForceCopy()) {
+ result.p = GuardedCopy::CreateGuardedPACopy(env, array, is_copy);
+ }
+ if (sc.Check(soa, false, "p", &result)) {
+ return const_cast<void*>(result.p);
+ }
+ }
+ return nullptr;
+ }
+
+ static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* carray, jint mode) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritRelease | kFlag_ExcepOkay, __FUNCTION__);
+ sc.CheckNonNull(carray);
+ JniValueType args[4] = {{.E = env}, {.a = array}, {.p = carray}, {.r = mode}};
+ if (sc.Check(soa, true, "Eapr", args)) {
+ if (soa.ForceCopy()) {
+ GuardedCopy::ReleaseGuardedPACopy(__FUNCTION__, env, array, carray, mode);
+ }
+ baseEnv(env)->ReleasePrimitiveArrayCritical(env, array, carray, mode);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
- CHECK_JNI_ENTRY(kFlag_Default, "EpJ", env, address, capacity);
- if (address == nullptr) {
- JniAbortF(__FUNCTION__, "non-nullable address is NULL");
- return nullptr;
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.p = address}, {.J = capacity}};
+ if (sc.Check(soa, true, "EpJ", args)) {
+ JniValueType result;
+ // Note: the validity of address and capacity are checked in the base implementation.
+ result.L = baseEnv(env)->NewDirectByteBuffer(env, address, capacity);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
}
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewDirectByteBuffer(env, address, capacity));
+ return nullptr;
}
static void* GetDirectBufferAddress(JNIEnv* env, jobject buf) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, buf);
- // TODO: check that 'buf' is a java.nio.Buffer.
- return CHECK_JNI_EXIT("p", baseEnv(env)->GetDirectBufferAddress(env, buf));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = buf}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+      // Note: this is implemented in the base environment by a GetLongField, which will sanity
+      // check the type of buf.
+ result.p = baseEnv(env)->GetDirectBufferAddress(env, buf);
+ if (sc.Check(soa, false, "p", &result)) {
+ return const_cast<void*>(result.p);
+ }
+ }
+ return nullptr;
}
static jlong GetDirectBufferCapacity(JNIEnv* env, jobject buf) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, buf);
- // TODO: check that 'buf' is a java.nio.Buffer.
- return CHECK_JNI_EXIT("J", baseEnv(env)->GetDirectBufferCapacity(env, buf));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = buf}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+      // Note: this is implemented in the base environment by a GetIntField, which will sanity
+      // check the type of buf.
+ result.J = baseEnv(env)->GetDirectBufferCapacity(env, buf);
+ if (sc.Check(soa, false, "J", &result)) {
+ return result.J;
+ }
+ }
+ return JNI_ERR;
}
private:
- static inline const JNINativeInterface* baseEnv(JNIEnv* env) {
+ static JavaVMExt* GetJavaVMExt(JNIEnv* env) {
+ return reinterpret_cast<JNIEnvExt*>(env)->vm;
+ }
+
+ static const JNINativeInterface* baseEnv(JNIEnv* env) {
return reinterpret_cast<JNIEnvExt*>(env)->unchecked_functions;
}
+
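+  // Shared helper behind NewGlobalRef, NewLocalRef and NewWeakGlobalRef: check the argument,
+  // forward to the unchecked function for the requested reference kind, then verify that the
+  // returned reference refers to the same object and is of the expected kind.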
+ static jobject NewRef(const char* function_name, JNIEnv* env, jobject obj, IndirectRefKind kind) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ switch (kind) {
+ case kGlobal:
+ result.L = baseEnv(env)->NewGlobalRef(env, obj);
+ break;
+ case kLocal:
+ result.L = baseEnv(env)->NewLocalRef(env, obj);
+ break;
+ case kWeakGlobal:
+ result.L = baseEnv(env)->NewWeakGlobalRef(env, obj);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected reference kind: " << kind;
+ }
+ if (sc.Check(soa, false, "L", &result)) {
+ DCHECK_EQ(IsSameObject(env, obj, result.L), JNI_TRUE);
+ DCHECK(sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), result.L));
+ return result.L;
+ }
+ }
+ return nullptr;
+ }
+
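+  // Shared helper behind DeleteGlobalRef, DeleteLocalRef and DeleteWeakGlobalRef: verify that
+  // the reference is of the expected kind before handing it to the unchecked delete.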
+ static void DeleteRef(const char* function_name, JNIEnv* env, jobject obj, IndirectRefKind kind) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, function_name);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ sc.Check(soa, true, "EL", args);
+ if (sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), obj)) {
+ JniValueType result;
+ switch (kind) {
+ case kGlobal:
+ baseEnv(env)->DeleteGlobalRef(env, obj);
+ break;
+ case kLocal:
+ baseEnv(env)->DeleteLocalRef(env, obj);
+ break;
+ case kWeakGlobal:
+ baseEnv(env)->DeleteWeakGlobalRef(env, obj);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected reference kind: " << kind;
+ }
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
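+  // Shared implementation of GetMethodID and GetStaticMethodID.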
+ static jmethodID GetMethodIDInternal(const char* function_name, JNIEnv* env, jclass c,
+ const char* name, const char* sig, bool is_static) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[4] = {{.E = env}, {.c = c}, {.u = name}, {.u = sig}};
+ if (sc.Check(soa, true, "Ecuu", args)) {
+ JniValueType result;
+ if (is_static) {
+ result.m = baseEnv(env)->GetStaticMethodID(env, c, name, sig);
+ } else {
+ result.m = baseEnv(env)->GetMethodID(env, c, name, sig);
+ }
+ if (sc.Check(soa, false, "m", &result)) {
+ return result.m;
+ }
+ }
+ return nullptr;
+ }
+
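+  // Shared implementation of GetFieldID and GetStaticFieldID.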
+ static jfieldID GetFieldIDInternal(const char* function_name, JNIEnv* env, jclass c,
+ const char* name, const char* sig, bool is_static) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[4] = {{.E = env}, {.c = c}, {.u = name}, {.u = sig}};
+ if (sc.Check(soa, true, "Ecuu", args)) {
+ JniValueType result;
+ if (is_static) {
+ result.f = baseEnv(env)->GetStaticFieldID(env, c, name, sig);
+ } else {
+ result.f = baseEnv(env)->GetFieldID(env, c, name, sig);
+ }
+ if (sc.Check(soa, false, "f", &result)) {
+ return result.f;
+ }
+ }
+ return nullptr;
+ }
+
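+  // Shared implementation of all Get<Type>Field and GetStatic<Type>Field entry points: check the
+  // receiver (or class), the field id and the field type, then dispatch on the primitive type.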
+ static JniValueType GetField(const char* function_name, JNIEnv* env, jobject obj, jfieldID fid,
+ bool is_static, Primitive::Type type) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[3] = {{.E = env}, {.L = obj}, {.f = fid}};
+ JniValueType result;
+ if (sc.Check(soa, true, is_static ? "Ecf" : "ELf", args) &&
+ sc.CheckFieldAccess(soa, obj, fid, is_static, type)) {
+ const char* result_check = nullptr;
+ switch (type) {
+ case Primitive::kPrimNot:
+ if (is_static) {
+ result.L = baseEnv(env)->GetStaticObjectField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.L = baseEnv(env)->GetObjectField(env, obj, fid);
+ }
+ result_check = "L";
+ break;
+ case Primitive::kPrimBoolean:
+ if (is_static) {
+ result.Z = baseEnv(env)->GetStaticBooleanField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.Z = baseEnv(env)->GetBooleanField(env, obj, fid);
+ }
+ result_check = "Z";
+ break;
+ case Primitive::kPrimByte:
+ if (is_static) {
+ result.B = baseEnv(env)->GetStaticByteField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.B = baseEnv(env)->GetByteField(env, obj, fid);
+ }
+ result_check = "B";
+ break;
+ case Primitive::kPrimChar:
+ if (is_static) {
+ result.C = baseEnv(env)->GetStaticCharField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.C = baseEnv(env)->GetCharField(env, obj, fid);
+ }
+ result_check = "C";
+ break;
+ case Primitive::kPrimShort:
+ if (is_static) {
+ result.S = baseEnv(env)->GetStaticShortField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.S = baseEnv(env)->GetShortField(env, obj, fid);
+ }
+ result_check = "S";
+ break;
+ case Primitive::kPrimInt:
+ if (is_static) {
+ result.I = baseEnv(env)->GetStaticIntField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.I = baseEnv(env)->GetIntField(env, obj, fid);
+ }
+ result_check = "I";
+ break;
+ case Primitive::kPrimLong:
+ if (is_static) {
+ result.J = baseEnv(env)->GetStaticLongField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.J = baseEnv(env)->GetLongField(env, obj, fid);
+ }
+ result_check = "J";
+ break;
+ case Primitive::kPrimFloat:
+ if (is_static) {
+ result.F = baseEnv(env)->GetStaticFloatField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.F = baseEnv(env)->GetFloatField(env, obj, fid);
+ }
+ result_check = "F";
+ break;
+ case Primitive::kPrimDouble:
+ if (is_static) {
+ result.D = baseEnv(env)->GetStaticDoubleField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.D = baseEnv(env)->GetDoubleField(env, obj, fid);
+ }
+ result_check = "D";
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type: " << type;
+ break;
+ }
+ if (sc.Check(soa, false, result_check, &result)) {
+ return result;
+ }
+ }
+ result.J = 0;
+ return result;
+ }
+
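+  // Shared implementation of all Set<Type>Field and SetStatic<Type>Field entry points; the
+  // expected value character in the checked signature is derived from the field's primitive type.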
+ static void SetField(const char* function_name, JNIEnv* env, jobject obj, jfieldID fid,
+ bool is_static, Primitive::Type type, JniValueType value) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[4] = {{.E = env}, {.L = obj}, {.f = fid}, value};
+ char sig[5] = { 'E', is_static ? 'c' : 'L', 'f',
+ type == Primitive::kPrimNot ? 'L' : Primitive::Descriptor(type)[0], '\0'};
+ if (sc.Check(soa, true, sig, args) &&
+ sc.CheckFieldAccess(soa, obj, fid, is_static, type)) {
+ switch (type) {
+ case Primitive::kPrimNot:
+ if (is_static) {
+ baseEnv(env)->SetStaticObjectField(env, down_cast<jclass>(obj), fid, value.L);
+ } else {
+ baseEnv(env)->SetObjectField(env, obj, fid, value.L);
+ }
+ break;
+ case Primitive::kPrimBoolean:
+ if (is_static) {
+ baseEnv(env)->SetStaticBooleanField(env, down_cast<jclass>(obj), fid, value.Z);
+ } else {
+ baseEnv(env)->SetBooleanField(env, obj, fid, value.Z);
+ }
+ break;
+ case Primitive::kPrimByte:
+ if (is_static) {
+ baseEnv(env)->SetStaticByteField(env, down_cast<jclass>(obj), fid, value.B);
+ } else {
+ baseEnv(env)->SetByteField(env, obj, fid, value.B);
+ }
+ break;
+ case Primitive::kPrimChar:
+ if (is_static) {
+ baseEnv(env)->SetStaticCharField(env, down_cast<jclass>(obj), fid, value.C);
+ } else {
+ baseEnv(env)->SetCharField(env, obj, fid, value.C);
+ }
+ break;
+ case Primitive::kPrimShort:
+ if (is_static) {
+ baseEnv(env)->SetStaticShortField(env, down_cast<jclass>(obj), fid, value.S);
+ } else {
+ baseEnv(env)->SetShortField(env, obj, fid, value.S);
+ }
+ break;
+ case Primitive::kPrimInt:
+ if (is_static) {
+ baseEnv(env)->SetStaticIntField(env, down_cast<jclass>(obj), fid, value.I);
+ } else {
+ baseEnv(env)->SetIntField(env, obj, fid, value.I);
+ }
+ break;
+ case Primitive::kPrimLong:
+ if (is_static) {
+ baseEnv(env)->SetStaticLongField(env, down_cast<jclass>(obj), fid, value.J);
+ } else {
+ baseEnv(env)->SetLongField(env, obj, fid, value.J);
+ }
+ break;
+ case Primitive::kPrimFloat:
+ if (is_static) {
+ baseEnv(env)->SetStaticFloatField(env, down_cast<jclass>(obj), fid, value.F);
+ } else {
+ baseEnv(env)->SetFloatField(env, obj, fid, value.F);
+ }
+ break;
+ case Primitive::kPrimDouble:
+ if (is_static) {
+ baseEnv(env)->SetStaticDoubleField(env, down_cast<jclass>(obj), fid, value.D);
+ } else {
+ baseEnv(env)->SetDoubleField(env, obj, fid, value.D);
+ }
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type: " << type;
+ break;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
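+  // Pre-call argument check shared by CallMethodA and CallMethodV: verify the combination of
+  // env, receiver, class and method id that is appropriate for the given invoke type.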
+ static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
+ jclass c, jmethodID mid, InvokeType invoke)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool checked;
+ switch (invoke) {
+ case kVirtual: {
+ DCHECK(c == nullptr);
+ JniValueType args[3] = {{.E = env}, {.L = obj}, {.m = mid}};
+ checked = sc.Check(soa, true, "ELm.", args);
+ break;
+ }
+ case kDirect: {
+ JniValueType args[4] = {{.E = env}, {.L = obj}, {.c = c}, {.m = mid}};
+ checked = sc.Check(soa, true, "ELcm.", args);
+ break;
+ }
+ case kStatic: {
+ DCHECK(obj == nullptr);
+ JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+ checked = sc.Check(soa, true, "Ecm.", args);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ checked = false;
+ break;
+ }
+ return checked;
+ }
+
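+  // Shared implementation of the Call<Type>MethodA, CallNonvirtual<Type>MethodA and
+  // CallStatic<Type>MethodA families: check the call arguments and the method signature,
+  // dispatch on the return type and invoke type, then check the result.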
+ static JniValueType CallMethodA(const char* function_name, JNIEnv* env, jobject obj, jclass c,
+ jmethodID mid, jvalue* vargs, Primitive::Type type,
+ InvokeType invoke) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType result;
+ if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+ sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
+ const char* result_check;
+ switch (type) {
+ case Primitive::kPrimNot:
+ result_check = "L";
+ switch (invoke) {
+ case kVirtual:
+ result.L = baseEnv(env)->CallObjectMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.L = baseEnv(env)->CallNonvirtualObjectMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.L = baseEnv(env)->CallStaticObjectMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimBoolean:
+ result_check = "Z";
+ switch (invoke) {
+ case kVirtual:
+ result.Z = baseEnv(env)->CallBooleanMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.Z = baseEnv(env)->CallNonvirtualBooleanMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.Z = baseEnv(env)->CallStaticBooleanMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimByte:
+ result_check = "B";
+ switch (invoke) {
+ case kVirtual:
+ result.B = baseEnv(env)->CallByteMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.B = baseEnv(env)->CallNonvirtualByteMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.B = baseEnv(env)->CallStaticByteMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimChar:
+ result_check = "C";
+ switch (invoke) {
+ case kVirtual:
+ result.C = baseEnv(env)->CallCharMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.C = baseEnv(env)->CallNonvirtualCharMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.C = baseEnv(env)->CallStaticCharMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimShort:
+ result_check = "S";
+ switch (invoke) {
+ case kVirtual:
+ result.S = baseEnv(env)->CallShortMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.S = baseEnv(env)->CallNonvirtualShortMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.S = baseEnv(env)->CallStaticShortMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimInt:
+ result_check = "I";
+ switch (invoke) {
+ case kVirtual:
+ result.I = baseEnv(env)->CallIntMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.I = baseEnv(env)->CallNonvirtualIntMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.I = baseEnv(env)->CallStaticIntMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimLong:
+ result_check = "J";
+ switch (invoke) {
+ case kVirtual:
+ result.J = baseEnv(env)->CallLongMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.J = baseEnv(env)->CallNonvirtualLongMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.J = baseEnv(env)->CallStaticLongMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimFloat:
+ result_check = "F";
+ switch (invoke) {
+ case kVirtual:
+ result.F = baseEnv(env)->CallFloatMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.F = baseEnv(env)->CallNonvirtualFloatMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.F = baseEnv(env)->CallStaticFloatMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimDouble:
+ result_check = "D";
+ switch (invoke) {
+ case kVirtual:
+ result.D = baseEnv(env)->CallDoubleMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.D = baseEnv(env)->CallNonvirtualDoubleMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.D = baseEnv(env)->CallStaticDoubleMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimVoid:
+ result_check = "V";
+ result.V = nullptr;
+ switch (invoke) {
+ case kVirtual:
+ baseEnv(env)->CallVoidMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ baseEnv(env)->CallNonvirtualVoidMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ baseEnv(env)->CallStaticVoidMethodA(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected return type: " << type;
+ result_check = nullptr;
+ }
+ if (sc.Check(soa, false, result_check, &result)) {
+ return result;
+ }
+ }
+ result.J = 0;
+ return result;
+ }
+
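+  // va_list counterpart of CallMethodA, backing the Call<Type>MethodV,
+  // CallNonvirtual<Type>MethodV and CallStatic<Type>MethodV families.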
+ static JniValueType CallMethodV(const char* function_name, JNIEnv* env, jobject obj, jclass c,
+ jmethodID mid, va_list vargs, Primitive::Type type,
+ InvokeType invoke) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType result;
+ if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+ sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
+ const char* result_check;
+ switch (type) {
+ case Primitive::kPrimNot:
+ result_check = "L";
+ switch (invoke) {
+ case kVirtual:
+ result.L = baseEnv(env)->CallObjectMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.L = baseEnv(env)->CallNonvirtualObjectMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.L = baseEnv(env)->CallStaticObjectMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimBoolean:
+ result_check = "Z";
+ switch (invoke) {
+ case kVirtual:
+ result.Z = baseEnv(env)->CallBooleanMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.Z = baseEnv(env)->CallNonvirtualBooleanMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.Z = baseEnv(env)->CallStaticBooleanMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimByte:
+ result_check = "B";
+ switch (invoke) {
+ case kVirtual:
+ result.B = baseEnv(env)->CallByteMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.B = baseEnv(env)->CallNonvirtualByteMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.B = baseEnv(env)->CallStaticByteMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimChar:
+ result_check = "C";
+ switch (invoke) {
+ case kVirtual:
+ result.C = baseEnv(env)->CallCharMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.C = baseEnv(env)->CallNonvirtualCharMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.C = baseEnv(env)->CallStaticCharMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimShort:
+ result_check = "S";
+ switch (invoke) {
+ case kVirtual:
+ result.S = baseEnv(env)->CallShortMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.S = baseEnv(env)->CallNonvirtualShortMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.S = baseEnv(env)->CallStaticShortMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimInt:
+ result_check = "I";
+ switch (invoke) {
+ case kVirtual:
+ result.I = baseEnv(env)->CallIntMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.I = baseEnv(env)->CallNonvirtualIntMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.I = baseEnv(env)->CallStaticIntMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimLong:
+ result_check = "J";
+ switch (invoke) {
+ case kVirtual:
+ result.J = baseEnv(env)->CallLongMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.J = baseEnv(env)->CallNonvirtualLongMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.J = baseEnv(env)->CallStaticLongMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimFloat:
+ result_check = "F";
+ switch (invoke) {
+ case kVirtual:
+ result.F = baseEnv(env)->CallFloatMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.F = baseEnv(env)->CallNonvirtualFloatMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.F = baseEnv(env)->CallStaticFloatMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimDouble:
+ result_check = "D";
+ switch (invoke) {
+ case kVirtual:
+ result.D = baseEnv(env)->CallDoubleMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.D = baseEnv(env)->CallNonvirtualDoubleMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.D = baseEnv(env)->CallStaticDoubleMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimVoid:
+ result_check = "V";
+ result.V = nullptr;
+ switch (invoke) {
+ case kVirtual:
+ baseEnv(env)->CallVoidMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ baseEnv(env)->CallNonvirtualVoidMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ baseEnv(env)->CallStaticVoidMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected return type: " << type;
+ result_check = nullptr;
+ }
+ if (sc.Check(soa, false, result_check, &result)) {
+ return result;
+ }
+ }
+ result.J = 0;
+ return result;
+ }
+
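+  // Shared implementation of GetStringChars, GetStringUTFChars and GetStringCritical; in
+  // force-copy mode the returned characters are wrapped in a guarded copy.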
+ static const void* GetStringCharsInternal(const char* function_name, JNIEnv* env, jstring string,
+ jboolean* is_copy, bool utf, bool critical) {
+ ScopedObjectAccess soa(env);
+ int flags = critical ? kFlag_CritGet : kFlag_CritOkay;
+ ScopedCheck sc(flags, function_name);
+ JniValueType args[3] = {{.E = env}, {.s = string}, {.p = is_copy}};
+ if (sc.Check(soa, true, "Esp", args)) {
+ JniValueType result;
+ if (utf) {
+ CHECK(!critical);
+ result.u = baseEnv(env)->GetStringUTFChars(env, string, is_copy);
+ } else {
+ if (critical) {
+ result.p = baseEnv(env)->GetStringCritical(env, string, is_copy);
+ } else {
+ result.p = baseEnv(env)->GetStringChars(env, string, is_copy);
+ }
+ }
+ // TODO: could we be smarter about not copying when local_is_copy?
+ if (result.p != nullptr && soa.ForceCopy()) {
+ if (utf) {
+ size_t length_in_bytes = strlen(result.u) + 1;
+ result.u =
+ reinterpret_cast<const char*>(GuardedCopy::Create(result.u, length_in_bytes, false));
+ } else {
+ size_t length_in_bytes = baseEnv(env)->GetStringLength(env, string) * 2;
+ result.p =
+ reinterpret_cast<const jchar*>(GuardedCopy::Create(result.p, length_in_bytes, false));
+ }
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ }
+ if (sc.Check(soa, false, utf ? "u" : "p", &result)) {
+ return utf ? result.u : result.p;
+ }
+ }
+ return nullptr;
+ }
+
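+  // Shared implementation of ReleaseStringChars, ReleaseStringUTFChars and ReleaseStringCritical;
+  // in force-copy mode the guarded copy created above is checked and destroyed first.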
+ static void ReleaseStringCharsInternal(const char* function_name, JNIEnv* env, jstring string,
+ const void* chars, bool utf, bool critical) {
+ ScopedObjectAccess soa(env);
+ int flags = kFlag_ExcepOkay | kFlag_Release;
+ if (critical) {
+ flags |= kFlag_CritRelease;
+ }
+ ScopedCheck sc(flags, function_name);
+ sc.CheckNonNull(chars);
+ bool force_copy_ok = !soa.ForceCopy() || GuardedCopy::Check(function_name, chars, false);
+ if (force_copy_ok && soa.ForceCopy()) {
+ chars = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<void*>(chars)));
+ }
+ if (force_copy_ok) {
+ JniValueType args[3] = {{.E = env}, {.s = string}, {.p = chars}};
+ if (sc.Check(soa, true, utf ? "Esu" : "Esp", args)) {
+ if (utf) {
+ CHECK(!critical);
+ baseEnv(env)->ReleaseStringUTFChars(env, string, reinterpret_cast<const char*>(chars));
+ } else {
+ if (critical) {
+ baseEnv(env)->ReleaseStringCritical(env, string, reinterpret_cast<const jchar*>(chars));
+ } else {
+ baseEnv(env)->ReleaseStringChars(env, string, reinterpret_cast<const jchar*>(chars));
+ }
+ }
+ JniValueType result;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+ }
+
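+  // Shared implementation of the New<Type>Array entry points for primitive element types.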
+ static jarray NewPrimitiveArray(const char* function_name, JNIEnv* env, jsize length,
+ Primitive::Type type) {
+ ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[2] = {{.E = env}, {.z = length}};
+ if (sc.Check(soa, true, "Ez", args)) {
+ JniValueType result;
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ result.a = baseEnv(env)->NewBooleanArray(env, length);
+ break;
+ case Primitive::kPrimByte:
+ result.a = baseEnv(env)->NewByteArray(env, length);
+ break;
+ case Primitive::kPrimChar:
+ result.a = baseEnv(env)->NewCharArray(env, length);
+ break;
+ case Primitive::kPrimShort:
+ result.a = baseEnv(env)->NewShortArray(env, length);
+ break;
+ case Primitive::kPrimInt:
+ result.a = baseEnv(env)->NewIntArray(env, length);
+ break;
+ case Primitive::kPrimLong:
+ result.a = baseEnv(env)->NewLongArray(env, length);
+ break;
+ case Primitive::kPrimFloat:
+ result.a = baseEnv(env)->NewFloatArray(env, length);
+ break;
+ case Primitive::kPrimDouble:
+ result.a = baseEnv(env)->NewDoubleArray(env, length);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ if (sc.Check(soa, false, "a", &result)) {
+ return result.a;
+ }
+ }
+ return nullptr;
+ }
+
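+  // Shared implementation of the Get<Type>ArrayElements entry points: check that the array
+  // really holds elements of 'type', then apply force-copy guarding to the returned pointer.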
+ static void* GetPrimitiveArrayElements(const char* function_name, Primitive::Type type,
+ JNIEnv* env, jarray array, jboolean* is_copy) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[3] = {{.E = env}, {.a = array}, {.p = is_copy}};
+ if (sc.Check(soa, true, "Eap", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ JniValueType result;
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ result.p = baseEnv(env)->GetBooleanArrayElements(env, down_cast<jbooleanArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimByte:
+ result.p = baseEnv(env)->GetByteArrayElements(env, down_cast<jbyteArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimChar:
+ result.p = baseEnv(env)->GetCharArrayElements(env, down_cast<jcharArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimShort:
+ result.p = baseEnv(env)->GetShortArrayElements(env, down_cast<jshortArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimInt:
+ result.p = baseEnv(env)->GetIntArrayElements(env, down_cast<jintArray>(array), is_copy);
+ break;
+ case Primitive::kPrimLong:
+ result.p = baseEnv(env)->GetLongArrayElements(env, down_cast<jlongArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimFloat:
+ result.p = baseEnv(env)->GetFloatArrayElements(env, down_cast<jfloatArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimDouble:
+ result.p = baseEnv(env)->GetDoubleArrayElements(env, down_cast<jdoubleArray>(array),
+ is_copy);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ if (result.p != nullptr && soa.ForceCopy()) {
+ result.p = GuardedCopy::CreateGuardedPACopy(env, array, is_copy);
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ }
+ if (sc.Check(soa, false, "p", &result)) {
+ return const_cast<void*>(result.p);
+ }
+ }
+ return nullptr;
+ }
+
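+  // Shared implementation of the Release<Type>ArrayElements entry points; in force-copy mode the
+  // guarded copy is verified and unwrapped before the unchecked release runs.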
+ static void ReleasePrimitiveArrayElements(const char* function_name, Primitive::Type type,
+ JNIEnv* env, jarray array, void* elems, jint mode) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, function_name);
+ if (sc.CheckNonNull(elems) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ if (soa.ForceCopy()) {
+ elems = GuardedCopy::ReleaseGuardedPACopy(function_name, env, array, elems, mode);
+ }
+ if (!soa.ForceCopy() || elems != nullptr) {
+ JniValueType args[4] = {{.E = env}, {.a = array}, {.p = elems}, {.r = mode}};
+ if (sc.Check(soa, true, "Eapr", args)) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ baseEnv(env)->ReleaseBooleanArrayElements(env, down_cast<jbooleanArray>(array),
+ reinterpret_cast<jboolean*>(elems), mode);
+ break;
+ case Primitive::kPrimByte:
+ baseEnv(env)->ReleaseByteArrayElements(env, down_cast<jbyteArray>(array),
+ reinterpret_cast<jbyte*>(elems), mode);
+ break;
+ case Primitive::kPrimChar:
+ baseEnv(env)->ReleaseCharArrayElements(env, down_cast<jcharArray>(array),
+ reinterpret_cast<jchar*>(elems), mode);
+ break;
+ case Primitive::kPrimShort:
+ baseEnv(env)->ReleaseShortArrayElements(env, down_cast<jshortArray>(array),
+ reinterpret_cast<jshort*>(elems), mode);
+ break;
+ case Primitive::kPrimInt:
+ baseEnv(env)->ReleaseIntArrayElements(env, down_cast<jintArray>(array),
+ reinterpret_cast<jint*>(elems), mode);
+ break;
+ case Primitive::kPrimLong:
+ baseEnv(env)->ReleaseLongArrayElements(env, down_cast<jlongArray>(array),
+ reinterpret_cast<jlong*>(elems), mode);
+ break;
+ case Primitive::kPrimFloat:
+ baseEnv(env)->ReleaseFloatArrayElements(env, down_cast<jfloatArray>(array),
+ reinterpret_cast<jfloat*>(elems), mode);
+ break;
+ case Primitive::kPrimDouble:
+ baseEnv(env)->ReleaseDoubleArrayElements(env, down_cast<jdoubleArray>(array),
+ reinterpret_cast<jdouble*>(elems), mode);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+ }
+ }
+
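+  // Shared copy-out path for the Get<Type>ArrayRegion entry points.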
+ static void GetPrimitiveArrayRegion(const char* function_name, Primitive::Type type, JNIEnv* env,
+ jarray array, jsize start, jsize len, void* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[5] = {{.E = env}, {.a = array}, {.z = start}, {.z = len}, {.p = buf}};
+    // Note: the start and len arguments are checked as 'I' rather than 'z', as invalid indices
+    // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EaIIp", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ baseEnv(env)->GetBooleanArrayRegion(env, down_cast<jbooleanArray>(array), start, len,
+ reinterpret_cast<jboolean*>(buf));
+ break;
+ case Primitive::kPrimByte:
+ baseEnv(env)->GetByteArrayRegion(env, down_cast<jbyteArray>(array), start, len,
+ reinterpret_cast<jbyte*>(buf));
+ break;
+ case Primitive::kPrimChar:
+ baseEnv(env)->GetCharArrayRegion(env, down_cast<jcharArray>(array), start, len,
+ reinterpret_cast<jchar*>(buf));
+ break;
+ case Primitive::kPrimShort:
+ baseEnv(env)->GetShortArrayRegion(env, down_cast<jshortArray>(array), start, len,
+ reinterpret_cast<jshort*>(buf));
+ break;
+ case Primitive::kPrimInt:
+ baseEnv(env)->GetIntArrayRegion(env, down_cast<jintArray>(array), start, len,
+ reinterpret_cast<jint*>(buf));
+ break;
+ case Primitive::kPrimLong:
+ baseEnv(env)->GetLongArrayRegion(env, down_cast<jlongArray>(array), start, len,
+ reinterpret_cast<jlong*>(buf));
+ break;
+ case Primitive::kPrimFloat:
+ baseEnv(env)->GetFloatArrayRegion(env, down_cast<jfloatArray>(array), start, len,
+ reinterpret_cast<jfloat*>(buf));
+ break;
+ case Primitive::kPrimDouble:
+ baseEnv(env)->GetDoubleArrayRegion(env, down_cast<jdoubleArray>(array), start, len,
+ reinterpret_cast<jdouble*>(buf));
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
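+  // Shared copy-in path for the Set<Type>ArrayRegion entry points.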
+ static void SetPrimitiveArrayRegion(const char* function_name, Primitive::Type type, JNIEnv* env,
+ jarray array, jsize start, jsize len, const void* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[5] = {{.E = env}, {.a = array}, {.z = start}, {.z = len}, {.p = buf}};
+    // Note: the start and len arguments are checked as 'I' rather than 'z', as invalid indices
+    // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EaIIp", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ baseEnv(env)->SetBooleanArrayRegion(env, down_cast<jbooleanArray>(array), start, len,
+ reinterpret_cast<const jboolean*>(buf));
+ break;
+ case Primitive::kPrimByte:
+ baseEnv(env)->SetByteArrayRegion(env, down_cast<jbyteArray>(array), start, len,
+ reinterpret_cast<const jbyte*>(buf));
+ break;
+ case Primitive::kPrimChar:
+ baseEnv(env)->SetCharArrayRegion(env, down_cast<jcharArray>(array), start, len,
+ reinterpret_cast<const jchar*>(buf));
+ break;
+ case Primitive::kPrimShort:
+ baseEnv(env)->SetShortArrayRegion(env, down_cast<jshortArray>(array), start, len,
+ reinterpret_cast<const jshort*>(buf));
+ break;
+ case Primitive::kPrimInt:
+ baseEnv(env)->SetIntArrayRegion(env, down_cast<jintArray>(array), start, len,
+ reinterpret_cast<const jint*>(buf));
+ break;
+ case Primitive::kPrimLong:
+ baseEnv(env)->SetLongArrayRegion(env, down_cast<jlongArray>(array), start, len,
+ reinterpret_cast<const jlong*>(buf));
+ break;
+ case Primitive::kPrimFloat:
+ baseEnv(env)->SetFloatArrayRegion(env, down_cast<jfloatArray>(array), start, len,
+ reinterpret_cast<const jfloat*>(buf));
+ break;
+ case Primitive::kPrimDouble:
+ baseEnv(env)->SetDoubleArrayRegion(env, down_cast<jdoubleArray>(array), start, len,
+ reinterpret_cast<const jdouble*>(buf));
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
};
const JNINativeInterface gCheckNativeInterface = {
@@ -2023,38 +3584,58 @@
class CheckJII {
public:
static jint DestroyJavaVM(JavaVM* vm) {
- ScopedCheck sc(vm, false, __FUNCTION__);
- sc.Check(true, "v", vm);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->DestroyJavaVM(vm));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__, false);
+ JniValueType args[1] = {{.v = vm}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "v", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->DestroyJavaVM(vm);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- ScopedCheck sc(vm, false, __FUNCTION__);
- sc.Check(true, "vpp", vm, p_env, thr_args);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->AttachCurrentThread(vm, p_env, thr_args));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.p = thr_args}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpp", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->AttachCurrentThread(vm, p_env, thr_args);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- ScopedCheck sc(vm, false, __FUNCTION__);
- sc.Check(true, "vpp", vm, p_env, thr_args);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->AttachCurrentThreadAsDaemon(vm, p_env, thr_args));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.p = thr_args}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpp", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->AttachCurrentThreadAsDaemon(vm, p_env, thr_args);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
static jint DetachCurrentThread(JavaVM* vm) {
- ScopedCheck sc(vm, true, __FUNCTION__);
- sc.Check(true, "v", vm);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->DetachCurrentThread(vm));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[1] = {{.v = vm}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "v", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->DetachCurrentThread(vm);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
- static jint GetEnv(JavaVM* vm, void** env, jint version) {
- ScopedCheck sc(vm, true, __FUNCTION__);
- sc.Check(true, "vpI", vm);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->GetEnv(vm, env, version));
+ static jint GetEnv(JavaVM* vm, void** p_env, jint version) {
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.I = version}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpI", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->GetEnv(vm, p_env, version);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
private:
- static inline const JNIInvokeInterface* BaseVm(JavaVM* vm) {
- return reinterpret_cast<JavaVMExt*>(vm)->unchecked_functions;
+ static const JNIInvokeInterface* BaseVm(JavaVM* vm) {
+ return reinterpret_cast<JavaVMExt*>(vm)->GetUncheckedFunctions();
}
};
diff --git a/test/HelloWorld/HelloWorld.java b/runtime/check_jni.h
similarity index 68%
copy from test/HelloWorld/HelloWorld.java
copy to runtime/check_jni.h
index c6861ce..f41abf8 100644
--- a/test/HelloWorld/HelloWorld.java
+++ b/runtime/check_jni.h
@@ -14,8 +14,16 @@
* limitations under the License.
*/
-class HelloWorld {
- public static void main(String[] args) {
- System.logI("Hello, world!");
- }
-}
+#ifndef ART_RUNTIME_CHECK_JNI_H_
+#define ART_RUNTIME_CHECK_JNI_H_
+
+#include <jni.h>
+
+namespace art {
+
+const JNINativeInterface* GetCheckJniNativeInterface();
+const JNIInvokeInterface* GetCheckJniInvokeInterface();
+
+} // namespace art
+
+#endif // ART_RUNTIME_CHECK_JNI_H_
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 25eb3a3..9921bdd 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CLASS_LINKER_INL_H_
#include "class_linker.h"
+#include "gc_root-inl.h"
#include "gc/heap-inl.h"
#include "mirror/art_field.h"
#include "mirror/class_loader.h"
@@ -40,8 +41,7 @@
inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** element_class) {
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
// Read the cached array class once to avoid races with other threads setting it.
- mirror::Class* array_class = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &find_array_class_cache_[i]);
+ mirror::Class* array_class = find_array_class_cache_[i].Read();
if (array_class != nullptr && array_class->GetComponentType() == *element_class) {
return array_class;
}
@@ -54,7 +54,7 @@
mirror::Class* array_class = FindClass(self, descriptor.c_str(), class_loader);
// Benign races in storing array class and incrementing index.
size_t victim_index = find_array_class_cache_next_victim_;
- find_array_class_cache_[victim_index] = array_class;
+ find_array_class_cache_[victim_index] = GcRoot<mirror::Class>(array_class);
find_array_class_cache_next_victim_ = (victim_index + 1) % kFindArrayCacheSize;
return array_class;
}
@@ -77,7 +77,7 @@
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
mirror::ArtMethod* referrer) {
- mirror::Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
+ mirror::Class* resolved_type = referrer->GetDexCacheResolvedType(type_idx);
if (UNLIKELY(resolved_type == nullptr)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(Thread::Current());
@@ -85,9 +85,8 @@
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- if (resolved_type != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedType(type_idx), resolved_type);
- }
+ // Note: We cannot check here to see whether we added the type to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
}
return resolved_type;
}
@@ -102,9 +101,8 @@
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- if (resolved_type != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedType(type_idx), resolved_type);
- }
+ // Note: We cannot check here to see whether we added the type to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
}
return resolved_type;
}
@@ -112,8 +110,7 @@
inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
mirror::ArtMethod* referrer,
InvokeType type) {
- mirror::ArtMethod* resolved_method =
- referrer->GetDexCacheResolvedMethods()->Get(method_idx);
+ mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx);
if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
return nullptr;
}
@@ -135,9 +132,8 @@
const DexFile* dex_file = h_dex_cache->GetDexFile();
resolved_method = ResolveMethod(*dex_file, method_idx, h_dex_cache, h_class_loader, h_referrer,
type);
- if (resolved_method != nullptr) {
- DCHECK_EQ(h_dex_cache->GetResolvedMethod(method_idx), resolved_method);
- }
+ // Note: We cannot check here to see whether we added the method to the cache. It
+ // might be an erroneous class, which results in it being hidden from us.
return resolved_method;
}
@@ -156,9 +152,8 @@
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
- if (resolved_field != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedField(field_idx), resolved_field);
- }
+ // Note: We cannot check here to see whether we added the field to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
}
return resolved_field;
}
@@ -204,10 +199,8 @@
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(class_roots_ != NULL);
- mirror::ObjectArray<mirror::Class>* class_roots =
- ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Class>, kWithReadBarrier>(
- &class_roots_);
+ DCHECK(!class_roots_.IsNull());
+ mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
DCHECK(klass != NULL);
return klass;
@@ -216,7 +209,7 @@
inline mirror::DexCache* ClassLinker::GetDexCache(size_t idx) {
dex_lock_.AssertSharedHeld(Thread::Current());
DCHECK(idx < dex_caches_.size());
- return ReadBarrier::BarrierForRoot<mirror::DexCache, kWithReadBarrier>(&dex_caches_[idx]);
+ return dex_caches_[idx].Read();
}
} // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 2c11f8b..3b4976f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -16,9 +16,6 @@
#include "class_linker.h"
-#include <fcntl.h>
-#include <sys/file.h>
-#include <sys/stat.h>
#include <deque>
#include <memory>
#include <string>
@@ -34,6 +31,7 @@
#include "compiler_callbacks.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/heap.h"
@@ -267,9 +265,10 @@
java_lang_ref_Reference->SetStatus(mirror::Class::kStatusResolved, self);
// Create storage for root classes, save away our work so far (requires descriptors).
- class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
- kClassRootsMax);
- CHECK(class_roots_ != NULL);
+ class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class> >(
+ mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
+ kClassRootsMax));
+ CHECK(!class_roots_.IsNull());
SetClassRoot(kJavaLangClass, java_lang_Class.Get());
SetClassRoot(kJavaLangObject, java_lang_Object.Get());
SetClassRoot(kClassArrayClass, class_array_class.Get());
@@ -289,7 +288,7 @@
SetClassRoot(kPrimitiveVoid, CreatePrimitiveClass(self, Primitive::kPrimVoid));
// Create array interface entries to populate once we can load system classes.
- array_iftable_ = AllocIfTable(self, 2);
+ array_iftable_ = GcRoot<mirror::IfTable>(AllocIfTable(self, 2));
// Create int array type for AllocDexCache (done in AppendToBootClassPath).
Handle<mirror::Class> int_array_class(hs.NewHandle(
@@ -428,8 +427,7 @@
// We assume that Cloneable/Serializable don't have superinterfaces -- normally we'd have to
// crawl up and explicitly list all of the supers as well.
{
- mirror::IfTable* array_iftable =
- ReadBarrier::BarrierForRoot<mirror::IfTable, kWithReadBarrier>(&array_iftable_);
+ mirror::IfTable* array_iftable = array_iftable_.Read();
array_iftable->SetInterface(0, java_lang_Cloneable);
array_iftable->SetInterface(1, java_io_Serializable);
}
@@ -559,7 +557,7 @@
// if possible add new checks there to catch errors early
}
- CHECK(array_iftable_ != NULL);
+ CHECK(!array_iftable_.IsNull());
// disable the slow paths in FindClass and CreatePrimitiveClass now
// that Object, Class, and Object[] are setup
@@ -614,6 +612,14 @@
argv.push_back("--compiler-filter=verify-none");
}
+ if (Runtime::Current()->MustRelocateIfPossible()) {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xrelocate");
+ } else {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xnorelocate");
+ }
+
if (!kIsTargetBuild) {
argv.push_back("--host");
}
@@ -679,14 +685,6 @@
return NULL;
}
-static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location) {
- if (number == 0) {
- return dex_location;
- } else {
- return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
- }
-}
-
static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file, const char* dex_location,
bool generated,
std::vector<std::string>* error_msgs,
@@ -699,7 +697,7 @@
bool success = true;
for (size_t i = 0; success; ++i) {
- std::string next_name_str = GetMultiDexClassesDexName(i, dex_location);
+ std::string next_name_str = DexFile::GetMultiDexClassesDexName(i, dex_location);
const char* next_name = next_name_str.c_str();
uint32_t dex_location_checksum;
@@ -814,6 +812,7 @@
return false;
}
+ // TODO Caller specifically asks for this oat_location. We should honor it. Probably?
open_oat_file.reset(FindOatFileInOatLocationForDexFile(dex_location, dex_location_checksum,
oat_location, &error_msg));
@@ -833,8 +832,6 @@
// There's no point in going forward and eventually try to regenerate the
// file if we couldn't remove the obsolete one. Mostly likely we will fail
// with the same error when trying to write the new file.
- // In case the clean up failure is due to permission issues it's *mandatory*
- // to stop to avoid regenerating under the wrong user.
// TODO: should we maybe do this only when we get permission issues? (i.e. EACCESS).
if (obsolete_file_cleanup_failed) {
return false;
@@ -940,6 +937,13 @@
actual_image_oat_offset);
return nullptr;
}
+ int32_t expected_patch_delta = image_header.GetPatchDelta();
+ int32_t actual_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
+ if (expected_patch_delta != actual_patch_delta) {
+ *error_msg = StringPrintf("Failed to find oat file at '%s' with expected patch delta %d, "
+ " found %d", oat_location, expected_patch_delta, actual_patch_delta);
+ return nullptr;
+ }
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
&dex_location_checksum);
if (oat_dex_file == nullptr) {
@@ -987,11 +991,25 @@
return oat_file.release();
}
-bool ClassLinker::VerifyOatFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- const InstructionSet instruction_set,
- std::string* error_msg) {
+bool ClassLinker::VerifyOatImageChecksum(const OatFile* oat_file,
+ const InstructionSet instruction_set) {
+ Runtime* runtime = Runtime::Current();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ uint32_t image_oat_checksum = 0;
+ if (instruction_set == kRuntimeISA) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ image_oat_checksum = image_header.GetOatChecksum();
+ } else {
+ std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
+ image_space->GetImageLocation().c_str(), instruction_set));
+ image_oat_checksum = image_header->GetOatChecksum();
+ }
+ return oat_file->GetOatHeader().GetImageFileLocationOatChecksum() == image_oat_checksum;
+}
+
+bool ClassLinker::VerifyOatChecksums(const OatFile* oat_file,
+ const InstructionSet instruction_set,
+ std::string* error_msg) {
Runtime* runtime = Runtime::Current();
const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
@@ -1000,19 +1018,42 @@
// image header from the image for the right instruction set.
uint32_t image_oat_checksum = 0;
uintptr_t image_oat_data_begin = 0;
- if (instruction_set == kRuntimeISA) {
+ int32_t image_patch_delta = 0;
+ if (instruction_set == Runtime::Current()->GetInstructionSet()) {
const ImageHeader& image_header = image_space->GetImageHeader();
image_oat_checksum = image_header.GetOatChecksum();
image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+ image_patch_delta = image_header.GetPatchDelta();
} else {
std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
image_space->GetImageLocation().c_str(), instruction_set));
image_oat_checksum = image_header->GetOatChecksum();
image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ image_patch_delta = image_header->GetPatchDelta();
}
const OatHeader& oat_header = oat_file->GetOatHeader();
- bool image_check = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
- && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin));
+ bool ret = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
+ && (oat_header.GetImagePatchDelta() == image_patch_delta)
+ && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin));
+ if (!ret) {
+ *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d, %d) with (0x%x, %" PRIdPTR ", %d)",
+ oat_file->GetLocation().c_str(),
+ oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
+ oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
+ oat_file->GetOatHeader().GetImagePatchDelta(),
+ image_oat_checksum, image_oat_data_begin, image_patch_delta);
+ }
+ return ret;
+}
+
+bool ClassLinker::VerifyOatAndDexFileChecksums(const OatFile* oat_file,
+ const char* dex_location,
+ uint32_t dex_location_checksum,
+ const InstructionSet instruction_set,
+ std::string* error_msg) {
+ if (!VerifyOatChecksums(oat_file, instruction_set, error_msg)) {
+ return false;
+ }
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
&dex_location_checksum);
@@ -1028,39 +1069,22 @@
}
return false;
}
- bool dex_check = dex_location_checksum == oat_dex_file->GetDexFileLocationChecksum();
- if (image_check && dex_check) {
- return true;
- }
-
- if (!image_check) {
- ScopedObjectAccess soa(Thread::Current());
- *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d) with (0x%x, %" PRIdPTR ")",
- oat_file->GetLocation().c_str(),
- oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
- oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
- image_oat_checksum, image_oat_data_begin);
- }
- if (!dex_check) {
+ if (dex_location_checksum != oat_dex_file->GetDexFileLocationChecksum()) {
*error_msg = StringPrintf("oat file '%s' mismatch (0x%x) with '%s' (0x%x)",
oat_file->GetLocation().c_str(),
oat_dex_file->GetDexFileLocationChecksum(),
dex_location, dex_location_checksum);
+ return false;
}
- return false;
+ return true;
}
-const OatFile* ClassLinker::LoadOatFileAndVerifyDexFile(const std::string& oat_file_location,
- const char* dex_location,
- std::string* error_msg,
- bool* open_failed) {
- std::unique_ptr<const OatFile> oat_file(FindOatFileFromOatLocation(oat_file_location, error_msg));
- if (oat_file.get() == nullptr) {
- *open_failed = true;
- return nullptr;
- }
- *open_failed = false;
+bool ClassLinker::VerifyOatWithDexFile(const OatFile* oat_file,
+ const char* dex_location,
+ std::string* error_msg) {
+ CHECK(oat_file != nullptr);
+ CHECK(dex_location != nullptr);
std::unique_ptr<const DexFile> dex_file;
uint32_t dex_location_checksum;
if (!DexFile::GetChecksum(dex_location, &dex_location_checksum, error_msg)) {
@@ -1070,26 +1094,21 @@
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, NULL);
if (oat_dex_file == nullptr) {
*error_msg = StringPrintf("Dex checksum mismatch for location '%s' and failed to find oat "
- "dex file '%s': %s", oat_file_location.c_str(), dex_location,
+ "dex file '%s': %s", oat_file->GetLocation().c_str(), dex_location,
error_msg->c_str());
- return nullptr;
+ return false;
}
dex_file.reset(oat_dex_file->OpenDexFile(error_msg));
} else {
- bool verified = VerifyOatFileChecksums(oat_file.get(), dex_location, dex_location_checksum,
- kRuntimeISA, error_msg);
+ bool verified = VerifyOatAndDexFileChecksums(oat_file, dex_location, dex_location_checksum,
+ kRuntimeISA, error_msg);
if (!verified) {
- return nullptr;
+ return false;
}
dex_file.reset(oat_file->GetOatDexFile(dex_location,
&dex_location_checksum)->OpenDexFile(error_msg));
}
-
- if (dex_file.get() != nullptr) {
- return oat_file.release();
- } else {
- return nullptr;
- }
+ return dex_file.get() != nullptr;
}
const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation(
@@ -1099,51 +1118,25 @@
std::vector<std::string>* error_msgs,
bool* obsolete_file_cleanup_failed) {
*obsolete_file_cleanup_failed = false;
- // Look for an existing file next to dex. for example, for
- // /foo/bar/baz.jar, look for /foo/bar/<isa>/baz.odex.
- std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa));
- bool open_failed;
+ bool already_opened = false;
+ std::string dex_location_str(dex_location);
+ std::unique_ptr<const OatFile> oat_file(OpenOatFileFromDexLocation(dex_location_str, isa,
+ &already_opened,
+ obsolete_file_cleanup_failed,
+ error_msgs));
std::string error_msg;
- const OatFile* oat_file = LoadOatFileAndVerifyDexFile(odex_filename, dex_location, &error_msg,
- &open_failed);
- if (oat_file != nullptr) {
- return oat_file;
- }
- if (dex_location_checksum == nullptr) {
- error_msgs->push_back(StringPrintf("Failed to open oat file from %s and no classes.dex found in"
- "%s: %s", odex_filename.c_str(), dex_location,
+ if (oat_file.get() == nullptr) {
+ error_msgs->push_back(StringPrintf("Failed to open oat file from dex location '%s'",
+ dex_location));
+ return nullptr;
+ } else if (!VerifyOatWithDexFile(oat_file.get(), dex_location, &error_msg)) {
+ error_msgs->push_back(StringPrintf("Failed to verify oat file '%s' found for dex location "
+ "'%s': %s", oat_file->GetLocation().c_str(), dex_location,
error_msg.c_str()));
return nullptr;
+ } else {
+ return oat_file.release();
}
-
- std::string cache_error_msg;
- const std::string dalvik_cache(GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA)));
- std::string cache_location(GetDalvikCacheFilenameOrDie(dex_location,
- dalvik_cache.c_str()));
- oat_file = LoadOatFileAndVerifyDexFile(cache_location, dex_location, &cache_error_msg,
- &open_failed);
- if (oat_file != nullptr) {
- return oat_file;
- }
-
- if (!open_failed && TEMP_FAILURE_RETRY(unlink(cache_location.c_str())) != 0) {
- std::string error_msg = StringPrintf("Failed to remove obsolete file from %s when searching"
- "for dex file %s: %s",
- cache_location.c_str(), dex_location, strerror(errno));
- error_msgs->push_back(error_msg);
- VLOG(class_linker) << error_msg;
- // Let the caller know that we couldn't remove the obsolete file.
- // This is a good indication that further writes may fail as well.
- *obsolete_file_cleanup_failed = true;
- }
-
- std::string compound_msg = StringPrintf("Failed to open oat file from %s (error '%s') or %s "
- "(error '%s').", odex_filename.c_str(), error_msg.c_str(),
- cache_location.c_str(), cache_error_msg.c_str());
- VLOG(class_linker) << compound_msg;
- error_msgs->push_back(compound_msg);
-
- return nullptr;
}
const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
@@ -1158,6 +1151,277 @@
return nullptr;
}
+const OatFile* ClassLinker::OpenOatFileFromDexLocation(const std::string& dex_location,
+ InstructionSet isa,
+ bool *already_opened,
+ bool *obsolete_file_cleanup_failed,
+ std::vector<std::string>* error_msgs) {
+ // Find out if we've already opened the file
+ const OatFile* ret = nullptr;
+ std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa));
+ ret = FindOpenedOatFileFromOatLocation(odex_filename);
+ if (ret != nullptr) {
+ *already_opened = true;
+ return ret;
+ }
+
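+ // Determine the dalvik-cache directory for this ISA (if it exists) and check whether we have
+ // already opened an oat file from there.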
+ std::string dalvik_cache;
+ bool have_android_data = false;
+ bool have_dalvik_cache = false;
+ GetDalvikCache(GetInstructionSetString(kRuntimeISA), false, &dalvik_cache,
+ &have_android_data, &have_dalvik_cache);
+ std::string cache_filename;
+ if (have_dalvik_cache) {
+ cache_filename = GetDalvikCacheFilenameOrDie(dex_location.c_str(), dalvik_cache.c_str());
+ ret = FindOpenedOatFileFromOatLocation(cache_filename);
+ if (ret != nullptr) {
+ *already_opened = true;
+ return ret;
+ }
+ } else {
+ // If we need to relocate, we should just place the odex back where it started.
+ cache_filename = odex_filename;
+ }
+
+ ret = nullptr;
+
+ // We know that neither the odex nor the cached version is already in use, if either even exists.
+ //
+ // Now we do the following:
+ // 1) Try and open the odex version
+ // 2) If present, checksum-verified, and correctly relocated, return it
+ // 3) Close the odex version to free up its address space.
+ // 4) Try and open the cache version
+ // 5) If present, checksum-verified, and correctly relocated, return it
+ // 6) Close the cache version to free up its address space.
+ // 7) If we should relocate:
+ // a) If we have opened and checksum-verified the odex version, relocate it to
+ // 'cache_filename' and return it.
+ // b) If we have opened and checksum-verified the cache version, relocate it in place and
+ // return it. This should not happen often (I think only the run-tests will hit this case).
+ // 8) If the cache-version was present we should delete it since it must be obsolete if we get to
+ // this point.
+ // 9) Return nullptr
+
+ *already_opened = false;
+ const Runtime* runtime = Runtime::Current();
+ CHECK(runtime != nullptr);
+ bool executable = !runtime->IsCompiler();
+
+ std::string odex_error_msg;
+ bool should_patch_system = false;
+ bool odex_checksum_verified = false;
+ {
+ // There is a high probability that both of these oat files map similar/the same address
+ // space, so we must scope them like this so that each gets its turn.
+ std::unique_ptr<OatFile> odex_oat_file(OatFile::Open(odex_filename, odex_filename, NULL,
+ executable, &odex_error_msg));
+ if (odex_oat_file.get() != nullptr && CheckOatFile(odex_oat_file.get(), isa,
+ &odex_checksum_verified,
+ &odex_error_msg)) {
+ error_msgs->push_back(odex_error_msg);
+ return odex_oat_file.release();
+ } else if (odex_checksum_verified) {
+ // We can just relocate
+ should_patch_system = true;
+ odex_error_msg = "Image Patches are incorrect";
+ }
+ }
+
+
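+ // Now try the copy in the dalvik cache, if there is one, in the same way.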
+ std::string cache_error_msg;
+ bool should_patch_cache = false;
+ bool cache_checksum_verified = false;
+ if (have_dalvik_cache) {
+ std::unique_ptr<OatFile> cache_oat_file(OatFile::Open(cache_filename, cache_filename, NULL,
+ executable, &cache_error_msg));
+ if (cache_oat_file.get() != nullptr && CheckOatFile(cache_oat_file.get(), isa,
+ &cache_checksum_verified,
+ &cache_error_msg)) {
+ error_msgs->push_back(cache_error_msg);
+ return cache_oat_file.release();
+ } else if (cache_checksum_verified) {
+ // We can just relocate
+ should_patch_cache = true;
+ cache_error_msg = "Image Patches are incorrect";
+ }
+ } else if (have_android_data) {
+ // dalvik_cache does not exist but android data does. This means we should be able to create
+ // it, so we should try.
+ GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA), true);
+ }
+
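+ // Neither file was usable as-is. If relocation is allowed, patch whichever copy had a valid
+ // checksum: the odex is patched into the cache location, the cache copy is patched in place.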
+ ret = nullptr;
+ std::string error_msg;
+ if (runtime->CanRelocate()) {
+ // Run relocation
+ const std::string& image_location =
+ Runtime::Current()->GetHeap()->GetImageSpace()->GetImageLocation();
+ if (odex_checksum_verified && should_patch_system) {
+ ret = PatchAndRetrieveOat(odex_filename, cache_filename, image_location, isa, &error_msg);
+ } else if (cache_checksum_verified && should_patch_cache) {
+ CHECK(have_dalvik_cache);
+ ret = PatchAndRetrieveOat(cache_filename, cache_filename, image_location, isa, &error_msg);
+ }
+ }
+ if (ret == nullptr && have_dalvik_cache && OS::FileExists(cache_filename.c_str())) {
+ // Implicitly: we were able to find where the cached version is, but we were unable to use it,
+ // either as a destination for relocation or as a file to open. We should delete it if it is
+ // there.
+ if (TEMP_FAILURE_RETRY(unlink(cache_filename.c_str())) != 0) {
+ std::string rm_error_msg = StringPrintf("Failed to remove obsolete file from %s when "
+ "searching for dex file %s: %s",
+ cache_filename.c_str(), dex_location.c_str(),
+ strerror(errno));
+ error_msgs->push_back(rm_error_msg);
+ VLOG(class_linker) << rm_error_msg;
+ // Let the caller know that we couldn't remove the obsolete file.
+ // This is a good indication that further writes may fail as well.
+ *obsolete_file_cleanup_failed = true;
+ }
+ }
+ if (ret == nullptr) {
+ VLOG(class_linker) << error_msg;
+ error_msgs->push_back(error_msg);
+ std::string relocation_msg;
+ if (runtime->CanRelocate()) {
+ relocation_msg = StringPrintf(" and relocation failed");
+ }
+ if (have_dalvik_cache && cache_checksum_verified) {
+ error_msg = StringPrintf("Failed to open oat file from %s (error %s) or %s "
+ "(error %s)%s.", odex_filename.c_str(), odex_error_msg.c_str(),
+ cache_filename.c_str(), cache_error_msg.c_str(),
+ relocation_msg.c_str());
+ } else {
+ error_msg = StringPrintf("Failed to open oat file from %s (error %s) (no "
+ "dalvik_cache available)%s.", odex_filename.c_str(),
+ odex_error_msg.c_str(), relocation_msg.c_str());
+ }
+ VLOG(class_linker) << error_msg;
+ error_msgs->push_back(error_msg);
+ }
+ return ret;
+}
+
+const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
+ const std::string& output_oat,
+ const std::string& image_location,
+ InstructionSet isa,
+ std::string* error_msg) {
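+ // Relocate input_oat against the currently loaded image by exec'ing the patchoat tool, then
+ // reopen and verify the freshly written output_oat.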
+ Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
+ std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
+
+ std::string isa_arg("--instruction-set=");
+ isa_arg += GetInstructionSetString(isa);
+ std::string input_oat_filename_arg("--input-oat-file=");
+ input_oat_filename_arg += input_oat;
+ std::string output_oat_filename_arg("--output-oat-file=");
+ output_oat_filename_arg += output_oat;
+ std::string patched_image_arg("--patched-image-location=");
+ patched_image_arg += image_location;
+
+ std::vector<std::string> argv;
+ argv.push_back(patchoat);
+ argv.push_back(isa_arg);
+ argv.push_back(input_oat_filename_arg);
+ argv.push_back(output_oat_filename_arg);
+ argv.push_back(patched_image_arg);
+
+ std::string command_line(Join(argv, ' '));
+ LOG(INFO) << "Relocate Oat File: " << command_line;
+ bool success = Exec(argv, error_msg);
+ if (success) {
+ std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, NULL,
+ !Runtime::Current()->IsCompiler(), error_msg));
+ bool checksum_verified = false;
+ if (output.get() != nullptr && CheckOatFile(output.get(), isa, &checksum_verified, error_msg)) {
+ return output.release();
+ } else if (output.get() != nullptr) {
+ *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
+ "but output file '%s' failed verification: %s",
+ input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
+ } else {
+ *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
+ "but was unable to open output file '%s': %s",
+ input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
+ }
+ } else {
+ *error_msg = StringPrintf("Patching of oat file '%s' to '%s' "
+ "failed: %s", input_oat.c_str(), output_oat.c_str(),
+ error_msg->c_str());
+ }
+ return nullptr;
+}
+
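+// Returns the additional delta that would have to be applied to the given oat file so that its
+// image patch delta matches that of the image for the given instruction set.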
+int32_t ClassLinker::GetRequiredDelta(const OatFile* oat_file, InstructionSet isa) {
+ Runtime* runtime = Runtime::Current();
+ int32_t real_patch_delta;
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ if (isa == Runtime::Current()->GetInstructionSet()) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ real_patch_delta = image_header.GetPatchDelta();
+ } else {
+ std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
+ image_space->GetImageLocation().c_str(), isa));
+ real_patch_delta = image_header->GetPatchDelta();
+ }
+ const OatHeader& oat_header = oat_file->GetOatHeader();
+ return real_patch_delta - oat_header.GetImagePatchDelta();
+}
+
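+// Checks that the oat file's recorded image checksum, oat data offset and patch delta all match
+// the current image for the given ISA. *checksum_verified reports the checksum comparison alone,
+// letting callers distinguish an unrelocated-but-valid oat file from a stale one.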
+bool ClassLinker::CheckOatFile(const OatFile* oat_file, InstructionSet isa,
+ bool* checksum_verified,
+ std::string* error_msg) {
+ std::string compound_msg("Oat file failed to verify: ");
+ Runtime* runtime = Runtime::Current();
+ uint32_t real_image_checksum;
+ void* real_image_oat_offset;
+ int32_t real_patch_delta;
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ if (isa == Runtime::Current()->GetInstructionSet()) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ real_image_checksum = image_header.GetOatChecksum();
+ real_image_oat_offset = image_header.GetOatDataBegin();
+ real_patch_delta = image_header.GetPatchDelta();
+ } else {
+ std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
+ image_space->GetImageLocation().c_str(), isa));
+ real_image_checksum = image_header->GetOatChecksum();
+ real_image_oat_offset = image_header->GetOatDataBegin();
+ real_patch_delta = image_header->GetPatchDelta();
+ }
+
+ const OatHeader& oat_header = oat_file->GetOatHeader();
+
+ uint32_t oat_image_checksum = oat_header.GetImageFileLocationOatChecksum();
+ *checksum_verified = oat_image_checksum == real_image_checksum;
+ if (!*checksum_verified) {
+ compound_msg += StringPrintf(" Oat Image Checksum Incorrect (expected 0x%x, received 0x%x)",
+ real_image_checksum, oat_image_checksum);
+ }
+
+ void* oat_image_oat_offset =
+ reinterpret_cast<void*>(oat_header.GetImageFileLocationOatDataBegin());
+ bool offset_verified = oat_image_oat_offset == real_image_oat_offset;
+ if (!offset_verified) {
+ compound_msg += StringPrintf(" Oat Image oat offset incorrect (expected 0x%p, received 0x%p)",
+ real_image_oat_offset, oat_image_oat_offset);
+ }
+
+ int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
+ bool patch_delta_verified = oat_patch_delta == real_patch_delta;
+ if (!patch_delta_verified) {
+ compound_msg += StringPrintf(" Oat image patch delta incorrect (expected 0x%x, received 0x%x)",
+ real_patch_delta, oat_patch_delta);
+ }
+
+ bool ret = (*checksum_verified && offset_verified && patch_delta_verified);
+ if (!ret) {
+ *error_msg = compound_msg;
+ }
+ return ret;
+}
+
const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location,
std::string* error_msg) {
const OatFile* oat_file = FindOpenedOatFileFromOatLocation(oat_location);
@@ -1221,7 +1485,7 @@
Handle<mirror::ObjectArray<mirror::Class>> class_roots(hs.NewHandle(
space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->
AsObjectArray<mirror::Class>()));
- class_roots_ = class_roots.Get();
+ class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(class_roots.Get());
// Special case of setting up the String class early so that we can test arbitrary objects
// as being Strings or not
@@ -1261,11 +1525,11 @@
// reinit class_roots_
mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
- class_roots_ = class_roots.Get();
+ class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(class_roots.Get());
// reinit array_iftable_ from any array class instance, they should be ==
- array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
- DCHECK(array_iftable_ == GetClassRoot(kBooleanArrayClass)->GetIfTable());
+ array_iftable_ = GcRoot<mirror::IfTable>(GetClassRoot(kObjectArrayClass)->GetIfTable());
+ DCHECK(array_iftable_.Read() == GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
@@ -1288,22 +1552,23 @@
void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
- callback(reinterpret_cast<mirror::Object**>(&it.second), arg, 0, kRootStickyClass);
+ for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
+ it.second.VisitRoot(callback, arg, 0, kRootStickyClass);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& pair : new_class_roots_) {
- mirror::Object* old_ref = pair.second;
- callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootStickyClass);
- if (UNLIKELY(pair.second != old_ref)) {
+ mirror::Class* old_ref = pair.second.Read<kWithoutReadBarrier>();
+ pair.second.VisitRoot(callback, arg, 0, kRootStickyClass);
+ mirror::Class* new_ref = pair.second.Read<kWithoutReadBarrier>();
+ if (UNLIKELY(new_ref != old_ref)) {
// Uh ohes, GC moved a root in the log. Need to search the class_table and update the
// corresponding object. This is slow, but luckily for us, this may only happen with a
// concurrent moving GC.
for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
it != end && it->first == pair.first; ++it) {
// If the class stored matches the old class, update it to the new value.
- if (old_ref == it->second) {
- it->second = pair.second;
+ if (old_ref == it->second.Read<kWithoutReadBarrier>()) {
+ it->second = GcRoot<mirror::Class>(new_ref);
}
}
}
@@ -1325,17 +1590,17 @@
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
void ClassLinker::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
- callback(reinterpret_cast<mirror::Object**>(&class_roots_), arg, 0, kRootVMInternal);
+ class_roots_.VisitRoot(callback, arg, 0, kRootVMInternal);
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (mirror::DexCache*& dex_cache : dex_caches_) {
- callback(reinterpret_cast<mirror::Object**>(&dex_cache), arg, 0, kRootVMInternal);
+ for (GcRoot<mirror::DexCache>& dex_cache : dex_caches_) {
+ dex_cache.VisitRoot(callback, arg, 0, kRootVMInternal);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (size_t index : new_dex_cache_roots_) {
- callback(reinterpret_cast<mirror::Object**>(&dex_caches_[index]), arg, 0, kRootVMInternal);
+ dex_caches_[index].VisitRoot(callback, arg, 0, kRootVMInternal);
}
}
if ((flags & kVisitRootFlagClearRootLog) != 0) {
@@ -1348,12 +1613,11 @@
}
}
VisitClassRoots(callback, arg, flags);
- callback(reinterpret_cast<mirror::Object**>(&array_iftable_), arg, 0, kRootVMInternal);
- DCHECK(array_iftable_ != nullptr);
+ array_iftable_.VisitRoot(callback, arg, 0, kRootVMInternal);
+ DCHECK(!array_iftable_.IsNull());
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
- if (find_array_class_cache_[i] != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&find_array_class_cache_[i]), arg, 0,
- kRootVMInternal);
+ if (!find_array_class_cache_[i].IsNull()) {
+ find_array_class_cache_[i].VisitRoot(callback, arg, 0, kRootVMInternal);
}
}
}
@@ -1363,9 +1627,8 @@
MoveImageClassesToClassTable();
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
- mirror::Class** root = &it.second;
- mirror::Class* c = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
+ mirror::Class* c = it.second.Read();
if (!visitor(c, arg)) {
return;
}
@@ -1660,23 +1923,26 @@
// size when the class becomes resolved.
klass.Assign(AllocClass(self, SizeOfClassWithoutEmbeddedTables(dex_file, dex_class_def)));
}
- if (UNLIKELY(klass.Get() == NULL)) {
+ if (UNLIKELY(klass.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
- return NULL;
+ return nullptr;
}
klass->SetDexCache(FindDexCache(dex_file));
LoadClass(dex_file, dex_class_def, klass, class_loader.Get());
- // Check for a pending exception during load
- if (self->IsExceptionPending()) {
- klass->SetStatus(mirror::Class::kStatusError, self);
- return NULL;
- }
ObjectLock<mirror::Class> lock(self, klass);
+ if (self->IsExceptionPending()) {
+ // An exception occurred during load, set status to erroneous while holding klass' lock in case
+ // notification is necessary.
+ if (!klass->IsErroneous()) {
+ klass->SetStatus(mirror::Class::kStatusError, self);
+ }
+ return nullptr;
+ }
klass->SetClinitThreadId(self->GetTid());
// Add the newly loaded class to the loaded classes table.
mirror::Class* existing = InsertClass(descriptor, klass.Get(), Hash(descriptor));
- if (existing != NULL) {
+ if (existing != nullptr) {
// We failed to insert because we raced with another thread. Calling EnsureResolved may cause
// this thread to block.
return EnsureResolved(self, descriptor, existing);
@@ -1686,8 +1952,10 @@
CHECK(!klass->IsLoaded());
if (!LoadSuperAndInterfaces(klass, dex_file)) {
// Loading failed.
- klass->SetStatus(mirror::Class::kStatusError, self);
- return NULL;
+ if (!klass->IsErroneous()) {
+ klass->SetStatus(mirror::Class::kStatusError, self);
+ }
+ return nullptr;
}
CHECK(klass->IsLoaded());
// Link the class (if necessary)
@@ -1698,8 +1966,10 @@
mirror::Class* new_class = nullptr;
if (!LinkClass(self, descriptor, klass, interfaces, &new_class)) {
// Linking failed.
- klass->SetStatus(mirror::Class::kStatusError, self);
- return NULL;
+ if (!klass->IsErroneous()) {
+ klass->SetStatus(mirror::Class::kStatusError, self);
+ }
+ return nullptr;
}
CHECK(new_class != nullptr) << descriptor;
CHECK(new_class->IsResolved()) << descriptor;
@@ -2284,7 +2554,7 @@
CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
<< dex_cache->GetLocation()->ToModifiedUtf8() << " " << dex_file.GetLocation();
- dex_caches_.push_back(dex_cache.Get());
+ dex_caches_.push_back(GcRoot<mirror::DexCache>(dex_cache.Get()));
dex_cache->SetDexFile(&dex_file);
if (log_new_dex_caches_roots_) {
// TODO: This is not safe if we can remove dex caches.
@@ -2401,7 +2671,14 @@
Handle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1, class_loader)));
if (component_type.Get() == nullptr) {
DCHECK(self->IsExceptionPending());
- return nullptr;
+ // We need to accept erroneous classes as component types.
+ component_type.Assign(LookupClass(descriptor + 1, class_loader.Get()));
+ if (component_type.Get() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ } else {
+ self->ClearException();
+ }
}
if (UNLIKELY(component_type->IsPrimitiveVoid())) {
ThrowNoClassDefFoundError("Attempt to create array of void primitive type");
@@ -2494,8 +2771,7 @@
// Use the single, global copies of "interfaces" and "iftable"
// (remember not to free them for arrays).
{
- mirror::IfTable* array_iftable =
- ReadBarrier::BarrierForRoot<mirror::IfTable, kWithReadBarrier>(&array_iftable_);
+ mirror::IfTable* array_iftable = array_iftable_.Read();
CHECK(array_iftable != nullptr);
new_class->SetIfTable(array_iftable);
}
@@ -2579,9 +2855,9 @@
}
}
VerifyObject(klass);
- class_table_.insert(std::make_pair(hash, klass));
+ class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, klass));
+ new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
}
return NULL;
}
@@ -2603,8 +2879,8 @@
for (auto it = class_table_.lower_bound(hash), end = class_table_.end(); it != end && it->first == hash;
++it) {
- mirror::Class* entry = it->second;
- if (entry == existing) {
+ mirror::Class* klass = it->second.Read();
+ if (klass == existing) {
class_table_.erase(it);
break;
}
@@ -2622,9 +2898,9 @@
}
VerifyObject(klass);
- class_table_.insert(std::make_pair(hash, klass));
+ class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, klass));
+ new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
}
return existing;
@@ -2636,8 +2912,7 @@
for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
it != end && it->first == hash;
++it) {
- mirror::Class** root = &it->second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ mirror::Class* klass = it->second.Read();
if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
class_table_.erase(it);
return true;
@@ -2681,14 +2956,12 @@
size_t hash) {
auto end = class_table_.end();
for (auto it = class_table_.lower_bound(hash); it != end && it->first == hash; ++it) {
- mirror::Class** root = &it->second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ mirror::Class* klass = it->second.Read();
if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
if (kIsDebugBuild) {
// Check for duplicates in the table.
for (++it; it != end && it->first == hash; ++it) {
- mirror::Class** root2 = &it->second;
- mirror::Class* klass2 = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root2);
+ mirror::Class* klass2 = it->second.Read();
CHECK(!(klass2->GetClassLoader() == class_loader &&
klass2->DescriptorEquals(descriptor)))
<< PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
@@ -2732,9 +3005,9 @@
CHECK(existing == klass) << PrettyClassAndClassLoader(existing) << " != "
<< PrettyClassAndClassLoader(klass);
} else {
- class_table_.insert(std::make_pair(hash, klass));
+ class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, klass));
+ new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
}
}
}
@@ -2780,8 +3053,7 @@
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
it != end && it->first == hash; ++it) {
- mirror::Class** root = &it->second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ mirror::Class* klass = it->second.Read();
if (klass->DescriptorEquals(descriptor)) {
result.push_back(klass);
}
@@ -3213,22 +3485,21 @@
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
// Locate the dex cache of the original interface/Object
- mirror::DexCache* dex_cache = NULL;
+ mirror::DexCache* dex_cache = nullptr;
{
- mirror::ObjectArray<mirror::Class>* resolved_types = proxy_method->GetDexCacheResolvedTypes();
ReaderMutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i != dex_caches_.size(); ++i) {
mirror::DexCache* a_dex_cache = GetDexCache(i);
- if (a_dex_cache->GetResolvedTypes() == resolved_types) {
+ if (proxy_method->HasSameDexCacheResolvedTypes(a_dex_cache->GetResolvedTypes())) {
dex_cache = a_dex_cache;
break;
}
}
}
- CHECK(dex_cache != NULL);
+ CHECK(dex_cache != nullptr);
uint32_t method_idx = proxy_method->GetDexMethodIndex();
mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx);
- CHECK(resolved_method != NULL);
+ CHECK(resolved_method != nullptr);
return resolved_method;
}
@@ -3241,14 +3512,19 @@
proxy_class->GetDirectMethods();
CHECK_EQ(proxy_direct_methods->GetLength(), 16);
mirror::ArtMethod* proxy_constructor = proxy_direct_methods->Get(2);
- // Clone the existing constructor of Proxy (our constructor would just invoke it so steal its
- // code_ too)
- mirror::ArtMethod* constructor =
- down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self));
- if (constructor == NULL) {
+ mirror::ArtMethod* constructor = down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self));
+ if (constructor == nullptr) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
+ // Make the proxy constructor's code always point to the uninstrumented code. This avoids
+ // getting a method enter event for the proxy constructor as the proxy constructor doesn't
+ // have an activation.
+ bool have_portable_code;
+ constructor->SetEntryPointFromQuickCompiledCode(GetQuickOatCodeFor(proxy_constructor));
+ constructor->SetEntryPointFromPortableCompiledCode(GetPortableOatCodeFor(proxy_constructor,
+ &have_portable_code));
+
// Make this constructor public and fix the class to be our Proxy version
constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
constructor->SetDeclaringClass(klass.Get());
@@ -3303,8 +3579,8 @@
// The proxy method doesn't have its own dex cache or dex file and so it steals those of its
// interface prototype. The exception to this are Constructors and the Class of the Proxy itself.
CHECK_EQ(prototype->GetDexCacheStrings(), method->GetDexCacheStrings());
- CHECK_EQ(prototype->GetDexCacheResolvedMethods(), method->GetDexCacheResolvedMethods());
- CHECK_EQ(prototype->GetDexCacheResolvedTypes(), method->GetDexCacheResolvedTypes());
+ CHECK(prototype->HasSameDexCacheResolvedMethods(method.Get()));
+ CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
MethodHelper mh(method);
@@ -3570,9 +3846,9 @@
MethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
if (klass->HasSuperClass() &&
klass->GetClassLoader() != klass->GetSuperClass()->GetClassLoader()) {
- for (int i = klass->GetSuperClass()->GetVTable()->GetLength() - 1; i >= 0; --i) {
- mh.ChangeMethod(klass->GetVTable()->GetWithoutChecks(i));
- super_mh.ChangeMethod(klass->GetSuperClass()->GetVTable()->GetWithoutChecks(i));
+ for (int i = klass->GetSuperClass()->GetVTableLength() - 1; i >= 0; --i) {
+ mh.ChangeMethod(klass->GetVTableEntry(i));
+ super_mh.ChangeMethod(klass->GetSuperClass()->GetVTableEntry(i));
if (mh.GetMethod() != super_mh.GetMethod() &&
!mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
ThrowLinkageError(klass.Get(),
@@ -3730,10 +4006,6 @@
// This will notify waiters on new_class that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
new_class_h->SetStatus(mirror::Class::kStatusResolved, self);
-
- // Only embedded imt should be used from this point.
- new_class_h->SetImTable(NULL);
- // TODO: remove vtable and only use embedded vtable.
}
return true;
}
@@ -3866,17 +4138,31 @@
bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
if (klass->HasSuperClass()) {
uint32_t max_count = klass->NumVirtualMethods() +
- klass->GetSuperClass()->GetVTable()->GetLength();
- size_t actual_count = klass->GetSuperClass()->GetVTable()->GetLength();
+ klass->GetSuperClass()->GetVTableLength();
+ size_t actual_count = klass->GetSuperClass()->GetVTableLength();
CHECK_LE(actual_count, max_count);
- // TODO: do not assign to the vtable field until it is fully constructed.
StackHandleScope<3> hs(self);
- Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
- hs.NewHandle(klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count)));
- if (UNLIKELY(vtable.Get() == NULL)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return false;
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+ vtable = hs.NewHandle(AllocArtMethodArray(self, max_count));
+ if (UNLIKELY(vtable.Get() == nullptr)) {
+ CHECK(self->IsExceptionPending()); // OOME.
+ return false;
+ }
+ int len = super_class->GetVTableLength();
+ for (int i = 0; i < len; i++) {
+ vtable->Set<false>(i, super_class->GetVTableEntry(i));
+ }
+ } else {
+ CHECK(super_class->GetVTable() != nullptr) << PrettyClass(super_class);
+ vtable = hs.NewHandle(super_class->GetVTable()->CopyOf(self, max_count));
+ if (UNLIKELY(vtable.Get() == nullptr)) {
+ CHECK(self->IsExceptionPending()); // OOME.
+ return false;
+ }
}
+
// See if any of our virtual methods override the superclass.
MethodHelper local_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
MethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
@@ -4662,7 +4948,7 @@
bool is_static) {
DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
- if (resolved != NULL) {
+ if (resolved != nullptr) {
return resolved;
}
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
@@ -4670,9 +4956,9 @@
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(
hs.NewHandle(ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader)));
- if (klass.Get() == NULL) {
+ if (klass.Get() == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
if (is_static) {
@@ -4681,7 +4967,7 @@
resolved = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
- if (resolved == NULL) {
+ if (resolved == nullptr) {
const char* name = dex_file.GetFieldName(field_id);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
if (is_static) {
@@ -4689,7 +4975,7 @@
} else {
resolved = klass->FindInstanceField(name, type);
}
- if (resolved == NULL) {
+ if (resolved == nullptr) {
ThrowNoSuchFieldError(is_static ? "static " : "instance ", klass.Get(), type, name);
return NULL;
}
@@ -4704,7 +4990,7 @@
Handle<mirror::ClassLoader> class_loader) {
DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
- if (resolved != NULL) {
+ if (resolved != nullptr) {
return resolved;
}
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
@@ -4747,9 +5033,8 @@
std::vector<mirror::Class*> all_classes;
{
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
- mirror::Class** root = &it.second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
+ mirror::Class* klass = it.second.Read();
all_classes.push_back(klass);
}
}
@@ -4789,9 +5074,7 @@
DCHECK(klass != NULL);
DCHECK(klass->GetClassLoader() == NULL);
- mirror::ObjectArray<mirror::Class>* class_roots =
- ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Class>, kWithReadBarrier>(
- &class_roots_);
+ mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != NULL);
DCHECK(class_roots->Get(class_root) == NULL);
class_roots->Set<false>(class_root, klass);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 64bffc9..8c09042 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -24,11 +24,11 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
+#include "gc_root.h"
#include "gtest/gtest.h"
#include "jni.h"
#include "oat_file.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -245,9 +245,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
- LOCKS_EXCLUDED(dex_lock_);
+ LOCKS_EXCLUDED(dex_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::DexCache* FindDexCache(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_)
@@ -265,10 +267,6 @@
std::string* error_msg)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- const OatFile* FindOatFileFromOatLocation(const std::string& location,
- std::string* error_msg)
- LOCKS_EXCLUDED(dex_lock_);
-
// Find or create the oat file holding dex_location. Then load all corresponding dex files
// (if multidex) into the given vector.
bool OpenDexFilesFromOat(const char* dex_location, const char* oat_location,
@@ -276,12 +274,18 @@
std::vector<const DexFile*>* dex_files)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+ // Returns true if the given oat file has the same image checksum as the image it is paired with.
+ static bool VerifyOatImageChecksum(const OatFile* oat_file, const InstructionSet instruction_set);
+ // Returns true if the oat file checksums match with the image and the offsets are such that it
+ // could be loaded with it.
+ static bool VerifyOatChecksums(const OatFile* oat_file, const InstructionSet instruction_set,
+ std::string* error_msg);
// Returns true if oat file contains the dex file with the given location and checksum.
- static bool VerifyOatFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- InstructionSet instruction_set,
- std::string* error_msg);
+ static bool VerifyOatAndDexFileChecksums(const OatFile* oat_file,
+ const char* dex_location,
+ uint32_t dex_location_checksum,
+ InstructionSet instruction_set,
+ std::string* error_msg);
// TODO: replace this with multiple methods that allocate the correct managed type.
template <class T>
@@ -382,9 +386,7 @@
mirror::ArtMethod* AllocArtMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ObjectArray<mirror::Class>* class_roots =
- ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Class>, kWithReadBarrier>(
- &class_roots_);
+ mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != NULL);
return class_roots;
}
@@ -546,9 +548,31 @@
const OatFile* FindOpenedOatFile(const char* oat_location, const char* dex_location,
const uint32_t* const dex_location_checksum)
LOCKS_EXCLUDED(dex_lock_);
+
+ // Will open the oat file directly without relocating, even if we could/should do relocation.
+ const OatFile* FindOatFileFromOatLocation(const std::string& oat_location,
+ std::string* error_msg)
+ LOCKS_EXCLUDED(dex_lock_);
+
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
LOCKS_EXCLUDED(dex_lock_);
+ const OatFile* OpenOatFileFromDexLocation(const std::string& dex_location,
+ InstructionSet isa,
+ bool* already_opened,
+ bool* obsolete_file_cleanup_failed,
+ std::vector<std::string>* error_msg)
+ LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+
+ const OatFile* PatchAndRetrieveOat(const std::string& input, const std::string& output,
+ const std::string& image_location, InstructionSet isa,
+ std::string* error_msg)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ bool CheckOatFile(const OatFile* oat_file, InstructionSet isa,
+ bool* checksum_verified, std::string* error_msg);
+ int32_t GetRequiredDelta(const OatFile* oat_file, InstructionSet isa);
+
// Note: will not register the oat file.
const OatFile* FindOatFileInOatLocationForDexFile(const char* dex_location,
uint32_t dex_location_checksum,
@@ -575,14 +599,10 @@
bool* obsolete_file_cleanup_failed)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
- // Find a verify an oat file with the given dex file. Will return nullptr when the oat file
- // was not found or the dex file could not be verified.
- // Note: Does not register the oat file.
- const OatFile* LoadOatFileAndVerifyDexFile(const std::string& oat_file_location,
- const char* dex_location,
- std::string* error_msg,
- bool* open_failed)
- LOCKS_EXCLUDED(dex_lock_);
+ // Verify an oat file with the given dex file. Returns false when the dex file could not be
+ // verified and true otherwise.
+ bool VerifyOatWithDexFile(const OatFile* oat_file, const char* dex_location,
+ std::string* error_msg);
mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
mirror::Class* proxy_class)
@@ -595,18 +615,18 @@
mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<size_t> new_dex_cache_roots_ GUARDED_BY(dex_lock_);;
- std::vector<mirror::DexCache*> dex_caches_ GUARDED_BY(dex_lock_);
+ std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
// multimap from a string hash code of a class descriptor to
// mirror::Class* instances. Results should be compared for a matching
// Class::descriptor_ and Class::class_loader_.
- typedef std::multimap<size_t, mirror::Class*> Table;
+ typedef std::multimap<size_t, GcRoot<mirror::Class>> Table;
// This contains strong roots. To enable concurrent root scanning of
// the class table, be careful to use a read barrier when accessing this.
Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
- std::vector<std::pair<size_t, mirror::Class*>> new_class_roots_;
+ std::vector<std::pair<size_t, GcRoot<mirror::Class>>> new_class_roots_;
// Do we need to search dex caches to find image classes?
bool dex_cache_image_class_lookup_required_;
@@ -635,7 +655,7 @@
// retire a class, the version of the class in the table is returned and this may differ from
// the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
- __attribute__((warn_unused_result)) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -680,7 +700,7 @@
kJavaLangStackTraceElementArrayClass,
kClassRootsMax,
};
- mirror::ObjectArray<mirror::Class>* class_roots_;
+ GcRoot<mirror::ObjectArray<mirror::Class>> class_roots_;
mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -696,12 +716,12 @@
}
// The interface table used by all arrays.
- mirror::IfTable* array_iftable_;
+ GcRoot<mirror::IfTable> array_iftable_;
// A cache of the last FindArrayClass results. The cache serves to avoid creating array class
// descriptors for the sake of performing FindClass.
static constexpr size_t kFindArrayCacheSize = 16;
- mirror::Class* find_array_class_cache_[kFindArrayCacheSize];
+ GcRoot<mirror::Class> find_array_class_cache_[kFindArrayCacheSize];
size_t find_array_class_cache_next_victim_;
bool init_done_;
@@ -720,6 +740,8 @@
const void* quick_to_interpreter_bridge_trampoline_;
friend class ImageWriter; // for GetClassRoots
+ friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
+ friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
FRIEND_TEST(mirror::DexCacheTest, Open);
FRIEND_TEST(ExceptionTest, FindExceptionHandler);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 21fe006..8d93265 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -91,7 +91,7 @@
EXPECT_EQ(0U, primitive->NumInstanceFields());
EXPECT_EQ(0U, primitive->NumStaticFields());
EXPECT_EQ(0U, primitive->NumDirectInterfaces());
- EXPECT_TRUE(primitive->GetVTable() == NULL);
+ EXPECT_FALSE(primitive->HasVTable());
EXPECT_EQ(0, primitive->GetIfTableCount());
EXPECT_TRUE(primitive->GetIfTable() == NULL);
EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
@@ -143,7 +143,7 @@
EXPECT_EQ(0U, array->NumInstanceFields());
EXPECT_EQ(0U, array->NumStaticFields());
EXPECT_EQ(2U, array->NumDirectInterfaces());
- EXPECT_TRUE(array->GetVTable() != NULL);
+ EXPECT_TRUE(array->ShouldHaveEmbeddedImtAndVTable());
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != NULL);
mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
@@ -156,20 +156,20 @@
}
void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- EXPECT_TRUE(method != NULL);
- EXPECT_TRUE(method->GetClass() != NULL);
- EXPECT_TRUE(method->GetName() != NULL);
+ EXPECT_TRUE(method != nullptr);
+ EXPECT_TRUE(method->GetClass() != nullptr);
+ EXPECT_TRUE(method->GetName() != nullptr);
EXPECT_TRUE(method->GetSignature() != Signature::NoSignature());
- EXPECT_TRUE(method->GetDexCacheStrings() != NULL);
- EXPECT_TRUE(method->GetDexCacheResolvedMethods() != NULL);
- EXPECT_TRUE(method->GetDexCacheResolvedTypes() != NULL);
+ EXPECT_TRUE(method->GetDexCacheStrings() != nullptr);
+ EXPECT_TRUE(method->HasDexCacheResolvedMethods());
+ EXPECT_TRUE(method->HasDexCacheResolvedTypes());
EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetStrings(),
method->GetDexCacheStrings());
- EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods(),
- method->GetDexCacheResolvedMethods());
- EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes(),
- method->GetDexCacheResolvedTypes());
+ EXPECT_TRUE(method->HasSameDexCacheResolvedMethods(
+ method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods()));
+ EXPECT_TRUE(method->HasSameDexCacheResolvedTypes(
+ method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes()));
}
void AssertField(mirror::Class* klass, mirror::ArtField* field)
@@ -216,7 +216,7 @@
EXPECT_NE(0U, klass->NumDirectMethods());
}
}
- EXPECT_EQ(klass->IsInterface(), klass->GetVTable() == NULL);
+ EXPECT_EQ(klass->IsInterface(), !klass->HasVTable());
mirror::IfTable* iftable = klass->GetIfTable();
for (int i = 0; i < klass->GetIfTableCount(); i++) {
mirror::Class* interface = iftable->GetInterface(i);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index f47f13d..ab4a2bb 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -22,6 +22,7 @@
#include <ScopedLocalRef.h>
#include "../../external/icu/icu4c/source/common/unicode/uvernum.h"
+#include "base/macros.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
@@ -29,6 +30,7 @@
#include "class_linker.h"
#include "compiler_callbacks.h"
#include "dex_file.h"
+#include "gc_root-inl.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
#include "jni_internal.h"
@@ -93,7 +95,7 @@
CommonRuntimeTest::CommonRuntimeTest() {}
CommonRuntimeTest::~CommonRuntimeTest() {}
-void CommonRuntimeTest::SetEnvironmentVariables(std::string& android_data) {
+void CommonRuntimeTest::SetUpAndroidRoot() {
if (IsHost()) {
// $ANDROID_ROOT is set on the device, but not necessarily on the host.
// But it needs to be set so that icu4c can find its locale data.
@@ -133,15 +135,36 @@
setenv("ANDROID_HOST_OUT", getenv("ANDROID_ROOT"), 1);
}
}
+}
+void CommonRuntimeTest::SetUpAndroidData(std::string& android_data) {
// On target, Cannot use /mnt/sdcard because it is mounted noexec, so use subdir of dalvik-cache
- android_data = (IsHost() ? "/tmp/art-data-XXXXXX" : "/data/dalvik-cache/art-data-XXXXXX");
+ if (IsHost()) {
+ const char* tmpdir = getenv("TMPDIR");
+ if (tmpdir != nullptr && tmpdir[0] != 0) {
+ android_data = tmpdir;
+ } else {
+ android_data = "/tmp";
+ }
+ } else {
+ android_data = "/data/dalvik-cache";
+ }
+ android_data += "/art-data-XXXXXX";
if (mkdtemp(&android_data[0]) == nullptr) {
PLOG(FATAL) << "mkdtemp(\"" << &android_data[0] << "\") failed";
}
setenv("ANDROID_DATA", android_data.c_str(), 1);
}
+void CommonRuntimeTest::TearDownAndroidData(const std::string& android_data, bool fail_on_error) {
+ if (fail_on_error) {
+ ASSERT_EQ(rmdir(android_data.c_str()), 0);
+ } else {
+ rmdir(android_data.c_str());
+ }
+}
+
+
const DexFile* CommonRuntimeTest::LoadExpectSingleDexFile(const char* location) {
std::vector<const DexFile*> dex_files;
std::string error_msg;
@@ -155,7 +178,8 @@
}
void CommonRuntimeTest::SetUp() {
- SetEnvironmentVariables(android_data_);
+ SetUpAndroidRoot();
+ SetUpAndroidData(android_data_);
dalvik_cache_.append(android_data_.c_str());
dalvik_cache_.append("/dalvik-cache");
int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
@@ -199,27 +223,40 @@
runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
}
-void CommonRuntimeTest::TearDown() {
- const char* android_data = getenv("ANDROID_DATA");
- ASSERT_TRUE(android_data != nullptr);
- DIR* dir = opendir(dalvik_cache_.c_str());
+void CommonRuntimeTest::ClearDirectory(const char* dirpath) {
+ ASSERT_TRUE(dirpath != nullptr);
+ DIR* dir = opendir(dirpath);
ASSERT_TRUE(dir != nullptr);
dirent* e;
+ struct stat s;
while ((e = readdir(dir)) != nullptr) {
if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
continue;
}
- std::string filename(dalvik_cache_);
+ std::string filename(dirpath);
filename.push_back('/');
filename.append(e->d_name);
- int unlink_result = unlink(filename.c_str());
- ASSERT_EQ(0, unlink_result);
+ int stat_result = lstat(filename.c_str(), &s);
+ ASSERT_EQ(0, stat_result) << "unable to stat " << filename;
+ if (S_ISDIR(s.st_mode)) {
+ ClearDirectory(filename.c_str());
+ int rmdir_result = rmdir(filename.c_str());
+ ASSERT_EQ(0, rmdir_result) << filename;
+ } else {
+ int unlink_result = unlink(filename.c_str());
+ ASSERT_EQ(0, unlink_result) << filename;
+ }
}
closedir(dir);
+}
+
+void CommonRuntimeTest::TearDown() {
+ const char* android_data = getenv("ANDROID_DATA");
+ ASSERT_TRUE(android_data != nullptr);
+ ClearDirectory(dalvik_cache_.c_str());
int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
ASSERT_EQ(0, rmdir_cache_result);
- int rmdir_data_result = rmdir(android_data_.c_str());
- ASSERT_EQ(0, rmdir_data_result);
+ TearDownAndroidData(android_data_, true);
// icu4c has a fixed 10-element array "gCommonICUDataArray".
// If we run > 10 tests, we fill that array and u_setCommonData fails.
@@ -248,6 +285,19 @@
return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
}
+std::string CommonRuntimeTest::GetLibCoreOatFileName() {
+ return GetOatFileName("core");
+}
+
+std::string CommonRuntimeTest::GetOatFileName(const std::string& oat_prefix) {
+ if (IsHost()) {
+ const char* host_dir = getenv("ANDROID_HOST_OUT");
+ CHECK(host_dir != nullptr);
+ return StringPrintf("%s/framework/%s.art", host_dir, oat_prefix.c_str());
+ }
+ return StringPrintf("%s/framework/%s.art", GetAndroidRoot(), oat_prefix.c_str());
+}
+
std::string CommonRuntimeTest::GetTestAndroidRoot() {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
@@ -257,6 +307,17 @@
return GetAndroidRoot();
}
+// Check that for target builds we have ART_TARGET_NATIVETEST_DIR set.
+#ifdef ART_TARGET
+#ifndef ART_TARGET_NATIVETEST_DIR
+#error "ART_TARGET_NATIVETEST_DIR not set."
+#endif
+// Wrap it as a string literal.
+#define ART_TARGET_NATIVETEST_DIR_STRING STRINGIFY(ART_TARGET_NATIVETEST_DIR) "/"
+#else
+#define ART_TARGET_NATIVETEST_DIR_STRING ""
+#endif
+
std::vector<const DexFile*> CommonRuntimeTest::OpenTestDexFiles(const char* name) {
CHECK(name != nullptr);
std::string filename;
@@ -264,7 +325,7 @@
filename += getenv("ANDROID_HOST_OUT");
filename += "/framework/";
} else {
- filename += "/data/nativetest/art/";
+ filename += ART_TARGET_NATIVETEST_DIR_STRING;
}
filename += "art-gtest-";
filename += name;
@@ -293,23 +354,22 @@
for (const DexFile* dex_file : dex_files) {
class_linker_->RegisterDexFile(*dex_file);
}
- ScopedObjectAccessUnchecked soa(Thread::Current());
- ScopedLocalRef<jobject> class_loader_local(soa.Env(),
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
- jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
- soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
+ Thread* self = Thread::Current();
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedLocalRef<jobject> class_loader_local(env,
+ env->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
+ jobject class_loader = env->NewGlobalRef(class_loader_local.get());
+ self->SetClassLoaderOverride(class_loader_local.get());
Runtime::Current()->SetCompileTimeClassPath(class_loader, dex_files);
return class_loader;
}
CheckJniAbortCatcher::CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
- vm_->check_jni_abort_hook = Hook;
- vm_->check_jni_abort_hook_data = &actual_;
+ vm_->SetCheckJniAbortHook(Hook, &actual_);
}
CheckJniAbortCatcher::~CheckJniAbortCatcher() {
- vm_->check_jni_abort_hook = nullptr;
- vm_->check_jni_abort_hook_data = nullptr;
+ vm_->SetCheckJniAbortHook(nullptr, nullptr);
EXPECT_TRUE(actual_.empty()) << actual_;
}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index d045031..12c1241 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -64,7 +64,13 @@
class CommonRuntimeTest : public testing::Test {
public:
- static void SetEnvironmentVariables(std::string& android_data);
+ static void SetUpAndroidRoot();
+
+ // Note: setting up ANDROID_DATA may create a temporary directory. If this is used in a
+ // non-derived class, be sure to also call the corresponding tear-down below.
+ static void SetUpAndroidData(std::string& android_data);
+
+ static void TearDownAndroidData(const std::string& android_data, bool fail_on_error);
CommonRuntimeTest();
~CommonRuntimeTest();
@@ -81,12 +87,22 @@
// Allow subclases such as CommonCompilerTest to add extra options.
virtual void SetUpRuntimeOptions(RuntimeOptions* options) {}
+ void ClearDirectory(const char* dirpath);
+
virtual void TearDown();
+ // Gets the path of the libcore dex file.
std::string GetLibCoreDexFileName();
+ // Gets the path of the specified dex file for host or target.
std::string GetDexFileName(const std::string& jar_prefix);
+ // Gets the path of the libcore oat file.
+ std::string GetLibCoreOatFileName();
+
+ // Gets the path of the specified oat file for host or target.
+ std::string GetOatFileName(const std::string& oat_prefix);
+
std::string GetTestAndroidRoot();
std::vector<const DexFile*> OpenTestDexFiles(const char* name)
@@ -122,7 +138,7 @@
private:
static void Hook(void* data, const std::string& reason);
- JavaVMExt* vm_;
+ JavaVMExt* const vm_;
std::string actual_;
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index b07043f..d1a6861 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -36,6 +36,10 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
virtual void ClassRejected(ClassReference ref) = 0;
+ // Return true if we should attempt to relocate to a random base address if we have not already
+ // done so. Return false if relocating in this way would be problematic.
+ virtual bool IsRelocationPossible() = 0;
+
protected:
CompilerCallbacks() { }
};
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 4cf4c09..bc13379 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2250,15 +2250,18 @@
}
JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
- ScopedLocalRef<jobject> peer(Thread::Current()->GetJniEnv(), NULL);
+ Thread* self = Thread::Current();
+ ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
{
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
}
if (peer.get() == NULL) {
return JDWP::ERR_THREAD_NOT_ALIVE;
}
- // Suspend thread to build stack trace.
+ // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
+ // trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
bool timed_out;
Thread* thread = ThreadList::SuspendThreadByPeer(peer.get(), request_suspension, true,
&timed_out);
@@ -2450,12 +2453,9 @@
}
case JDWP::JT_DOUBLE: {
CHECK_EQ(width_, 8U);
- uint32_t lo;
- uint32_t hi;
- if (GetVReg(m, reg, kDoubleLoVReg, &lo) && GetVReg(m, reg + 1, kDoubleHiVReg, &hi)) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "get double local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ uint64_t longVal;
+ if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
+ VLOG(jdwp) << "get double local " << reg << " = " << longVal;
JDWP::Set8BE(buf_+1, longVal);
} else {
VLOG(jdwp) << "failed to get double local " << reg;
@@ -2465,12 +2465,9 @@
}
case JDWP::JT_LONG: {
CHECK_EQ(width_, 8U);
- uint32_t lo;
- uint32_t hi;
- if (GetVReg(m, reg, kLongLoVReg, &lo) && GetVReg(m, reg + 1, kLongHiVReg, &hi)) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "get long local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ uint64_t longVal;
+ if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
+ VLOG(jdwp) << "get long local " << reg << " = " << longVal;
JDWP::Set8BE(buf_+1, longVal);
} else {
VLOG(jdwp) << "failed to get long local " << reg;
@@ -2593,28 +2590,18 @@
}
case JDWP::JT_DOUBLE: {
CHECK_EQ(width_, 8U);
- const uint32_t lo = static_cast<uint32_t>(value_);
- const uint32_t hi = static_cast<uint32_t>(value_ >> 32);
- bool success = SetVReg(m, reg, lo, kDoubleLoVReg);
- success &= SetVReg(m, reg + 1, hi, kDoubleHiVReg);
+ bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
if (!success) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "failed to set double local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
error_ = kFailureErrorCode;
}
break;
}
case JDWP::JT_LONG: {
CHECK_EQ(width_, 8U);
- const uint32_t lo = static_cast<uint32_t>(value_);
- const uint32_t hi = static_cast<uint32_t>(value_ >> 32);
- bool success = SetVReg(m, reg, lo, kLongLoVReg);
- success &= SetVReg(m, reg + 1, hi, kLongHiVReg);
+ bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
if (!success) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "failed to set double local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
error_ = kFailureErrorCode;
}
break;
@@ -3048,7 +3035,7 @@
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
for (const Breakpoint& breakpoint : gBreakpoints) {
CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
@@ -3144,7 +3131,7 @@
ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
- thread_(NULL),
+ thread_(nullptr),
error_(JDWP::ERR_NONE),
self_suspend_(false),
other_suspend_(false) {
@@ -3160,10 +3147,15 @@
soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
jobject thread_peer = gRegistry->GetJObject(thread_id);
bool timed_out;
- Thread* suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
- &timed_out);
+ Thread* suspended_thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
+ suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
+ &timed_out);
+ }
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
- if (suspended_thread == NULL) {
+ if (suspended_thread == nullptr) {
// Thread terminated from under us while suspending.
error_ = JDWP::ERR_INVALID_THREAD;
} else {
@@ -3957,7 +3949,8 @@
HeapChunkContext(bool merge, bool native)
: buf_(16384 - 16),
type_(0),
- merge_(merge) {
+ merge_(merge),
+ chunk_overhead_(0) {
Reset();
if (native) {
type_ = CHUNK_TYPE("NHSG");
@@ -3972,6 +3965,14 @@
}
}
+ void SetChunkOverhead(size_t chunk_overhead) {
+ chunk_overhead_ = chunk_overhead;
+ }
+
+ void ResetStartOfNextChunk() {
+ startOfNextMemoryChunk_ = nullptr;
+ }
+
void EnsureHeader(const void* chunk_ptr) {
if (!needHeader_) {
return;
@@ -4016,7 +4017,7 @@
void Reset() {
p_ = &buf_[0];
- startOfNextMemoryChunk_ = NULL;
+ ResetStartOfNextChunk();
totalAllocationUnits_ = 0;
needHeader_ = true;
pieceLenField_ = NULL;
@@ -4043,6 +4044,8 @@
*/
bool native = type_ == CHUNK_TYPE("NHSG");
+ // TODO: I'm not sure using start of next chunk works well with multiple spaces. We shouldn't
+ // count gaps in between spaces as free memory.
if (startOfNextMemoryChunk_ != NULL) {
// Transmit any pending free memory. Native free memory of
// over kMaxFreeLen could be because of the use of mmaps, so
@@ -4069,11 +4072,8 @@
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
// If it's the same, we should combine them.
uint8_t state = ExamineObject(obj, native);
- // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
- // allocation then the first sizeof(size_t) may belong to it.
- const size_t dlMallocOverhead = sizeof(size_t);
- AppendChunk(state, start, used_bytes + dlMallocOverhead);
- startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead;
+ AppendChunk(state, start, used_bytes + chunk_overhead_);
+ startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
}
void AppendChunk(uint8_t state, void* ptr, size_t length)
@@ -4162,10 +4162,18 @@
uint32_t type_;
bool merge_;
bool needHeader_;
+ size_t chunk_overhead_;
DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};
+static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
+ HeapChunkContext::HeapChunkCallback(
+ obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
+}
+
void Dbg::DdmSendHeapSegments(bool native) {
Dbg::HpsgWhen when;
Dbg::HpsgWhat what;
@@ -4206,14 +4214,27 @@
#endif
} else {
gc::Heap* heap = Runtime::Current()->GetHeap();
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) {
- if ((*cur)->IsMallocSpace()) {
- (*cur)->AsMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ for (const auto& space : heap->GetContinuousSpaces()) {
+ if (space->IsDlMallocSpace()) {
+ // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
+ // allocation then the first sizeof(size_t) may belong to it.
+ context.SetChunkOverhead(sizeof(size_t));
+ space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ } else if (space->IsRosAllocSpace()) {
+ context.SetChunkOverhead(0);
+ space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ } else if (space->IsBumpPointerSpace()) {
+ context.SetChunkOverhead(0);
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
+ space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
+ } else {
+ UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
}
+ context.ResetStartOfNextChunk();
}
// Walk the large objects, these are not in the AllocSpace.
+ context.SetChunkOverhead(0);
heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
}
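
The JT_DOUBLE/JT_LONG changes above replace open-coded packing of two adjacent 32-bit vregs with a single GetVRegPair/SetVRegPair call. As a minimal standalone sketch of the bit manipulation those helpers encapsulate (names here are illustrative, not ART API):

#include <cstdint>

// Combine the low and high halves of a wide (long/double) virtual register
// pair into one 64-bit value, and split it back. This mirrors the hi/lo
// arithmetic the removed code did inline; GetVRegPair/SetVRegPair now hide
// it behind one call.
static inline uint64_t PackVRegPair(uint32_t lo, uint32_t hi) {
  return (static_cast<uint64_t>(hi) << 32) | lo;
}

static inline void UnpackVRegPair(uint64_t value, uint32_t* lo, uint32_t* hi) {
  *lo = static_cast<uint32_t>(value);
  *hi = static_cast<uint32_t>(value >> 32);
}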
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index e5bc7c8..e1a7771 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -951,6 +951,38 @@
return std::make_pair(tmp, colon_ptr + 1);
}
+std::string DexFile::GetMultiDexClassesDexName(size_t number, const char* dex_location) {
+ if (number == 0) {
+ return dex_location;
+ } else {
+ return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
+ }
+}
+
+std::string DexFile::GetDexCanonicalLocation(const char* dex_location) {
+ CHECK_NE(dex_location, static_cast<const char*>(nullptr));
+ char* path = nullptr;
+ if (!IsMultiDexLocation(dex_location)) {
+ path = realpath(dex_location, nullptr);
+ } else {
+ std::pair<const char*, const char*> pair = DexFile::SplitMultiDexLocation(dex_location);
+ const char* dex_real_location(realpath(pair.first, nullptr));
+ delete pair.first;
+ if (dex_real_location != nullptr) {
+ int length = strlen(dex_real_location) + strlen(pair.second) + strlen(kMultiDexSeparatorString) + 1;
+ char* multidex_canonical_location = reinterpret_cast<char*>(malloc(sizeof(char) * length));
+ snprintf(multidex_canonical_location, length, "%s" kMultiDexSeparatorString "%s", dex_real_location, pair.second);
+ free(const_cast<char*>(dex_real_location));
+ path = multidex_canonical_location;
+ }
+ }
+
+ // If realpath fails then we just copy the argument.
+ std::string result(path == nullptr ? dex_location : path);
+ free(path);
+ return result;
+}
+
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) {
os << StringPrintf("[DexFile: %s dex-checksum=%08x location-checksum=%08x %p-%p]",
dex_file.GetLocation().c_str(),
@@ -958,6 +990,7 @@
dex_file.Begin(), dex_file.Begin() + dex_file.Size());
return os;
}
+
std::string Signature::ToString() const {
if (dex_file_ == nullptr) {
CHECK(proto_id_ == nullptr);
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 04f1cc1..2794af6 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -382,6 +382,20 @@
return location_;
}
+ // For normal dex files, location and base location coincide. If a dex file is part of a multidex
+ // archive, the base location is the name of the originating jar/apk, stripped of any internal
+ // classes*.dex path.
+ const std::string GetBaseLocation() const {
+ if (IsMultiDexLocation(location_.c_str())) {
+ std::pair<const char*, const char*> pair = SplitMultiDexLocation(location_.c_str());
+ std::string res(pair.first);
+ delete[] pair.first;
+ return res;
+ } else {
+ return location_;
+ }
+ }
+
// For DexFiles directly from .dex files, this is the checksum from the DexFile::Header.
// For DexFiles opened from zip files, this will be the ZipEntry CRC32 of classes.dex.
uint32_t GetLocationChecksum() const {
@@ -827,6 +841,23 @@
return size_;
}
+ static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location);
+
+ // Returns the canonical form of the given dex location.
+ //
+ // There are different flavors of "dex locations" as follows:
+ // the file name of a dex file:
+ // The actual file path that the dex file has on disk.
+ // dex_location:
+ // This acts as a key for the class linker to know which dex file to load.
+ // It may correspond to either an old odex file or a particular dex file
+ // inside an oat file. In the first case it will also match the file name
+ // of the dex file. In the second case (oat) it will include the file name
+ // and possibly some multidex annotation to uniquely identify it.
+ // canonical_dex_location:
+ // The dex_location where its file name part has been made canonical.
+ static std::string GetDexCanonicalLocation(const char* dex_location);
+
private:
// Opens a .dex file
static const DexFile* OpenFile(int fd, const char* location, bool verify, std::string* error_msg);
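
A rough usage sketch of the new dex-location helpers declared above, with invented paths; the dex_file_test.cc additions below exercise the same behavior:

#include <string>
#include "dex_file.h"  // Declares GetMultiDexClassesDexName and friends.

void DexLocationExample(const art::DexFile& dex_file) {
  // Second classes.dex entry of an apk: "/data/app/pkg.apk:classes2.dex".
  std::string multidex =
      art::DexFile::GetMultiDexClassesDexName(1, "/data/app/pkg.apk");
  // Resolves symlinks in the file-name part, keeps the ":classesN.dex" suffix.
  std::string canonical = art::DexFile::GetDexCanonicalLocation(multidex.c_str());
  // Strips any multidex suffix, leaving the originating jar/apk path.
  std::string base = dex_file.GetBaseLocation();
}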
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 284aa89..330d045 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -345,4 +345,34 @@
}
}
+TEST_F(DexFileTest, GetMultiDexClassesDexName) {
+ std::string dex_location_str = "/system/app/framework.jar";
+ const char* dex_location = dex_location_str.c_str();
+ ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexClassesDexName(0, dex_location));
+ ASSERT_EQ("/system/app/framework.jar:classes2.dex", DexFile::GetMultiDexClassesDexName(1, dex_location));
+ ASSERT_EQ("/system/app/framework.jar:classes101.dex", DexFile::GetMultiDexClassesDexName(100, dex_location));
+}
+
+TEST_F(DexFileTest, GetDexCanonicalLocation) {
+ ScratchFile file;
+ char* dex_location_real = realpath(file.GetFilename().c_str(), nullptr);
+ std::string dex_location(dex_location_real);
+
+ ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str()));
+ std::string multidex_location = DexFile::GetMultiDexClassesDexName(1, dex_location.c_str());
+ ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location.c_str()));
+
+ std::string dex_location_sym = dex_location + "symlink";
+ ASSERT_EQ(0, symlink(dex_location.c_str(), dex_location_sym.c_str()));
+
+ ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location_sym.c_str()));
+
+ std::string multidex_location_sym = DexFile::GetMultiDexClassesDexName(1, dex_location_sym.c_str());
+ ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));
+
+ ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
+
+ free(dex_location_real);
+}
+
} // namespace art
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 291e2d0..7e6bdfa 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -170,13 +170,29 @@
return true;
}
-bool DexFileVerifier::CheckPointerRange(const void* start, const void* end, const char* label) {
+bool DexFileVerifier::CheckListSize(const void* start, size_t count, size_t elem_size,
+ const char* label) {
+ // Check that size is not 0.
+ CHECK_NE(elem_size, 0U);
+
const byte* range_start = reinterpret_cast<const byte*>(start);
- const byte* range_end = reinterpret_cast<const byte*>(end);
const byte* file_start = reinterpret_cast<const byte*>(begin_);
+
+ // Check for overflow.
+ uintptr_t max = 0 - 1;
+ size_t available_bytes_till_end_of_mem = max - reinterpret_cast<uintptr_t>(start);
+ size_t max_count = available_bytes_till_end_of_mem / elem_size;
+ if (max_count < count) {
+ ErrorStringPrintf("Overflow in range for %s: %zx for %zu@%zu", label,
+ static_cast<size_t>(range_start - file_start),
+ count, elem_size);
+ return false;
+ }
+
+ const byte* range_end = range_start + count * elem_size;
const byte* file_end = file_start + size_;
- if (UNLIKELY((range_start < file_start) || (range_start > file_end) ||
- (range_end < file_start) || (range_end > file_end))) {
+ if (UNLIKELY((range_start < file_start) || (range_end > file_end))) {
+ // Note: these two tests are enough as we make sure above that there's no overflow.
ErrorStringPrintf("Bad range for %s: %zx to %zx", label,
static_cast<size_t>(range_start - file_start),
static_cast<size_t>(range_end - file_start));
@@ -185,12 +201,6 @@
return true;
}
-bool DexFileVerifier::CheckListSize(const void* start, uint32_t count,
- uint32_t element_size, const char* label) {
- const byte* list_start = reinterpret_cast<const byte*>(start);
- return CheckPointerRange(list_start, list_start + (count * element_size), label);
-}
-
bool DexFileVerifier::CheckIndex(uint32_t field, uint32_t limit, const char* label) {
if (UNLIKELY(field >= limit)) {
ErrorStringPrintf("Bad index for %s: %x >= %x", label, field, limit);
@@ -329,7 +339,7 @@
uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
uint32_t result = 0;
- if (LIKELY(CheckPointerRange(ptr_, ptr_ + size, "encoded_value"))) {
+ if (LIKELY(CheckListSize(ptr_, size, sizeof(byte), "encoded_value"))) {
for (uint32_t i = 0; i < size; i++) {
result |= ((uint32_t) *(ptr_++)) << (i * 8);
}
@@ -447,7 +457,7 @@
bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
if (offset < aligned_offset) {
- if (!CheckPointerRange(begin_ + offset, begin_ + aligned_offset, "section")) {
+ if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(byte), "section")) {
return false;
}
while (offset < aligned_offset) {
@@ -463,7 +473,7 @@
}
bool DexFileVerifier::CheckEncodedValue() {
- if (!CheckPointerRange(ptr_, ptr_ + 1, "encoded_value header")) {
+ if (!CheckListSize(ptr_, 1, sizeof(byte), "encoded_value header")) {
return false;
}
@@ -656,7 +666,7 @@
bool DexFileVerifier::CheckIntraCodeItem() {
const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(ptr_);
- if (!CheckPointerRange(code_item, code_item + 1, "code")) {
+ if (!CheckListSize(code_item, 1, sizeof(DexFile::CodeItem), "code")) {
return false;
}
@@ -945,7 +955,7 @@
}
bool DexFileVerifier::CheckIntraAnnotationItem() {
- if (!CheckPointerRange(ptr_, ptr_ + 1, "annotation visibility")) {
+ if (!CheckListSize(ptr_, 1, sizeof(byte), "annotation visibility")) {
return false;
}
@@ -970,7 +980,7 @@
bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
const DexFile::AnnotationsDirectoryItem* item =
reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr_);
- if (!CheckPointerRange(item, item + 1, "annotations_directory")) {
+ if (!CheckListSize(item, 1, sizeof(DexFile::AnnotationsDirectoryItem), "annotations_directory")) {
return false;
}
@@ -1064,42 +1074,42 @@
// Check depending on the section type.
switch (type) {
case DexFile::kDexTypeStringIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::StringId), "string_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::StringId), "string_ids")) {
return false;
}
ptr_ += sizeof(DexFile::StringId);
break;
}
case DexFile::kDexTypeTypeIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::TypeId), "type_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::TypeId), "type_ids")) {
return false;
}
ptr_ += sizeof(DexFile::TypeId);
break;
}
case DexFile::kDexTypeProtoIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::ProtoId), "proto_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::ProtoId), "proto_ids")) {
return false;
}
ptr_ += sizeof(DexFile::ProtoId);
break;
}
case DexFile::kDexTypeFieldIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::FieldId), "field_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::FieldId), "field_ids")) {
return false;
}
ptr_ += sizeof(DexFile::FieldId);
break;
}
case DexFile::kDexTypeMethodIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::MethodId), "method_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::MethodId), "method_ids")) {
return false;
}
ptr_ += sizeof(DexFile::MethodId);
break;
}
case DexFile::kDexTypeClassDefItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::ClassDef), "class_defs")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::ClassDef), "class_defs")) {
return false;
}
ptr_ += sizeof(DexFile::ClassDef);
@@ -1110,7 +1120,7 @@
const DexFile::TypeItem* item = &list->GetTypeItem(0);
uint32_t count = list->Size();
- if (!CheckPointerRange(list, list + 1, "type_list") ||
+ if (!CheckListSize(list, 1, sizeof(DexFile::TypeList), "type_list") ||
!CheckListSize(item, count, sizeof(DexFile::TypeItem), "type_list size")) {
return false;
}
@@ -1123,7 +1133,8 @@
const DexFile::AnnotationSetRefItem* item = list->list_;
uint32_t count = list->size_;
- if (!CheckPointerRange(list, list + 1, "annotation_set_ref_list") ||
+ if (!CheckListSize(list, 1, sizeof(DexFile::AnnotationSetRefList),
+ "annotation_set_ref_list") ||
!CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem),
"annotation_set_ref_list size")) {
return false;
@@ -1137,7 +1148,7 @@
const uint32_t* item = set->entries_;
uint32_t count = set->size_;
- if (!CheckPointerRange(set, set + 1, "annotation_set_item") ||
+ if (!CheckListSize(set, 1, sizeof(DexFile::AnnotationSetItem), "annotation_set_item") ||
!CheckListSize(item, count, sizeof(uint32_t), "annotation_set_item size")) {
return false;
}
@@ -1644,12 +1655,25 @@
bool DexFileVerifier::CheckInterClassDefItem() {
const DexFile::ClassDef* item = reinterpret_cast<const DexFile::ClassDef*>(ptr_);
+ // Check for duplicate class def.
+ if (defined_classes_.find(item->class_idx_) != defined_classes_.end()) {
+ ErrorStringPrintf("Redefinition of class with type idx: '%d'", item->class_idx_);
+ return false;
+ }
+ defined_classes_.insert(item->class_idx_);
+
LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_class_def_item class_idx")
if (UNLIKELY(!IsValidDescriptor(class_descriptor) || class_descriptor[0] != 'L')) {
ErrorStringPrintf("Invalid class descriptor: '%s'", class_descriptor);
return false;
}
+ // Only allow non-runtime modifiers.
+ if ((item->access_flags_ & ~kAccJavaFlagsMask) != 0) {
+ ErrorStringPrintf("Invalid class flags: '%d'", item->access_flags_);
+ return false;
+ }
+
if (item->interfaces_off_ != 0 &&
!CheckOffsetToTypeMap(item->interfaces_off_, DexFile::kDexTypeTypeList)) {
return false;
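
CheckListSize now verifies that count * elem_size cannot overflow before doing the range comparison. A self-contained sketch of the same overflow-safe bounds check, with illustrative names rather than the verifier's:

#include <cstddef>
#include <cstdint>

// Returns true if [start, start + count * elem_size) lies inside
// [file_start, file_start + file_size) and the multiplication cannot wrap.
static bool RangeFits(const uint8_t* file_start, size_t file_size,
                      const uint8_t* start, size_t count, size_t elem_size) {
  // Maximum number of elements that can follow 'start' without wrapping
  // around the address space.
  uintptr_t available = UINTPTR_MAX - reinterpret_cast<uintptr_t>(start);
  if (count > available / elem_size) {
    return false;  // count * elem_size would overflow.
  }
  const uint8_t* end = start + count * elem_size;
  return start >= file_start && end <= file_start + file_size;
}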
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index f845993..0af3549 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_DEX_FILE_VERIFIER_H_
#define ART_RUNTIME_DEX_FILE_VERIFIER_H_
+#include <unordered_set>
+
#include "dex_file.h"
#include "safe_map.h"
@@ -40,8 +42,7 @@
bool Verify();
bool CheckShortyDescriptorMatch(char shorty_char, const char* descriptor, bool is_return_type);
- bool CheckPointerRange(const void* start, const void* end, const char* label);
- bool CheckListSize(const void* start, uint32_t count, uint32_t element_size, const char* label);
+ bool CheckListSize(const void* start, size_t count, size_t element_size, const char* label);
bool CheckIndex(uint32_t field, uint32_t limit, const char* label);
bool CheckHeader();
@@ -115,6 +116,9 @@
const void* previous_item_;
std::string failure_reason_;
+
+ // Set of type ids for which there are ClassDef elements in the dex file.
+ std::unordered_set<decltype(DexFile::ClassDef::class_idx_)> defined_classes_;
};
} // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index e5402e1..6179b5e 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -837,6 +837,7 @@
}
}
+ bool reserved = false;
for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf32_Phdr& program_header = GetProgramHeader(i);
@@ -853,10 +854,8 @@
// Found something to load.
- // If p_vaddr is zero, it must be the first loadable segment,
- // since they must be in order. Since it is zero, there isn't a
- // specific address requested, so first request a contiguous chunk
- // of required size for all segments, but with no
+ // Before loading the actual segments, reserve a contiguous chunk
+ // of required size and address for all segments, but with no
// permissions. We'll then carve that up with the proper
// permissions as we load the actual segments. If p_vaddr is
// non-zero, the segments require the specific address specified,
@@ -870,18 +869,24 @@
return false;
}
size_t file_length = static_cast<size_t>(temp_file_length);
- if (program_header.p_vaddr == 0) {
+ if (!reserved) {
+ byte* reserve_base = ((program_header.p_vaddr != 0) ?
+ reinterpret_cast<byte*>(program_header.p_vaddr) : nullptr);
std::string reservation_name("ElfFile reservation for ");
reservation_name += file_->GetPath();
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
- nullptr, GetLoadedSize(), PROT_NONE, false,
- error_msg));
+ reserve_base,
+ GetLoadedSize(), PROT_NONE, false,
+ error_msg));
if (reserve.get() == nullptr) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
reservation_name.c_str(), error_msg->c_str());
return false;
}
- base_address_ = reserve->Begin();
+ reserved = true;
+ if (reserve_base == nullptr) {
+ base_address_ = reserve->Begin();
+ }
segments_.push_back(reserve.release());
}
// empty segment, nothing to map
@@ -1033,18 +1038,13 @@
}
static bool IsFDE(FDE* frame) {
- // TODO This seems to be the constant everyone uses (for the .debug_frame
- // section at least), however we should investigate this further.
- const uint32_t kDwarfCIE_id = 0xffffffff;
- const uint32_t kReservedLengths[] = {0xffffffff, 0xfffffff0};
- return frame->CIE_pointer != kDwarfCIE_id &&
- frame->raw_length_ != kReservedLengths[0] && frame->raw_length_ != kReservedLengths[1];
+ return frame->CIE_pointer != 0;
}
// TODO This only works for 32-bit Elf Files.
-static bool FixupDebugFrame(uintptr_t text_start, byte* dbg_frame, size_t dbg_frame_size) {
- FDE* last_frame = reinterpret_cast<FDE*>(dbg_frame + dbg_frame_size);
- FDE* frame = NextFDE(reinterpret_cast<FDE*>(dbg_frame));
+static bool FixupEHFrame(uintptr_t text_start, byte* eh_frame, size_t eh_frame_size) {
+ FDE* last_frame = reinterpret_cast<FDE*>(eh_frame + eh_frame_size);
+ FDE* frame = NextFDE(reinterpret_cast<FDE*>(eh_frame));
for (; frame < last_frame; frame = NextFDE(frame)) {
if (!IsFDE(frame)) {
return false;
@@ -1301,7 +1301,7 @@
static bool FixupDebugSections(const byte* dbg_abbrev, size_t dbg_abbrev_size,
uintptr_t text_start,
byte* dbg_info, size_t dbg_info_size,
- byte* dbg_frame, size_t dbg_frame_size) {
+ byte* eh_frame, size_t eh_frame_size) {
std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(dbg_abbrev, dbg_abbrev_size));
if (abbrev.get() == nullptr) {
return false;
@@ -1313,7 +1313,7 @@
return false;
}
return FixupDebugInfo(text_start, iter.get())
- && FixupDebugFrame(text_start, dbg_frame, dbg_frame_size);
+ && FixupEHFrame(text_start, eh_frame, eh_frame_size);
}
void ElfFile::GdbJITSupport() {
@@ -1334,20 +1334,16 @@
// Do we have interesting sections?
const Elf32_Shdr* debug_info = all.FindSectionByName(".debug_info");
const Elf32_Shdr* debug_abbrev = all.FindSectionByName(".debug_abbrev");
- const Elf32_Shdr* debug_frame = all.FindSectionByName(".debug_frame");
+ const Elf32_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
const Elf32_Shdr* debug_str = all.FindSectionByName(".debug_str");
const Elf32_Shdr* strtab_sec = all.FindSectionByName(".strtab");
const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
Elf32_Shdr* text_sec = all.FindSectionByName(".text");
- if (debug_info == nullptr || debug_abbrev == nullptr || debug_frame == nullptr ||
- debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+ if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
+ debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
+ symtab_sec == nullptr) {
return;
}
-#ifdef __LP64__
- if (true) {
- return; // No ELF debug support in 64bit.
- }
-#endif
// We need to add in a strtab and symtab to the image.
// all is MAP_PRIVATE so it can be written to freely.
// We also already have strtab and symtab so we are fine there.
@@ -1364,7 +1360,7 @@
if (!FixupDebugSections(
all.Begin() + debug_abbrev->sh_offset, debug_abbrev->sh_size, text_sec->sh_addr,
all.Begin() + debug_info->sh_offset, debug_info->sh_size,
- all.Begin() + debug_frame->sh_offset, debug_frame->sh_size)) {
+ all.Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
LOG(ERROR) << "Failed to load GDB data";
return;
}
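
The segment-loading change above reserves one contiguous PROT_NONE region for all segments (at the requested address when p_vaddr is non-zero) and only then maps each segment into that reservation with its real permissions. A simplified sketch of the reserve-then-carve idea, using raw mmap rather than ART's MemMap wrapper:

#include <sys/mman.h>
#include <cstddef>

// Reserve 'total_size' bytes of address space, optionally near 'requested'
// (a hint unless MAP_FIXED is used), with no permissions. Segments are later
// mapped over parts of this region with their actual permissions.
static void* ReserveLoadRegion(void* requested, size_t total_size) {
  void* base = mmap(requested, total_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return base == MAP_FAILED ? nullptr : base;
}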
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 542e1a9..b874a74 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -25,7 +25,6 @@
#include "indirect_reference_table.h"
#include "invoke_type.h"
#include "jni_internal.h"
-#include "method_helper.h"
#include "mirror/art_method.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
@@ -38,11 +37,11 @@
// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS {
- mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
+ALWAYS_INLINE
+static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self, bool* slow_path) {
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == NULL)) {
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
*slow_path = true;
@@ -88,9 +87,10 @@
}
// TODO: Fix no thread safety analysis when annotalysis is smarter.
-ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
+ Thread* self,
+ bool* slow_path) {
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(klass));
@@ -118,11 +118,11 @@
// check.
// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
bool slow_path = false;
mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
if (UNLIKELY(slow_path)) {
@@ -138,11 +138,11 @@
// Given the context of a calling Method and a resolved class, create an instance.
// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
bool slow_path = false;
klass = CheckClassInitializedForObjectAlloc(klass, self, &slow_path);
@@ -161,11 +161,11 @@
// Given the context of a calling Method and an initialized class, create an instance.
// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
// Pass in false since the object can not be finalizable.
return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -174,17 +174,17 @@
// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ bool* slow_path) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
*slow_path = true;
return nullptr; // Failure
}
- mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
*slow_path = true;
@@ -211,12 +211,12 @@
// check.
// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
bool slow_path = false;
mirror::Class* klass = CheckArrayAlloc<kAccessCheck>(type_idx, method, component_count,
&slow_path);
@@ -234,12 +234,12 @@
}
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
@@ -398,26 +398,26 @@
case kDirect:
return resolved_method;
case kVirtual: {
- mirror::ObjectArray<mirror::ArtMethod>* vtable = (*this_object)->GetClass()->GetVTable();
+ mirror::Class* klass = (*this_object)->GetClass();
uint16_t vtable_index = resolved_method->GetMethodIndex();
if (access_check &&
- (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength()))) {
+ (!klass->HasVTable() ||
+ vtable_index >= static_cast<uint32_t>(klass->GetVTableLength()))) {
// Behavior to agree with that of the verifier.
ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
resolved_method->GetName(), resolved_method->GetSignature());
return nullptr; // Failure.
}
- DCHECK(vtable != nullptr);
- return vtable->GetWithoutChecks(vtable_index);
+ DCHECK(klass->HasVTable()) << PrettyClass(klass);
+ return klass->GetVTableEntry(vtable_index);
}
case kSuper: {
mirror::Class* super_class = (*referrer)->GetDeclaringClass()->GetSuperClass();
uint16_t vtable_index = resolved_method->GetMethodIndex();
- mirror::ObjectArray<mirror::ArtMethod>* vtable;
if (access_check) {
// Check existence of super class.
- vtable = (super_class != nullptr) ? super_class->GetVTable() : nullptr;
- if (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength())) {
+ if (super_class == nullptr || !super_class->HasVTable() ||
+ vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
// Behavior to agree with that of the verifier.
ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
resolved_method->GetName(), resolved_method->GetSignature());
@@ -426,10 +426,9 @@
} else {
// Super class must exist.
DCHECK(super_class != nullptr);
- vtable = super_class->GetVTable();
}
- DCHECK(vtable != nullptr);
- return vtable->GetWithoutChecks(vtable_index);
+ DCHECK(super_class->HasVTable());
+ return super_class->GetVTableEntry(vtable_index);
}
case kInterface: {
uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
@@ -476,8 +475,7 @@
// Fast path field resolution that can't initialize classes or throw exceptions.
static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
mirror::ArtMethod* referrer,
- FindFieldType type, size_t expected_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FindFieldType type, size_t expected_size) {
mirror::ArtField* resolved_field =
referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
if (UNLIKELY(resolved_field == nullptr)) {
@@ -534,8 +532,7 @@
static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
mirror::Object* this_object,
mirror::ArtMethod* referrer,
- bool access_check, InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool access_check, InvokeType type) {
bool is_direct = type == kStatic || type == kDirect;
if (UNLIKELY(this_object == NULL && !is_direct)) {
return NULL;
@@ -565,19 +562,18 @@
} else if (is_direct) {
return resolved_method;
} else if (type == kSuper) {
- return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()->
- Get(resolved_method->GetMethodIndex());
+ return referrer->GetDeclaringClass()->GetSuperClass()
+ ->GetVTableEntry(resolved_method->GetMethodIndex());
} else {
DCHECK(type == kVirtual);
- return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex());
+ return this_object->GetClass()->GetVTableEntry(resolved_method->GetMethodIndex());
}
}
static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
mirror::ArtMethod* referrer,
Thread* self, bool can_run_clinit,
- bool verify_access)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool verify_access) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = class_linker->ResolveType(type_idx, referrer);
if (UNLIKELY(klass == nullptr)) {
@@ -611,14 +607,12 @@
}
static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
- uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t string_idx) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
return class_linker->ResolveString(string_idx, referrer);
}
-static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
- NO_THREAD_SAFETY_ANALYSIS /* SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) */ {
+static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
mirror::Throwable* saved_exception = NULL;
ThrowLocation saved_throw_location;
@@ -642,27 +636,7 @@
}
}
-static inline void CheckReferenceResult(mirror::Object* o, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (o == NULL) {
- return;
- }
- mirror::ArtMethod* m = self->GetCurrentMethod(NULL);
- if (o == kInvalidIndirectRefObject) {
- JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str());
- }
- // Make sure that the result is an instance of the type this method was expected to return.
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> h_m(hs.NewHandle(m));
- mirror::Class* return_type = MethodHelper(h_m).GetReturnType();
-
- if (!o->InstanceOf(return_type)) {
- JniAbortF(NULL, "attempt to return an instance of %s from %s", PrettyTypeOf(o).c_str(),
- PrettyMethod(h_m.Get()).c_str());
- }
-}
-
-static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static inline void CheckSuspend(Thread* thread) {
for (;;) {
if (thread->ReadFlag(kCheckpointRequest)) {
thread->RunCheckpointFunction();
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index d063dfb..4755b9e 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -20,6 +20,7 @@
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
+#include "method_helper-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -40,7 +41,7 @@
ThrowNegativeArraySizeException(component_count);
return nullptr; // Failure
}
- mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
+ mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
if (klass == NULL) { // Error
@@ -109,8 +110,8 @@
void ThrowStackOverflowError(Thread* self) {
if (self->IsHandlingStackOverflow()) {
- LOG(ERROR) << "Recursive stack overflow.";
- // We don't fail here because SetStackEndForStackOverflow will print better diagnostics.
+ LOG(ERROR) << "Recursive stack overflow.";
+ // We don't fail here because SetStackEndForStackOverflow will print better diagnostics.
}
if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
@@ -122,15 +123,90 @@
JNIEnvExt* env = self->GetJniEnv();
std::string msg("stack size ");
msg += PrettySize(self->GetStackSize());
- // Use low-level JNI routine and pre-baked error class to avoid class linking operations that
- // would consume more stack.
- int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError,
- msg.c_str(), NULL);
- if (rc != JNI_OK) {
- // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME
- // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError
- // instead.
- LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed.";
+
+ // Avoid running Java code for exception initialization.
+ // TODO: Add checks to make this a bit less brittle.
+
+ std::string error_msg;
+
+ // Allocate an uninitialized object.
+ ScopedLocalRef<jobject> exc(env,
+ env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
+ if (exc.get() != nullptr) {
+ // "Initialize".
+ // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object.
+ // Only Throwable has "custom" fields:
+ // String detailMessage.
+ // Throwable cause (= this).
+ // List<Throwable> suppressedExceptions (= Collections.emptyList()).
+ // Object stackState;
+ // StackTraceElement[] stackTrace;
+ // Only Throwable has a non-empty constructor:
+ // this.stackTrace = EmptyArray.STACK_TRACE_ELEMENT;
+ // fillInStackTrace();
+
+ // detailMessage.
+ // TODO: Use String::FromModifiedUTF...?
+ ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
+ if (s.get() != nullptr) {
+ jfieldID detail_message_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "detailMessage", "Ljava/lang/String;");
+ env->SetObjectField(exc.get(), detail_message_id, s.get());
+
+ // cause.
+ jfieldID cause_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "cause", "Ljava/lang/Throwable;");
+ env->SetObjectField(exc.get(), cause_id, exc.get());
+
+ // suppressedExceptions.
+ jfieldID emptylist_id = env->GetStaticFieldID(WellKnownClasses::java_util_Collections,
+ "EMPTY_LIST", "Ljava/util/List;");
+ ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
+ WellKnownClasses::java_util_Collections, emptylist_id));
+ CHECK(emptylist.get() != nullptr);
+ jfieldID suppressed_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "suppressedExceptions", "Ljava/util/List;");
+ env->SetObjectField(exc.get(), suppressed_id, emptylist.get());
+
+ // stackState is set as result of fillInStackTrace. fillInStackTrace calls
+ // nativeFillInStackTrace.
+ ScopedLocalRef<jobject> stack_state_val(env, nullptr);
+ {
+ ScopedObjectAccessUnchecked soa(env);
+ stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
+ }
+ if (stack_state_val.get() != nullptr) {
+ jfieldID stackstateID = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "stackState", "Ljava/lang/Object;");
+ env->SetObjectField(exc.get(), stackstateID, stack_state_val.get());
+
+ // stackTrace.
+ jfieldID stack_trace_elem_id = env->GetStaticFieldID(
+ WellKnownClasses::libcore_util_EmptyArray, "STACK_TRACE_ELEMENT",
+ "[Ljava/lang/StackTraceElement;");
+ ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
+ WellKnownClasses::libcore_util_EmptyArray, stack_trace_elem_id));
+ jfieldID stacktrace_id = env->GetFieldID(
+ WellKnownClasses::java_lang_Throwable, "stackTrace", "[Ljava/lang/StackTraceElement;");
+ env->SetObjectField(exc.get(), stacktrace_id, stack_trace_elem.get());
+
+ // Throw the exception.
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->SetException(throw_location,
+ reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
+ } else {
+ error_msg = "Could not create stack trace.";
+ }
+ } else {
+ // Could not allocate a string object.
+ error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
+ }
+ } else {
+ error_msg = "Could not allocate StackOverflowError object.";
+ }
+
+ if (!error_msg.empty()) {
+ LOG(ERROR) << error_msg;
CHECK(self->IsExceptionPending());
}
@@ -138,6 +214,27 @@
self->ResetDefaultStackEnd(!explicit_overflow_check); // Return to default stack size.
}
+void CheckReferenceResult(mirror::Object* o, Thread* self) {
+ if (o == NULL) {
+ return;
+ }
+ mirror::ArtMethod* m = self->GetCurrentMethod(NULL);
+ if (o == kInvalidIndirectRefObject) {
+ Runtime::Current()->GetJavaVM()->JniAbortF(NULL, "invalid reference returned from %s",
+ PrettyMethod(m).c_str());
+ }
+ // Make sure that the result is an instance of the type this method was expected to return.
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> h_m(hs.NewHandle(m));
+ mirror::Class* return_type = MethodHelper(h_m).GetReturnType();
+
+ if (!o->InstanceOf(return_type)) {
+ Runtime::Current()->GetJavaVM()->JniAbortF(NULL, "attempt to return an instance of %s from %s",
+ PrettyTypeOf(o).c_str(),
+ PrettyMethod(h_m.Get()).c_str());
+ }
+}
+
JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
jobject rcvr_jobj, jobject interface_method_jobj,
std::vector<jvalue>& args) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 11a67ac..44c89ad 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -40,67 +40,63 @@
class ScopedObjectAccessAlreadyRunnable;
class Thread;
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self,
- gc::AllocatorType allocator_type);
+ gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method and a resolved class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method and an initialized class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
mirror::ArtMethod* method,
Thread* self,
- gc::AllocatorType allocator_type);
+ gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
mirror::ArtMethod* method,
int32_t component_count,
bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
mirror::ArtMethod* method,
int32_t component_count,
Thread* self,
gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -108,7 +104,7 @@
int32_t component_count,
Thread* self,
gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
int32_t component_count, Thread* self,
@@ -171,10 +167,11 @@
uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ NO_THREAD_SAFETY_ANALYSIS;
-static inline void CheckReferenceResult(mirror::Object* o, Thread* self)
+void CheckReferenceResult(mirror::Object* o, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
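
Several declarations above trade NO_THREAD_SAFETY_ANALYSIS for SHARED_LOCKS_REQUIRED(Locks::mutator_lock_), letting clang's thread-safety analysis check that callers hold the mutator lock. A generic, declaration-only sketch of such annotations using raw clang attribute spellings (assumed to be roughly what ART's macros expand to):

// Compile with clang and -Wthread-safety for the analysis to run.
struct __attribute__((capability("mutex"))) FakeMutex {
  void Acquire() __attribute__((acquire_capability()));
  void Release() __attribute__((release_capability()));
};

extern FakeMutex gMutatorLock;

// Callers must already hold gMutatorLock (shared); call sites that cannot
// prove this produce a -Wthread-safety warning.
void WalkHeap() __attribute__((requires_shared_capability(gMutatorLock)));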
diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
index be6231c..4317358 100644
--- a/runtime/entrypoints/portable/portable_throw_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
@@ -98,7 +98,7 @@
}
// Does this catch exception type apply?
mirror::Class* iter_exception_type =
- current_method->GetDexCacheResolvedTypes()->Get(iter_type_idx);
+ current_method->GetDexCacheResolvedType(iter_type_idx);
if (UNLIKELY(iter_exception_type == NULL)) {
// TODO: check, the verifier (class linker?) should take care of resolving all exception
// classes early.
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 1f2713a..9d850c5 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -25,11 +25,34 @@
namespace art {
+static constexpr bool kUseTlabFastPath = true;
+
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \
+ if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
+ size_t byte_count = klass->GetObjectSize(); \
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+ mirror::Object* obj; \
+ if (LIKELY(byte_count < self->TlabSize())) { \
+ obj = self->AllocTlab(byte_count); \
+ DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+ obj->SetClass(klass); \
+ if (kUseBakerOrBrooksReadBarrier) { \
+ if (kUseBrooksReadBarrier) { \
+ obj->SetReadBarrierPointer(obj); \
+ } \
+ obj->AssertReadBarrierPointer(); \
+ } \
+ QuasiAtomic::ThreadFenceForConstructor(); \
+ return obj; \
+ } \
+ } \
+ } \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
@@ -37,6 +60,26 @@
mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ if (LIKELY(klass->IsInitialized())) { \
+ size_t byte_count = klass->GetObjectSize(); \
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+ mirror::Object* obj; \
+ if (LIKELY(byte_count < self->TlabSize())) { \
+ obj = self->AllocTlab(byte_count); \
+ DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+ obj->SetClass(klass); \
+ if (kUseBakerOrBrooksReadBarrier) { \
+ if (kUseBrooksReadBarrier) { \
+ obj->SetReadBarrierPointer(obj); \
+ } \
+ obj->AssertReadBarrierPointer(); \
+ } \
+ QuasiAtomic::ThreadFenceForConstructor(); \
+ return obj; \
+ } \
+ } \
+ } \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
} \
@@ -44,6 +87,24 @@
mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ size_t byte_count = klass->GetObjectSize(); \
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+ mirror::Object* obj; \
+ if (LIKELY(byte_count < self->TlabSize())) { \
+ obj = self->AllocTlab(byte_count); \
+ DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+ obj->SetClass(klass); \
+ if (kUseBakerOrBrooksReadBarrier) { \
+ if (kUseBrooksReadBarrier) { \
+ obj->SetReadBarrierPointer(obj); \
+ } \
+ obj->AssertReadBarrierPointer(); \
+ } \
+ QuasiAtomic::ThreadFenceForConstructor(); \
+ return obj; \
+ } \
+ } \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
} \
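
Each generated entrypoint above first attempts a thread-local allocation buffer (TLAB) bump allocation and only falls back to FinishCalleeSaveFrameSetup plus the generic allocator on a miss. A stripped-down sketch of the bump-pointer idea, separate from the real Thread::AllocTlab:

#include <cstddef>
#include <cstdint>

// Simplified thread-local bump-pointer buffer. The real fast path also
// checks that the class is resolved, initialized and non-finalizable, sets
// the object's class pointer, and issues a fence before publishing.
struct Tlab {
  uint8_t* pos;
  uint8_t* end;

  size_t BytesLeft() const { return static_cast<size_t>(end - pos); }

  // Returns nullptr if the request does not fit; the caller then takes the
  // slow path and refills or bypasses the buffer.
  void* Alloc(size_t byte_count) {
    if (byte_count > BytesLeft()) {
      return nullptr;
    }
    void* result = pos;
    pos += byte_count;
    return result;
  }
};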
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 032f6be..8c108a8 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -37,107 +37,11 @@
// Pointers to functions that are called by quick compiler generated code via thread-local storage.
struct PACKED(4) QuickEntryPoints {
- // Alloc
- void* (*pAllocArray)(uint32_t, void*, int32_t);
- void* (*pAllocArrayResolved)(void*, void*, int32_t);
- void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
- void* (*pAllocObject)(uint32_t, void*);
- void* (*pAllocObjectResolved)(void*, void*);
- void* (*pAllocObjectInitialized)(void*, void*);
- void* (*pAllocObjectWithAccessCheck)(uint32_t, void*);
- void* (*pCheckAndAllocArray)(uint32_t, void*, int32_t);
- void* (*pCheckAndAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
-
- // Cast
- uint32_t (*pInstanceofNonTrivial)(const mirror::Class*, const mirror::Class*);
- void (*pCheckCast)(void*, void*);
-
- // DexCache
- void* (*pInitializeStaticStorage)(uint32_t, void*);
- void* (*pInitializeTypeAndVerifyAccess)(uint32_t, void*);
- void* (*pInitializeType)(uint32_t, void*);
- void* (*pResolveString)(void*, uint32_t);
-
- // Field
- int (*pSet32Instance)(uint32_t, void*, int32_t); // field_idx, obj, src
- int (*pSet32Static)(uint32_t, int32_t);
- int (*pSet64Instance)(uint32_t, void*, int64_t);
- int (*pSet64Static)(uint32_t, int64_t);
- int (*pSetObjInstance)(uint32_t, void*, void*);
- int (*pSetObjStatic)(uint32_t, void*);
- int32_t (*pGet32Instance)(uint32_t, void*);
- int32_t (*pGet32Static)(uint32_t);
- int64_t (*pGet64Instance)(uint32_t, void*);
- int64_t (*pGet64Static)(uint32_t);
- void* (*pGetObjInstance)(uint32_t, void*);
- void* (*pGetObjStatic)(uint32_t);
-
- // Array
- void (*pAputObjectWithNullAndBoundCheck)(void*, uint32_t, void*); // array, index, src
- void (*pAputObjectWithBoundCheck)(void*, uint32_t, void*); // array, index, src
- void (*pAputObject)(void*, uint32_t, void*); // array, index, src
- void (*pHandleFillArrayData)(void*, void*);
-
- // JNI
- uint32_t (*pJniMethodStart)(Thread*);
- uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self);
- void (*pJniMethodEnd)(uint32_t cookie, Thread* self);
- void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self);
- mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self);
- mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie,
- jobject locked, Thread* self);
- void (*pQuickGenericJniTrampoline)(mirror::ArtMethod*);
-
- // Locks
- void (*pLockObject)(void*);
- void (*pUnlockObject)(void*);
-
- // Math
- int32_t (*pCmpgDouble)(double, double);
- int32_t (*pCmpgFloat)(float, float);
- int32_t (*pCmplDouble)(double, double);
- int32_t (*pCmplFloat)(float, float);
- double (*pFmod)(double, double);
- double (*pL2d)(int64_t);
- float (*pFmodf)(float, float);
- float (*pL2f)(int64_t);
- int32_t (*pD2iz)(double);
- int32_t (*pF2iz)(float);
- int32_t (*pIdivmod)(int32_t, int32_t);
- int64_t (*pD2l)(double);
- int64_t (*pF2l)(float);
- int64_t (*pLdiv)(int64_t, int64_t);
- int64_t (*pLmod)(int64_t, int64_t);
- int64_t (*pLmul)(int64_t, int64_t);
- uint64_t (*pShlLong)(uint64_t, uint32_t);
- uint64_t (*pShrLong)(uint64_t, uint32_t);
- uint64_t (*pUshrLong)(uint64_t, uint32_t);
-
- // Intrinsics
- int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
- int32_t (*pStringCompareTo)(void*, void*);
- void* (*pMemcpy)(void*, const void*, size_t);
-
- // Invocation
- void (*pQuickImtConflictTrampoline)(mirror::ArtMethod*);
- void (*pQuickResolutionTrampoline)(mirror::ArtMethod*);
- void (*pQuickToInterpreterBridge)(mirror::ArtMethod*);
- void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*);
-
- // Thread
- void (*pTestSuspend)(); // Stub that is periodically called to test the suspend count
-
- // Throws
- void (*pDeliverException)(void*);
- void (*pThrowArrayBounds)(int32_t, int32_t);
- void (*pThrowDivZero)();
- void (*pThrowNoSuchMethod)(int32_t);
- void (*pThrowNullPointer)();
- void (*pThrowStackOverflow)(void*);
+#define ENTRYPOINT_ENUM(name, rettype, ...) rettype ( * p ## name )( __VA_ARGS__ );
+#include "quick_entrypoints_list.h"
+ QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+#undef QUICK_ENTRYPOINT_LIST
+#undef ENTRYPOINT_ENUM
};
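
The QuickEntryPoints struct is now generated from a single X-macro list: each consumer defines ENTRYPOINT_ENUM, includes quick_entrypoints_list.h, and gets one expansion per entrypoint. A tiny generic illustration of the pattern (names invented, not the ART list):

// X-macro pattern: one list, many expansions.
#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

// Expansion 1: an enum.
enum Color {
#define MAKE_ENUM(name) k##name,
  COLOR_LIST(MAKE_ENUM)
#undef MAKE_ENUM
};

// Expansion 2: matching string names, kept in sync automatically.
static const char* kColorNames[] = {
#define MAKE_NAME(name) #name,
  COLOR_LIST(MAKE_NAME)
#undef MAKE_NAME
};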
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
new file mode 100644
index 0000000..84158cd
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
+
+#include "quick_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+
+// Define an enum for the entrypoints. Names are prefixed with 'kQuick'.
+enum QuickEntrypointEnum
+{ // NOLINT(whitespace/braces)
+#define ENTRYPOINT_ENUM(name, rettype, ...) kQuick ## name,
+#include "quick_entrypoints_list.h"
+ QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+#undef QUICK_ENTRYPOINT_LIST
+#undef ENTRYPOINT_ENUM
+};
+
+std::ostream& operator<<(std::ostream& os, const QuickEntrypointEnum& kind);
+
+// Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
+template <size_t pointer_size>
+static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
+ switch (trampoline)
+ { // NOLINT(whitespace/braces)
+ #define ENTRYPOINT_ENUM(name, rettype, ...) case kQuick ## name : \
+ return QUICK_ENTRYPOINT_OFFSET(pointer_size, p ## name);
+ #include "quick_entrypoints_list.h"
+ QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+ #undef QUICK_ENTRYPOINT_LIST
+ #undef ENTRYPOINT_ENUM
+ };
+ LOG(FATAL) << "Unexpected trampoline " << static_cast<int>(trampoline);
+ return ThreadOffset<pointer_size>(-1);
+}
+
+} // namespace art
+
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
new file mode 100644
index 0000000..f858743
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+
+// All quick entrypoints. Format is name, return type, argument types.
+
+#define QUICK_ENTRYPOINT_LIST(V) \
+ V(AllocArray, void*, uint32_t, void*, int32_t) \
+ V(AllocArrayResolved, void*, void*, void*, int32_t) \
+ V(AllocArrayWithAccessCheck, void*, uint32_t, void*, int32_t) \
+ V(AllocObject, void*, uint32_t, void*) \
+ V(AllocObjectResolved, void*, void*, void*) \
+ V(AllocObjectInitialized, void*, void*, void*) \
+ V(AllocObjectWithAccessCheck, void*, uint32_t, void*) \
+ V(CheckAndAllocArray, void*, uint32_t, void*, int32_t) \
+ V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, void*, int32_t) \
+\
+ V(InstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*) \
+ V(CheckCast, void , void*, void*) \
+\
+ V(InitializeStaticStorage, void*, uint32_t, void*) \
+ V(InitializeTypeAndVerifyAccess, void*, uint32_t, void*) \
+ V(InitializeType, void*, uint32_t, void*) \
+ V(ResolveString, void*, void*, uint32_t) \
+\
+ V(Set32Instance, int, uint32_t, void*, int32_t) \
+ V(Set32Static, int, uint32_t, int32_t) \
+ V(Set64Instance, int, uint32_t, void*, int64_t) \
+ V(Set64Static, int, uint32_t, int64_t) \
+ V(SetObjInstance, int, uint32_t, void*, void*) \
+ V(SetObjStatic, int, uint32_t, void*) \
+ V(Get32Instance, int32_t, uint32_t, void*) \
+ V(Get32Static, int32_t, uint32_t) \
+ V(Get64Instance, int64_t, uint32_t, void*) \
+ V(Get64Static, int64_t, uint32_t) \
+ V(GetObjInstance, void*, uint32_t, void*) \
+ V(GetObjStatic, void*, uint32_t) \
+\
+ V(AputObjectWithNullAndBoundCheck, void, void*, uint32_t, void*) \
+ V(AputObjectWithBoundCheck, void, void*, uint32_t, void*) \
+ V(AputObject, void, void*, uint32_t, void*) \
+ V(HandleFillArrayData, void, void*, void*) \
+\
+ V(JniMethodStart, uint32_t, Thread*) \
+ V(JniMethodStartSynchronized, uint32_t, jobject to_lock, Thread* self) \
+ V(JniMethodEnd, void, uint32_t cookie, Thread* self) \
+ V(JniMethodEndSynchronized, void, uint32_t cookie, jobject locked, Thread* self) \
+ V(JniMethodEndWithReference, mirror::Object*, jobject result, uint32_t cookie, Thread* self) \
+ V(JniMethodEndWithReferenceSynchronized, mirror::Object*, jobject result, uint32_t cookie, jobject locked, Thread* self) \
+ V(QuickGenericJniTrampoline, void, mirror::ArtMethod*) \
+\
+ V(LockObject, void, void*) \
+ V(UnlockObject, void, void*) \
+\
+ V(CmpgDouble, int32_t, double, double) \
+ V(CmpgFloat, int32_t, float, float) \
+ V(CmplDouble, int32_t, double, double) \
+ V(CmplFloat, int32_t, float, float) \
+ V(Fmod, double, double, double) \
+ V(L2d, double, int64_t) \
+ V(Fmodf, float, float, float) \
+ V(L2f, float, int64_t) \
+ V(D2iz, int32_t, double) \
+ V(F2iz, int32_t, float) \
+ V(Idivmod, int32_t, int32_t, int32_t) \
+ V(D2l, int64_t, double) \
+ V(F2l, int64_t, float) \
+ V(Ldiv, int64_t, int64_t, int64_t) \
+ V(Lmod, int64_t, int64_t, int64_t) \
+ V(Lmul, int64_t, int64_t, int64_t) \
+ V(ShlLong, uint64_t, uint64_t, uint32_t) \
+ V(ShrLong, uint64_t, uint64_t, uint32_t) \
+ V(UshrLong, uint64_t, uint64_t, uint32_t) \
+\
+ V(IndexOf, int32_t, void*, uint32_t, uint32_t, uint32_t) \
+ V(StringCompareTo, int32_t, void*, void*) \
+ V(Memcpy, void*, void*, const void*, size_t) \
+\
+ V(QuickImtConflictTrampoline, void, mirror::ArtMethod*) \
+ V(QuickResolutionTrampoline, void, mirror::ArtMethod*) \
+ V(QuickToInterpreterBridge, void, mirror::ArtMethod*) \
+ V(InvokeDirectTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeInterfaceTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeStaticTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeSuperTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeVirtualTrampolineWithAccessCheck, void, uint32_t, void*) \
+\
+ V(TestSuspend, void, void) \
+\
+ V(DeliverException, void, void*) \
+ V(ThrowArrayBounds, void, int32_t, int32_t) \
+ V(ThrowDivZero, void, void) \
+ V(ThrowNoSuchMethod, void, int32_t) \
+ V(ThrowNullPointer, void, void) \
+ V(ThrowStackOverflow, void, void*) \
+\
+ V(A64Load, int64_t, volatile const int64_t *) \
+ V(A64Store, void, volatile int64_t *, int64_t)
+
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+#undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint.
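
For context, a minimal, self-contained C++ sketch of the X-macro pattern the new list header enables. All names here (SKETCH_ENTRYPOINT_LIST, Foo, Bar, SketchEntryPoints) are made up for illustration; only the expansion technique mirrors how QUICK_ENTRYPOINT_LIST drives the QuickEntryPoints struct, the QuickEntrypointEnum, and GetThreadOffset() above.

// Sketch only: one list macro, expanded three times, keeps members, enum
// values, and the name-to-offset lookup in sync.
#include <cstddef>
#include <cstdint>
#include <iostream>

#define SKETCH_ENTRYPOINT_LIST(V) \
  V(Foo, void, void*)             \
  V(Bar, int32_t, int32_t, int32_t)

// First expansion: function-pointer members (cf. QuickEntryPoints).
struct SketchEntryPoints {
#define ENTRYPOINT_MEMBER(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
  SKETCH_ENTRYPOINT_LIST(ENTRYPOINT_MEMBER)
#undef ENTRYPOINT_MEMBER
};

// Second expansion: the enum (cf. QuickEntrypointEnum).
enum SketchEntrypointEnum {
#define ENTRYPOINT_ENUM(name, rettype, ...) kSketch##name,
  SKETCH_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
#undef ENTRYPOINT_ENUM
};

// Third expansion: enum value -> member offset (cf. GetThreadOffset()).
size_t GetOffset(SketchEntrypointEnum e) {
  switch (e) {
#define ENTRYPOINT_CASE(name, rettype, ...) \
    case kSketch##name: return offsetof(SketchEntryPoints, p##name);
    SKETCH_ENTRYPOINT_LIST(ENTRYPOINT_CASE)
#undef ENTRYPOINT_CASE
  }
  return static_cast<size_t>(-1);
}

int main() {
  std::cout << GetOffset(kSketchBar) << "\n";  // typically sizeof(void*)
  return 0;
}
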
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 338bd06..4730701 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -58,9 +58,12 @@
static constexpr bool kQuickSoftFloatAbi = true; // This is a soft float ABI.
static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 0; // 0 arguments passed in FPRs.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0; // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8; // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44; // Offset of return address.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
+ arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
+ arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
+ arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -74,13 +77,13 @@
// | arg1 spill | |
// | Method* | ---
// | LR |
- // | X28 |
+ // | X29 |
// | : |
- // | X19 |
+ // | X20 |
// | X7 |
// | : |
// | X1 |
- // | D15 |
+ // | D7 |
// | : |
// | D0 |
// | | padding
@@ -88,9 +91,12 @@
static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144; // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296; // Offset of return address.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
+ arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
+ arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
+ arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -586,8 +592,7 @@
const char* old_cause =
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
- DCHECK_EQ(sp->AsMirrorPtr(), proxy_method)
- << PrettyMethod(proxy_method);
+ DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
self->SetTopOfStack(sp, 0);
DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
@@ -785,8 +790,8 @@
// We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
// of the sharpened method.
- if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
- caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
+ if (called->HasSameDexCacheResolvedMethods(caller)) {
+ caller->SetDexCacheResolvedMethod(called->GetDexMethodIndex(), called);
} else {
// Calling from one dex file to another, need to compute the method index appropriate to
// the caller's dex file. Since we get here only if the original called was a runtime
@@ -796,7 +801,7 @@
MethodHelper mh(hs.NewHandle(called));
uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
if (method_index != DexFile::kDexNoIndex) {
- caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
+ caller->SetDexCacheResolvedMethod(method_index, called);
}
}
}
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 79c68a2..ae1b94f 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -259,8 +259,10 @@
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, kPointerSize);
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, kPointerSize);
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, kPointerSize);
- CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pThrowStackOverflow)
+ CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pA64Store)
+ kPointerSize == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index b4355ca..8ddaf5c 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -18,12 +18,9 @@
#include <sys/mman.h>
#include <sys/ucontext.h>
-
#include "mirror/art_method.h"
#include "mirror/class.h"
-#ifdef HAVE_ANDROID_OS
#include "sigchain.h"
-#endif
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -40,6 +37,7 @@
// Signal handler called on SIGSEGV.
static void art_fault_handler(int sig, siginfo_t* info, void* context) {
+ // std::cout << "handling fault in ART handler\n";
fault_manager.HandleFault(sig, info, context);
}
@@ -48,10 +46,6 @@
}
FaultManager::~FaultManager() {
-#ifdef HAVE_ANDROID_OS
- UnclaimSignalChain(SIGSEGV);
-#endif
- sigaction(SIGSEGV, &oldaction_, nullptr); // Restore old handler.
}
@@ -65,11 +59,12 @@
#endif
// Set our signal handler now.
- sigaction(SIGSEGV, &action, &oldaction_);
-#ifdef HAVE_ANDROID_OS
+ int e = sigaction(SIGSEGV, &action, &oldaction_);
+ if (e != 0) {
+ VLOG(signals) << "Failed to claim SEGV: " << strerror(errno);
+ }
// Make sure our signal handler is called before any user handlers.
ClaimSignalChain(SIGSEGV, &oldaction_);
-#endif
}
void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
@@ -77,8 +72,13 @@
//
// If malloc calls abort, it will be holding its lock.
// If the handler tries to call malloc, it will deadlock.
+
+ // Also, there is only an 8K stack available here so logging can cause memory
+ // overwrite issues if you are unlucky. If you want to enable logging and
+ // are getting crashes, allocate more space for the alternate signal stack.
+
VLOG(signals) << "Handling fault";
- if (IsInGeneratedCode(context, true)) {
+ if (IsInGeneratedCode(info, context, true)) {
VLOG(signals) << "in generated code, looking for handler";
for (const auto& handler : generated_code_handlers_) {
VLOG(signals) << "invoking Action on handler " << handler;
@@ -92,13 +92,11 @@
return;
}
}
+
art_sigsegv_fault();
-#ifdef HAVE_ANDROID_OS
+ // Pass this on to the next handler in the chain, or the default if none.
InvokeUserSignalHandler(sig, info, context);
-#else
- oldaction_.sa_sigaction(sig, info, context);
-#endif
}
void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
@@ -125,7 +123,7 @@
// This function is called within the signal handler. It checks that
// the mutator_lock is held (shared). No annotalysis is done.
-bool FaultManager::IsInGeneratedCode(void* context, bool check_dex_pc) {
+bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool check_dex_pc) {
// We can only be running Java code in the current thread if it
// is in Runnable state.
VLOG(signals) << "Checking for generated code";
@@ -154,7 +152,7 @@
// Get the architecture specific method address and return address. These
// are in architecture specific files in arch/<arch>/fault_handler_<arch>.
- GetMethodAndReturnPCAndSP(context, &method_obj, &return_pc, &sp);
+ GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp);
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
@@ -235,12 +233,12 @@
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- if (manager_->IsInGeneratedCode(context, false)) {
+ if (manager_->IsInGeneratedCode(siginfo, context, false)) {
LOG(ERROR) << "Dumping java stack trace for crash in generated code";
mirror::ArtMethod* method = nullptr;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
- manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp);
+ manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp);
Thread* self = Thread::Current();
// Inside of generated code, sp[0] is the method, so sp is the frame.
StackReference<mirror::ArtMethod>* frame =
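
To illustrate what the fault handler changes above build on, here is a self-contained sketch of plain signal-handler chaining, roughly what the removed non-Android branch did with oldaction_. The sigchain mechanism the patch switches to (ClaimSignalChain / InvokeUserSignalHandler) generalizes this so ART's handler stays first even if user code installs its own handler later. HandledInGeneratedCode is a placeholder, not an ART function.

#include <signal.h>
#include <cstring>

static struct sigaction g_old_action;

static bool HandledInGeneratedCode(siginfo_t* /*info*/, void* /*context*/) {
  return false;  // placeholder: a real handler would inspect the faulting PC/method
}

static void FaultHandler(int sig, siginfo_t* info, void* context) {
  if (HandledInGeneratedCode(info, context)) {
    return;  // the fault belonged to managed code and was fixed up
  }
  // Otherwise forward to whatever was installed before us, or re-raise with the
  // default disposition so the process still crashes visibly.
  if (g_old_action.sa_flags & SA_SIGINFO) {
    g_old_action.sa_sigaction(sig, info, context);
  } else if (g_old_action.sa_handler != SIG_DFL && g_old_action.sa_handler != SIG_IGN) {
    g_old_action.sa_handler(sig);
  } else {
    signal(sig, SIG_DFL);
    raise(sig);
  }
}

int main() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_sigaction = FaultHandler;
  action.sa_flags = SA_SIGINFO | SA_ONSTACK;   // may run on an alternate stack
  sigaction(SIGSEGV, &action, &g_old_action);  // keep the old action for chaining
  return 0;
}
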
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 026f5b9..1acd024 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -43,9 +43,16 @@
void HandleFault(int sig, siginfo_t* info, void* context);
void AddHandler(FaultHandler* handler, bool generated_code);
void RemoveHandler(FaultHandler* handler);
- void GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp);
- bool IsInGeneratedCode(void *context, bool check_dex_pc) NO_THREAD_SAFETY_ANALYSIS;
+
+ // Note that the following two functions are called in the context of a signal handler.
+ // The IsInGeneratedCode() function checks that the mutator lock is held before it
+ // calls GetMethodAndReturnPcAndSp().
+ // TODO: think about adding lock assertions and fake lock and unlock functions.
+ void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, mirror::ArtMethod** out_method,
+ uintptr_t* out_return_pc, uintptr_t* out_sp)
+ NO_THREAD_SAFETY_ANALYSIS;
+ bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
+ NO_THREAD_SAFETY_ANALYSIS;
private:
std::vector<FaultHandler*> generated_code_handlers_;
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 46b9363..217360f 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -37,12 +37,13 @@
// Align the address down.
address -= shift_in_bytes;
const size_t shift_in_bits = shift_in_bytes * kBitsPerByte;
- AtomicInteger* word_atomic = reinterpret_cast<AtomicInteger*>(address);
+ Atomic<uintptr_t>* word_atomic = reinterpret_cast<Atomic<uintptr_t>*>(address);
// Word with the byte we are trying to cas cleared.
- const int32_t cur_word = word_atomic->LoadRelaxed() & ~(0xFF << shift_in_bits);
- const int32_t old_word = cur_word | (static_cast<int32_t>(old_value) << shift_in_bits);
- const int32_t new_word = cur_word | (static_cast<int32_t>(new_value) << shift_in_bits);
+ const uintptr_t cur_word = word_atomic->LoadRelaxed() &
+ ~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
+ const uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
+ const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
return word_atomic->CompareExchangeWeakRelaxed(old_word, new_word);
#endif
}
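
As a standalone sketch of the byte-within-word CAS that this hunk widens from int32_t to uintptr_t: align the byte's address down to its containing word, mask out the target byte, splice in the old and new byte values, and CAS the whole word. std::atomic stands in for ART's Atomic<>, and like the original this relies on the card bytes living inside normally accessible words.

#include <atomic>
#include <cstddef>
#include <cstdint>

inline bool ByteCas(uint8_t* address, uint8_t old_value, uint8_t new_value) {
  const size_t shift_in_bytes = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
  // Align the address down to the word that contains the byte.
  auto* word_atomic =
      reinterpret_cast<std::atomic<uintptr_t>*>(address - shift_in_bytes);
  const size_t shift_in_bits = shift_in_bytes * 8;
  // Word with the byte we are trying to CAS cleared.
  const uintptr_t cur_word = word_atomic->load(std::memory_order_relaxed) &
      ~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
  uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
  const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
  return word_atomic->compare_exchange_weak(old_word, new_word,
                                            std::memory_order_relaxed);
}
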
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index ceb42e5..0498550 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -28,6 +28,11 @@
namespace gc {
namespace accounting {
+constexpr size_t CardTable::kCardShift;
+constexpr size_t CardTable::kCardSize;
+constexpr uint8_t CardTable::kCardClean;
+constexpr uint8_t CardTable::kCardDirty;
+
/*
* Maintain a card table from the write barrier. All writes of
* non-NULL values to heap addresses should go through an entry in
@@ -55,9 +60,9 @@
size_t capacity = heap_capacity / kCardSize;
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous("card table", NULL,
- capacity + 256, PROT_READ | PROT_WRITE,
- false, &error_msg));
+ std::unique_ptr<MemMap> mem_map(
+ MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
+ false, &error_msg));
CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, so we
// don't clear the card table to avoid unnecessary pages being allocated
@@ -67,17 +72,17 @@
CHECK(cardtable_begin != NULL);
// We allocated up to a bytes worth of extra space to allow biased_begin's byte value to equal
- // GC_CARD_DIRTY, compute a offset value to make this the case
+ // kCardDirty, compute an offset value to make this the case
size_t offset = 0;
byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
(reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
- if (((uintptr_t)biased_begin & 0xff) != kCardDirty) {
- int delta = kCardDirty - (reinterpret_cast<uintptr_t>(biased_begin) & 0xff);
+ uintptr_t biased_byte = reinterpret_cast<uintptr_t>(biased_begin) & 0xff;
+ if (biased_byte != kCardDirty) {
+ int delta = kCardDirty - biased_byte;
offset = delta + (delta < 0 ? 0x100 : 0);
biased_begin += offset;
}
CHECK_EQ(reinterpret_cast<uintptr_t>(biased_begin) & 0xff, kCardDirty);
-
return new CardTable(mem_map.release(), biased_begin, offset);
}
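
A worked example of the bias computed above, with made-up addresses (not taken from a real run): the card for an address addr lives at biased_begin + (addr >> kCardShift), and biased_begin is nudged by at most 255 of the spare bytes so that its low byte equals kCardDirty, letting compiled code dirty a card by storing the base register's low byte.

#include <cstdint>
#include <iostream>

int main() {
  const uintptr_t cardtable_begin = 0x74001000;  // hypothetical mapping address
  const uintptr_t heap_begin      = 0x12c00000;  // hypothetical heap start
  const unsigned kCardShift = 7;                 // 128-byte cards
  const unsigned kCardDirty = 0x70;

  uintptr_t biased_begin = cardtable_begin - (heap_begin >> kCardShift);
  const uintptr_t biased_byte = biased_begin & 0xff;  // here: 0x00
  if (biased_byte != kCardDirty) {
    const int delta = static_cast<int>(kCardDirty) - static_cast<int>(biased_byte);
    const uintptr_t offset = delta + (delta < 0 ? 0x100 : 0);  // uses the 256 spare bytes
    biased_begin += offset;                                    // here: +0x70
  }
  std::cout << std::hex << (biased_begin & 0xff) << "\n";  // prints 70
  return 0;
}
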
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 7934974..fbeea85 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -46,10 +46,10 @@
// WriteBarrier, and from there to here.
class CardTable {
public:
- static const size_t kCardShift = 7;
- static const size_t kCardSize = (1 << kCardShift);
- static const uint8_t kCardClean = 0x0;
- static const uint8_t kCardDirty = 0x70;
+ static constexpr size_t kCardShift = 7;
+ static constexpr size_t kCardSize = 1 << kCardShift;
+ static constexpr uint8_t kCardClean = 0x0;
+ static constexpr uint8_t kCardDirty = 0x70;
static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
new file mode 100644
index 0000000..433855a
--- /dev/null
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "card_table-inl.h"
+
+#include <string>
+
+#include "atomic.h"
+#include "common_runtime_test.h"
+#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/string-inl.h" // Strings are easiest to allocate
+#include "scoped_thread_state_change.h"
+#include "thread_pool.h"
+#include "utils.h"
+
+namespace art {
+
+namespace mirror {
+ class Object;
+} // namespace mirror
+
+namespace gc {
+namespace accounting {
+
+class CardTableTest : public CommonRuntimeTest {
+ public:
+ std::unique_ptr<CardTable> card_table_;
+
+ void CommonSetup() {
+ if (card_table_.get() == nullptr) {
+ card_table_.reset(CardTable::Create(heap_begin_, heap_size_));
+ EXPECT_TRUE(card_table_.get() != nullptr);
+ } else {
+ ClearCardTable();
+ }
+ }
+ // Default values for the test, not random to avoid non-deterministic behaviour.
+ CardTableTest() : heap_begin_(reinterpret_cast<byte*>(0x2000000)), heap_size_(2 * MB) {
+ }
+ void ClearCardTable() {
+ card_table_->ClearCardTable();
+ }
+ byte* HeapBegin() const {
+ return heap_begin_;
+ }
+ byte* HeapLimit() const {
+ return HeapBegin() + heap_size_;
+ }
+ // Return a pseudo random card for an address.
+ byte PseudoRandomCard(const byte* addr) const {
+ size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
+ return 1 + offset % 254;
+ }
+ void FillRandom() {
+ for (const byte* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
+ EXPECT_TRUE(card_table_->AddrIsInCardTable(addr));
+ byte* card = card_table_->CardFromAddr(addr);
+ *card = PseudoRandomCard(addr);
+ }
+ }
+
+ private:
+ byte* const heap_begin_;
+ const size_t heap_size_;
+};
+
+TEST_F(CardTableTest, TestMarkCard) {
+ CommonSetup();
+ for (const byte* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
+ auto obj = reinterpret_cast<const mirror::Object*>(addr);
+ EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardClean);
+ EXPECT_TRUE(!card_table_->IsDirty(obj));
+ card_table_->MarkCard(addr);
+ EXPECT_TRUE(card_table_->IsDirty(obj));
+ EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardDirty);
+ byte* card_addr = card_table_->CardFromAddr(addr);
+ EXPECT_EQ(*card_addr, CardTable::kCardDirty);
+ *card_addr = CardTable::kCardClean;
+ EXPECT_EQ(*card_addr, CardTable::kCardClean);
+ }
+}
+
+class UpdateVisitor {
+ public:
+ byte operator()(byte c) const {
+ return c * 93 + 123;
+ }
+ void operator()(byte* /*card*/, byte /*expected_value*/, byte /*new_value*/) const {
+ }
+};
+
+TEST_F(CardTableTest, TestModifyCardsAtomic) {
+ CommonSetup();
+ FillRandom();
+ const size_t delta = std::min(static_cast<size_t>(HeapLimit() - HeapBegin()),
+ 8U * CardTable::kCardSize);
+ UpdateVisitor visitor;
+ size_t start_offset = 0;
+ for (byte* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
+ start_offset = (start_offset + kObjectAlignment) % CardTable::kCardSize;
+ size_t end_offset = 0;
+ for (byte* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
+ // Don't always start at a card boundary.
+ byte* start = cstart + start_offset;
+ byte* end = cend - end_offset;
+ end_offset = (end_offset + kObjectAlignment) % CardTable::kCardSize;
+ // Modify cards.
+ card_table_->ModifyCardsAtomic(start, end, visitor, visitor);
+ // Check adjacent cards not modified.
+ for (byte* cur = start - CardTable::kCardSize; cur >= HeapBegin();
+ cur -= CardTable::kCardSize) {
+ EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
+ PseudoRandomCard(cur));
+ }
+ for (byte* cur = end + CardTable::kCardSize; cur < HeapLimit();
+ cur += CardTable::kCardSize) {
+ EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
+ PseudoRandomCard(cur));
+ }
+ // Verify Range.
+ for (byte* cur = start; cur < AlignUp(end, CardTable::kCardSize);
+ cur += CardTable::kCardSize) {
+ byte* card = card_table_->CardFromAddr(cur);
+ byte value = PseudoRandomCard(cur);
+ EXPECT_EQ(visitor(value), *card);
+ // Restore for next iteration.
+ *card = value;
+ }
+ }
+ }
+}
+
+// TODO: Add test for CardTable::Scan.
+} // namespace accounting
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 228d1dc..2686af0 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -185,7 +185,7 @@
<< from_space->GetGcRetentionPolicy();
LOG(INFO) << "ToSpace " << to_space->GetName() << " type "
<< to_space->GetGcRetentionPolicy();
- heap->DumpSpaces();
+ heap->DumpSpaces(LOG(INFO));
LOG(FATAL) << "FATAL ERROR";
}
}
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 46d79bf..07b61e6 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -55,7 +55,8 @@
: heap_(heap),
name_(name),
pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
- cumulative_timings_(name) {
+ cumulative_timings_(name),
+ pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true) {
ResetCumulativeStatistics();
}
@@ -65,10 +66,11 @@
void GarbageCollector::ResetCumulativeStatistics() {
cumulative_timings_.Reset();
- pause_histogram_.Reset();
total_time_ns_ = 0;
total_freed_objects_ = 0;
total_freed_bytes_ = 0;
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ pause_histogram_.Reset();
}
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
@@ -95,6 +97,7 @@
}
total_time_ns_ += current_iteration->GetDurationNs();
for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
+ MutexLock mu(self, pause_histogram_lock_);
pause_histogram_.AddValue(pause_time / 1000);
}
ATRACE_END();
@@ -137,8 +140,11 @@
}
void GarbageCollector::ResetMeasurements() {
+ {
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ pause_histogram_.Reset();
+ }
cumulative_timings_.Reset();
- pause_histogram_.Reset();
total_time_ns_ = 0;
total_freed_objects_ = 0;
total_freed_bytes_ = 0;
@@ -171,6 +177,38 @@
heap_->RecordFree(freed.objects, freed.bytes);
}
+uint64_t GarbageCollector::GetTotalPausedTimeNs() {
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ return pause_histogram_.AdjustedSum();
+}
+
+void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
+ const CumulativeLogger& logger = GetCumulativeTimings();
+ const size_t iterations = logger.GetIterations();
+ if (iterations == 0) {
+ return;
+ }
+ os << ConstDumpable<CumulativeLogger>(logger);
+ const uint64_t total_ns = logger.GetTotalNs();
+ double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
+ const uint64_t freed_bytes = GetTotalFreedBytes();
+ const uint64_t freed_objects = GetTotalFreedObjects();
+ {
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ if (pause_histogram_.SampleSize() > 0) {
+ Histogram<uint64_t>::CumulativeData cumulative_data;
+ pause_histogram_.CreateHistogram(&cumulative_data);
+ pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
+ }
+ }
+ os << GetName() << " total time: " << PrettyDuration(total_ns)
+ << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
+ << GetName() << " freed: " << freed_objects
+ << " objects with total size " << PrettySize(freed_bytes) << "\n"
+ << GetName() << " throughput: " << freed_objects / seconds << "/s / "
+ << PrettySize(freed_bytes / seconds) << "/s\n";
+}
+
} // namespace collector
} // namespace gc
} // namespace art
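
The locking change above follows a simple pattern: the histogram is made GUARDED_BY a dedicated mutex and every reader and writer takes it. A minimal standard-C++ sketch of that pattern, with std::mutex and std::lock_guard standing in for ART's Mutex, MutexLock and thread-safety annotations:

#include <cstdint>
#include <mutex>
#include <vector>

class PauseStats {
 public:
  void AddPause(uint64_t ns) {
    std::lock_guard<std::mutex> lock(lock_);  // cf. MutexLock mu(self, pause_histogram_lock_)
    pauses_ns_.push_back(ns);
  }
  uint64_t TotalPausedTimeNs() {
    std::lock_guard<std::mutex> lock(lock_);
    uint64_t total = 0;
    for (uint64_t ns : pauses_ns_) total += ns;
    return total;
  }
  void Reset() {
    std::lock_guard<std::mutex> lock(lock_);
    pauses_ns_.clear();
  }

 private:
  std::mutex lock_;
  std::vector<uint64_t> pauses_ns_;  // conceptually GUARDED_BY(lock_)
};
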
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 885569e..b809469 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -119,18 +119,13 @@
GarbageCollector(Heap* heap, const std::string& name);
virtual ~GarbageCollector() { }
-
const char* GetName() const {
return name_.c_str();
}
-
virtual GcType GetGcType() const = 0;
-
virtual CollectorType GetCollectorType() const = 0;
-
// Run the garbage collector.
void Run(GcCause gc_cause, bool clear_soft_references);
-
Heap* GetHeap() const {
return heap_;
}
@@ -138,24 +133,17 @@
const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
-
void ResetCumulativeStatistics();
-
// Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
// this is the allocation space, for full GC then we swap the zygote bitmaps too.
void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- uint64_t GetTotalPausedTimeNs() const {
- return pause_histogram_.AdjustedSum();
- }
+ uint64_t GetTotalPausedTimeNs() LOCKS_EXCLUDED(pause_histogram_lock_);
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
}
uint64_t GetTotalFreedObjects() const {
return total_freed_objects_;
}
- const Histogram<uint64_t>& GetPauseHistogram() const {
- return pause_histogram_;
- }
// Reset the cumulative timings and pause histogram.
void ResetMeasurements();
// Returns the estimated throughput in bytes / second.
@@ -174,11 +162,11 @@
void RecordFree(const ObjectBytePair& freed);
// Record a free of large objects.
void RecordFreeLOS(const ObjectBytePair& freed);
+ void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
protected:
// Run all of the GC phases.
virtual void RunPhases() = 0;
-
// Revoke all the thread-local buffers.
virtual void RevokeAllThreadLocalBuffers() = 0;
@@ -188,11 +176,12 @@
Heap* const heap_;
std::string name_;
// Cumulative statistics.
- Histogram<uint64_t> pause_histogram_;
+ Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
uint64_t total_time_ns_;
uint64_t total_freed_objects_;
int64_t total_freed_bytes_;
CumulativeLogger cumulative_timings_;
+ mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
};
} // namespace collector
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 974952d..104ed36 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -32,10 +32,7 @@
template<typename MarkVisitor, typename ReferenceVisitor>
inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
const ReferenceVisitor& ref_visitor) {
- if (kIsDebugBuild && !IsMarked(obj)) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Scanning unmarked object " << obj;
- }
+ DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
obj->VisitReferences<false>(visitor, ref_visitor);
if (kCountScannedTypes) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7e97b3b..95530be 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -313,10 +313,8 @@
}
}
}
- if (current_space_bitmap_ == nullptr) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Could not find a default mark bitmap";
- }
+ CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
+ << heap_->DumpSpaces();
}
void MarkSweep::ExpandMarkStack() {
@@ -943,12 +941,9 @@
void MarkSweep::VerifyIsLive(const Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
- if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
- heap_->allocation_stack_->End()) {
- // Object not found!
- heap_->DumpSpaces();
- LOG(FATAL) << "Found dead object " << obj;
- }
+ accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
+ CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
+ allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
}
}
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 47682cc..922a71c 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -64,34 +64,25 @@
// Verify all the objects have the correct forward pointer installed.
obj->AssertReadBarrierPointer();
}
- if (!immune_region_.ContainsObject(obj)) {
- if (from_space_->HasAddress(obj)) {
- mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
- // If the object has already been moved, return the new forward address.
- if (UNLIKELY(forward_address == nullptr)) {
- forward_address = MarkNonForwardedObject(obj);
- DCHECK(forward_address != nullptr);
- // Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't Get stomped over.
- obj->SetLockWord(
- LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
- // Push the object onto the mark stack for later processing.
- MarkStackPush(forward_address);
- }
- obj_ptr->Assign(forward_address);
- } else {
- BitmapSetSlowPathVisitor visitor(this);
- if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
- // If a bump pointer space only collection, we should not
- // reach here as we don't/won't mark the objects in the
- // non-moving space (except for the promoted objects.) Note
- // the non-moving space is added to the immune space.
- DCHECK(!generational_ || whole_heap_collection_);
- }
- if (!mark_bitmap_->Set(obj, visitor)) {
- // This object was not previously marked.
- MarkStackPush(obj);
- }
+ if (from_space_->HasAddress(obj)) {
+ mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+ // If the object has already been moved, return the new forward address.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = MarkNonForwardedObject(obj);
+ DCHECK(forward_address != nullptr);
+ // Make sure to only update the forwarding address AFTER you copy the object so that the
+ // monitor word doesn't Get stomped over.
+ obj->SetLockWord(
+ LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
+ // Push the object onto the mark stack for later processing.
+ MarkStackPush(forward_address);
+ }
+ obj_ptr->Assign(forward_address);
+ } else if (!collect_from_space_only_ && !immune_region_.ContainsObject(obj)) {
+ BitmapSetSlowPathVisitor visitor(this);
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
}
}
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index cabfe21..8fb33ce 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -63,23 +63,23 @@
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->GetLiveBitmap() != nullptr) {
- if (space == to_space_) {
- CHECK(to_space_->IsContinuousMemMapAllocSpace());
- to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
- } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
- || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
- // Add the main free list space and the non-moving
- // space to the immune space if a bump pointer space
- // only collection.
- || (generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() ||
- space == GetHeap()->GetPrimaryFreeListSpace()))) {
- CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ } else if (space->GetLiveBitmap() != nullptr) {
+ if (space == to_space_ || collect_from_space_only_) {
+ if (collect_from_space_only_) {
+ // Bind the bitmaps of the main free list space and the non-moving space if we are doing a
+ // bump pointer space only collection.
+ CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
+ space == GetHeap()->GetNonMovingSpace());
+ }
+ CHECK(space->IsContinuousMemMapAllocSpace());
+ space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
}
}
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
}
@@ -95,7 +95,7 @@
bytes_promoted_(0),
bytes_promoted_since_last_whole_heap_collection_(0),
large_object_bytes_allocated_at_last_whole_heap_collection_(0),
- whole_heap_collection_(true),
+ collect_from_space_only_(generational),
collector_name_(name_),
swap_semi_spaces_(true) {
}
@@ -147,6 +147,10 @@
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
+ if (generational_) {
+ promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
+ }
+ fallback_space_ = GetHeap()->GetNonMovingSpace();
}
void SemiSpace::ProcessReferences(Thread* self) {
@@ -180,9 +184,9 @@
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
- if (whole_heap_collection_) {
+ if (!collect_from_space_only_) {
VLOG(heap) << "Whole heap collection";
name_ = collector_name_ + " whole";
} else {
@@ -191,7 +195,7 @@
}
}
- if (!generational_ || whole_heap_collection_) {
+ if (!collect_from_space_only_) {
// If non-generational, always clear soft references.
// If generational, clear soft references if a whole heap collection.
GetCurrentIteration()->SetClearSoftReferences(true);
@@ -218,7 +222,6 @@
heap_->GetCardTable()->ClearCardTable();
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
- t.NewTiming("SwapStacks");
if (kUseThreadLocalAllocationStack) {
TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
heap_->RevokeAllThreadLocalAllocationStacks(self_);
@@ -227,8 +230,6 @@
{
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
MarkRoots();
- // Mark roots of immune spaces.
- UpdateAndMarkModUnion();
// Recursively mark remaining objects.
MarkReachableObjects();
}
@@ -259,46 +260,6 @@
}
}
-void SemiSpace::UpdateAndMarkModUnion() {
- for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune then we need to mark the references to other spaces.
- if (immune_region_.ContainsSpace(space)) {
- accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
- if (table != nullptr) {
- // TODO: Improve naming.
- TimingLogger::ScopedTiming t(
- space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
- "UpdateAndMarkImageModUnionTable",
- GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
- } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
- DCHECK(kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. The non-moving space
- // doesn't have a mod union table, but has a remembered
- // set. Its dirty cards will be scanned later in
- // MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- } else {
- DCHECK(!kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. But the non-moving
- // space doesn't have a mod union table. Instead, its live
- // bitmap will be scanned later in MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- }
- }
- }
-}
-
class SemiSpaceScanObjectVisitor {
public:
explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
@@ -355,20 +316,30 @@
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
}
- t.NewTiming("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune and has no mod union table (the
- // non-moving space when the bump pointer space only collection is
- // enabled,) then we need to scan its live bitmap or dirty cards as roots
- // (including the objects on the live stack which have just marked
- // in the live bitmap above in MarkAllocStackAsLive().)
- if (immune_region_.ContainsSpace(space) &&
- heap_->FindModUnionTableFromSpace(space) == nullptr) {
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
- accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
- if (kUseRememberedSet) {
- DCHECK(rem_set != nullptr);
+ // If the space is immune then we need to mark the references to other spaces.
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ // TODO: Improve naming.
+ TimingLogger::ScopedTiming t2(
+ space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable",
+ GetTimings());
+ table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
+ } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
+ // If the space has no mod union table (the non-moving space and main spaces when the bump
+ // pointer space only collection is enabled,) then we need to scan its live bitmap or dirty
+ // cards as roots (including the objects on the live stack which have just been marked in the live
+ // bitmap above in MarkAllocStackAsLive().)
+ DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
+ << "Space " << space->GetName() << " "
+ << "generational_=" << generational_ << " "
+ << "collect_from_space_only_=" << collect_from_space_only_;
+ accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
+ CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
+ if (rem_set != nullptr) {
+ TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
from_space_, this);
if (kIsDebugBuild) {
@@ -383,7 +354,7 @@
visitor);
}
} else {
- DCHECK(rem_set == nullptr);
+ TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
@@ -393,9 +364,10 @@
}
}
+ CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
if (is_large_object_space_immune_) {
TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
- DCHECK(generational_ && !whole_heap_collection_);
+ DCHECK(collect_from_space_only_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
// be newly added to the live set above in MarkAllocStackAsLive().
@@ -506,19 +478,20 @@
}
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
- size_t object_size = obj->SizeOf();
+ const size_t object_size = obj->SizeOf();
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
- space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ // No logic for marking the bitmap, so it must be null.
+ DCHECK(to_space_live_bitmap_ == nullptr);
} else {
bytes_promoted_ += bytes_allocated;
// Dirty the card at the destination as it may contain
@@ -526,12 +499,12 @@
// space.
GetHeap()->WriteBarrierEveryFieldOf(forward_address);
// Handle the bitmaps marking.
- accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK(!live_bitmap->Test(forward_address));
- if (!whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
DCHECK_EQ(live_bitmap, mark_bitmap);
@@ -559,12 +532,23 @@
mark_bitmap->Set(forward_address);
}
}
- DCHECK(forward_address != nullptr);
} else {
// If it's allocated after the last GC (younger), copy it to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
+ to_space_live_bitmap_->Set(forward_address);
+ }
}
- CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
+ // If it's still null, attempt to use the fallback space.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
+ CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
+ accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
+ if (bitmap != nullptr) {
+ bitmap->Set(forward_address);
+ }
+ }
++objects_moved_;
bytes_moved_ += bytes_allocated;
// Copy over the object and add it to the mark stack since we still need to update its
@@ -579,11 +563,10 @@
}
forward_address->AssertReadBarrierPointer();
}
- if (to_space_live_bitmap_ != nullptr) {
- to_space_live_bitmap_->Set(forward_address);
- }
DCHECK(to_space_->HasAddress(forward_address) ||
- (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
+ fallback_space_->HasAddress(forward_address) ||
+ (generational_ && promo_dest_space_->HasAddress(forward_address)))
+ << forward_address << "\n" << GetHeap()->DumpSpaces();
return forward_address;
}
@@ -648,7 +631,7 @@
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
- return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
+ return space != from_space_ && space != to_space_;
}
void SemiSpace::Sweep(bool swap_bitmaps) {
@@ -714,22 +697,20 @@
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- space::MallocSpace* promo_dest_space = nullptr;
accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If a bump pointer space only collection (and the promotion is
// enabled,) we delay the live-bitmap marking of promoted objects
// from MarkObject() until this function.
- promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- live_bitmap = promo_dest_space->GetLiveBitmap();
+ live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
- if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
+ if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
// obj has just been promoted. Mark the live bitmap for it,
// which is delayed from MarkObject().
DCHECK(!live_bitmap->Test(obj));
@@ -742,16 +723,12 @@
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
- if (immune_region_.ContainsObject(obj)) {
- return obj;
- }
if (from_space_->HasAddress(obj)) {
// Returns either the forwarding address or nullptr.
return GetForwardingAddressInFromSpace(obj);
- } else if (to_space_->HasAddress(obj)) {
- // Should be unlikely.
- // Already forwarded, must be marked.
- return obj;
+ } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
+ to_space_->HasAddress(obj)) {
+ return obj; // Already forwarded, must be marked.
}
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
@@ -777,9 +754,9 @@
if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
- // whole_heap_collection.
- if (!whole_heap_collection_) {
- // Enable whole_heap_collection if the bytes promoted since the
+ // collect_from_space_only_.
+ if (collect_from_space_only_) {
+ // Disable collect_from_space_only_ if the bytes promoted since the
// last whole heap collection or the large object bytes
// allocated exceeds a threshold.
bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
@@ -792,14 +769,14 @@
current_los_bytes_allocated >=
last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
} else {
// Reset the counters.
bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
large_object_bytes_allocated_at_last_whole_heap_collection_ =
GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
- whole_heap_collection_ = false;
+ collect_from_space_only_ = true;
}
}
// Clear all of the spaces' mark bitmaps.
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 7f6d1dc..71a83f2 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -244,9 +244,14 @@
// large objects were allocated at the last whole heap collection.
uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
- // Used for the generational mode. When true, collect the whole
- // heap. When false, collect only the bump pointer spaces.
- bool whole_heap_collection_;
+ // Used for generational mode. When true, we only collect the from_space_.
+ bool collect_from_space_only_;
+
+ // The space which we are promoting into, only used for GSS.
+ space::ContinuousMemMapAllocSpace* promo_dest_space_;
+
+ // The space which we copy to if the to_space_ is full.
+ space::ContinuousMemMapAllocSpace* fallback_space_;
// How many objects and bytes we moved, used so that we don't need to Get the size of the
// to_space_ when calculating how many objects and bytes we freed.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 292173d..6d8190e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -96,6 +96,7 @@
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
+static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
@@ -179,16 +180,16 @@
running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
use_tlab_(use_tlab),
main_space_backup_(nullptr),
- min_interval_homogeneous_space_compaction_by_oom_(min_interval_homogeneous_space_compaction_by_oom),
+ min_interval_homogeneous_space_compaction_by_oom_(
+ min_interval_homogeneous_space_compaction_by_oom),
last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
- const bool is_zygote = Runtime::Current()->IsZygote();
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
- if (!is_zygote) {
+ if (!Runtime::Current()->IsZygote()) {
large_object_threshold_ = kDefaultLargeObjectThreshold;
// Background compaction is currently not supported for command line runs.
if (background_collector_type_ != foreground_collector_type_) {
@@ -197,7 +198,6 @@
}
}
ChangeCollector(desired_collector_type_);
-
live_bitmap_.reset(new accounting::HeapBitmap(this));
mark_bitmap_.reset(new accounting::HeapBitmap(this));
// Requested begin for the alloc space, to follow the mapped image and oat files
@@ -213,130 +213,117 @@
CHECK_GT(oat_file_end_addr, image_space->End());
requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
}
-
/*
requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+- nonmoving space (kNonMovingSpaceCapacity) +-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
- +- main alloc space (capacity_) +-
+ +-main alloc space / bump space 1 (capacity_) +-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
- +- main alloc space 1 (capacity_) +-
+ +-????????????????????????????????????????????+-
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+ +-main alloc space2 / bump space 2 (capacity_)+-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
*/
- bool create_backup_main_space =
+ bool support_homogeneous_space_compaction =
background_collector_type == gc::kCollectorTypeHomogeneousSpaceCompact ||
use_homogeneous_space_compaction_for_oom;
- if (is_zygote) {
- // Reserve the address range before we create the non moving space to make sure bitmaps don't
- // take it.
- std::string error_str;
- MemMap* main_space_map = MemMap::MapAnonymous(
- kMemMapSpaceName[0], requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity_,
- PROT_READ | PROT_WRITE, true, &error_str);
- CHECK(main_space_map != nullptr) << error_str;
- MemMap* main_space_1_map = nullptr;
- // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main space map.
- if (create_backup_main_space) {
- main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], main_space_map->End(), capacity_,
- PROT_READ | PROT_WRITE, true, &error_str);
- if (main_space_1_map == nullptr) {
- LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
- << error_str;
- }
- }
+ // We may use the same space as the main space for the non moving space if we don't need to compact
+ // from the main space.
+ // This is not the case if we support homogeneous compaction or have a moving background
+ // collector type.
+ const bool is_zygote = Runtime::Current()->IsZygote();
+ bool separate_non_moving_space = is_zygote ||
+ support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
+ IsMovingGc(background_collector_type_);
+ if (foreground_collector_type == kCollectorTypeGSS) {
+ separate_non_moving_space = false;
+ }
+ std::unique_ptr<MemMap> main_mem_map_1;
+ std::unique_ptr<MemMap> main_mem_map_2;
+ byte* request_begin = requested_alloc_space_begin;
+ if (request_begin != nullptr && separate_non_moving_space) {
+ request_begin += kNonMovingSpaceCapacity;
+ }
+ std::string error_str;
+ std::unique_ptr<MemMap> non_moving_space_mem_map;
+ if (separate_non_moving_space) {
+ // Reserve the non moving mem map before the other two since it needs to be at a specific
+ // address.
+ non_moving_space_mem_map.reset(
+ MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
+ kNonMovingSpaceCapacity, PROT_READ | PROT_WRITE, true, &error_str));
+ CHECK(non_moving_space_mem_map != nullptr) << error_str;
+ }
+ // Attempt to create 2 mem maps at or after the requested begin.
+ main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
+ PROT_READ | PROT_WRITE, &error_str));
+ CHECK(main_mem_map_1.get() != nullptr) << error_str;
+ if (support_homogeneous_space_compaction ||
+ background_collector_type_ == kCollectorTypeSS ||
+ foreground_collector_type_ == kCollectorTypeSS) {
+ main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
+ capacity_, PROT_READ | PROT_WRITE,
+ &error_str));
+ CHECK(main_mem_map_2.get() != nullptr) << error_str;
+ }
+ // Create the non moving space first so that bitmaps don't take up the address range.
+ if (separate_non_moving_space) {
// Non moving space is always dlmalloc since we currently don't have support for multiple
// active rosalloc spaces.
- non_moving_space_ = space::DlMallocSpace::Create(
- "zygote / non moving space", initial_size, kNonMovingSpaceCapacity,
- kNonMovingSpaceCapacity, requested_alloc_space_begin, false);
+ const size_t size = non_moving_space_mem_map->Size();
+ non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
+ non_moving_space_mem_map.release(), "zygote / non moving space", initial_size,
+ initial_size, size, size, false);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
- CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
- if (main_space_1_map != nullptr) {
- const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
- main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
- growth_limit_, capacity_, name, true);
- }
+ CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
+ << requested_alloc_space_begin;
+ AddSpace(non_moving_space_);
+ }
+ // Create other spaces based on whether or not we have a moving GC.
+ if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
+ // Create bump pointer spaces.
+ // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
+ // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
+ bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
+ main_mem_map_1.release());
+ CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
+ AddSpace(bump_pointer_space_);
+ temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+ main_mem_map_2.release());
+ CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
+ AddSpace(temp_space_);
+ CHECK(separate_non_moving_space);
} else {
- std::string error_str;
- byte* request_begin = requested_alloc_space_begin;
- if (request_begin == nullptr) {
- // Disable homogeneous space compaction since we don't have an image.
- create_backup_main_space = false;
- }
- MemMap* main_space_1_map = nullptr;
- if (create_backup_main_space) {
- request_begin += kNonMovingSpaceCapacity;
- // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main space map.
- main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], request_begin + capacity_,
- capacity_, PROT_READ | PROT_WRITE, true, &error_str);
- if (main_space_1_map == nullptr) {
- LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
- << error_str;
- request_begin = requested_alloc_space_begin;
- }
- }
- MemMap* main_space_map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, true, &error_str);
- CHECK(main_space_map != nullptr) << error_str;
- // Introduce a seperate non moving space.
- if (main_space_1_map != nullptr) {
- // Do this before creating the main malloc space to prevent bitmaps from being placed here.
- non_moving_space_ = space::DlMallocSpace::Create(
- "non moving space", kDefaultInitialSize, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
- requested_alloc_space_begin, false);
- non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
- }
- // Create the main free list space, which doubles as the non moving space. We can do this since
- // non zygote means that we won't have any background compaction.
- CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
- if (main_space_1_map != nullptr) {
- const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
- main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
- growth_limit_, capacity_, name, true);
- CHECK(main_space_backup_ != nullptr);
- } else {
+ CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
+ CHECK(main_space_ != nullptr);
+ AddSpace(main_space_);
+ if (!separate_non_moving_space) {
non_moving_space_ = main_space_;
+ CHECK(!non_moving_space_->CanMoveObjects());
+ }
+ if (foreground_collector_type_ == kCollectorTypeGSS) {
+ CHECK_EQ(foreground_collector_type_, background_collector_type_);
+ // Create bump pointer spaces instead of a backup space.
+ main_mem_map_2.release();
+ bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
+ kGSSBumpPointerSpaceCapacity, nullptr);
+ CHECK(bump_pointer_space_ != nullptr);
+ AddSpace(bump_pointer_space_);
+ temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
+ kGSSBumpPointerSpaceCapacity, nullptr);
+ CHECK(temp_space_ != nullptr);
+ AddSpace(temp_space_);
+ } else if (main_mem_map_2.get() != nullptr) {
+ const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
+ growth_limit_, capacity_, name, true));
+ CHECK(main_space_backup_.get() != nullptr);
+ // Add the space so it's accounted for in the heap_begin and heap_end.
+ AddSpace(main_space_backup_.get());
}
}
CHECK(non_moving_space_ != nullptr);
-
- // We need to create the bump pointer if the foreground collector is a compacting GC. We only
- // create the bump pointer space if we are not a moving foreground collector but have a moving
- // background collector since the heap transition code will create the temp space by recycling
- // the bitmap from the main space.
- if (kMovingCollector &&
- (IsMovingGc(foreground_collector_type_) || IsMovingGc(background_collector_type_))) {
- // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
- // Divide by 2 for a temporary fix for reducing virtual memory usage.
- const size_t bump_pointer_space_capacity = capacity_ / 2;
- bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
- bump_pointer_space_capacity, nullptr);
- CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
- AddSpace(bump_pointer_space_);
- temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
- bump_pointer_space_capacity, nullptr);
- CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
- AddSpace(temp_space_);
- }
- if (non_moving_space_ != main_space_) {
- AddSpace(non_moving_space_);
- }
- if (main_space_backup_ != nullptr) {
- AddSpace(main_space_backup_);
- } else {
- const char* disable_msg = "Disabling homogenous space compact due to no backup main space";
- if (background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact) {
- background_collector_type_ = collector_type_;
- LOG(WARNING) << disable_msg;
- } else if (use_homogeneous_space_compaction_for_oom_) {
- LOG(WARNING) << disable_msg;
- }
- use_homogeneous_space_compaction_for_oom_ = false;
- }
- if (main_space_ != nullptr) {
- AddSpace(main_space_);
- }
-
+ CHECK(!non_moving_space_->CanMoveObjects());
// Allocate the large object space.
if (kUseFreeListSpaceForLOS) {
large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
@@ -345,19 +332,19 @@
}
CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
AddSpace(large_object_space_);
-
// Compute heap capacity. Continuous spaces are sorted in order of Begin().
CHECK(!continuous_spaces_.empty());
-
// Relies on the spaces being sorted.
byte* heap_begin = continuous_spaces_.front()->Begin();
byte* heap_end = continuous_spaces_.back()->Limit();
size_t heap_capacity = heap_end - heap_begin;
-
+ // Remove the main backup space since it slows down the GC to have unused extra spaces.
+ if (main_space_backup_.get() != nullptr) {
+ RemoveSpace(main_space_backup_.get());
+ }
// Allocate the card table.
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
CHECK(card_table_.get() != NULL) << "Failed to create card table";
-
// Card cache for now since it makes it easier for us to update the references to the copying
// spaces.
accounting::ModUnionTable* mod_union_table =
@@ -365,17 +352,14 @@
GetImageSpace());
CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
AddModUnionTable(mod_union_table);
-
if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
accounting::RememberedSet* non_moving_space_rem_set =
new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
AddRememberedSet(non_moving_space_rem_set);
}
-
- // TODO: Count objects in the image space here.
+ // TODO: Count objects in the image space here?
num_bytes_allocated_.StoreRelaxed(0);
-
mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
kDefaultMarkStackSize));
const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
@@ -383,7 +367,6 @@
"allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
live_stack_.reset(accounting::ObjectStack::Create(
"live stack", max_allocation_stack_size_, alloc_stack_capacity));
-
// It's still too early to take a lock because there are no threads yet, but we can create locks
// now. We don't create it earlier to make it clear that you can't use locks during heap
// initialization.
@@ -392,13 +375,11 @@
*gc_complete_lock_));
heap_trim_request_lock_ = new Mutex("Heap trim request lock");
last_gc_size_ = GetBytesAllocated();
-
if (ignore_max_footprint_) {
SetIdealFootprint(std::numeric_limits<size_t>::max());
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
CHECK_NE(max_allowed_footprint_, 0U);
-
// Create our garbage collectors.
for (size_t i = 0; i < 2; ++i) {
const bool concurrent = i != 0;
@@ -417,26 +398,38 @@
mark_compact_collector_ = new collector::MarkCompact(this);
garbage_collectors_.push_back(mark_compact_collector_);
}
-
- if (GetImageSpace() != nullptr && main_space_ != nullptr) {
- // Check that there's no gap between the image space and the main space so that the immune
- // region won't break (eg. due to a large object allocated in the gap).
- bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(), main_space_->GetMemMap());
+ if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
+ // Check that there's no gap between the image space and the non moving space so that the
+ // immune region won't break (eg. due to a large object allocated in the gap).
+ bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
+ non_moving_space_->GetMemMap());
if (!no_gap) {
MemMap::DumpMaps(LOG(ERROR));
LOG(FATAL) << "There's a gap between the image space and the main space";
}
}
-
if (running_on_valgrind_) {
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
}
-
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() exiting";
}
}
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
+ int prot_flags, std::string* out_error_str) {
+ while (true) {
+ MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity, prot_flags, true,
+ out_error_str);
+ if (map != nullptr || request_begin == nullptr) {
+ return map;
+ }
+ // Retry a second time with no specified request begin.
+ request_begin = nullptr;
+ }
+ return nullptr;
+}
+
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
size_t growth_limit, size_t capacity,
const char* name, bool can_move_objects) {
@@ -474,7 +467,8 @@
if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
// After the zygote we want this to be false if we don't have background compaction enabled so
// that getting primitive array elements is faster.
- can_move_objects = !have_zygote_space_;
+ // We never have homogeneous compaction with GSS and don't need a space with movable objects.
+ can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
}
if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
RemoveRememberedSet(main_space_);
@@ -675,18 +669,11 @@
}
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
- space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
- space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
- // This is just logic to handle a case of either not having a rosalloc or dlmalloc space.
+ space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
+ space::ContinuousSpace* space2 = non_moving_space_;
// TODO: Generalize this to n bitmaps?
- if (space1 == nullptr) {
- DCHECK(space2 != nullptr);
- space1 = space2;
- }
- if (space2 == nullptr) {
- DCHECK(space1 != nullptr);
- space2 = space1;
- }
+ CHECK(space1 != nullptr);
+ CHECK(space2 != nullptr);
MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
large_object_space_->GetLiveBitmap(), stack);
}
@@ -705,7 +692,7 @@
accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
if (live_bitmap != nullptr) {
- DCHECK(mark_bitmap != nullptr);
+ CHECK(mark_bitmap != nullptr);
live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
}
@@ -716,7 +703,7 @@
return a->Begin() < b->Begin();
});
} else {
- DCHECK(space->IsDiscontinuousSpace());
+ CHECK(space->IsDiscontinuousSpace());
space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
@@ -771,15 +758,11 @@
}
void Heap::RegisterGCAllocation(size_t bytes) {
- if (this != nullptr) {
- gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
- }
+ gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
}
void Heap::RegisterGCDeAllocation(size_t bytes) {
- if (this != nullptr) {
- gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
- }
+ gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
}
void Heap::DumpGcPerformanceInfo(std::ostream& os) {
@@ -789,28 +772,9 @@
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
for (auto& collector : garbage_collectors_) {
- const CumulativeLogger& logger = collector->GetCumulativeTimings();
- const size_t iterations = logger.GetIterations();
- const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
- if (iterations != 0 && pause_histogram.SampleSize() != 0) {
- os << ConstDumpable<CumulativeLogger>(logger);
- const uint64_t total_ns = logger.GetTotalNs();
- const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
- double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
- const uint64_t freed_bytes = collector->GetTotalFreedBytes();
- const uint64_t freed_objects = collector->GetTotalFreedObjects();
- Histogram<uint64_t>::CumulativeData cumulative_data;
- pause_histogram.CreateHistogram(&cumulative_data);
- pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
- os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
- << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
- << collector->GetName() << " freed: " << freed_objects
- << " objects with total size " << PrettySize(freed_bytes) << "\n"
- << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
- << PrettySize(freed_bytes / seconds) << "/s\n";
- total_duration += total_ns;
- total_paused_time += total_pause_ns;
- }
+ total_duration += collector->GetCumulativeTimings().GetTotalNs();
+ total_paused_time += collector->GetTotalPausedTimeNs();
+ collector->DumpPerformanceInfo(os);
collector->ResetMeasurements();
}
uint64_t allocation_time =
@@ -903,12 +867,15 @@
<< " free bytes";
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (total_bytes_free >= byte_count) {
- space::MallocSpace* space = nullptr;
+ space::AllocSpace* space = nullptr;
if (allocator_type == kAllocatorTypeNonMoving) {
space = non_moving_space_;
} else if (allocator_type == kAllocatorTypeRosAlloc ||
allocator_type == kAllocatorTypeDlMalloc) {
space = main_space_;
+ } else if (allocator_type == kAllocatorTypeBumpPointer ||
+ allocator_type == kAllocatorTypeTLAB) {
+ space = bump_pointer_space_;
}
if (space != nullptr) {
space->LogFragmentationAllocFailure(oss, byte_count);
@@ -1125,7 +1092,13 @@
return false;
}
-void Heap::DumpSpaces(std::ostream& stream) {
+std::string Heap::DumpSpaces() const {
+ std::ostringstream oss;
+ DumpSpaces(oss);
+ return oss.str();
+}
+
+void Heap::DumpSpaces(std::ostream& stream) const {
for (const auto& space : continuous_spaces_) {
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
@@ -1143,9 +1116,10 @@
}
void Heap::VerifyObjectBody(mirror::Object* obj) {
- if (this == nullptr && verify_object_mode_ == kVerifyObjectModeDisabled) {
+ if (verify_object_mode_ == kVerifyObjectModeDisabled) {
return;
}
+
// Ignore early dawn of the universe verifications.
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
@@ -1158,10 +1132,7 @@
if (verify_object_mode_ > kVerifyObjectModeFast) {
// Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
- if (!IsLiveObjectLocked(obj)) {
- DumpSpaces();
- LOG(FATAL) << "Object is dead: " << obj;
- }
+ CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
}
}
@@ -1512,15 +1483,18 @@
tl->SuspendAll();
uint64_t start_time = NanoTime();
// Launch compaction.
- space::MallocSpace* to_space = main_space_backup_;
+ space::MallocSpace* to_space = main_space_backup_.release();
space::MallocSpace* from_space = main_space_;
to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const uint64_t space_size_before_compaction = from_space->Size();
+ AddSpace(to_space);
Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
// Leave as prot read so that we can still run ROSAlloc verification on this space.
from_space->GetMemMap()->Protect(PROT_READ);
const uint64_t space_size_after_compaction = to_space->Size();
- std::swap(main_space_, main_space_backup_);
+ main_space_ = to_space;
+ main_space_backup_.reset(from_space);
+ RemoveSpace(from_space);
SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
// Update performed homogeneous space compaction count.
count_performed_homogeneous_space_compaction_++;
@@ -1587,17 +1561,38 @@
}
tl->SuspendAll();
switch (collector_type) {
- case kCollectorTypeSS:
- // Fall-through.
- case kCollectorTypeGSS: {
+ case kCollectorTypeSS: {
if (!IsMovingGc(collector_type_)) {
+ // Create the bump pointer space from the backup space.
+ CHECK(main_space_backup_ != nullptr);
+ std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
// We are transitioning from non moving GC -> moving GC, since we copied from the bump
// pointer space last transition it will be protected.
- bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ CHECK(mem_map != nullptr);
+ mem_map->Protect(PROT_READ | PROT_WRITE);
+ bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
+ mem_map.release());
+ AddSpace(bump_pointer_space_);
Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+ // Use the now empty main space mem map for the bump pointer temp space.
+ mem_map.reset(main_space_->ReleaseMemMap());
+ // Unset the pointers just in case.
+ if (dlmalloc_space_ == main_space_) {
+ dlmalloc_space_ = nullptr;
+ } else if (rosalloc_space_ == main_space_) {
+ rosalloc_space_ = nullptr;
+ }
// Remove the main space so that we don't try to trim it; this doesn't work for debug
// builds since RosAlloc attempts to read the magic number from a protected page.
RemoveSpace(main_space_);
+ RemoveRememberedSet(main_space_);
+ delete main_space_; // Delete the space since it has been removed.
+ main_space_ = nullptr;
+ RemoveRememberedSet(main_space_backup_.get());
+ main_space_backup_.reset(nullptr); // Deletes the space.
+ temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+ mem_map.release());
+ AddSpace(temp_space_);
}
break;
}
@@ -1605,10 +1600,32 @@
// Fall through.
case kCollectorTypeCMS: {
if (IsMovingGc(collector_type_)) {
+ CHECK(temp_space_ != nullptr);
+ std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+ RemoveSpace(temp_space_);
+ temp_space_ = nullptr;
+ mem_map->Protect(PROT_READ | PROT_WRITE);
+ CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
+ mem_map->Size());
+ mem_map.release();
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
AddSpace(main_space_);
- main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+ mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+ RemoveSpace(bump_pointer_space_);
+ bump_pointer_space_ = nullptr;
+ const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+ // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
+ if (kIsDebugBuild && kUseRosAlloc) {
+ mem_map->Protect(PROT_READ | PROT_WRITE);
+ }
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
+ mem_map->Size(), mem_map->Size(),
+ name, true));
+ if (kIsDebugBuild && kUseRosAlloc) {
+ mem_map->Protect(PROT_NONE);
+ }
+ mem_map.release();
}
break;
}
@@ -1811,6 +1828,7 @@
// there.
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
// Change the collector to the post zygote one.
+ bool same_space = non_moving_space_ == main_space_;
if (kCompactZygote) {
DCHECK(semi_space_collector_ != nullptr);
// Temporarily disable rosalloc verification because the zygote
@@ -1877,6 +1895,11 @@
space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
low_memory_mode_,
&non_moving_space_);
+ CHECK(!non_moving_space_->CanMoveObjects());
+ if (same_space) {
+ main_space_ = non_moving_space_;
+ SetSpaceAsDefault(main_space_);
+ }
delete old_alloc_space;
CHECK(zygote_space != nullptr) << "Failed creating zygote space";
AddSpace(zygote_space);
@@ -2178,7 +2201,7 @@
LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
}
- // Attmept to find the class inside of the recently freed objects.
+ // Attempt to find the class inside of the recently freed objects.
space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
if (ref_space != nullptr && ref_space->IsMallocSpace()) {
space::MallocSpace* space = ref_space->AsMallocSpace();
@@ -2353,7 +2376,7 @@
accounting::RememberedSet* remembered_set = table_pair.second;
remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
}
- DumpSpaces();
+ DumpSpaces(LOG(ERROR));
}
return visitor.GetFailureCount();
}
@@ -2470,12 +2493,7 @@
visitor(*it);
}
}
-
- if (visitor.Failed()) {
- DumpSpaces();
- return false;
- }
- return true;
+ return !visitor.Failed();
}
void Heap::SwapStacks(Thread* self) {
@@ -2496,6 +2514,17 @@
}
}
+void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+ if (kIsDebugBuild) {
+ if (rosalloc_space_ != nullptr) {
+ rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
+ }
+ if (bump_pointer_space_ != nullptr) {
+ bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
+ }
+ }
+}
+
void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
if (kIsDebugBuild) {
if (bump_pointer_space_ != nullptr) {
@@ -2573,9 +2602,8 @@
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SwapStacks(self);
// Sort the live stack so that we can quickly binary search it later.
- if (!VerifyMissingCardMarks()) {
- LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
- }
+ CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
+ << " missing card mark verification failed\n" << DumpSpaces();
SwapStacks(self);
}
if (verify_mod_union_table_) {
@@ -3089,6 +3117,7 @@
CHECK(space != nullptr);
auto it = remembered_sets_.find(space);
CHECK(it != remembered_sets_.end());
+ delete it->second;
remembered_sets_.erase(it);
CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}
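
The heap.cc changes above replace the separate zygote and non-zygote setup paths with up to three MemMaps reserved through the new Heap::MapAnonymousPreferredAddress helper, which asks for a preferred base address and retries with no address hint if that fails. Below is a minimal standalone sketch of that reserve-then-retry pattern using raw mmap rather than ART's MemMap; the helper name, the 32 MB capacity, and the 0x70000000 hint are illustrative assumptions, not part of the patch.

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

// Sketch (not ART's MemMap API): ask the kernel for an anonymous mapping at a
// preferred address; if it lands somewhere else, drop it and retry with no
// address hint, mirroring Heap::MapAnonymousPreferredAddress above.
static void* ReserveAnonymousPreferred(void* preferred, size_t capacity) {
  void* addr = mmap(preferred, capacity, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) {
    return nullptr;
  }
  if (preferred != nullptr && addr != preferred) {
    // The hint was not honoured; fall back to any address.
    munmap(addr, capacity);
    addr = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  }
  return addr == MAP_FAILED ? nullptr : addr;
}

int main() {
  const size_t capacity = 32 * 1024 * 1024;  // Same order as kGSSBumpPointerSpaceCapacity.
  void* hint = reinterpret_cast<void*>(0x70000000);
  void* map = ReserveAnonymousPreferred(hint, capacity);
  if (map != nullptr) {
    std::printf("reserved %zu bytes at %p (hint %p)\n", capacity, map, hint);
    munmap(map, capacity);
  }
  return 0;
}
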
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b207953..1851662 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -444,8 +444,7 @@
bool fail_ok) const;
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
- void DumpForSigQuit(std::ostream& os);
-
+ void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Do a pending heap transition or trim.
void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
@@ -456,6 +455,7 @@
void RevokeThreadLocalBuffers(Thread* thread);
void RevokeRosAllocThreadLocalBuffers(Thread* thread);
void RevokeAllThreadLocalBuffers();
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
void RosAllocVerification(TimingLogger* timings, const char* name)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -539,7 +539,8 @@
}
}
- void DumpSpaces(std::ostream& stream = LOG(INFO));
+ std::string DumpSpaces() const WARN_UNUSED;
+ void DumpSpaces(std::ostream& stream) const;
// Dump object should only be used by the signal handler.
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -572,6 +573,7 @@
accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
void AddRememberedSet(accounting::RememberedSet* remembered_set);
+ // Also deletes the remembered set.
void RemoveRememberedSet(space::Space* space);
bool IsCompilingBoot() const;
@@ -593,8 +595,13 @@
void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
+ // Create a mem map with a preferred base address.
+ static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
+ size_t capacity, int prot_flags,
+ std::string* out_error_str);
+
bool SupportHSpaceCompaction() const {
- // Returns true if we can do hspace compaction.
+ // Returns true if we can do hspace compaction
return main_space_backup_ != nullptr;
}
@@ -656,7 +663,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kGrow>
- bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
+ ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
// Returns true if the address passed in is within the address range of a continuous space.
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
@@ -1006,7 +1013,8 @@
const bool use_tlab_;
// Pointer to the space which becomes the new main space when we do homogeneous space compaction.
- space::MallocSpace* main_space_backup_;
+ // Use unique_ptr since the space is only added during the homogeneous compaction phase.
+ std::unique_ptr<space::MallocSpace> main_space_backup_;
// Minimal interval allowed between two homogeneous space compactions caused by OOM.
uint64_t min_interval_homogeneous_space_compaction_by_oom_;
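
The heap.h hunk above splits DumpSpaces() into a const std::string-returning overload (tagged WARN_UNUSED) and a stream overload, so the space dump can be appended lazily to a failing CHECK message instead of being logged unconditionally before LOG(FATAL). A small self-contained sketch of that pattern, assuming a toy heap type rather than ART's Heap:

#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Build the diagnostic text into a string so it is only materialized when the
// check actually fails.
class ToyHeap {
 public:
  void AddSpace(const std::string& name) { spaces_.push_back(name); }

  std::string DumpSpaces() const {
    std::ostringstream oss;
    DumpSpaces(oss);
    return oss.str();
  }

  void DumpSpaces(std::ostream& os) const {
    for (const auto& s : spaces_) {
      os << "space: " << s << "\n";
    }
  }

 private:
  std::vector<std::string> spaces_;
};

int main() {
  ToyHeap heap;
  heap.AddSpace("non moving space");
  heap.AddSpace("main space");
  bool object_is_live = true;  // Stand-in for IsLiveObjectLocked(obj).
  if (!object_is_live) {
    std::cerr << "Object is dead\n" << heap.DumpSpaces();
    std::abort();
  }
  return 0;
}
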
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 8b35692..fb6bbac 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -258,6 +258,14 @@
return true;
}
+void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
+ size_t /* failed_alloc_bytes */) {
+ size_t max_contiguous_allocation = Limit() - End();
+ os << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes)";
+ // Caller's job to print failed_alloc_bytes.
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index feee34f..71b15ba 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -151,6 +151,9 @@
bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
}
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index a87aa89..1d10af2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -16,6 +16,8 @@
#include "image_space.h"
+#include <random>
+
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "base/scoped_flock.h"
@@ -43,6 +45,26 @@
live_bitmap_.reset(live_bitmap);
}
+static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
+ CHECK_ALIGNED(min_delta, kPageSize);
+ CHECK_ALIGNED(max_delta, kPageSize);
+ CHECK_LT(min_delta, max_delta);
+
+ std::default_random_engine generator;
+ generator.seed(NanoTime() * getpid());
+ std::uniform_int_distribution<int32_t> distribution(min_delta, max_delta);
+ int32_t r = distribution(generator);
+ if (r % 2 == 0) {
+ r = RoundUp(r, kPageSize);
+ } else {
+ r = RoundDown(r, kPageSize);
+ }
+ CHECK_LE(min_delta, r);
+ CHECK_GE(max_delta, r);
+ CHECK_ALIGNED(r, kPageSize);
+ return r;
+}
+
static bool GenerateImage(const std::string& image_filename, std::string* error_msg) {
const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
std::vector<std::string> boot_class_path;
@@ -73,12 +95,13 @@
Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
- arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS));
+ int32_t base_offset = ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA,
+ ART_BASE_ADDRESS_MAX_DELTA);
+ LOG(INFO) << "Using an offset of 0x" << std::hex << base_offset << " from default "
+ << "art base address of 0x" << std::hex << ART_BASE_ADDRESS;
+ arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS + base_offset));
- if (kIsTargetBuild) {
- arg_vector.push_back("--image-classes-zip=/system/framework/framework.jar");
- arg_vector.push_back("--image-classes=preloaded-classes");
- } else {
+ if (!kIsTargetBuild) {
arg_vector.push_back("--host");
}
@@ -94,86 +117,284 @@
bool ImageSpace::FindImageFilename(const char* image_location,
const InstructionSet image_isa,
- std::string* image_filename,
- bool *is_system) {
+ std::string* system_filename,
+ bool* has_system,
+ std::string* cache_filename,
+ bool* dalvik_cache_exists,
+ bool* has_cache) {
+ *has_system = false;
+ *has_cache = false;
// image_location = /system/framework/boot.art
// system_image_location = /system/framework/<image_isa>/boot.art
std::string system_image_filename(GetSystemImageFilename(image_location, image_isa));
if (OS::FileExists(system_image_filename.c_str())) {
- *image_filename = system_image_filename;
- *is_system = true;
- return true;
+ *system_filename = system_image_filename;
+ *has_system = true;
}
- const std::string dalvik_cache = GetDalvikCacheOrDie(GetInstructionSetString(image_isa));
+ bool have_android_data = false;
+ *dalvik_cache_exists = false;
+ std::string dalvik_cache;
+ GetDalvikCache(GetInstructionSetString(image_isa), true, &dalvik_cache,
+ &have_android_data, dalvik_cache_exists);
- // Always set output location even if it does not exist,
- // so that the caller knows where to create the image.
- //
- // image_location = /system/framework/boot.art
- // *image_filename = /data/dalvik-cache/<image_isa>/boot.art
- *image_filename = GetDalvikCacheFilenameOrDie(image_location, dalvik_cache.c_str());
- *is_system = false;
- return OS::FileExists(image_filename->c_str());
+ if (have_android_data && *dalvik_cache_exists) {
+ // Always set output location even if it does not exist,
+ // so that the caller knows where to create the image.
+ //
+ // image_location = /system/framework/boot.art
+ // *image_filename = /data/dalvik-cache/<image_isa>/boot.art
+ std::string error_msg;
+ if (!GetDalvikCacheFilename(image_location, dalvik_cache.c_str(), cache_filename, &error_msg)) {
+ LOG(WARNING) << error_msg;
+ return *has_system;
+ }
+ *has_cache = OS::FileExists(cache_filename->c_str());
+ }
+ return *has_system || *has_cache;
+}
+
+static bool ReadSpecificImageHeader(const char* filename, ImageHeader* image_header) {
+ std::unique_ptr<File> image_file(OS::OpenFileForReading(filename));
+ if (image_file.get() == nullptr) {
+ return false;
+ }
+ const bool success = image_file->ReadFully(image_header, sizeof(ImageHeader));
+ if (!success || !image_header->IsValid()) {
+ return false;
+ }
+ return true;
+}
+
+// Relocate the image at image_location to dest_filename and relocate it by a random amount.
+static bool RelocateImage(const char* image_location, const char* dest_filename,
+ InstructionSet isa, std::string* error_msg) {
+ std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
+
+ std::string input_image_location_arg("--input-image-location=");
+ input_image_location_arg += image_location;
+
+ std::string output_image_filename_arg("--output-image-file=");
+ output_image_filename_arg += dest_filename;
+
+ std::string input_oat_location_arg("--input-oat-location=");
+ input_oat_location_arg += ImageHeader::GetOatLocationFromImageLocation(image_location);
+
+ std::string output_oat_filename_arg("--output-oat-file=");
+ output_oat_filename_arg += ImageHeader::GetOatLocationFromImageLocation(dest_filename);
+
+ std::string instruction_set_arg("--instruction-set=");
+ instruction_set_arg += GetInstructionSetString(isa);
+
+ std::string base_offset_arg("--base-offset-delta=");
+ StringAppendF(&base_offset_arg, "%d", ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA,
+ ART_BASE_ADDRESS_MAX_DELTA));
+
+ std::vector<std::string> argv;
+ argv.push_back(patchoat);
+
+ argv.push_back(input_image_location_arg);
+ argv.push_back(output_image_filename_arg);
+
+ argv.push_back(input_oat_location_arg);
+ argv.push_back(output_oat_filename_arg);
+
+ argv.push_back(instruction_set_arg);
+ argv.push_back(base_offset_arg);
+
+ std::string command_line(Join(argv, ' '));
+ LOG(INFO) << "RelocateImage: " << command_line;
+ return Exec(argv, error_msg);
+}
+
+static ImageHeader* ReadSpecificImageHeaderOrDie(const char* filename) {
+ std::unique_ptr<ImageHeader> hdr(new ImageHeader);
+ if (!ReadSpecificImageHeader(filename, hdr.get())) {
+ LOG(FATAL) << "Unable to read image header for " << filename;
+ return nullptr;
+ }
+ return hdr.release();
}
ImageHeader* ImageSpace::ReadImageHeaderOrDie(const char* image_location,
const InstructionSet image_isa) {
- std::string image_filename;
- bool is_system = false;
- if (FindImageFilename(image_location, image_isa, &image_filename, &is_system)) {
- std::unique_ptr<File> image_file(OS::OpenFileForReading(image_filename.c_str()));
- std::unique_ptr<ImageHeader> image_header(new ImageHeader);
- const bool success = image_file->ReadFully(image_header.get(), sizeof(ImageHeader));
- if (!success || !image_header->IsValid()) {
- LOG(FATAL) << "Invalid Image header for: " << image_filename;
- return nullptr;
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename;
+ bool has_cache = false;
+ bool dalvik_cache_exists = false;
+ if (FindImageFilename(image_location, image_isa, &system_filename, &has_system,
+ &cache_filename, &dalvik_cache_exists, &has_cache)) {
+ if (Runtime::Current()->ShouldRelocate()) {
+ if (has_system && has_cache) {
+ std::unique_ptr<ImageHeader> sys_hdr(new ImageHeader);
+ std::unique_ptr<ImageHeader> cache_hdr(new ImageHeader);
+ if (!ReadSpecificImageHeader(system_filename.c_str(), sys_hdr.get())) {
+ LOG(FATAL) << "Unable to read image header for " << image_location << " at "
+ << system_filename;
+ return nullptr;
+ }
+ if (!ReadSpecificImageHeader(cache_filename.c_str(), cache_hdr.get())) {
+ LOG(FATAL) << "Unable to read image header for " << image_location << " at "
+ << cache_filename;
+ return nullptr;
+ }
+ if (sys_hdr->GetOatChecksum() != cache_hdr->GetOatChecksum()) {
+ LOG(FATAL) << "Unable to find a relocated version of image file " << image_location;
+ return nullptr;
+ }
+ return cache_hdr.release();
+ } else if (!has_cache) {
+ LOG(FATAL) << "Unable to find a relocated version of image file " << image_location;
+ return nullptr;
+ } else if (!has_system && has_cache) {
+ // This can probably just use the cache copy.
+ return ReadSpecificImageHeaderOrDie(cache_filename.c_str());
+ }
+ } else {
+ // We don't want to relocate, so just pick the appropriate image if we have it and return.
+ if (has_system && has_cache) {
+ // We want the cache if the checksum matches, otherwise the system.
+ std::unique_ptr<ImageHeader> system(ReadSpecificImageHeaderOrDie(system_filename.c_str()));
+ std::unique_ptr<ImageHeader> cache(ReadSpecificImageHeaderOrDie(cache_filename.c_str()));
+ if (system.get() == nullptr ||
+ (cache.get() != nullptr && cache->GetOatChecksum() == system->GetOatChecksum())) {
+ return cache.release();
+ } else {
+ return system.release();
+ }
+ } else if (has_system) {
+ return ReadSpecificImageHeaderOrDie(system_filename.c_str());
+ } else if (has_cache) {
+ return ReadSpecificImageHeaderOrDie(cache_filename.c_str());
+ }
}
-
- return image_header.release();
}
LOG(FATAL) << "Unable to find image file for: " << image_location;
return nullptr;
}
+static bool ChecksumsMatch(const char* image_a, const char* image_b) {
+ ImageHeader hdr_a;
+ ImageHeader hdr_b;
+ return ReadSpecificImageHeader(image_a, &hdr_a) && ReadSpecificImageHeader(image_b, &hdr_b)
+ && hdr_a.GetOatChecksum() == hdr_b.GetOatChecksum();
+}
+
ImageSpace* ImageSpace::Create(const char* image_location,
const InstructionSet image_isa) {
- std::string image_filename;
std::string error_msg;
- bool is_system = false;
- const bool found_image = FindImageFilename(image_location, image_isa, &image_filename,
- &is_system);
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename;
+ bool has_cache = false;
+ bool dalvik_cache_exists = false;
+ const bool found_image = FindImageFilename(image_location, image_isa, &system_filename,
+ &has_system, &cache_filename, &dalvik_cache_exists,
+ &has_cache);
- // Note that we must not use the file descriptor associated with
- // ScopedFlock::GetFile to Init the image file. We want the file
- // descriptor (and the associated exclusive lock) to be released when
- // we leave Create.
- ScopedFlock image_lock;
- image_lock.Init(image_filename.c_str(), &error_msg);
-
+ ImageSpace* space;
+ bool relocate = Runtime::Current()->ShouldRelocate();
if (found_image) {
- ImageSpace* space = ImageSpace::Init(image_filename.c_str(), image_location, !is_system,
- &error_msg);
+ const std::string* image_filename;
+ bool is_system = false;
+ bool relocated_version_used = false;
+ if (relocate) {
+ CHECK(dalvik_cache_exists) << "Requiring relocation for image " << image_location << " "
+ << "at " << system_filename << " but we do not have any "
+ << "dalvik_cache to find/place it in.";
+ if (has_system) {
+ if (has_cache && ChecksumsMatch(system_filename.c_str(), cache_filename.c_str())) {
+ // We already have a relocated version
+ image_filename = &cache_filename;
+ relocated_version_used = true;
+ } else {
+ // We don't have a relocated version; relocate the system one and use it.
+ if (RelocateImage(image_location, cache_filename.c_str(), image_isa,
+ &error_msg)) {
+ relocated_version_used = true;
+ image_filename = &cache_filename;
+ } else {
+ LOG(FATAL) << "Unable to relocate image " << image_location << " "
+ << "from " << system_filename << " to " << cache_filename << ": "
+ << error_msg;
+ return nullptr;
+ }
+ }
+ } else {
+ CHECK(has_cache);
+ // We can just use the cache copy since it should be fine. This might or might not be relocated.
+ image_filename = &cache_filename;
+ }
+ } else {
+ if (has_system && has_cache) {
+ // Check that they have the same checksum. If they do, use the cache; otherwise use the system image.
+ if (ChecksumsMatch(system_filename.c_str(), cache_filename.c_str())) {
+ image_filename = &cache_filename;
+ relocated_version_used = true;
+ } else {
+ image_filename = &system_filename;
+ is_system = true;
+ }
+ } else if (has_system) {
+ image_filename = &system_filename;
+ is_system = true;
+ } else {
+ CHECK(has_cache);
+ image_filename = &cache_filename;
+ }
+ }
+ {
+ // Note that we must not use the file descriptor associated with
+ // ScopedFlock::GetFile to Init the image file. We want the file
+ // descriptor (and the associated exclusive lock) to be released when
+ // we leave Create.
+ ScopedFlock image_lock;
+ image_lock.Init(image_filename->c_str(), &error_msg);
+ LOG(INFO) << "Using image file " << image_filename->c_str() << " for image location "
+ << image_location;
+ // If we are in /system we can assume the image is good. We can also
+ // assume this if we are using a relocated image (i.e. image checksum
+ // matches) since this is only different by the offset. We need this to
+ // make sure that host tests continue to work.
+ space = ImageSpace::Init(image_filename->c_str(), image_location,
+ !(is_system || relocated_version_used), &error_msg);
+ }
if (space != nullptr) {
return space;
}
- // If the /system file exists, it should be up-to-date, don't try to generate it.
- // If it's not the /system file, log a warning and fall through to GenerateImage.
- if (is_system) {
- LOG(FATAL) << "Failed to load image '" << image_filename << "': " << error_msg;
+ // If the /system file exists, it should be up-to-date, don't try to generate it. Same if it is
+ // a relocated copy from something in /system (i.e. the checksums match).
+ // Otherwise, log a warning and fall through to GenerateImage.
+ if (relocated_version_used) {
+ LOG(FATAL) << "Attempted to use relocated version of " << image_location << " "
+ << "at " << cache_filename << " generated from " << system_filename << " "
+ << "but image failed to load: " << error_msg;
+ return nullptr;
+ } else if (is_system) {
+ LOG(FATAL) << "Failed to load /system image '" << *image_filename << "': " << error_msg;
return nullptr;
} else {
LOG(WARNING) << error_msg;
}
}
- CHECK(GenerateImage(image_filename, &error_msg))
- << "Failed to generate image '" << image_filename << "': " << error_msg;
- ImageSpace* space = ImageSpace::Init(image_filename.c_str(), image_location, true, &error_msg);
+ CHECK(dalvik_cache_exists) << "No place to put generated image.";
+ CHECK(GenerateImage(cache_filename, &error_msg))
+ << "Failed to generate image '" << cache_filename << "': " << error_msg;
+ {
+ // Note that we must not use the file descriptor associated with
+ // ScopedFlock::GetFile to Init the image file. We want the file
+ // descriptor (and the associated exclusive lock) to be released when
+ // we leave Create.
+ ScopedFlock image_lock;
+ image_lock.Init(cache_filename.c_str(), &error_msg);
+ space = ImageSpace::Init(cache_filename.c_str(), image_location, true, &error_msg);
+ }
if (space == nullptr) {
- LOG(FATAL) << "Failed to load image '" << image_filename << "': " << error_msg;
+ LOG(FATAL) << "Failed to load generated image '" << cache_filename << "': " << error_msg;
}
return space;
}
@@ -316,6 +537,15 @@
" in image %s", oat_checksum, image_oat_checksum, GetName());
return nullptr;
}
+ int32_t image_patch_delta = image_header.GetPatchDelta();
+ int32_t oat_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
+ if (oat_patch_delta != image_patch_delta) {
+ // We should have already relocated by this point. Bail out.
+ *error_msg = StringPrintf("Failed to match oat file patch delta %d to expected patch delta %d "
+ "in image %s", oat_patch_delta, image_patch_delta, GetName());
+ return nullptr;
+ }
+
return oat_file;
}
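
The image_space.cc changes above add boot-image relocation: ChooseRelocationOffsetDelta picks a random page-aligned delta, GenerateImage and RelocateImage pass it to dex2oat/patchoat, and Init later checks that the image and oat patch deltas agree. A rough sketch of picking such a delta follows; kPageSize, the seeding scheme, and the ±8 MB bounds are illustrative assumptions, and the bounds are expected to be page-aligned as in the CHECK_ALIGNED asserts above.

#include <cstdint>
#include <cstdio>
#include <ctime>
#include <random>
#include <unistd.h>

static constexpr int32_t kPageSize = 4096;  // Assumed page size for the sketch.

// Pick a random delta in [min_delta, max_delta] and snap it to a page
// boundary. Rounding toward zero keeps the result in range as long as both
// bounds are themselves multiples of kPageSize.
static int32_t ChooseDelta(int32_t min_delta, int32_t max_delta) {
  std::default_random_engine generator(
      static_cast<unsigned>(time(nullptr)) ^ static_cast<unsigned>(getpid()));
  std::uniform_int_distribution<int32_t> distribution(min_delta, max_delta);
  int32_t r = distribution(generator);
  r -= r % kPageSize;
  return r;
}

int main() {
  int32_t delta = ChooseDelta(-8 * 1024 * 1024, 8 * 1024 * 1024);
  std::printf("relocation delta: %d bytes\n", delta);
  return 0;
}
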
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index dd9b580..6be3b8f 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -98,6 +98,20 @@
return false;
}
+ // Returns the filename of the image corresponding to
+ // requested image_location, or the filename where a new image
+ // should be written if one doesn't exist. Looks for a generated
+ // image in the specified location and then in the dalvik-cache.
+ //
+ // Returns true if an image was found, false otherwise.
+ static bool FindImageFilename(const char* image_location,
+ InstructionSet image_isa,
+ std::string* system_location,
+ bool* has_system,
+ std::string* data_location,
+ bool* dalvik_cache_exists,
+ bool* has_data);
+
private:
// Tries to initialize an ImageSpace from the given image path,
// returning NULL on error.
@@ -110,17 +124,6 @@
bool validate_oat_file, std::string* error_msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the filename of the image corresponding to
- // requested image_location, or the filename where a new image
- // should be written if one doesn't exist. Looks for a generated
- // image in the specified location and then in the dalvik-cache.
- //
- // Returns true if an image was found, false otherwise.
- static bool FindImageFilename(const char* image_location,
- InstructionSet image_isa,
- std::string* location,
- bool* is_system);
-
OatFile* OpenOatFile(const char* image, std::string* error_msg) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index abae8ff..d5a03c6 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -431,6 +431,11 @@
return scc.freed;
}
+void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
+ size_t /*failed_alloc_bytes*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 01982d0..b1c20ca 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -89,6 +89,9 @@
return end_;
}
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
protected:
explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 27f92b5..ba7e5c1 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -51,12 +51,12 @@
live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
- DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace live bitmap #"
+ CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace live bitmap #"
<< bitmap_index;
mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
- DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
+ CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
<< bitmap_index;
}
for (auto& freed : recent_freed_objects_) {
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6f49fbf..a52b92b 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,8 +133,6 @@
return can_move_objects_;
}
- virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
-
protected:
MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 92c6f53..3f39c77 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -338,6 +338,12 @@
rosalloc_->RevokeAllThreadLocalRuns();
}
+void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+ if (kIsDebugBuild) {
+ rosalloc_->AssertThreadLocalRunsAreRevoked(thread);
+ }
+}
+
void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
if (kIsDebugBuild) {
rosalloc_->AssertAllThreadLocalRunsAreRevoked();
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index f505305..f1ce115 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -101,6 +101,7 @@
void RevokeThreadLocalBuffers(Thread* thread);
void RevokeAllThreadLocalBuffers();
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllThreadLocalBuffersAreRevoked();
// Returns the class of a recently freed object.
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index fff4df1..523d4fe 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -223,6 +223,8 @@
// threads, if the alloc space implementation uses any.
virtual void RevokeAllThreadLocalBuffers() = 0;
+ virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
+
protected:
struct SweepCallbackContext {
SweepCallbackContext(bool swap_bitmaps, space::Space* space);
@@ -407,11 +409,11 @@
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
return mark_bitmap_.get();
}
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index fb3a12e..51d84f5 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -77,25 +77,30 @@
mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return nullptr;
}
size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return 0;
}
size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return 0;
}
size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return 0;
}
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
+ size_t /*failed_alloc_bytes*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
DCHECK(context->space->IsZygoteSpace());
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 5d5fe76..0cf4bb1 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -74,6 +74,9 @@
return false;
}
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
protected:
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
return &SweepCallback;
diff --git a/runtime/gc_root-inl.h b/runtime/gc_root-inl.h
new file mode 100644
index 0000000..482f7bc
--- /dev/null
+++ b/runtime/gc_root-inl.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ROOT_INL_H_
+#define ART_RUNTIME_GC_ROOT_INL_H_
+
+#include "gc_root.h"
+
+#include "read_barrier-inl.h"
+
+namespace art {
+
+template<class MirrorType>
+template<ReadBarrierOption kReadBarrierOption>
+inline MirrorType* GcRoot<MirrorType>::Read() {
+ return ReadBarrier::BarrierForRoot<MirrorType, kReadBarrierOption>(&root_);
+}
+
+} // namespace art
+#endif // ART_RUNTIME_GC_ROOT_INL_H_
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
new file mode 100644
index 0000000..86a8847
--- /dev/null
+++ b/runtime/gc_root.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ROOT_H_
+#define ART_RUNTIME_GC_ROOT_H_
+
+#include "base/mutex.h" // For Locks::mutator_lock_.
+#include "object_callbacks.h"
+
+namespace art {
+
+template<class MirrorType>
+class GcRoot {
+ public:
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ ALWAYS_INLINE MirrorType* Read() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
+ callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
+ }
+
+ // This is only used by IrtIterator.
+ ALWAYS_INLINE MirrorType** AddressWithoutBarrier() {
+ return &root_;
+ }
+
+ bool IsNull() const {
+ // It's safe to null-check it without a read barrier.
+ return root_ == nullptr;
+ }
+
+ ALWAYS_INLINE explicit GcRoot<MirrorType>() : root_(nullptr) {
+ }
+
+ ALWAYS_INLINE explicit GcRoot<MirrorType>(MirrorType* ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : root_(ref) {
+ }
+
+ private:
+ MirrorType* root_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ROOT_H_
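
The new gc_root.h funnels every root access through GcRoot<MirrorType>::Read(), which applies a read barrier (see gc_root-inl.h), while IsNull() deliberately skips the barrier. The toy sketch below illustrates why roots are wrapped behind such an accessor; the Object type and the no-op barrier are assumptions for illustration, not ART's implementation.

#include <cstdio>

struct Object { int value; };

// GcRoot-like wrapper: Read() is the single place a read barrier (or other
// instrumentation) can be inserted for a root reference.
template <typename MirrorType>
class Root {
 public:
  explicit Root(MirrorType* ref = nullptr) : root_(ref) {}

  MirrorType* Read() const {
    // A concurrent-copying collector would apply its read barrier here.
    return root_;
  }

  bool IsNull() const { return root_ == nullptr; }  // Safe without a barrier.

 private:
  MirrorType* root_;
};

int main() {
  Object obj{42};
  Root<Object> root(&obj);
  if (!root.IsNull()) {
    std::printf("root value: %d\n", root.Read()->value);
  }
  return 0;
}
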
diff --git a/runtime/globals.h b/runtime/globals.h
index 1d9f22c..107e064 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -118,6 +118,8 @@
static constexpr TraceClockSource kDefaultTraceClockSource = kTraceClockSourceWall;
#endif
+static constexpr bool kDefaultMustRelocate = true;
+
} // namespace art
#endif // ART_RUNTIME_GLOBALS_H_
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 7e3b6ba..fd67197 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -151,7 +151,8 @@
enum HprofHeapId {
HPROF_HEAP_DEFAULT = 0,
HPROF_HEAP_ZYGOTE = 'Z',
- HPROF_HEAP_APP = 'A'
+ HPROF_HEAP_APP = 'A',
+ HPROF_HEAP_IMAGE = 'I',
};
enum HprofBasicType {
@@ -633,8 +634,12 @@
// U1: NUL-terminated magic string.
fwrite(magic, 1, sizeof(magic), header_fp_);
- // U4: size of identifiers. We're using addresses as IDs, so make sure a pointer fits.
- U4_TO_BUF_BE(buf, 0, sizeof(void*));
+ // U4: size of identifiers. We're using addresses as IDs and our heap references are stored
+ // as uint32_t.
+ // Note of warning: hprof-conv hard-codes the size of identifiers to 4.
+ COMPILE_ASSERT(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
+ UnexpectedHeapReferenceSize);
+ U4_TO_BUF_BE(buf, 0, sizeof(uint32_t));
fwrite(buf, 1, sizeof(uint32_t), header_fp_);
// The current time, in milliseconds since 0:00 GMT, 1/1/70.
@@ -842,26 +847,37 @@
int Hprof::DumpHeapObject(mirror::Object* obj) {
HprofRecord* rec = &current_record_;
- HprofHeapId desiredHeap = false ? HPROF_HEAP_ZYGOTE : HPROF_HEAP_APP; // TODO: zygote objects?
-
+ gc::space::ContinuousSpace* space =
+ Runtime::Current()->GetHeap()->FindContinuousSpaceFromObject(obj, true);
+ HprofHeapId heap_type = HPROF_HEAP_APP;
+ if (space != nullptr) {
+ if (space->IsZygoteSpace()) {
+ heap_type = HPROF_HEAP_ZYGOTE;
+ } else if (space->IsImageSpace()) {
+ heap_type = HPROF_HEAP_IMAGE;
+ }
+ }
if (objects_in_segment_ >= OBJECTS_PER_SEGMENT || rec->Size() >= BYTES_PER_SEGMENT) {
StartNewHeapDumpSegment();
}
- if (desiredHeap != current_heap_) {
+ if (heap_type != current_heap_) {
HprofStringId nameId;
// This object is in a different heap than the current one.
// Emit a HEAP_DUMP_INFO tag to change heaps.
rec->AddU1(HPROF_HEAP_DUMP_INFO);
- rec->AddU4((uint32_t)desiredHeap); // uint32_t: heap id
- switch (desiredHeap) {
+ rec->AddU4(static_cast<uint32_t>(heap_type)); // uint32_t: heap type
+ switch (heap_type) {
case HPROF_HEAP_APP:
nameId = LookupStringId("app");
break;
case HPROF_HEAP_ZYGOTE:
nameId = LookupStringId("zygote");
break;
+ case HPROF_HEAP_IMAGE:
+ nameId = LookupStringId("image");
+ break;
default:
// Internal error
LOG(ERROR) << "Unexpected desiredHeap";
@@ -869,7 +885,7 @@
break;
}
rec->AddStringId(nameId);
- current_heap_ = desiredHeap;
+ current_heap_ = heap_type;
}
mirror::Class* c = obj->GetClass();
diff --git a/runtime/implicit_check_options.h b/runtime/implicit_check_options.h
deleted file mode 100644
index a6595b8..0000000
--- a/runtime/implicit_check_options.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
-#define ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
-
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "instruction_set.h"
-#include "runtime.h"
-
-#include <string>
-
-namespace art {
-
-class ImplicitCheckOptions {
- public:
- static constexpr const char* kImplicitChecksOatHeaderKey = "implicit-checks";
-
- static std::string Serialize(bool explicit_null_checks, bool explicit_stack_overflow_checks,
- bool explicit_suspend_checks) {
- char tmp[4];
- tmp[0] = explicit_null_checks ? 'N' : 'n';
- tmp[1] = explicit_stack_overflow_checks ? 'O' : 'o';
- tmp[2] = explicit_suspend_checks ? 'S' : 's';
- tmp[3] = 0;
- return std::string(tmp);
- }
-
- static bool Parse(const char* str, bool* explicit_null_checks,
- bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) {
- if (str != nullptr && str[0] != 0 && str[1] != 0 && str[2] != 0 &&
- (str[0] == 'n' || str[0] == 'N') &&
- (str[1] == 'o' || str[1] == 'O') &&
- (str[2] == 's' || str[2] == 'S')) {
- *explicit_null_checks = str[0] == 'N';
- *explicit_stack_overflow_checks = str[1] == 'O';
- *explicit_suspend_checks = str[2] == 'S';
- return true;
- } else {
- return false;
- }
- }
-
- // Check whether the given flags are correct with respect to the current runtime and the given
- // executable flag.
- static bool CheckRuntimeSupport(bool executable, bool explicit_null_checks,
- bool explicit_stack_overflow_checks,
- bool explicit_suspend_checks, std::string* error_msg) {
- if (!executable) {
- // Not meant to be run, i.e., either we are compiling or dumping. Just accept.
- return true;
- }
-
- Runtime* runtime = Runtime::Current();
- // We really should have a runtime.
- DCHECK_NE(static_cast<Runtime*>(nullptr), runtime);
-
- if (runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
- // We are an interpret-only environment. Ignore the check value.
- return true;
- }
-
- if (runtime->ExplicitNullChecks() != explicit_null_checks ||
- runtime->ExplicitStackOverflowChecks() != explicit_stack_overflow_checks ||
- runtime->ExplicitSuspendChecks() != explicit_suspend_checks) {
- if (error_msg != nullptr) {
- // Create an error message.
-
- std::ostringstream os;
- os << "Explicit check options do not match runtime: ";
- os << runtime->ExplicitNullChecks() << " vs " << explicit_null_checks << " | ";
- os << runtime->ExplicitStackOverflowChecks() << " vs " << explicit_stack_overflow_checks
- << " | ";
- os << runtime->ExplicitSuspendChecks() << " vs " << explicit_suspend_checks;
-
- *error_msg = os.str();
- }
-
- // Currently we do not create correct images when pre-opting, so the emulator will fail with
- // this change. Once the change is in the tree, REMOVE.
- if (true) {
- // At least try to log it, though.
- if (error_msg != nullptr) {
- LOG(WARNING) << *error_msg;
- }
- return true;
- } else {
- return false;
- }
- }
-
- // Accepted.
- return true;
- }
-
- // Check (and override) the flags depending on current support in the ISA.
- // Right now will reset all flags to explicit except on ARM.
- static void CheckISASupport(InstructionSet isa, bool* explicit_null_checks,
- bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) {
- switch (isa) {
- case kArm:
- case kThumb2:
- break; // All checks implemented, leave as is.
-
- default: // No checks implemented, reset all to explicit checks.
- *explicit_null_checks = true;
- *explicit_stack_overflow_checks = true;
- *explicit_suspend_checks = true;
- }
- }
-
- static bool CheckForCompiling(InstructionSet host, InstructionSet target,
- bool* explicit_null_checks, bool* explicit_stack_overflow_checks,
- bool* explicit_suspend_checks) {
- // Check the boot image settings.
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- gc::space::ImageSpace* ispace = runtime->GetHeap()->GetImageSpace();
- if (ispace != nullptr) {
- const OatFile* oat_file = ispace->GetOatFile();
- if (oat_file != nullptr) {
- const char* v = oat_file->GetOatHeader().GetStoreValueByKey(kImplicitChecksOatHeaderKey);
- if (!Parse(v, explicit_null_checks, explicit_stack_overflow_checks,
- explicit_suspend_checks)) {
- LOG(FATAL) << "Should have been able to parse boot image implicit check values";
- }
- return true;
- }
- }
- }
-
- // Check the current runtime.
- bool cross_compiling = true;
- switch (host) {
- case kArm:
- case kThumb2:
- cross_compiling = target != kArm && target != kThumb2;
- break;
- default:
- cross_compiling = host != target;
- break;
- }
- if (!cross_compiling) {
- Runtime* runtime = Runtime::Current();
- *explicit_null_checks = runtime->ExplicitNullChecks();
- *explicit_stack_overflow_checks = runtime->ExplicitStackOverflowChecks();
- *explicit_suspend_checks = runtime->ExplicitSuspendChecks();
- return true;
- }
-
- // Give up.
- return false;
- }
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index f561643..c826716 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -46,7 +46,7 @@
AbortIfNoCheckJNI();
return false;
}
- if (UNLIKELY(table_[idx] == nullptr)) {
+ if (UNLIKELY(table_[idx].IsNull())) {
LOG(ERROR) << "JNI ERROR (app bug): accessed deleted " << kind_ << " " << iref;
AbortIfNoCheckJNI();
return false;
@@ -75,11 +75,11 @@
if (!GetChecked(iref)) {
return kInvalidIndirectRefObject;
}
- mirror::Object** root = &table_[ExtractIndex(iref)];
- mirror::Object* obj = *root;
+ uint32_t idx = ExtractIndex(iref);
+ mirror::Object* obj = table_[idx].Read<kWithoutReadBarrier>();
if (LIKELY(obj != kClearedJniWeakGlobal)) {
// The read barrier or VerifyObject won't handle kClearedJniWeakGlobal.
- obj = ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(root);
+ obj = table_[idx].Read();
VerifyObject(obj);
}
return obj;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index ad798ed..1ba2291 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -56,7 +56,8 @@
void IndirectReferenceTable::AbortIfNoCheckJNI() {
// If -Xcheck:jni is on, it'll give a more detailed error before aborting.
- if (!Runtime::Current()->GetJavaVM()->check_jni) {
+ JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+ if (!vm->IsCheckJniEnabled()) {
// Otherwise, we want to abort rather than hand back a bad reference.
LOG(FATAL) << "JNI ERROR (app bug): see above.";
}
@@ -74,8 +75,9 @@
table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
PROT_READ | PROT_WRITE, false, &error_str));
CHECK(table_mem_map_.get() != nullptr) << error_str;
+ CHECK_EQ(table_mem_map_->Size(), table_bytes);
- table_ = reinterpret_cast<mirror::Object**>(table_mem_map_->Begin());
+ table_ = reinterpret_cast<GcRoot<mirror::Object>*>(table_mem_map_->Begin());
CHECK(table_ != nullptr);
memset(table_, 0xd1, initial_bytes);
@@ -131,20 +133,22 @@
if (numHoles > 0) {
DCHECK_GT(topIndex, 1U);
// Find the first hole; likely to be near the end of the list.
- mirror::Object** pScan = &table_[topIndex - 1];
- DCHECK(*pScan != NULL);
- while (*--pScan != NULL) {
+ GcRoot<mirror::Object>* pScan = &table_[topIndex - 1];
+ DCHECK(!pScan->IsNull());
+ --pScan;
+ while (!pScan->IsNull()) {
DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
+ --pScan;
}
UpdateSlotAdd(obj, pScan - table_);
result = ToIndirectRef(pScan - table_);
- *pScan = obj;
+ *pScan = GcRoot<mirror::Object>(obj);
segment_state_.parts.numHoles--;
} else {
// Add to the end.
UpdateSlotAdd(obj, topIndex);
result = ToIndirectRef(topIndex);
- table_[topIndex++] = obj;
+ table_[topIndex++] = GcRoot<mirror::Object>(obj);
segment_state_.parts.topIndex = topIndex;
}
if (false) {
@@ -210,15 +214,16 @@
return false;
}
- table_[idx] = NULL;
+ table_[idx] = GcRoot<mirror::Object>(nullptr);
int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
if (numHoles != 0) {
while (--topIndex > bottomIndex && numHoles != 0) {
if (false) {
LOG(INFO) << "+++ checking for hole at " << topIndex-1
- << " (cookie=" << cookie << ") val=" << table_[topIndex - 1];
+ << " (cookie=" << cookie << ") val="
+ << table_[topIndex - 1].Read<kWithoutReadBarrier>();
}
- if (table_[topIndex-1] != NULL) {
+ if (!table_[topIndex-1].IsNull()) {
break;
}
if (false) {
@@ -238,7 +243,7 @@
// Not the top-most entry. This creates a hole. We NULL out the
// entry to prevent somebody from deleting it twice and screwing up
// the hole count.
- if (table_[idx] == NULL) {
+ if (table_[idx].IsNull()) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
}
@@ -246,7 +251,7 @@
return false;
}
- table_[idx] = NULL;
+ table_[idx] = GcRoot<mirror::Object>(nullptr);
segment_state_.parts.numHoles++;
if (false) {
LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
@@ -268,17 +273,16 @@
os << kind_ << " table dump:\n";
ReferenceTable::Table entries;
for (size_t i = 0; i < Capacity(); ++i) {
- mirror::Object** root = &table_[i];
- mirror::Object* obj = *root;
+ mirror::Object* obj = table_[i].Read<kWithoutReadBarrier>();
if (UNLIKELY(obj == nullptr)) {
// Remove NULLs.
} else if (UNLIKELY(obj == kClearedJniWeakGlobal)) {
// ReferenceTable::Dump() will handle kClearedJniWeakGlobal
// while the read barrier won't.
- entries.push_back(obj);
+ entries.push_back(GcRoot<mirror::Object>(obj));
} else {
- obj = ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
- entries.push_back(obj);
+ obj = table_[i].Read();
+ entries.push_back(GcRoot<mirror::Object>(obj));
}
}
ReferenceTable::Dump(os, entries);
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index b3a855d..d25bc42 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -24,16 +24,19 @@
#include "base/logging.h"
#include "base/mutex.h"
-#include "mem_map.h"
+#include "gc_root.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
namespace art {
+
namespace mirror {
class Object;
} // namespace mirror
+class MemMap;
+
/*
* Maintain a table of indirect references. Used for local/global JNI
* references.
@@ -204,12 +207,13 @@
class IrtIterator {
public:
- explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
+ explicit IrtIterator(GcRoot<mirror::Object>* table, size_t i, size_t capacity)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: table_(table), i_(i), capacity_(capacity) {
SkipNullsAndTombstones();
}
- IrtIterator& operator++() {
+ IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
++i_;
SkipNullsAndTombstones();
return *this;
@@ -217,7 +221,7 @@
mirror::Object** operator*() {
// This does not have a read barrier as this is used to visit roots.
- return &table_[i_];
+ return table_[i_].AddressWithoutBarrier();
}
bool equals(const IrtIterator& rhs) const {
@@ -225,14 +229,16 @@
}
private:
- void SkipNullsAndTombstones() {
+ void SkipNullsAndTombstones() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We skip NULLs and tombstones. Clients don't want to see implementation details.
- while (i_ < capacity_ && (table_[i_] == NULL || table_[i_] == kClearedJniWeakGlobal)) {
+ while (i_ < capacity_ &&
+ (table_[i_].IsNull() ||
+ table_[i_].Read<kWithoutReadBarrier>() == kClearedJniWeakGlobal)) {
++i_;
}
}
- mirror::Object** const table_;
+ GcRoot<mirror::Object>* const table_;
size_t i_;
size_t capacity_;
};
@@ -309,7 +315,8 @@
return IrtIterator(table_, Capacity(), Capacity());
}
- void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type);
+ void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetSegmentState() const {
return segment_state_.all;
@@ -373,7 +380,7 @@
std::unique_ptr<MemMap> slot_mem_map_;
// bottom of the stack. Do not directly access the object references
// in this as they are roots. Use Get() that has a read barrier.
- mirror::Object** table_;
+ GcRoot<mirror::Object>* table_;
/* bit mask, ORed into all irefs */
IndirectRefKind kind_;
/* extended debugging info */
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index 5b60396..d7e358c 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -83,6 +83,44 @@
}
}
+
+static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
+static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
+
+// TODO: Lower once implicit stack-overflow checks can work with less than 16K.
+static constexpr size_t kArmStackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+static constexpr size_t kArm64StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+static constexpr size_t kX86StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+static constexpr size_t kX86_64StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+
+size_t GetStackOverflowReservedBytes(InstructionSet isa) {
+ switch (isa) {
+ case kArm: // Intentional fall-through.
+ case kThumb2:
+ return kArmStackOverflowReservedBytes;
+
+ case kArm64:
+ return kArm64StackOverflowReservedBytes;
+
+ case kMips:
+ return kMipsStackOverflowReservedBytes;
+
+ case kX86:
+ return kX86StackOverflowReservedBytes;
+
+ case kX86_64:
+ return kX86_64StackOverflowReservedBytes;
+
+ case kNone:
+ LOG(FATAL) << "kNone has no stack overflow size";
+ return 0;
+
+ default:
+ LOG(FATAL) << "Unknown instruction set" << isa;
+ return 0;
+ }
+}
+
std::string InstructionSetFeatures::GetFeatureString() const {
std::string result;
if ((mask_ & kHwDiv) != 0) {
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index dce1c15..f212811 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -169,33 +169,7 @@
}
}
-static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
-static constexpr size_t kArmStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
-static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
-
-// TODO: shrink reserved space, in particular for 64bit.
-
-// Worst-case, we would need about 2.6x the amount of x86_64 for many more registers.
-// But this one works rather well.
-static constexpr size_t kArm64StackOverflowReservedBytes = 32 * KB;
-// TODO: Bumped to workaround regression (http://b/14982147) Specifically to fix:
-// test-art-host-run-test-interpreter-018-stack-overflow
-// test-art-host-run-test-interpreter-107-int-math2
-static constexpr size_t kX86StackOverflowReservedBytes = (kIsDebugBuild ? 32 : 24) * KB;
-static constexpr size_t kX86_64StackOverflowReservedBytes = 32 * KB;
-
-static constexpr size_t GetStackOverflowReservedBytes(InstructionSet isa) {
- return (isa == kArm || isa == kThumb2) ? kArmStackOverflowReservedBytes :
- isa == kArm64 ? kArm64StackOverflowReservedBytes :
- isa == kMips ? kMipsStackOverflowReservedBytes :
- isa == kX86 ? kX86StackOverflowReservedBytes :
- isa == kX86_64 ? kX86_64StackOverflowReservedBytes :
- isa == kNone ? (LOG(FATAL) << "kNone has no stack overflow size", 0) :
- (LOG(FATAL) << "Unknown instruction set" << isa, 0);
-}
-
-static constexpr size_t kRuntimeStackOverflowReservedBytes =
- GetStackOverflowReservedBytes(kRuntimeISA);
+size_t GetStackOverflowReservedBytes(InstructionSet isa);
enum InstructionFeatures {
kHwDiv = 0x1, // Supports hardware divide.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index f4eaa61..16be077 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -25,6 +25,7 @@
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "gc_root-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -519,7 +520,7 @@
bool empty;
{
ReaderMutexLock mu(self, deoptimized_methods_lock_);
- empty = deoptimized_methods_.empty(); // Avoid lock violation.
+ empty = IsDeoptimizedMethodsEmpty(); // Avoid lock violation.
}
if (empty) {
instrumentation_stubs_installed_ = false;
@@ -580,7 +581,7 @@
}
void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code,
- const void* portable_code, bool have_portable_code) const {
+ const void* portable_code, bool have_portable_code) {
const void* new_portable_code;
const void* new_quick_code;
bool new_have_portable_code;
@@ -617,20 +618,74 @@
UpdateEntrypoints(method, new_quick_code, new_portable_code, new_have_portable_code);
}
+bool Instrumentation::AddDeoptimizedMethod(mirror::ArtMethod* method) {
+ // Note that the insert() below isn't read barrier-aware. So, this
+ // FindDeoptimizedMethod() call is necessary or else we would end up
+ // storing the same method twice in the map (the from-space and the
+ // to-space ones).
+ if (FindDeoptimizedMethod(method)) {
+ // Already in the map. Return.
+ return false;
+ }
+ // Not found. Add it.
+ int32_t hash_code = method->IdentityHashCode();
+ deoptimized_methods_.insert(std::make_pair(hash_code, GcRoot<mirror::ArtMethod>(method)));
+ return true;
+}
+
+bool Instrumentation::FindDeoptimizedMethod(mirror::ArtMethod* method) {
+ int32_t hash_code = method->IdentityHashCode();
+ auto range = deoptimized_methods_.equal_range(hash_code);
+ for (auto it = range.first; it != range.second; ++it) {
+ mirror::ArtMethod* m = it->second.Read();
+ if (m == method) {
+ // Found.
+ return true;
+ }
+ }
+ // Not found.
+ return false;
+}
+
+mirror::ArtMethod* Instrumentation::BeginDeoptimizedMethod() {
+ auto it = deoptimized_methods_.begin();
+ if (it == deoptimized_methods_.end()) {
+ // Empty.
+ return nullptr;
+ }
+ return it->second.Read();
+}
+
+bool Instrumentation::RemoveDeoptimizedMethod(mirror::ArtMethod* method) {
+ int32_t hash_code = method->IdentityHashCode();
+ auto range = deoptimized_methods_.equal_range(hash_code);
+ for (auto it = range.first; it != range.second; ++it) {
+ mirror::ArtMethod* m = it->second.Read();
+ if (m == method) {
+ // Found. Erase and return.
+ deoptimized_methods_.erase(it);
+ return true;
+ }
+ }
+ // Not found.
+ return false;
+}
+
+bool Instrumentation::IsDeoptimizedMethodsEmpty() const {
+ return deoptimized_methods_.empty();
+}
+
void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
CHECK(!method->IsNative());
CHECK(!method->IsProxyMethod());
CHECK(!method->IsAbstract());
Thread* self = Thread::Current();
- std::pair<std::set<mirror::ArtMethod*>::iterator, bool> pair;
{
WriterMutexLock mu(self, deoptimized_methods_lock_);
- pair = deoptimized_methods_.insert(method);
+ bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
+ CHECK(has_not_been_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
}
- bool already_deoptimized = !pair.second;
- CHECK(!already_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
-
if (!interpreter_stubs_installed_) {
UpdateEntrypoints(method, GetQuickInstrumentationEntryPoint(), GetPortableToInterpreterBridge(),
false);
@@ -652,11 +707,10 @@
bool empty;
{
WriterMutexLock mu(self, deoptimized_methods_lock_);
- auto it = deoptimized_methods_.find(method);
- CHECK(it != deoptimized_methods_.end()) << "Method " << PrettyMethod(method)
+ bool found_and_erased = RemoveDeoptimizedMethod(method);
+ CHECK(found_and_erased) << "Method " << PrettyMethod(method)
<< " is not deoptimized";
- deoptimized_methods_.erase(it);
- empty = deoptimized_methods_.empty();
+ empty = IsDeoptimizedMethodsEmpty();
}
// Restore code and possibly stack only if we did not deoptimize everything.
@@ -684,15 +738,15 @@
}
}
-bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) const {
- ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) {
DCHECK(method != nullptr);
- return deoptimized_methods_.find(method) != deoptimized_methods_.end();
+ ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+ return FindDeoptimizedMethod(method);
}
void Instrumentation::EnableDeoptimization() {
ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- CHECK(deoptimized_methods_.empty());
+ CHECK(IsDeoptimizedMethodsEmpty());
CHECK_EQ(deoptimization_enabled_, false);
deoptimization_enabled_ = true;
}
@@ -708,10 +762,11 @@
mirror::ArtMethod* method;
{
ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- if (deoptimized_methods_.empty()) {
+ if (IsDeoptimizedMethodsEmpty()) {
break;
}
- method = *deoptimized_methods_.begin();
+ method = BeginDeoptimizedMethod();
+ CHECK(method != nullptr);
}
Undeoptimize(method);
}
@@ -963,16 +1018,12 @@
void Instrumentation::VisitRoots(RootCallback* callback, void* arg) {
WriterMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- if (deoptimized_methods_.empty()) {
+ if (IsDeoptimizedMethodsEmpty()) {
return;
}
- std::set<mirror::ArtMethod*> new_deoptimized_methods;
- for (mirror::ArtMethod* method : deoptimized_methods_) {
- DCHECK(method != nullptr);
- callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootVMInternal);
- new_deoptimized_methods.insert(method);
+ for (auto pair : deoptimized_methods_) {
+ pair.second.VisitRoot(callback, arg, 0, kRootVMInternal);
}
- deoptimized_methods_ = new_deoptimized_methods;
}
std::string InstrumentationStackFrame::Dump() const {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index d0cb4de..66c6b38 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -18,13 +18,14 @@
#define ART_RUNTIME_INSTRUMENTATION_H_
#include <stdint.h>
-#include <set>
#include <list>
+#include <map>
#include "atomic.h"
#include "instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
namespace art {
@@ -162,7 +163,9 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsDeoptimized(mirror::ArtMethod* method) const LOCKS_EXCLUDED(deoptimized_methods_lock_);
+ bool IsDeoptimized(mirror::ArtMethod* method)
+ LOCKS_EXCLUDED(deoptimized_methods_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs.
void EnableMethodTracing()
@@ -186,7 +189,7 @@
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code,
- const void* portable_code, bool have_portable_code) const
+ const void* portable_code, bool have_portable_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
@@ -367,6 +370,23 @@
mirror::ArtField* field, const JValue& field_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Read barrier-aware utility functions for accessing deoptimized_methods_
+ bool AddDeoptimizedMethod(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ bool FindDeoptimizedMethod(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ bool RemoveDeoptimizedMethod(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ mirror::ArtMethod* BeginDeoptimizedMethod()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ bool IsDeoptimizedMethodsEmpty() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+
// Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
bool instrumentation_stubs_installed_;
@@ -421,7 +441,8 @@
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::set<mirror::ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
+ std::multimap<int32_t, GcRoot<mirror::ArtMethod>> deoptimized_methods_
+ GUARDED_BY(deoptimized_methods_lock_);
bool deoptimization_enabled_;
// Current interpreter handler table. This is updated each time the thread state flags are
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 1430500..aadd85a 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -58,27 +58,27 @@
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
for (auto& strong_intern : strong_interns_) {
- callback(reinterpret_cast<mirror::Object**>(&strong_intern.second), arg, 0,
- kRootInternedString);
- DCHECK(strong_intern.second != nullptr);
+ strong_intern.second.VisitRoot(callback, arg, 0, kRootInternedString);
+ DCHECK(!strong_intern.second.IsNull());
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& pair : new_strong_intern_roots_) {
- mirror::String* old_ref = pair.second;
- callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootInternedString);
- if (UNLIKELY(pair.second != old_ref)) {
- // Uh ohes, GC moved a root in the log. Need to search the strong interns and update the
- // corresponding object. This is slow, but luckily for us, this may only happen with a
- // concurrent moving GC.
- for (auto it = strong_interns_.lower_bound(pair.first), end = strong_interns_.end();
+ mirror::String* old_ref = pair.second.Read<kWithoutReadBarrier>();
+ pair.second.VisitRoot(callback, arg, 0, kRootInternedString);
+ mirror::String* new_ref = pair.second.Read<kWithoutReadBarrier>();
+ if (UNLIKELY(new_ref != old_ref)) {
+ // Uh ohes, GC moved a root in the log. Need to search the strong interns and update the
+ // corresponding object. This is slow, but luckily for us, this may only happen with a
+ // concurrent moving GC.
+ for (auto it = strong_interns_.lower_bound(pair.first), end = strong_interns_.end();
it != end && it->first == pair.first; ++it) {
- // If the class stored matches the old class, update it to the new value.
- if (old_ref == it->second) {
- it->second = pair.second;
- }
- }
- }
- }
+ // If the class stored matches the old class, update it to the new value.
+ if (old_ref == it->second.Read<kWithoutReadBarrier>()) {
+ it->second = GcRoot<mirror::String>(new_ref);
+ }
+ }
+ }
+ }
}
if ((flags & kVisitRootFlagClearRootLog) != 0) {
@@ -105,9 +105,7 @@
Locks::intern_table_lock_->AssertHeld(Thread::Current());
for (auto it = table->lower_bound(hash_code), end = table->end();
it != end && it->first == hash_code; ++it) {
- mirror::String* existing_string;
- mirror::String** root = &it->second;
- existing_string = ReadBarrier::BarrierForRoot<mirror::String, kWithReadBarrier>(root);
+ mirror::String* existing_string = it->second.Read();
if (existing_string->Equals(s)) {
return existing_string;
}
@@ -121,9 +119,9 @@
runtime->RecordStrongStringInsertion(s, hash_code);
}
if (log_new_roots_) {
- new_strong_intern_roots_.push_back(std::make_pair(hash_code, s));
+ new_strong_intern_roots_.push_back(std::make_pair(hash_code, GcRoot<mirror::String>(s)));
}
- strong_interns_.insert(std::make_pair(hash_code, s));
+ strong_interns_.insert(std::make_pair(hash_code, GcRoot<mirror::String>(s)));
return s;
}
@@ -132,7 +130,7 @@
if (runtime->IsActiveTransaction()) {
runtime->RecordWeakStringInsertion(s, hash_code);
}
- weak_interns_.insert(std::make_pair(hash_code, s));
+ weak_interns_.insert(std::make_pair(hash_code, GcRoot<mirror::String>(s)));
return s;
}
@@ -151,9 +149,7 @@
void InternTable::Remove(Table* table, mirror::String* s, int32_t hash_code) {
for (auto it = table->lower_bound(hash_code), end = table->end();
it != end && it->first == hash_code; ++it) {
- mirror::String* existing_string;
- mirror::String** root = &it->second;
- existing_string = ReadBarrier::BarrierForRoot<mirror::String, kWithReadBarrier>(root);
+ mirror::String* existing_string = it->second.Read();
if (existing_string == s) {
table->erase(it);
return;
@@ -308,13 +304,13 @@
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
// This does not need a read barrier because this is called by GC.
- mirror::Object* object = it->second;
+ mirror::Object* object = it->second.Read<kWithoutReadBarrier>();
mirror::Object* new_object = callback(object, arg);
if (new_object == nullptr) {
// TODO: use it = weak_interns_.erase(it) when we get a c++11 stl.
weak_interns_.erase(it++);
} else {
- it->second = down_cast<mirror::String*>(new_object);
+ it->second = GcRoot<mirror::String>(down_cast<mirror::String*>(new_object));
++it;
}
}
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 6dc7f7b..435cc43 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -20,6 +20,7 @@
#include <map>
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
namespace art {
@@ -59,7 +60,8 @@
// Interns a potentially new string in the 'weak' table. (See above.)
mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg);
+ void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -67,7 +69,8 @@
size_t StrongSize() const;
size_t WeakSize() const;
- void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags);
+ void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) const;
@@ -75,7 +78,7 @@
void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- typedef std::multimap<int32_t, mirror::String*> Table;
+ typedef std::multimap<int32_t, GcRoot<mirror::String>> Table;
mirror::String* Insert(mirror::String* s, bool is_strong)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
@@ -122,7 +125,7 @@
// directly access the strings in it. Use functions that contain
// read barriers.
Table strong_interns_ GUARDED_BY(Locks::intern_table_lock_);
- std::vector<std::pair<int32_t, mirror::String*>> new_strong_intern_roots_
+ std::vector<std::pair<int32_t, GcRoot<mirror::String>>> new_strong_intern_roots_
GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (weak) roots, they need a read barrier. Do
// not directly access the strings in it. Use functions that contain
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index e3068b3..47a7f0d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -397,7 +397,8 @@
void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receiver,
uint32_t* args, JValue* result) {
DCHECK_EQ(self, Thread::Current());
- if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
return;
}
@@ -509,7 +510,8 @@
JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame) {
DCHECK_EQ(self, Thread::Current());
- if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
return JValue();
}
@@ -520,7 +522,8 @@
extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result) {
- if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
return;
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 630b324..b35da0c 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -37,7 +37,6 @@
CHECK(self->IsExceptionPending());
return false;
}
- f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
Object* obj;
if (is_static) {
obj = f->GetDeclaringClass();
@@ -48,6 +47,7 @@
return false;
}
}
+ f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
// Report this field access to instrumentation if needed.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
@@ -213,7 +213,6 @@
CHECK(self->IsExceptionPending());
return false;
}
- f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
Object* obj;
if (is_static) {
obj = f->GetDeclaringClass();
@@ -225,6 +224,7 @@
return false;
}
}
+ f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
// Report this field access to instrumentation if needed. Since we only have the offset of
// the field from the base of the object, we need to look for it first.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 1bcd27e..5a1d01e 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -140,7 +140,8 @@
return false;
}
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- ArtMethod* const method = receiver->GetClass()->GetVTable()->GetWithoutChecks(vtable_idx);
+ CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
+ ArtMethod* const method = receiver->GetClass()->GetEmbeddedVTableEntry(vtable_idx);
if (UNLIKELY(method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
new file mode 100644
index 0000000..9eab3fd
--- /dev/null
+++ b/runtime/java_vm_ext.cc
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+
+#include <dlfcn.h>
+
+#include "base/mutex.h"
+#include "base/stl_util.h"
+#include "check_jni.h"
+#include "indirect_reference_table-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "native_bridge.h"
+#include "java_vm_ext.h"
+#include "parsed_options.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace art {
+
+static const size_t kPinTableInitial = 16; // Arbitrary.
+static const size_t kPinTableMax = 1024; // Arbitrary sanity check.
+
+static size_t gGlobalsInitial = 512; // Arbitrary.
+static size_t gGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
+
+static const size_t kWeakGlobalsInitial = 16; // Arbitrary.
+static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
+
+static bool IsBadJniVersion(int version) {
+ // We don't support JNI_VERSION_1_1. These are the only other valid versions.
+ return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
+}
+
+class SharedLibrary {
+ public:
+ SharedLibrary(JNIEnv* env, Thread* self, const std::string& path, void* handle,
+ jobject class_loader)
+ : path_(path),
+ handle_(handle),
+ needs_native_bridge_(false),
+ class_loader_(env->NewGlobalRef(class_loader)),
+ jni_on_load_lock_("JNI_OnLoad lock"),
+ jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
+ jni_on_load_thread_id_(self->GetThreadId()),
+ jni_on_load_result_(kPending) {
+ }
+
+ ~SharedLibrary() {
+ Thread* self = Thread::Current();
+ if (self != nullptr) {
+ self->GetJniEnv()->DeleteGlobalRef(class_loader_);
+ }
+ }
+
+ jobject GetClassLoader() const {
+ return class_loader_;
+ }
+
+ const std::string& GetPath() const {
+ return path_;
+ }
+
+ /*
+ * Check the result of an earlier call to JNI_OnLoad on this library.
+ * If the call has not yet finished in another thread, wait for it.
+ */
+ bool CheckOnLoadResult()
+ LOCKS_EXCLUDED(jni_on_load_lock_) {
+ Thread* self = Thread::Current();
+ bool okay;
+ {
+ MutexLock mu(self, jni_on_load_lock_);
+
+ if (jni_on_load_thread_id_ == self->GetThreadId()) {
+ // Check this so we don't end up waiting for ourselves. We need to return "true" so the
+ // caller can continue.
+ LOG(INFO) << *self << " recursive attempt to load library " << "\"" << path_ << "\"";
+ okay = true;
+ } else {
+ while (jni_on_load_result_ == kPending) {
+ VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " << "JNI_OnLoad...]";
+ jni_on_load_cond_.Wait(self);
+ }
+
+ okay = (jni_on_load_result_ == kOkay);
+ VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" "
+ << (okay ? "succeeded" : "failed") << "]";
+ }
+ }
+ return okay;
+ }
+
+ void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, jni_on_load_lock_);
+
+ jni_on_load_result_ = result ? kOkay : kFailed;
+ jni_on_load_thread_id_ = 0;
+
+ // Broadcast a wakeup to anybody sleeping on the condition variable.
+ jni_on_load_cond_.Broadcast(self);
+ }
+
+ void SetNeedsNativeBridge() {
+ needs_native_bridge_ = true;
+ }
+
+ bool NeedsNativeBridge() const {
+ return needs_native_bridge_;
+ }
+
+ void* FindSymbol(const std::string& symbol_name) {
+ return dlsym(handle_, symbol_name.c_str());
+ }
+
+ void* FindSymbolWithNativeBridge(const std::string& symbol_name, const char* shorty) {
+ CHECK(NeedsNativeBridge());
+
+ uint32_t len = 0;
+ return NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
+ }
+
+ private:
+ enum JNI_OnLoadState {
+ kPending,
+ kFailed,
+ kOkay,
+ };
+
+ // Path to library "/system/lib/libjni.so".
+ const std::string path_;
+
+ // The void* returned by dlopen(3).
+ void* const handle_;
+
+ // True if a native bridge is required.
+ bool needs_native_bridge_;
+
+ // The ClassLoader this library is associated with, a global JNI reference that is
+ // created/deleted with the scope of the library.
+ const jobject class_loader_;
+
+ // Guards remaining items.
+ Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Wait for JNI_OnLoad in other thread.
+ ConditionVariable jni_on_load_cond_ GUARDED_BY(jni_on_load_lock_);
+ // Recursive invocation guard.
+ uint32_t jni_on_load_thread_id_ GUARDED_BY(jni_on_load_lock_);
+ // Result of earlier JNI_OnLoad call.
+ JNI_OnLoadState jni_on_load_result_ GUARDED_BY(jni_on_load_lock_);
+};
+
+// This exists mainly to keep implementation details out of the header file.
+class Libraries {
+ public:
+ Libraries() {
+ }
+
+ ~Libraries() {
+ STLDeleteValues(&libraries_);
+ }
+
+ void Dump(std::ostream& os) const {
+ bool first = true;
+ for (const auto& library : libraries_) {
+ if (!first) {
+ os << ' ';
+ }
+ first = false;
+ os << library.first;
+ }
+ }
+
+ size_t size() const {
+ return libraries_.size();
+ }
+
+ SharedLibrary* Get(const std::string& path) {
+ auto it = libraries_.find(path);
+ return (it == libraries_.end()) ? nullptr : it->second;
+ }
+
+ void Put(const std::string& path, SharedLibrary* library) {
+ libraries_.Put(path, library);
+ }
+
+ // See section 11.3 "Linking Native Methods" of the JNI spec.
+ void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string jni_short_name(JniShortName(m));
+ std::string jni_long_name(JniLongName(m));
+ const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ for (const auto& lib : libraries_) {
+ SharedLibrary* library = lib.second;
+ if (soa.Decode<mirror::ClassLoader*>(library->GetClassLoader()) != declaring_class_loader) {
+ // We only search libraries loaded by the appropriate ClassLoader.
+ continue;
+ }
+ // Try the short name then the long name...
+ void* fn;
+ if (library->NeedsNativeBridge()) {
+ const char* shorty = m->GetShorty();
+ fn = library->FindSymbolWithNativeBridge(jni_short_name, shorty);
+ if (fn == nullptr) {
+ fn = library->FindSymbolWithNativeBridge(jni_long_name, shorty);
+ }
+ } else {
+ fn = library->FindSymbol(jni_short_name);
+ if (fn == nullptr) {
+ fn = library->FindSymbol(jni_long_name);
+ }
+ }
+ if (fn == nullptr) {
+ fn = library->FindSymbol(jni_long_name);
+ }
+ if (fn != nullptr) {
+ VLOG(jni) << "[Found native code for " << PrettyMethod(m)
+ << " in \"" << library->GetPath() << "\"]";
+ return fn;
+ }
+ }
+ detail += "No implementation found for ";
+ detail += PrettyMethod(m);
+ detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
+ LOG(ERROR) << detail;
+ return nullptr;
+ }
+
+ private:
+ SafeMap<std::string, SharedLibrary*> libraries_;
+};
+
+
+class JII {
+ public:
+ static jint DestroyJavaVM(JavaVM* vm) {
+ if (vm == nullptr) {
+ return JNI_ERR;
+ }
+ JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
+ delete raw_vm->GetRuntime();
+ return JNI_OK;
+ }
+
+ static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
+ return AttachCurrentThreadInternal(vm, p_env, thr_args, false);
+ }
+
+ static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
+ return AttachCurrentThreadInternal(vm, p_env, thr_args, true);
+ }
+
+ static jint DetachCurrentThread(JavaVM* vm) {
+ if (vm == nullptr || Thread::Current() == nullptr) {
+ return JNI_ERR;
+ }
+ JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
+ Runtime* runtime = raw_vm->GetRuntime();
+ runtime->DetachCurrentThread();
+ return JNI_OK;
+ }
+
+ static jint GetEnv(JavaVM* vm, void** env, jint version) {
+ // GetEnv always returns a JNIEnv* for the most current supported JNI version,
+ // and unlike other calls that take a JNI version doesn't care if you supply
+ // JNI_VERSION_1_1, which we don't otherwise support.
+ if (IsBadJniVersion(version) && version != JNI_VERSION_1_1) {
+ LOG(ERROR) << "Bad JNI version passed to GetEnv: " << version;
+ return JNI_EVERSION;
+ }
+ if (vm == nullptr || env == nullptr) {
+ return JNI_ERR;
+ }
+ Thread* thread = Thread::Current();
+ if (thread == nullptr) {
+ *env = nullptr;
+ return JNI_EDETACHED;
+ }
+ *env = thread->GetJniEnv();
+ return JNI_OK;
+ }
+
+ private:
+ static jint AttachCurrentThreadInternal(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) {
+ if (vm == nullptr || p_env == nullptr) {
+ return JNI_ERR;
+ }
+
+ // Return immediately if we're already attached.
+ Thread* self = Thread::Current();
+ if (self != nullptr) {
+ *p_env = self->GetJniEnv();
+ return JNI_OK;
+ }
+
+ Runtime* runtime = reinterpret_cast<JavaVMExt*>(vm)->GetRuntime();
+
+ // No threads allowed in zygote mode.
+ if (runtime->IsZygote()) {
+ LOG(ERROR) << "Attempt to attach a thread in the zygote";
+ return JNI_ERR;
+ }
+
+ JavaVMAttachArgs* args = static_cast<JavaVMAttachArgs*>(raw_args);
+ const char* thread_name = nullptr;
+ jobject thread_group = nullptr;
+ if (args != nullptr) {
+ if (IsBadJniVersion(args->version)) {
+ LOG(ERROR) << "Bad JNI version passed to "
+ << (as_daemon ? "AttachCurrentThreadAsDaemon" : "AttachCurrentThread") << ": "
+ << args->version;
+ return JNI_EVERSION;
+ }
+ thread_name = args->name;
+ thread_group = args->group;
+ }
+
+ if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
+ *p_env = nullptr;
+ return JNI_ERR;
+ } else {
+ *p_env = Thread::Current()->GetJniEnv();
+ return JNI_OK;
+ }
+ }
+};
+
+const JNIInvokeInterface gJniInvokeInterface = {
+ nullptr, // reserved0
+ nullptr, // reserved1
+ nullptr, // reserved2
+ JII::DestroyJavaVM,
+ JII::AttachCurrentThread,
+ JII::DetachCurrentThread,
+ JII::GetEnv,
+ JII::AttachCurrentThreadAsDaemon
+};
+
+JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
+ : runtime_(runtime),
+ check_jni_abort_hook_(nullptr),
+ check_jni_abort_hook_data_(nullptr),
+ check_jni_(false), // Initialized properly in the constructor body below.
+ force_copy_(options->force_copy_),
+ tracing_enabled_(!options->jni_trace_.empty() || VLOG_IS_ON(third_party_jni)),
+ trace_(options->jni_trace_),
+ pins_lock_("JNI pin table lock", kPinTableLock),
+ pin_table_("pin table", kPinTableInitial, kPinTableMax),
+ globals_lock_("JNI global reference table lock"),
+ globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
+ libraries_(new Libraries),
+ unchecked_functions_(&gJniInvokeInterface),
+ weak_globals_lock_("JNI weak global reference table lock"),
+ weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
+ allow_new_weak_globals_(true),
+ weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
+ functions = unchecked_functions_;
+ if (options->check_jni_) {
+ SetCheckJniEnabled(true);
+ }
+}
+
+JavaVMExt::~JavaVMExt() {
+}
+
+void JavaVMExt::JniAbort(const char* jni_function_name, const char* msg) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr);
+
+ std::ostringstream os;
+ os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
+
+ if (jni_function_name != nullptr) {
+ os << "\n in call to " << jni_function_name;
+ }
+ // TODO: is this useful given that we're about to dump the calling thread's stack?
+ if (current_method != nullptr) {
+ os << "\n from " << PrettyMethod(current_method);
+ }
+ os << "\n";
+ self->Dump(os);
+
+ if (check_jni_abort_hook_ != nullptr) {
+ check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
+ } else {
+ // Ensure that we get a native stack trace for this thread.
+ self->TransitionFromRunnableToSuspended(kNative);
+ LOG(FATAL) << os.str();
+ self->TransitionFromSuspendedToRunnable(); // Unreachable, keep annotalysis happy.
+ }
+}
+
+void JavaVMExt::JniAbortV(const char* jni_function_name, const char* fmt, va_list ap) {
+ std::string msg;
+ StringAppendV(&msg, fmt, ap);
+ JniAbort(jni_function_name, msg.c_str());
+}
+
+void JavaVMExt::JniAbortF(const char* jni_function_name, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ JniAbortV(jni_function_name, fmt, args);
+ va_end(args);
+}
+
+bool JavaVMExt::ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Fast where no tracing is enabled.
+ if (trace_.empty() && !VLOG_IS_ON(third_party_jni)) {
+ return false;
+ }
+ // Perform checks based on class name.
+ StringPiece class_name(method->GetDeclaringClassDescriptor());
+ if (!trace_.empty() && class_name.find(trace_) != std::string::npos) {
+ return true;
+ }
+ if (!VLOG_IS_ON(third_party_jni)) {
+ return false;
+ }
+ // Return true if we're trying to log all third-party JNI activity and 'method' doesn't look
+ // like part of Android.
+ static const char* gBuiltInPrefixes[] = {
+ "Landroid/",
+ "Lcom/android/",
+ "Lcom/google/android/",
+ "Ldalvik/",
+ "Ljava/",
+ "Ljavax/",
+ "Llibcore/",
+ "Lorg/apache/harmony/",
+ };
+ for (size_t i = 0; i < arraysize(gBuiltInPrefixes); ++i) {
+ if (class_name.starts_with(gBuiltInPrefixes[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+jobject JavaVMExt::AddGlobalRef(Thread* self, mirror::Object* obj) {
+ // Check for null after decoding the object to handle cleared weak globals.
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ WriterMutexLock mu(self, globals_lock_);
+ IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
+ return reinterpret_cast<jobject>(ref);
+}
+
+jweak JavaVMExt::AddWeakGlobalRef(Thread* self, mirror::Object* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ MutexLock mu(self, weak_globals_lock_);
+ while (UNLIKELY(!allow_new_weak_globals_)) {
+ weak_globals_add_condition_.WaitHoldingLocks(self);
+ }
+ IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
+ return reinterpret_cast<jweak>(ref);
+}
+
+void JavaVMExt::DeleteGlobalRef(Thread* self, jobject obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ WriterMutexLock mu(self, globals_lock_);
+ if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
+ << "failed to find entry";
+ }
+}
+
+void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ MutexLock mu(self, weak_globals_lock_);
+ if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
+ << "failed to find entry";
+ }
+}
+
+static void ThreadEnableCheckJni(Thread* thread, void* arg) {
+ bool* check_jni = reinterpret_cast<bool*>(arg);
+ thread->GetJniEnv()->SetCheckJniEnabled(*check_jni);
+}
+
+bool JavaVMExt::SetCheckJniEnabled(bool enabled) {
+ bool old_check_jni = check_jni_;
+ check_jni_ = enabled;
+ functions = enabled ? GetCheckJniInvokeInterface() : unchecked_functions_;
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ runtime_->GetThreadList()->ForEach(ThreadEnableCheckJni, &check_jni_);
+ return old_check_jni;
+}
+
+void JavaVMExt::DumpForSigQuit(std::ostream& os) {
+ os << "JNI: CheckJNI is " << (check_jni_ ? "on" : "off");
+ if (force_copy_) {
+ os << " (with forcecopy)";
+ }
+ Thread* self = Thread::Current();
+ {
+ MutexLock mu(self, pins_lock_);
+ os << "; pins=" << pin_table_.Size();
+ }
+ {
+ ReaderMutexLock mu(self, globals_lock_);
+ os << "; globals=" << globals_.Capacity();
+ }
+ {
+ MutexLock mu(self, weak_globals_lock_);
+ if (weak_globals_.Capacity() > 0) {
+ os << " (plus " << weak_globals_.Capacity() << " weak)";
+ }
+ }
+ os << '\n';
+
+ {
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ os << "Libraries: " << Dumpable<Libraries>(*libraries_) << " (" << libraries_->size() << ")\n";
+ }
+}
+
+void JavaVMExt::DisallowNewWeakGlobals() {
+ MutexLock mu(Thread::Current(), weak_globals_lock_);
+ allow_new_weak_globals_ = false;
+}
+
+void JavaVMExt::AllowNewWeakGlobals() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, weak_globals_lock_);
+ allow_new_weak_globals_ = true;
+ weak_globals_add_condition_.Broadcast(self);
+}
+
+mirror::Object* JavaVMExt::DecodeGlobal(Thread* self, IndirectRef ref) {
+ return globals_.SynchronizedGet(self, &globals_lock_, ref);
+}
+
+mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
+ MutexLock mu(self, weak_globals_lock_);
+ while (UNLIKELY(!allow_new_weak_globals_)) {
+ weak_globals_add_condition_.WaitHoldingLocks(self);
+ }
+ return weak_globals_.Get(ref);
+}
+
+void JavaVMExt::PinPrimitiveArray(Thread* self, mirror::Array* array) {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.Add(array);
+}
+
+void JavaVMExt::UnpinPrimitiveArray(Thread* self, mirror::Array* array) {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.Remove(array);
+}
+
+void JavaVMExt::DumpReferenceTables(std::ostream& os) {
+ Thread* self = Thread::Current();
+ {
+ ReaderMutexLock mu(self, globals_lock_);
+ globals_.Dump(os);
+ }
+ {
+ MutexLock mu(self, weak_globals_lock_);
+ weak_globals_.Dump(os);
+ }
+ {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.Dump(os);
+ }
+}
+
+bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
+ std::string* error_msg) {
+ error_msg->clear();
+
+ // See if we've already loaded this library. If we have, and the class loader
+ // matches, return successfully without doing anything.
+ // TODO: for better results we should canonicalize the pathname (or even compare
+ // inodes). This implementation is fine if everybody is using System.loadLibrary.
+ SharedLibrary* library;
+ Thread* self = Thread::Current();
+ {
+ // TODO: move the locking (and more of this logic) into Libraries.
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ library = libraries_->Get(path);
+ }
+ if (library != nullptr) {
+ if (env->IsSameObject(library->GetClassLoader(), class_loader) == JNI_FALSE) {
+ // The library will be associated with class_loader. The JNI
+ // spec says we can't load the same library into more than one
+ // class loader.
+ StringAppendF(error_msg, "Shared library \"%s\" already opened by "
+ "ClassLoader %p; can't open in ClassLoader %p",
+ path.c_str(), library->GetClassLoader(), class_loader);
+ LOG(WARNING) << error_msg;
+ return false;
+ }
+ VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
+ << " ClassLoader " << class_loader << "]";
+ if (!library->CheckOnLoadResult()) {
+ StringAppendF(error_msg, "JNI_OnLoad failed on a previous attempt "
+ "to load \"%s\"", path.c_str());
+ return false;
+ }
+ return true;
+ }
+
+ // Open the shared library. Because we're using a full path, the system
+ // doesn't have to search through LD_LIBRARY_PATH. (It may do so to
+ // resolve this library's dependencies though.)
+
+ // Failures here are expected when java.library.path has several entries
+ // and we have to hunt for the lib.
+
+ // Below we dlopen, but there is no paired dlclose; that would only be necessary if we supported
+ // class unloading. Libraries will only be unloaded when the reference count (incremented by
+ // dlopen) drops to zero via dlclose.
+
+ Locks::mutator_lock_->AssertNotHeld(self);
+ const char* path_str = path.empty() ? nullptr : path.c_str();
+ void* handle = dlopen(path_str, RTLD_LAZY);
+ bool needs_native_bridge = false;
+ if (handle == nullptr) {
+ if (NativeBridgeIsSupported(path_str)) {
+ handle = NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
+ needs_native_bridge = true;
+ }
+ }
+
+ VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
+
+ if (handle == nullptr) {
+ *error_msg = dlerror();
+ LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *error_msg;
+ return false;
+ }
+
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ LOG(ERROR) << "Unexpected exception:";
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ }
+ // Create a new entry.
+ // TODO: move the locking (and more of this logic) into Libraries.
+ bool created_library = false;
+ {
+ // Create SharedLibrary ahead of taking the libraries lock to maintain lock ordering.
+ std::unique_ptr<SharedLibrary> new_library(
+ new SharedLibrary(env, self, path, handle, class_loader));
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ library = libraries_->Get(path);
+ if (library == nullptr) { // We won the race to acquire libraries_lock.
+ library = new_library.release();
+ libraries_->Put(path, library);
+ created_library = true;
+ }
+ }
+ if (!created_library) {
+ LOG(INFO) << "WOW: we lost a race to add shared library: "
+ << "\"" << path << "\" ClassLoader=" << class_loader;
+ return library->CheckOnLoadResult();
+ }
+ VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]";
+
+ bool was_successful = false;
+ void* sym;
+ if (needs_native_bridge) {
+ library->SetNeedsNativeBridge();
+ sym = library->FindSymbolWithNativeBridge("JNI_OnLoad", nullptr);
+ } else {
+ sym = dlsym(handle, "JNI_OnLoad");
+ }
+ if (sym == nullptr) {
+ VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
+ was_successful = true;
+ } else {
+ // Call JNI_OnLoad. We have to override the current class
+ // loader, which will always be "null" since the stuff at the
+ // top of the stack is around Runtime.loadLibrary(). (See
+ // the comments in the JNI FindClass function.)
+ ScopedLocalRef<jobject> old_class_loader(env, env->NewLocalRef(self->GetClassLoaderOverride()));
+ self->SetClassLoaderOverride(class_loader);
+
+ VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
+ typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
+ JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
+ int version = (*jni_on_load)(this, nullptr);
+
+ self->SetClassLoaderOverride(old_class_loader.get());
+
+ if (version == JNI_ERR) {
+ StringAppendF(error_msg, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
+ } else if (IsBadJniVersion(version)) {
+ StringAppendF(error_msg, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
+ path.c_str(), version);
+ // It's unwise to call dlclose() here, but we can mark it
+ // as bad and ensure that future load attempts will fail.
+ // We don't know how far JNI_OnLoad got, so there could
+ // be some partially-initialized stuff accessible through
+ // newly-registered native method calls. We could try to
+ // unregister them, but that doesn't seem worthwhile.
+ } else {
+ was_successful = true;
+ }
+ VLOG(jni) << "[Returned " << (was_successful ? "successfully" : "failure")
+ << " from JNI_OnLoad in \"" << path << "\"]";
+ }
+
+ library->SetResult(was_successful);
+ return was_successful;
+}
+
+void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
+ CHECK(m->IsNative());
+ mirror::Class* c = m->GetDeclaringClass();
+ // If this is a static method, it could be called before the class has been initialized.
+ CHECK(c->IsInitializing()) << c->GetStatus() << " " << PrettyMethod(m);
+ std::string detail;
+ void* native_method;
+ Thread* self = Thread::Current();
+ {
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ native_method = libraries_->FindNativeMethod(m, detail);
+ }
+ // Throwing can cause libraries_lock to be reacquired.
+ if (native_method == nullptr) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
+ }
+ return native_method;
+}
+
+void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
+ MutexLock mu(Thread::Current(), weak_globals_lock_);
+ for (mirror::Object** entry : weak_globals_) {
+ // Since this is called by the GC, we don't need a read barrier.
+ mirror::Object* obj = *entry;
+ mirror::Object* new_obj = callback(obj, arg);
+ if (new_obj == nullptr) {
+ new_obj = kClearedJniWeakGlobal;
+ }
+ *entry = new_obj;
+ }
+}
+
+void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
+ Thread* self = Thread::Current();
+ {
+ ReaderMutexLock mu(self, globals_lock_);
+ globals_.VisitRoots(callback, arg, 0, kRootJNIGlobal);
+ }
+ {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.VisitRoots(callback, arg, 0, kRootVMInternal);
+ }
+ // The weak_globals table is visited by the GC itself (because it mutates the table).
+}
+
+// JNI Invocation interface.
+
+extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
+ const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args);
+ if (IsBadJniVersion(args->version)) {
+ LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version;
+ return JNI_EVERSION;
+ }
+ RuntimeOptions options;
+ for (int i = 0; i < args->nOptions; ++i) {
+ JavaVMOption* option = &args->options[i];
+ options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo));
+ }
+ bool ignore_unrecognized = args->ignoreUnrecognized;
+ if (!Runtime::Create(options, ignore_unrecognized)) {
+ return JNI_ERR;
+ }
+ Runtime* runtime = Runtime::Current();
+ bool started = runtime->Start();
+ if (!started) {
+ delete Thread::Current()->GetJniEnv();
+ delete runtime->GetJavaVM();
+ LOG(WARNING) << "CreateJavaVM failed";
+ return JNI_ERR;
+ }
+ *p_env = Thread::Current()->GetJniEnv();
+ *p_vm = runtime->GetJavaVM();
+ return JNI_OK;
+}
+
+extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
+ Runtime* runtime = Runtime::Current();
+ if (runtime == nullptr) {
+ *vm_count = 0;
+ } else {
+ *vm_count = 1;
+ vms[0] = runtime->GetJavaVM();
+ }
+ return JNI_OK;
+}
+
+// Historically unsupported.
+extern "C" jint JNI_GetDefaultJavaVMInitArgs(void* /*vm_args*/) {
+ return JNI_ERR;
+}
+
+} // namespace art
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
new file mode 100644
index 0000000..da0b8e3
--- /dev/null
+++ b/runtime/java_vm_ext.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JAVA_VM_EXT_H_
+#define ART_RUNTIME_JAVA_VM_EXT_H_
+
+#include "jni.h"
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "indirect_reference_table.h"
+#include "reference_table.h"
+
+namespace art {
+
+namespace mirror {
+ class ArtMethod;
+ class Array;
+} // namespace mirror
+
+class Libraries;
+class ParsedOptions;
+class Runtime;
+
+class JavaVMExt : public JavaVM {
+ public:
+ JavaVMExt(Runtime* runtime, ParsedOptions* options);
+ ~JavaVMExt();
+
+ bool ForceCopy() const {
+ return force_copy_;
+ }
+
+ bool IsCheckJniEnabled() const {
+ return check_jni_;
+ }
+
+ bool IsTracingEnabled() const {
+ return tracing_enabled_;
+ }
+
+ Runtime* GetRuntime() const {
+ return runtime_;
+ }
+
+ void SetCheckJniAbortHook(void (*hook)(void*, const std::string&), void* data) {
+ check_jni_abort_hook_ = hook;
+ check_jni_abort_hook_data_ = data;
+ }
+
+ // Aborts execution unless there is an abort handler installed, in which case it will return.
+ // It's therefore important that callers return after aborting, as otherwise code following the
+ // abort will be executed in the abort handler case.
+ void JniAbort(const char* jni_function_name, const char* msg);
+
+ void JniAbortV(const char* jni_function_name, const char* fmt, va_list ap);
+
+ void JniAbortF(const char* jni_function_name, const char* fmt, ...)
+ __attribute__((__format__(__printf__, 3, 4)));
+
+ // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages
+ // when a native method that matches the -Xjnitrace argument calls a JNI function
+ // such as NewByteArray.
+ // If -verbose:third-party-jni is on, we want to log any JNI function calls
+ // made by a third-party native method.
+ bool ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /**
+ * Loads the given shared library. 'path' is an absolute pathname.
+ *
+ * Returns 'true' on success. On failure, sets 'error_msg' to a
+ * human-readable description of the error.
+ */
+ bool LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
+ std::string* error_msg);
+
+ /**
+ * Returns a pointer to the code for the native method 'm', found
+ * using dlsym(3) on every native library that's been loaded so far.
+ */
+ void* FindCodeForNativeMethod(mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DumpForSigQuit(std::ostream& os)
+ LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_, pins_lock_);
+
+ void DumpReferenceTables(std::ostream& os)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool SetCheckJniEnabled(bool enabled);
+
+ void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ jobject AddGlobalRef(Thread* self, mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DeleteGlobalRef(Thread* self, jobject obj);
+
+ void DeleteWeakGlobalRef(Thread* self, jweak obj);
+
+ void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void PinPrimitiveArray(Thread* self, mirror::Array* array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(pins_lock_);
+
+ void UnpinPrimitiveArray(Thread* self, mirror::Array* array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(pins_lock_);
+
+ const JNIInvokeInterface* GetUncheckedFunctions() const {
+ return unchecked_functions_;
+ }
+
+ private:
+ Runtime* const runtime_;
+
+ // Used for testing. By default, we'll LOG(FATAL) the reason.
+ void (*check_jni_abort_hook_)(void* data, const std::string& reason);
+ void* check_jni_abort_hook_data_;
+
+ // Extra checking.
+ bool check_jni_;
+ bool force_copy_;
+ const bool tracing_enabled_;
+
+ // Extra diagnostics.
+ const std::string trace_;
+
+ // Used to hold references to pinned primitive arrays.
+ Mutex pins_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ ReferenceTable pin_table_ GUARDED_BY(pins_lock_);
+
+ // JNI global references.
+ ReaderWriterMutex globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
+ IndirectReferenceTable globals_;
+
+ std::unique_ptr<Libraries> libraries_ GUARDED_BY(Locks::jni_libraries_lock_);
+
+ // Used by -Xcheck:jni.
+ const JNIInvokeInterface* const unchecked_functions_;
+
+ // JNI weak global references.
+ Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Since weak_globals_ contains weak roots, be careful not to
+ // directly access the object references in it. Use Get() with the
+ // read barrier enabled.
+ IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
+ bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
+ ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(JavaVMExt);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JAVA_VM_EXT_H_
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 325b089..8fd07cc 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -339,7 +339,7 @@
ConditionVariable attach_cond_ GUARDED_BY(attach_lock_);
// Time of last debugger activity, in milliseconds.
- int64_t last_activity_time_ms_;
+ Atomic<int64_t> last_activity_time_ms_;
// Global counters and a mutex to protect them.
AtomicInteger request_serial_;
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 05bfe0d..b9379f5 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1671,7 +1671,7 @@
* so waitForDebugger() doesn't return if we stall for a bit here.
*/
Dbg::GoActive();
- QuasiAtomic::Write64(&last_activity_time_ms_, 0);
+ last_activity_time_ms_.StoreSequentiallyConsistent(0);
}
/*
@@ -1751,7 +1751,7 @@
* the initial setup. Only update if this is a non-DDMS packet.
*/
if (request.GetCommandSet() != kJDWPDdmCmdSet) {
- QuasiAtomic::Write64(&last_activity_time_ms_, MilliTime());
+ last_activity_time_ms_.StoreSequentiallyConsistent(MilliTime());
}
/* tell the VM that GC is okay again */
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 64e9f37..7795b7c 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -577,7 +577,7 @@
return -1;
}
- int64_t last = QuasiAtomic::Read64(&last_activity_time_ms_);
+ int64_t last = last_activity_time_ms_.LoadSequentiallyConsistent();
/* initializing or in the middle of something? */
if (last == 0) {
diff --git a/runtime/jni_internal-inl.h b/runtime/jni_env_ext-inl.h
similarity index 89%
rename from runtime/jni_internal-inl.h
rename to runtime/jni_env_ext-inl.h
index 6cf9a61..dc6a3e8 100644
--- a/runtime/jni_internal-inl.h
+++ b/runtime/jni_env_ext-inl.h
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_JNI_INTERNAL_INL_H_
-#define ART_RUNTIME_JNI_INTERNAL_INL_H_
+#ifndef ART_RUNTIME_JNI_ENV_EXT_INL_H_
+#define ART_RUNTIME_JNI_ENV_EXT_INL_H_
-#include "jni_internal.h"
+#include "jni_env_ext.h"
#include "utils.h"
@@ -44,4 +44,4 @@
} // namespace art
-#endif // ART_RUNTIME_JNI_INTERNAL_INL_H_
+#endif // ART_RUNTIME_JNI_ENV_EXT_INL_H_
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
new file mode 100644
index 0000000..180e3d7
--- /dev/null
+++ b/runtime/jni_env_ext.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_env_ext.h"
+
+#include "check_jni.h"
+#include "indirect_reference_table.h"
+#include "java_vm_ext.h"
+#include "jni_internal.h"
+
+namespace art {
+
+static constexpr size_t kMonitorsInitial = 32; // Arbitrary.
+static constexpr size_t kMonitorsMax = 4096; // Arbitrary sanity check.
+
+static constexpr size_t kLocalsInitial = 64; // Arbitrary.
+
+JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
+ : self(self),
+ vm(vm),
+ local_ref_cookie(IRT_FIRST_SEGMENT),
+ locals(kLocalsInitial, kLocalsMax, kLocal),
+ check_jni(false),
+ critical(0),
+ monitors("monitors", kMonitorsInitial, kMonitorsMax) {
+ functions = unchecked_functions = GetJniNativeInterface();
+ if (vm->IsCheckJniEnabled()) {
+ SetCheckJniEnabled(true);
+ }
+}
+
+JNIEnvExt::~JNIEnvExt() {
+}
+
+jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
+}
+
+void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (obj != nullptr) {
+ locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
+ }
+}
+
+void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
+ check_jni = enabled;
+ functions = enabled ? GetCheckJniNativeInterface() : GetJniNativeInterface();
+}
+
+void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
+ locals.Dump(os);
+ monitors.Dump(os);
+}
+
+void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(capacity); // cpplint gets confused with (int) and thinks it's a cast.
+ // TODO: take 'capacity' into account.
+ stacked_local_ref_cookies.push_back(local_ref_cookie);
+ local_ref_cookie = locals.GetSegmentState();
+}
+
+void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ locals.SetSegmentState(local_ref_cookie);
+ local_ref_cookie = stacked_local_ref_cookies.back();
+ stacked_local_ref_cookies.pop_back();
+}
+
+Offset JNIEnvExt::SegmentStateOffset() {
+ return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
+ IndirectReferenceTable::SegmentStateOffset().Int32Value());
+}
+
+} // namespace art
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
new file mode 100644
index 0000000..af87cb4
--- /dev/null
+++ b/runtime/jni_env_ext.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JNI_ENV_EXT_H_
+#define ART_RUNTIME_JNI_ENV_EXT_H_
+
+#include <jni.h>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "indirect_reference_table.h"
+#include "object_callbacks.h"
+#include "reference_table.h"
+
+namespace art {
+
+class JavaVMExt;
+
+// Maximum number of local references in the indirect reference table. The value is arbitrary but
+// low enough to serve as a sanity check.
+static constexpr size_t kLocalsMax = 512;
+
+struct JNIEnvExt : public JNIEnv {
+ JNIEnvExt(Thread* self, JavaVMExt* vm);
+ ~JNIEnvExt();
+
+ void DumpReferenceTables(std::ostream& os)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetCheckJniEnabled(bool enabled);
+
+ void PushFrame(int capacity);
+ void PopFrame();
+
+ template<typename T>
+ T AddLocalReference(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static Offset SegmentStateOffset();
+
+ static Offset LocalRefCookieOffset() {
+ return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
+ }
+
+ static Offset SelfOffset() {
+ return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
+ }
+
+ jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Thread* const self;
+ JavaVMExt* const vm;
+
+ // Cookie used when using the local indirect reference table.
+ uint32_t local_ref_cookie;
+
+ // JNI local references.
+ IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
+
+ // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
+ // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
+ // to a native method.
+ std::vector<uint32_t> stacked_local_ref_cookies;
+
+ // Frequently-accessed fields cached from JavaVM.
+ bool check_jni;
+
+ // How many nested "critical" JNI calls are we in?
+ int critical;
+
+ // Entered JNI monitors, for bulk exit on thread detach.
+ ReferenceTable monitors;
+
+ // Used by -Xcheck:jni.
+ const JNINativeInterface* unchecked_functions;
+};
+
+// Used to save and restore the JNIEnvExt state when not going through code created by the JNI
+// compiler.
+class ScopedJniEnvLocalRefState {
+ public:
+ explicit ScopedJniEnvLocalRefState(JNIEnvExt* env) : env_(env) {
+ saved_local_ref_cookie_ = env->local_ref_cookie;
+ env->local_ref_cookie = env->locals.GetSegmentState();
+ }
+
+ ~ScopedJniEnvLocalRefState() {
+ env_->locals.SetSegmentState(env_->local_ref_cookie);
+ env_->local_ref_cookie = saved_local_ref_cookie_;
+ }
+
+ private:
+ JNIEnvExt* const env_;
+ uint32_t saved_local_ref_cookie_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JNI_ENV_EXT_H_
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f9c7ec6..d5e92a4 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -29,10 +29,12 @@
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
+#include "gc_root.h"
#include "gc/accounting/card_table-inl.h"
#include "indirect_reference_table-inl.h"
#include "interpreter/interpreter.h"
-#include "jni.h"
+#include "jni_env_ext.h"
+#include "java_vm_ext.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -41,6 +43,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
+#include "native_bridge.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
@@ -53,31 +56,6 @@
namespace art {
-static const size_t kMonitorsInitial = 32; // Arbitrary.
-static const size_t kMonitorsMax = 4096; // Arbitrary sanity check.
-
-static const size_t kLocalsInitial = 64; // Arbitrary.
-static const size_t kLocalsMax = 512; // Arbitrary sanity check.
-
-static const size_t kPinTableInitial = 16; // Arbitrary.
-static const size_t kPinTableMax = 1024; // Arbitrary sanity check.
-
-static size_t gGlobalsInitial = 512; // Arbitrary.
-static size_t gGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
-
-static const size_t kWeakGlobalsInitial = 16; // Arbitrary.
-static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
-
-static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return soa.Vm()->AddWeakGlobalReference(soa.Self(), obj);
-}
-
-static bool IsBadJniVersion(int version) {
- // We don't support JNI_VERSION_1_1. These are the only other valid versions.
- return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
-}
-
// Section 12.3.2 of the JNI spec describes JNI class descriptors. They're
// separated with slashes but aren't wrapped with "L;" like regular descriptors
// (i.e. "a/b/C" rather than "La/b/C;"). Arrays of reference types are an
@@ -167,7 +145,7 @@
mirror::ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
// If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
- return soa.Self()->GetClassLoaderOverride();
+ return soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
}
// If we have a method, use its ClassLoader for context.
if (method != nullptr) {
@@ -180,7 +158,7 @@
return class_loader;
}
// See if the override ClassLoader is set for gtests.
- class_loader = soa.Self()->GetClassLoaderOverride();
+ class_loader = soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
if (class_loader != nullptr) {
// If so, CommonCompilerTest should have set UseCompileTimeClassPath.
CHECK(Runtime::Current()->UseCompileTimeClassPath());
@@ -238,20 +216,6 @@
return soa.EncodeField(field);
}
-static void PinPrimitiveArray(const ScopedObjectAccess& soa, mirror::Array* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- JavaVMExt* vm = soa.Vm();
- MutexLock mu(soa.Self(), vm->pins_lock);
- vm->pin_table.Add(array);
-}
-
-static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, mirror::Array* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- JavaVMExt* vm = soa.Vm();
- MutexLock mu(soa.Self(), vm->pins_lock);
- vm->pin_table.Remove(array);
-}
-
static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
jsize length, const char* identifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -314,224 +278,10 @@
return JNI_OK;
}
-static jint JII_AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) {
- if (vm == nullptr || p_env == nullptr) {
- return JNI_ERR;
- }
-
- // Return immediately if we're already attached.
- Thread* self = Thread::Current();
- if (self != nullptr) {
- *p_env = self->GetJniEnv();
- return JNI_OK;
- }
-
- Runtime* runtime = reinterpret_cast<JavaVMExt*>(vm)->runtime;
-
- // No threads allowed in zygote mode.
- if (runtime->IsZygote()) {
- LOG(ERROR) << "Attempt to attach a thread in the zygote";
- return JNI_ERR;
- }
-
- JavaVMAttachArgs* args = static_cast<JavaVMAttachArgs*>(raw_args);
- const char* thread_name = nullptr;
- jobject thread_group = nullptr;
- if (args != nullptr) {
- if (IsBadJniVersion(args->version)) {
- LOG(ERROR) << "Bad JNI version passed to "
- << (as_daemon ? "AttachCurrentThreadAsDaemon" : "AttachCurrentThread") << ": "
- << args->version;
- return JNI_EVERSION;
- }
- thread_name = args->name;
- thread_group = args->group;
- }
-
- if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
- *p_env = nullptr;
- return JNI_ERR;
- } else {
- *p_env = Thread::Current()->GetJniEnv();
- return JNI_OK;
- }
+static JavaVMExt* JavaVmExtFromEnv(JNIEnv* env) {
+ return reinterpret_cast<JNIEnvExt*>(env)->vm;
}
-class SharedLibrary {
- public:
- SharedLibrary(const std::string& path, void* handle, mirror::Object* class_loader)
- : path_(path),
- handle_(handle),
- class_loader_(class_loader),
- jni_on_load_lock_("JNI_OnLoad lock"),
- jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
- jni_on_load_thread_id_(Thread::Current()->GetThreadId()),
- jni_on_load_result_(kPending) {
- }
-
- mirror::Object* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object** root = &class_loader_;
- return ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
- }
-
- std::string GetPath() {
- return path_;
- }
-
- /*
- * Check the result of an earlier call to JNI_OnLoad on this library.
- * If the call has not yet finished in another thread, wait for it.
- */
- bool CheckOnLoadResult()
- LOCKS_EXCLUDED(jni_on_load_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad);
- bool okay;
- {
- MutexLock mu(self, jni_on_load_lock_);
-
- if (jni_on_load_thread_id_ == self->GetThreadId()) {
- // Check this so we don't end up waiting for ourselves. We need to return "true" so the
- // caller can continue.
- LOG(INFO) << *self << " recursive attempt to load library " << "\"" << path_ << "\"";
- okay = true;
- } else {
- while (jni_on_load_result_ == kPending) {
- VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " << "JNI_OnLoad...]";
- jni_on_load_cond_.Wait(self);
- }
-
- okay = (jni_on_load_result_ == kOkay);
- VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" "
- << (okay ? "succeeded" : "failed") << "]";
- }
- }
- self->TransitionFromSuspendedToRunnable();
- return okay;
- }
-
- void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
- Thread* self = Thread::Current();
- MutexLock mu(self, jni_on_load_lock_);
-
- jni_on_load_result_ = result ? kOkay : kFailed;
- jni_on_load_thread_id_ = 0;
-
- // Broadcast a wakeup to anybody sleeping on the condition variable.
- jni_on_load_cond_.Broadcast(self);
- }
-
- void* FindSymbol(const std::string& symbol_name) {
- return dlsym(handle_, symbol_name.c_str());
- }
-
- void VisitRoots(RootCallback* visitor, void* arg) {
- if (class_loader_ != nullptr) {
- visitor(&class_loader_, arg, 0, kRootVMInternal);
- }
- }
-
- private:
- enum JNI_OnLoadState {
- kPending,
- kFailed,
- kOkay,
- };
-
- // Path to library "/system/lib/libjni.so".
- std::string path_;
-
- // The void* returned by dlopen(3).
- void* handle_;
-
- // The ClassLoader this library is associated with.
- mirror::Object* class_loader_;
-
- // Guards remaining items.
- Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // Wait for JNI_OnLoad in other thread.
- ConditionVariable jni_on_load_cond_ GUARDED_BY(jni_on_load_lock_);
- // Recursive invocation guard.
- uint32_t jni_on_load_thread_id_ GUARDED_BY(jni_on_load_lock_);
- // Result of earlier JNI_OnLoad call.
- JNI_OnLoadState jni_on_load_result_ GUARDED_BY(jni_on_load_lock_);
-};
-
-// This exists mainly to keep implementation details out of the header file.
-class Libraries {
- public:
- Libraries() {
- }
-
- ~Libraries() {
- STLDeleteValues(&libraries_);
- }
-
- void Dump(std::ostream& os) const {
- bool first = true;
- for (const auto& library : libraries_) {
- if (!first) {
- os << ' ';
- }
- first = false;
- os << library.first;
- }
- }
-
- size_t size() const {
- return libraries_.size();
- }
-
- SharedLibrary* Get(const std::string& path) {
- auto it = libraries_.find(path);
- return (it == libraries_.end()) ? nullptr : it->second;
- }
-
- void Put(const std::string& path, SharedLibrary* library) {
- libraries_.Put(path, library);
- }
-
- // See section 11.3 "Linking Native Methods" of the JNI spec.
- void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string jni_short_name(JniShortName(m));
- std::string jni_long_name(JniLongName(m));
- const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
- for (const auto& lib : libraries_) {
- SharedLibrary* library = lib.second;
- if (library->GetClassLoader() != declaring_class_loader) {
- // We only search libraries loaded by the appropriate ClassLoader.
- continue;
- }
- // Try the short name then the long name...
- void* fn = library->FindSymbol(jni_short_name);
- if (fn == nullptr) {
- fn = library->FindSymbol(jni_long_name);
- }
- if (fn != nullptr) {
- VLOG(jni) << "[Found native code for " << PrettyMethod(m)
- << " in \"" << library->GetPath() << "\"]";
- return fn;
- }
- }
- detail += "No implementation found for ";
- detail += PrettyMethod(m);
- detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
- LOG(ERROR) << detail;
- return nullptr;
- }
-
- void VisitRoots(RootCallback* callback, void* arg) {
- for (auto& lib_pair : libraries_) {
- lib_pair.second->VisitRoots(callback, arg);
- }
- }
-
- private:
- SafeMap<std::string, SharedLibrary*> libraries_;
-};
-
#define CHECK_NON_NULL_ARGUMENT(value) \
CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, nullptr)
@@ -546,13 +296,13 @@
#define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value, return_val) \
if (UNLIKELY(value == nullptr)) { \
- JniAbortF(name, #value " == null"); \
+ JavaVmExtFromEnv(env)->JniAbortF(name, #value " == null"); \
return return_val; \
}
#define CHECK_NON_NULL_MEMCPY_ARGUMENT(length, value) \
if (UNLIKELY(length != 0 && value == nullptr)) { \
- JniAbortF(__FUNCTION__, #value " == null"); \
+ JavaVmExtFromEnv(env)->JniAbortF(__FUNCTION__, #value " == null"); \
return; \
}
@@ -644,13 +394,15 @@
return soa.AddLocalReference<jclass>(c->GetSuperClass());
}
+ // Note: java_class1 should be safely castable to java_class2, and
+ // not the other way around.
static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
CHECK_NON_NULL_ARGUMENT_RETURN(java_class1, JNI_FALSE);
CHECK_NON_NULL_ARGUMENT_RETURN(java_class2, JNI_FALSE);
ScopedObjectAccess soa(env);
mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
- return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE;
+ return c2->IsAssignableFrom(c1) ? JNI_TRUE : JNI_FALSE;
}
static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
@@ -751,10 +503,10 @@
static jint PushLocalFrame(JNIEnv* env, jint capacity) {
// TODO: SOA may not be necessary but I do it to please lock annotations.
ScopedObjectAccess soa(env);
- if (EnsureLocalCapacity(soa, capacity, "PushLocalFrame") != JNI_OK) {
+ if (EnsureLocalCapacityInternal(soa, capacity, "PushLocalFrame") != JNI_OK) {
return JNI_ERR;
}
- static_cast<JNIEnvExt*>(env)->PushFrame(capacity);
+ down_cast<JNIEnvExt*>(env)->PushFrame(capacity);
return JNI_OK;
}
@@ -768,48 +520,31 @@
static jint EnsureLocalCapacity(JNIEnv* env, jint desired_capacity) {
// TODO: SOA may not be necessary but I do it to please lock annotations.
ScopedObjectAccess soa(env);
- return EnsureLocalCapacity(soa, desired_capacity, "EnsureLocalCapacity");
+ return EnsureLocalCapacityInternal(soa, desired_capacity, "EnsureLocalCapacity");
}
static jobject NewGlobalRef(JNIEnv* env, jobject obj) {
ScopedObjectAccess soa(env);
mirror::Object* decoded_obj = soa.Decode<mirror::Object*>(obj);
- // Check for null after decoding the object to handle cleared weak globals.
- if (decoded_obj == nullptr) {
- return nullptr;
- }
- JavaVMExt* vm = soa.Vm();
- IndirectReferenceTable& globals = vm->globals;
- WriterMutexLock mu(soa.Self(), vm->globals_lock);
- IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, decoded_obj);
- return reinterpret_cast<jobject>(ref);
+ return soa.Vm()->AddGlobalRef(soa.Self(), decoded_obj);
}
static void DeleteGlobalRef(JNIEnv* env, jobject obj) {
- if (obj == nullptr) {
- return;
- }
- JavaVMExt* vm = reinterpret_cast<JNIEnvExt*>(env)->vm;
- IndirectReferenceTable& globals = vm->globals;
- Thread* self = reinterpret_cast<JNIEnvExt*>(env)->self;
- WriterMutexLock mu(self, vm->globals_lock);
-
- if (!globals.Remove(IRT_FIRST_SEGMENT, obj)) {
- LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
- << "failed to find entry";
- }
+ JavaVMExt* vm = down_cast<JNIEnvExt*>(env)->vm;
+ Thread* self = down_cast<JNIEnvExt*>(env)->self;
+ vm->DeleteGlobalRef(self, obj);
}
static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
ScopedObjectAccess soa(env);
- return AddWeakGlobalReference(soa, soa.Decode<mirror::Object*>(obj));
+ mirror::Object* decoded_obj = soa.Decode<mirror::Object*>(obj);
+ return soa.Vm()->AddWeakGlobalRef(soa.Self(), decoded_obj);
}
static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
- if (obj != nullptr) {
- ScopedObjectAccess soa(env);
- soa.Vm()->DeleteWeakGlobalRef(soa.Self(), obj);
- }
+ JavaVMExt* vm = down_cast<JNIEnvExt*>(env)->vm;
+ Thread* self = down_cast<JNIEnvExt*>(env)->self;
+ vm->DeleteWeakGlobalRef(self, obj);
}
static jobject NewLocalRef(JNIEnv* env, jobject obj) {
@@ -826,7 +561,6 @@
if (obj == nullptr) {
return;
}
- ScopedObjectAccess soa(env);
IndirectReferenceTable& locals = reinterpret_cast<JNIEnvExt*>(env)->locals;
uint32_t cookie = reinterpret_cast<JNIEnvExt*>(env)->local_ref_cookie;
@@ -1892,11 +1626,11 @@
static jstring NewString(JNIEnv* env, const jchar* chars, jsize char_count) {
if (UNLIKELY(char_count < 0)) {
- JniAbortF("NewString", "char_count < 0: %d", char_count);
+ JavaVmExtFromEnv(env)->JniAbortF("NewString", "char_count < 0: %d", char_count);
return nullptr;
}
if (UNLIKELY(chars == nullptr && char_count > 0)) {
- JniAbortF("NewString", "chars == null && char_count > 0");
+ JavaVmExtFromEnv(env)->JniAbortF("NewString", "chars == null && char_count > 0");
return nullptr;
}
ScopedObjectAccess soa(env);
@@ -1958,7 +1692,7 @@
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
mirror::CharArray* chars = s->GetCharArray();
- PinPrimitiveArray(soa, chars);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(chars)) {
if (is_copy != nullptr) {
@@ -1987,7 +1721,7 @@
if (chars != (s_chars->GetData() + s->GetOffset())) {
delete[] chars;
}
- UnpinPrimitiveArray(soa, s->GetCharArray());
+ soa.Vm()->UnpinPrimitiveArray(soa.Self(), s->GetCharArray());
}
static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* is_copy) {
@@ -1996,7 +1730,7 @@
mirror::String* s = soa.Decode<mirror::String*>(java_string);
mirror::CharArray* chars = s->GetCharArray();
int32_t offset = s->GetOffset();
- PinPrimitiveArray(soa, chars);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(chars)) {
StackHandleScope<1> hs(soa.Self());
@@ -2012,7 +1746,8 @@
static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
- UnpinPrimitiveArray(soa, soa.Decode<mirror::String*>(java_string)->GetCharArray());
+ soa.Vm()->UnpinPrimitiveArray(soa.Self(),
+ soa.Decode<mirror::String*>(java_string)->GetCharArray());
gc::Heap* heap = Runtime::Current()->GetHeap();
mirror::String* s = soa.Decode<mirror::String*>(java_string);
mirror::CharArray* s_chars = s->GetCharArray();
@@ -2048,7 +1783,8 @@
ScopedObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(java_array);
if (UNLIKELY(!obj->IsArrayInstance())) {
- JniAbortF("GetArrayLength", "not an array: %s", PrettyTypeOf(obj).c_str());
+ soa.Vm()->JniAbortF("GetArrayLength", "not an array: %s", PrettyTypeOf(obj).c_str());
+ return 0;
}
mirror::Array* array = obj->AsArray();
return array->GetLength();
@@ -2103,7 +1839,7 @@
static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_jclass,
jobject initial_element) {
if (UNLIKELY(length < 0)) {
- JniAbortF("NewObjectArray", "negative array length: %d", length);
+ JavaVmExtFromEnv(env)->JniAbortF("NewObjectArray", "negative array length: %d", length);
return nullptr;
}
CHECK_NON_NULL_ARGUMENT(element_jclass);
@@ -2114,8 +1850,8 @@
{
mirror::Class* element_class = soa.Decode<mirror::Class*>(element_jclass);
if (UNLIKELY(element_class->IsPrimitive())) {
- JniAbortF("NewObjectArray", "not an object type: %s",
- PrettyDescriptor(element_class).c_str());
+ soa.Vm()->JniAbortF("NewObjectArray", "not an object type: %s",
+ PrettyDescriptor(element_class).c_str());
return nullptr;
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -2133,10 +1869,11 @@
if (initial_object != nullptr) {
mirror::Class* element_class = result->GetClass()->GetComponentType();
if (UNLIKELY(!element_class->IsAssignableFrom(initial_object->GetClass()))) {
- JniAbortF("NewObjectArray", "cannot assign object of type '%s' to array with element "
- "type of '%s'", PrettyDescriptor(initial_object->GetClass()).c_str(),
- PrettyDescriptor(element_class).c_str());
-
+ soa.Vm()->JniAbortF("NewObjectArray", "cannot assign object of type '%s' to array with "
+ "element type of '%s'",
+ PrettyDescriptor(initial_object->GetClass()).c_str(),
+ PrettyDescriptor(element_class).c_str());
+ return nullptr;
} else {
for (jsize i = 0; i < length; ++i) {
result->SetWithoutChecks<false>(i, initial_object);
@@ -2156,8 +1893,8 @@
ScopedObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
- JniAbortF("GetPrimitiveArrayCritical", "expected primitive array, given %s",
- PrettyDescriptor(array->GetClass()).c_str());
+ soa.Vm()->JniAbortF("GetPrimitiveArrayCritical", "expected primitive array, given %s",
+ PrettyDescriptor(array->GetClass()).c_str());
return nullptr;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -2166,7 +1903,7 @@
// Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
array = soa.Decode<mirror::Array*>(java_array);
}
- PinPrimitiveArray(soa, array);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), array);
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
}
@@ -2179,8 +1916,8 @@
ScopedObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
- JniAbortF("ReleasePrimitiveArrayCritical", "expected primitive array, given %s",
- PrettyDescriptor(array->GetClass()).c_str());
+ soa.Vm()->JniAbortF("ReleasePrimitiveArrayCritical", "expected primitive array, given %s",
+ PrettyDescriptor(array->GetClass()).c_str());
return;
}
const size_t component_size = array->GetClass()->GetComponentSize();
@@ -2352,8 +2089,9 @@
static jint RegisterNativeMethods(JNIEnv* env, jclass java_class, const JNINativeMethod* methods,
jint method_count, bool return_errors) {
if (UNLIKELY(method_count < 0)) {
- JniAbortF("RegisterNatives", "negative method count: %d", method_count);
- return JNI_ERR; // Not reached.
+ JavaVmExtFromEnv(env)->JniAbortF("RegisterNatives", "negative method count: %d",
+ method_count);
+ return JNI_ERR; // Not reached except in unit tests.
}
CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class, JNI_ERR);
ScopedObjectAccess soa(env);
@@ -2477,17 +2215,21 @@
static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
if (capacity < 0) {
- JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %" PRId64, capacity);
+ JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %" PRId64,
+ capacity);
return nullptr;
}
if (address == nullptr && capacity != 0) {
- JniAbortF("NewDirectByteBuffer", "non-zero capacity for nullptr pointer: %" PRId64, capacity);
+ JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer",
+ "non-zero capacity for nullptr pointer: %" PRId64, capacity);
return nullptr;
}
// At the moment, the capacity of DirectByteBuffer is limited to a signed int.
if (capacity > INT_MAX) {
- JniAbortF("NewDirectByteBuffer", "buffer capacity greater than maximum jint: %" PRId64, capacity);
+ JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer",
+ "buffer capacity greater than maximum jint: %" PRId64,
+ capacity);
return nullptr;
}
jlong address_arg = reinterpret_cast<jlong>(address);
@@ -2541,8 +2283,9 @@
}
private:
- static jint EnsureLocalCapacity(ScopedObjectAccess& soa, jint desired_capacity,
- const char* caller) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
+ const char* caller)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO: we should try to expand the table if necessary.
if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
@@ -2559,11 +2302,11 @@
template<typename JniT, typename ArtT>
static JniT NewPrimitiveArray(JNIEnv* env, jsize length) {
+ ScopedObjectAccess soa(env);
if (UNLIKELY(length < 0)) {
- JniAbortF("NewPrimitiveArray", "negative array length: %d", length);
+ soa.Vm()->JniAbortF("NewPrimitiveArray", "negative array length: %d", length);
return nullptr;
}
- ScopedObjectAccess soa(env);
ArtT* result = ArtT::Alloc(soa.Self(), length);
return soa.AddLocalReference<JniT>(result);
}
@@ -2574,9 +2317,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
- JniAbortF(fn_name, "attempt to %s %s primitive array elements with an object of type %s",
- operation, PrettyDescriptor(ArtArrayT::GetArrayClass()->GetComponentType()).c_str(),
- PrettyDescriptor(array->GetClass()).c_str());
+ soa.Vm()->JniAbortF(fn_name,
+ "attempt to %s %s primitive array elements with an object of type %s",
+ operation,
+ PrettyDescriptor(ArtArrayT::GetArrayClass()->GetComponentType()).c_str(),
+ PrettyDescriptor(array->GetClass()).c_str());
return nullptr;
}
DCHECK_EQ(sizeof(ElementT), array->GetClass()->GetComponentSize());
@@ -2593,7 +2338,7 @@
if (UNLIKELY(array == nullptr)) {
return nullptr;
}
- PinPrimitiveArray(soa, array);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), array);
// Only make a copy if necessary.
if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
if (is_copy != nullptr) {
@@ -2639,8 +2384,9 @@
// heap address. TODO: This might be slow to check, may be worth keeping track of which
// copies we make?
if (heap->IsNonDiscontinuousSpaceHeapAddress(reinterpret_cast<mirror::Object*>(elements))) {
- JniAbortF("ReleaseArrayElements", "invalid element pointer %p, array elements are %p",
- reinterpret_cast<void*>(elements), array_data);
+ soa.Vm()->JniAbortF("ReleaseArrayElements",
+ "invalid element pointer %p, array elements are %p",
+ reinterpret_cast<void*>(elements), array_data);
return;
}
}
@@ -2655,7 +2401,7 @@
// A non-copy to a movable object must mean that we had disabled the moving GC.
heap->DecrementDisableMovingGC(soa.Self());
}
- UnpinPrimitiveArray(soa, array);
+ soa.Vm()->UnpinPrimitiveArray(soa.Self(), array);
}
}
@@ -2936,472 +2682,8 @@
JNI::GetObjectRefType,
};
-JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
- : self(self),
- vm(vm),
- local_ref_cookie(IRT_FIRST_SEGMENT),
- locals(kLocalsInitial, kLocalsMax, kLocal),
- check_jni(false),
- critical(0),
- monitors("monitors", kMonitorsInitial, kMonitorsMax) {
- functions = unchecked_functions = &gJniNativeInterface;
- if (vm->check_jni) {
- SetCheckJniEnabled(true);
- }
-}
-
-JNIEnvExt::~JNIEnvExt() {
-}
-
-jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj == nullptr) {
- return nullptr;
- }
- return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
-}
-
-void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj != nullptr) {
- locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
- }
-}
-void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
- check_jni = enabled;
- functions = enabled ? GetCheckJniNativeInterface() : &gJniNativeInterface;
-}
-
-void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
- locals.Dump(os);
- monitors.Dump(os);
-}
-
-void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast.
- // TODO: take 'capacity' into account.
- stacked_local_ref_cookies.push_back(local_ref_cookie);
- local_ref_cookie = locals.GetSegmentState();
-}
-
-void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- locals.SetSegmentState(local_ref_cookie);
- local_ref_cookie = stacked_local_ref_cookies.back();
- stacked_local_ref_cookies.pop_back();
-}
-
-Offset JNIEnvExt::SegmentStateOffset() {
- return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
- IndirectReferenceTable::SegmentStateOffset().Int32Value());
-}
-
-// JNI Invocation interface.
-
-extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
- const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args);
- if (IsBadJniVersion(args->version)) {
- LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version;
- return JNI_EVERSION;
- }
- RuntimeOptions options;
- for (int i = 0; i < args->nOptions; ++i) {
- JavaVMOption* option = &args->options[i];
- options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo));
- }
- bool ignore_unrecognized = args->ignoreUnrecognized;
- if (!Runtime::Create(options, ignore_unrecognized)) {
- return JNI_ERR;
- }
- Runtime* runtime = Runtime::Current();
- bool started = runtime->Start();
- if (!started) {
- delete Thread::Current()->GetJniEnv();
- delete runtime->GetJavaVM();
- LOG(WARNING) << "CreateJavaVM failed";
- return JNI_ERR;
- }
- *p_env = Thread::Current()->GetJniEnv();
- *p_vm = runtime->GetJavaVM();
- return JNI_OK;
-}
-
-extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
- Runtime* runtime = Runtime::Current();
- if (runtime == nullptr) {
- *vm_count = 0;
- } else {
- *vm_count = 1;
- vms[0] = runtime->GetJavaVM();
- }
- return JNI_OK;
-}
-
-// Historically unsupported.
-extern "C" jint JNI_GetDefaultJavaVMInitArgs(void* /*vm_args*/) {
- return JNI_ERR;
-}
-
-class JII {
- public:
- static jint DestroyJavaVM(JavaVM* vm) {
- if (vm == nullptr) {
- return JNI_ERR;
- }
- JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
- delete raw_vm->runtime;
- return JNI_OK;
- }
-
- static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- return JII_AttachCurrentThread(vm, p_env, thr_args, false);
- }
-
- static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- return JII_AttachCurrentThread(vm, p_env, thr_args, true);
- }
-
- static jint DetachCurrentThread(JavaVM* vm) {
- if (vm == nullptr || Thread::Current() == nullptr) {
- return JNI_ERR;
- }
- JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
- Runtime* runtime = raw_vm->runtime;
- runtime->DetachCurrentThread();
- return JNI_OK;
- }
-
- static jint GetEnv(JavaVM* vm, void** env, jint version) {
- // GetEnv always returns a JNIEnv* for the most current supported JNI version,
- // and unlike other calls that take a JNI version doesn't care if you supply
- // JNI_VERSION_1_1, which we don't otherwise support.
- if (IsBadJniVersion(version) && version != JNI_VERSION_1_1) {
- LOG(ERROR) << "Bad JNI version passed to GetEnv: " << version;
- return JNI_EVERSION;
- }
- if (vm == nullptr || env == nullptr) {
- return JNI_ERR;
- }
- Thread* thread = Thread::Current();
- if (thread == nullptr) {
- *env = nullptr;
- return JNI_EDETACHED;
- }
- *env = thread->GetJniEnv();
- return JNI_OK;
- }
-};
-
-const JNIInvokeInterface gJniInvokeInterface = {
- nullptr, // reserved0
- nullptr, // reserved1
- nullptr, // reserved2
- JII::DestroyJavaVM,
- JII::AttachCurrentThread,
- JII::DetachCurrentThread,
- JII::GetEnv,
- JII::AttachCurrentThreadAsDaemon
-};
-
-JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
- : runtime(runtime),
- check_jni_abort_hook(nullptr),
- check_jni_abort_hook_data(nullptr),
- check_jni(false),
- force_copy(false), // TODO: add a way to enable this
- trace(options->jni_trace_),
- pins_lock("JNI pin table lock", kPinTableLock),
- pin_table("pin table", kPinTableInitial, kPinTableMax),
- globals_lock("JNI global reference table lock"),
- globals(gGlobalsInitial, gGlobalsMax, kGlobal),
- libraries_lock("JNI shared libraries map lock", kLoadLibraryLock),
- libraries(new Libraries),
- weak_globals_lock_("JNI weak global reference table lock"),
- weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
- allow_new_weak_globals_(true),
- weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
- functions = unchecked_functions = &gJniInvokeInterface;
- if (options->check_jni_) {
- SetCheckJniEnabled(true);
- }
-}
-
-JavaVMExt::~JavaVMExt() {
- delete libraries;
-}
-
-jweak JavaVMExt::AddWeakGlobalReference(Thread* self, mirror::Object* obj) {
- if (obj == nullptr) {
- return nullptr;
- }
- MutexLock mu(self, weak_globals_lock_);
- while (UNLIKELY(!allow_new_weak_globals_)) {
- weak_globals_add_condition_.WaitHoldingLocks(self);
- }
- IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
- return reinterpret_cast<jweak>(ref);
-}
-
-void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
- MutexLock mu(self, weak_globals_lock_);
- if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
- LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
- << "failed to find entry";
- }
-}
-
-void JavaVMExt::SetCheckJniEnabled(bool enabled) {
- check_jni = enabled;
- functions = enabled ? GetCheckJniInvokeInterface() : &gJniInvokeInterface;
-}
-
-void JavaVMExt::DumpForSigQuit(std::ostream& os) {
- os << "JNI: CheckJNI is " << (check_jni ? "on" : "off");
- if (force_copy) {
- os << " (with forcecopy)";
- }
- Thread* self = Thread::Current();
- {
- MutexLock mu(self, pins_lock);
- os << "; pins=" << pin_table.Size();
- }
- {
- ReaderMutexLock mu(self, globals_lock);
- os << "; globals=" << globals.Capacity();
- }
- {
- MutexLock mu(self, weak_globals_lock_);
- if (weak_globals_.Capacity() > 0) {
- os << " (plus " << weak_globals_.Capacity() << " weak)";
- }
- }
- os << '\n';
-
- {
- MutexLock mu(self, libraries_lock);
- os << "Libraries: " << Dumpable<Libraries>(*libraries) << " (" << libraries->size() << ")\n";
- }
-}
-
-void JavaVMExt::DisallowNewWeakGlobals() {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- allow_new_weak_globals_ = false;
-}
-
-void JavaVMExt::AllowNewWeakGlobals() {
- Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
- allow_new_weak_globals_ = true;
- weak_globals_add_condition_.Broadcast(self);
-}
-
-mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
- MutexLock mu(self, weak_globals_lock_);
- while (UNLIKELY(!allow_new_weak_globals_)) {
- weak_globals_add_condition_.WaitHoldingLocks(self);
- }
- return weak_globals_.Get(ref);
-}
-
-void JavaVMExt::DumpReferenceTables(std::ostream& os) {
- Thread* self = Thread::Current();
- {
- ReaderMutexLock mu(self, globals_lock);
- globals.Dump(os);
- }
- {
- MutexLock mu(self, weak_globals_lock_);
- weak_globals_.Dump(os);
- }
- {
- MutexLock mu(self, pins_lock);
- pin_table.Dump(os);
- }
-}
-
-bool JavaVMExt::LoadNativeLibrary(const std::string& path,
- Handle<mirror::ClassLoader> class_loader,
- std::string* detail) {
- detail->clear();
-
- // See if we've already loaded this library. If we have, and the class loader
- // matches, return successfully without doing anything.
- // TODO: for better results we should canonicalize the pathname (or even compare
- // inodes). This implementation is fine if everybody is using System.loadLibrary.
- SharedLibrary* library;
- Thread* self = Thread::Current();
- {
- // TODO: move the locking (and more of this logic) into Libraries.
- MutexLock mu(self, libraries_lock);
- library = libraries->Get(path);
- }
- if (library != nullptr) {
- if (library->GetClassLoader() != class_loader.Get()) {
- // The library will be associated with class_loader. The JNI
- // spec says we can't load the same library into more than one
- // class loader.
- StringAppendF(detail, "Shared library \"%s\" already opened by "
- "ClassLoader %p; can't open in ClassLoader %p",
- path.c_str(), library->GetClassLoader(), class_loader.Get());
- LOG(WARNING) << detail;
- return false;
- }
- VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
- << "ClassLoader " << class_loader.Get() << "]";
- if (!library->CheckOnLoadResult()) {
- StringAppendF(detail, "JNI_OnLoad failed on a previous attempt "
- "to load \"%s\"", path.c_str());
- return false;
- }
- return true;
- }
-
- // Open the shared library. Because we're using a full path, the system
- // doesn't have to search through LD_LIBRARY_PATH. (It may do so to
- // resolve this library's dependencies though.)
-
- // Failures here are expected when java.library.path has several entries
- // and we have to hunt for the lib.
-
- // Below we dlopen but there is no paired dlclose, this would be necessary if we supported
- // class unloading. Libraries will only be unloaded when the reference count (incremented by
- // dlopen) becomes zero from dlclose.
-
- // This can execute slowly for a large library on a busy system, so we
- // want to switch from kRunnable while it executes. This allows the GC to ignore us.
- self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad);
- void* handle = dlopen(path.empty() ? nullptr : path.c_str(), RTLD_LAZY);
- self->TransitionFromSuspendedToRunnable();
-
- VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
-
- if (handle == nullptr) {
- *detail = dlerror();
- LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *detail;
- return false;
- }
-
- // Create a new entry.
- // TODO: move the locking (and more of this logic) into Libraries.
- bool created_library = false;
- {
- MutexLock mu(self, libraries_lock);
- library = libraries->Get(path);
- if (library == nullptr) { // We won race to get libraries_lock
- library = new SharedLibrary(path, handle, class_loader.Get());
- libraries->Put(path, library);
- created_library = true;
- }
- }
- if (!created_library) {
- LOG(INFO) << "WOW: we lost a race to add shared library: "
- << "\"" << path << "\" ClassLoader=" << class_loader.Get();
- return library->CheckOnLoadResult();
- }
-
- VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader.Get()
- << "]";
-
- bool was_successful = false;
- void* sym = dlsym(handle, "JNI_OnLoad");
- if (sym == nullptr) {
- VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
- was_successful = true;
- } else {
- // Call JNI_OnLoad. We have to override the current class
- // loader, which will always be "null" since the stuff at the
- // top of the stack is around Runtime.loadLibrary(). (See
- // the comments in the JNI FindClass function.)
- typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
- JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
- StackHandleScope<1> hs(self);
- Handle<mirror::ClassLoader> old_class_loader(hs.NewHandle(self->GetClassLoaderOverride()));
- self->SetClassLoaderOverride(class_loader.Get());
-
- int version = 0;
- {
- ScopedThreadStateChange tsc(self, kNative);
- VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
- version = (*jni_on_load)(this, nullptr);
- }
-
- self->SetClassLoaderOverride(old_class_loader.Get());
-
- if (version == JNI_ERR) {
- StringAppendF(detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
- } else if (IsBadJniVersion(version)) {
- StringAppendF(detail, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
- path.c_str(), version);
- // It's unwise to call dlclose() here, but we can mark it
- // as bad and ensure that future load attempts will fail.
- // We don't know how far JNI_OnLoad got, so there could
- // be some partially-initialized stuff accessible through
- // newly-registered native method calls. We could try to
- // unregister them, but that doesn't seem worthwhile.
- } else {
- was_successful = true;
- }
- VLOG(jni) << "[Returned " << (was_successful ? "successfully" : "failure")
- << " from JNI_OnLoad in \"" << path << "\"]";
- }
-
- library->SetResult(was_successful);
- return was_successful;
-}
-
-void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
- CHECK(m->IsNative());
- mirror::Class* c = m->GetDeclaringClass();
- // If this is a static method, it could be called before the class has been initialized.
- if (m->IsStatic()) {
- c = EnsureInitialized(Thread::Current(), c);
- if (c == nullptr) {
- return nullptr;
- }
- } else {
- CHECK(c->IsInitializing()) << c->GetStatus() << " " << PrettyMethod(m);
- }
- std::string detail;
- void* native_method;
- Thread* self = Thread::Current();
- {
- MutexLock mu(self, libraries_lock);
- native_method = libraries->FindNativeMethod(m, detail);
- }
- // Throwing can cause libraries_lock to be reacquired.
- if (native_method == nullptr) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
- }
- return native_method;
-}
-
-void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- for (mirror::Object** entry : weak_globals_) {
- // Since this is called by the GC, we don't need a read barrier.
- mirror::Object* obj = *entry;
- mirror::Object* new_obj = callback(obj, arg);
- if (new_obj == nullptr) {
- new_obj = kClearedJniWeakGlobal;
- }
- *entry = new_obj;
- }
-}
-
-void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
- Thread* self = Thread::Current();
- {
- ReaderMutexLock mu(self, globals_lock);
- globals.VisitRoots(callback, arg, 0, kRootJNIGlobal);
- }
- {
- MutexLock mu(self, pins_lock);
- pin_table.VisitRoots(callback, arg, 0, kRootVMInternal);
- }
- {
- MutexLock mu(self, libraries_lock);
- // Libraries contains shared libraries which hold a pointer to a class loader.
- libraries->VisitRoots(callback, arg);
- }
- // The weak_globals table is visited by the GC itself (because it mutates the table).
+const JNINativeInterface* GetJniNativeInterface() {
+ return &gJniNativeInterface;
}
void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
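
The LoadNativeLibrary() code removed above drives a simple version handshake with every library it dlopen()s: a missing JNI_OnLoad counts as success, JNI_ERR or an unrecognized version (IsBadJniVersion) marks the library as permanently bad, and any known version is accepted. A minimal library-side sketch of that handshake; the com/example/Demo class and its nativeAnswer method are illustrative only, not part of this change:

#include <jni.h>

// Hypothetical native implementation registered from JNI_OnLoad below.
static jint NativeAnswer(JNIEnv*, jclass) {
  return 42;
}

extern "C" jint JNI_OnLoad(JavaVM* vm, void* /* reserved */) {
  JNIEnv* env = nullptr;
  if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
    return JNI_ERR;  // reported as "JNI_ERR returned from JNI_OnLoad in ..."
  }
  jclass clazz = env->FindClass("com/example/Demo");  // illustrative class name
  if (clazz == nullptr) {
    return JNI_ERR;
  }
  static const JNINativeMethod kMethods[] = {
      {"nativeAnswer", "()I", reinterpret_cast<void*>(NativeAnswer)},
  };
  if (env->RegisterNatives(clazz, kMethods, 1) != JNI_OK) {
    return JNI_ERR;
  }
  // Returning anything IsBadJniVersion() rejects would also mark the library bad.
  return JNI_VERSION_1_6;
}
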
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index abb71b7..48b10f5 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -17,16 +17,8 @@
#ifndef ART_RUNTIME_JNI_INTERNAL_H_
#define ART_RUNTIME_JNI_INTERNAL_H_
-#include "jni.h"
-
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "indirect_reference_table.h"
-#include "object_callbacks.h"
-#include "reference_table.h"
-
+#include <jni.h>
#include <iosfwd>
-#include <string>
#ifndef NATIVE_METHOD
#define NATIVE_METHOD(className, functionName, signature) \
@@ -36,187 +28,18 @@
RegisterNativeMethods(env, jni_class_name, gMethods, arraysize(gMethods))
namespace art {
-namespace mirror {
- class ArtField;
- class ArtMethod;
- class ClassLoader;
-} // namespace mirror
-union JValue;
-class Libraries;
-class ParsedOptions;
-class Runtime;
-class ScopedObjectAccess;
-template<class T> class Handle;
-class Thread;
-void JniAbortF(const char* jni_function_name, const char* fmt, ...)
- __attribute__((__format__(__printf__, 2, 3)));
+const JNINativeInterface* GetJniNativeInterface();
+
+// Similar to RegisterNatives except it's passed a descriptor for a class name and failures are
+// fatal.
void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
jint method_count);
int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
-class JavaVMExt : public JavaVM {
- public:
- JavaVMExt(Runtime* runtime, ParsedOptions* options);
- ~JavaVMExt();
-
- /**
- * Loads the given shared library. 'path' is an absolute pathname.
- *
- * Returns 'true' on success. On failure, sets 'detail' to a
- * human-readable description of the error.
- */
- bool LoadNativeLibrary(const std::string& path, Handle<mirror::ClassLoader> class_loader,
- std::string* detail)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- /**
- * Returns a pointer to the code for the native method 'm', found
- * using dlsym(3) on every native library that's been loaded so far.
- */
- void* FindCodeForNativeMethod(mirror::ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void DumpForSigQuit(std::ostream& os);
-
- void DumpReferenceTables(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void SetCheckJniEnabled(bool enabled);
-
- void VisitRoots(RootCallback* callback, void* arg);
-
- void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- jweak AddWeakGlobalReference(Thread* self, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DeleteWeakGlobalRef(Thread* self, jweak obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg);
- mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- Runtime* runtime;
-
- // Used for testing. By default, we'll LOG(FATAL) the reason.
- void (*check_jni_abort_hook)(void* data, const std::string& reason);
- void* check_jni_abort_hook_data;
-
- // Extra checking.
- bool check_jni;
- bool force_copy;
-
- // Extra diagnostics.
- std::string trace;
-
- // Used to hold references to pinned primitive arrays.
- Mutex pins_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- ReferenceTable pin_table GUARDED_BY(pins_lock);
-
- // JNI global references.
- ReaderWriterMutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- // Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
- IndirectReferenceTable globals;
-
- Mutex libraries_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- Libraries* libraries GUARDED_BY(libraries_lock);
-
- // Used by -Xcheck:jni.
- const JNIInvokeInterface* unchecked_functions;
-
- private:
- // TODO: Make the other members of this class also private.
- // JNI weak global references.
- Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // Since weak_globals_ contain weak roots, be careful not to
- // directly access the object references in it. Use Get() with the
- // read barrier enabled.
- IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
- bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
- ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
-};
-
-struct JNIEnvExt : public JNIEnv {
- JNIEnvExt(Thread* self, JavaVMExt* vm);
- ~JNIEnvExt();
-
- void DumpReferenceTables(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void SetCheckJniEnabled(bool enabled);
-
- void PushFrame(int capacity);
- void PopFrame();
-
- template<typename T>
- T AddLocalReference(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static Offset SegmentStateOffset();
-
- static Offset LocalRefCookieOffset() {
- return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
- }
-
- static Offset SelfOffset() {
- return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
- }
-
- jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- Thread* const self;
- JavaVMExt* vm;
-
- // Cookie used when using the local indirect reference table.
- uint32_t local_ref_cookie;
-
- // JNI local references.
- IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
-
- // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
- // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
- // to a native method.
- std::vector<uint32_t> stacked_local_ref_cookies;
-
- // Frequently-accessed fields cached from JavaVM.
- bool check_jni;
-
- // How many nested "critical" JNI calls are we in?
- int critical;
-
- // Entered JNI monitors, for bulk exit on thread detach.
- ReferenceTable monitors;
-
- // Used by -Xcheck:jni.
- const JNINativeInterface* unchecked_functions;
-};
-
-const JNINativeInterface* GetCheckJniNativeInterface();
-const JNIInvokeInterface* GetCheckJniInvokeInterface();
-
-// Used to save and restore the JNIEnvExt state when not going through code created by the JNI
-// compiler
-class ScopedJniEnvLocalRefState {
- public:
- explicit ScopedJniEnvLocalRefState(JNIEnvExt* env) : env_(env) {
- saved_local_ref_cookie_ = env->local_ref_cookie;
- env->local_ref_cookie = env->locals.GetSegmentState();
- }
-
- ~ScopedJniEnvLocalRefState() {
- env_->locals.SetSegmentState(env_->local_ref_cookie);
- env_->local_ref_cookie = saved_local_ref_cookie_;
- }
-
- private:
- JNIEnvExt* env_;
- uint32_t saved_local_ref_cookie_;
- DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
-};
-
} // namespace art
std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs);
+
#endif // ART_RUNTIME_JNI_INTERNAL_H_
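
The trimmed header above now exposes only what runtime-internal registration code needs: GetJniNativeInterface() for the plain JNI function table, RegisterNativeMethods() (fatal on failure, per the new comment), and the NATIVE_METHOD / REGISTER_NATIVE_METHODS helpers. A sketch of a typical registration file follows; the Demo class and register_com_example_Demo() are illustrative, and NATIVE_METHOD is assumed to expand to a JNINativeMethod entry pointing at className##_##functionName (its body is elided in the hunk above):

#include "jni_internal.h"

#include "base/macros.h"  // assumed source of arraysize()

namespace art {

static jint Demo_answer(JNIEnv*, jclass) {
  return 42;
}

static JNINativeMethod gMethods[] = {
  NATIVE_METHOD(Demo, answer, "()I"),
};

void register_com_example_Demo(JNIEnv* env) {
  // RegisterNativeMethods() aborts on failure, so there is no result to check.
  RegisterNativeMethods(env, "com/example/Demo", gMethods, arraysize(gMethods));
}

}  // namespace art
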
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 7c7e60c..844d14a 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -17,6 +17,7 @@
#include "jni_internal.h"
#include "common_compiler_test.h"
+#include "java_vm_ext.h"
#include "mirror/art_method-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
@@ -53,24 +54,15 @@
}
void ExpectException(jclass exception_class) {
- EXPECT_TRUE(env_->ExceptionCheck());
+ ScopedObjectAccess soa(env_);
+ EXPECT_TRUE(env_->ExceptionCheck())
+ << PrettyDescriptor(soa.Decode<mirror::Class*>(exception_class));
jthrowable exception = env_->ExceptionOccurred();
EXPECT_NE(nullptr, exception);
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(exception, exception_class));
}
- void ExpectClassFound(const char* name) {
- EXPECT_NE(env_->FindClass(name), nullptr) << name;
- EXPECT_FALSE(env_->ExceptionCheck()) << name;
- }
-
- void ExpectClassNotFound(const char* name) {
- EXPECT_EQ(env_->FindClass(name), nullptr) << name;
- EXPECT_TRUE(env_->ExceptionCheck()) << name;
- env_->ExceptionClear();
- }
-
void CleanUpJniEnv() {
if (aioobe_ != nullptr) {
env_->DeleteGlobalRef(aioobe_);
@@ -98,6 +90,510 @@
return soa.AddLocalReference<jclass>(c);
}
+ void ExpectClassFound(const char* name) {
+ EXPECT_NE(env_->FindClass(name), nullptr) << name;
+ EXPECT_FALSE(env_->ExceptionCheck()) << name;
+ }
+
+ void ExpectClassNotFound(const char* name, bool check_jni, const char* check_jni_msg,
+ CheckJniAbortCatcher* abort_catcher) {
+ EXPECT_EQ(env_->FindClass(name), nullptr) << name;
+ if (!check_jni || check_jni_msg == nullptr) {
+ EXPECT_TRUE(env_->ExceptionCheck()) << name;
+ env_->ExceptionClear();
+ } else {
+ abort_catcher->Check(check_jni_msg);
+ }
+ }
+
+ void FindClassTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ // Null argument is always an abort.
+ env_->FindClass(nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+
+ // Reference types...
+ ExpectClassFound("java/lang/String");
+ // ...for arrays too, where you must include "L;".
+ ExpectClassFound("[Ljava/lang/String;");
+ // Primitive arrays are okay too, if the primitive type is valid.
+ ExpectClassFound("[C");
+
+ // But primitive types aren't allowed...
+ ExpectClassNotFound("C", check_jni, nullptr, &check_jni_abort_catcher);
+ ExpectClassNotFound("V", check_jni, nullptr, &check_jni_abort_catcher);
+ ExpectClassNotFound("K", check_jni, nullptr, &check_jni_abort_catcher);
+
+ if (check_jni) {
+ // Check-JNI rejects invalid class names with an abort but without a pending exception.
+ EXPECT_EQ(env_->FindClass("java.lang.String"), nullptr);
+ EXPECT_FALSE(env_->ExceptionCheck());
+ check_jni_abort_catcher.Check("illegal class name 'java.lang.String'");
+
+ EXPECT_EQ(env_->FindClass("[Ljava.lang.String;"), nullptr);
+ EXPECT_FALSE(env_->ExceptionCheck());
+ check_jni_abort_catcher.Check("illegal class name '[Ljava.lang.String;'");
+ } else {
+ // Without check JNI we're tolerant and replace '.' with '/'.
+ ExpectClassFound("java.lang.String");
+ ExpectClassFound("[Ljava.lang.String;");
+ }
+
+ ExpectClassNotFound("Ljava.lang.String;", check_jni, "illegal class name 'Ljava.lang.String;'",
+ &check_jni_abort_catcher);
+ ExpectClassNotFound("[java.lang.String", check_jni, "illegal class name '[java.lang.String'",
+ &check_jni_abort_catcher);
+
+ // You can't include the "L;" in a JNI class descriptor.
+ ExpectClassNotFound("Ljava/lang/String;", check_jni, "illegal class name 'Ljava/lang/String;'",
+ &check_jni_abort_catcher);
+
+ // But you must include it for an array of any reference type.
+ ExpectClassNotFound("[java/lang/String", check_jni, "illegal class name '[java/lang/String'",
+ &check_jni_abort_catcher);
+
+ ExpectClassNotFound("[K", check_jni, "illegal class name '[K'", &check_jni_abort_catcher);
+
+ // Void arrays aren't allowed.
+ ExpectClassNotFound("[V", check_jni, "illegal class name '[V'", &check_jni_abort_catcher);
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetFieldIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+
+ jfieldID fid = env_->GetFieldID(nullptr, "count", "I");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "GetFieldID received NULL jclass"
+ : "java_class == null");
+ fid = env_->GetFieldID(c, nullptr, "I");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ fid = env_->GetFieldID(c, "count", nullptr);
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetStaticFieldIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+
+ jfieldID fid = env_->GetStaticFieldID(nullptr, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "GetStaticFieldID received NULL jclass"
+ : "java_class == null");
+ fid = env_->GetStaticFieldID(c, nullptr, "Ljava/util/Comparator;");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", nullptr);
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetMethodIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jmethodID method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "GetMethodID received NULL jclass"
+ : "java_class == null");
+ jclass jlnsme = env_->FindClass("java/lang/NoSuchMethodError");
+ ASSERT_TRUE(jlnsme != nullptr);
+ method = env_->GetMethodID(jlnsme, nullptr, "(Ljava/lang/String;)V");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ method = env_->GetMethodID(jlnsme, "<init>", nullptr);
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetStaticMethodIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jmethodID method = env_->GetStaticMethodID(nullptr, "valueOf", "(I)Ljava/lang/String;");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "GetStaticMethodID received NULL jclass"
+ : "java_class == null");
+ jclass jlstring = env_->FindClass("java/lang/String");
+ method = env_->GetStaticMethodID(jlstring, nullptr, "(I)Ljava/lang/String;");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ method = env_->GetStaticMethodID(jlstring, "valueOf", nullptr);
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetFromReflectedField_ToReflectedFieldBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+ jfieldID fid = env_->GetFieldID(c, "count", "I");
+ ASSERT_NE(fid, nullptr);
+
+ // A null class argument is only rejected when check-JNI is enabled.
+ jobject field = env_->ToReflectedField(nullptr, fid, JNI_FALSE);
+ if (check_jni) {
+ EXPECT_EQ(field, nullptr);
+ check_jni_abort_catcher.Check("ToReflectedField received NULL jclass");
+ } else {
+ EXPECT_NE(field, nullptr);
+ }
+
+ field = env_->ToReflectedField(c, nullptr, JNI_FALSE);
+ EXPECT_EQ(field, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "jfieldID was NULL"
+ : "fid == null");
+
+ fid = env_->FromReflectedField(nullptr);
+ ASSERT_EQ(fid, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "expected non-null java.lang.reflect.Field"
+ : "jlr_field == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+ jmethodID mid = env_->GetMethodID(c, "<init>", "()V");
+ ASSERT_NE(mid, nullptr);
+
+ // A null class argument is only rejected when check-JNI is enabled.
+ jobject method = env_->ToReflectedMethod(nullptr, mid, JNI_FALSE);
+ if (check_jni) {
+ EXPECT_EQ(method, nullptr);
+ check_jni_abort_catcher.Check("ToReflectedMethod received NULL jclass");
+ } else {
+ EXPECT_NE(method, nullptr);
+ }
+
+ method = env_->ToReflectedMethod(c, nullptr, JNI_FALSE);
+ EXPECT_EQ(method, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "jmethodID was NULL"
+ : "mid == null");
+ mid = env_->FromReflectedMethod(method);
+ ASSERT_EQ(mid, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "expected non-null method" : "jlr_method == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void RegisterAndUnregisterNativesBadArguments(bool check_jni,
+ CheckJniAbortCatcher* check_jni_abort_catcher) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ // Passing a class of null is a failure.
+ {
+ JNINativeMethod methods[] = { };
+ EXPECT_EQ(env_->RegisterNatives(nullptr, methods, 0), JNI_ERR);
+ check_jni_abort_catcher->Check(check_jni ? "RegisterNatives received NULL jclass"
+ : "java_class == null");
+ }
+
+ // Passing methods as null is a failure.
+ jclass jlobject = env_->FindClass("java/lang/Object");
+ EXPECT_EQ(env_->RegisterNatives(jlobject, nullptr, 1), JNI_ERR);
+ check_jni_abort_catcher->Check("methods == null");
+
+ // Unregistering null is a failure.
+ EXPECT_EQ(env_->UnregisterNatives(nullptr), JNI_ERR);
+ check_jni_abort_catcher->Check(check_jni ? "UnregisterNatives received NULL jclass"
+ : "java_class == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+
+ void GetPrimitiveArrayElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jbooleanArray array = env_->NewBooleanArray(10);
+ jboolean is_copy;
+ EXPECT_EQ(env_->GetByteArrayElements(reinterpret_cast<jbyteArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to get byte primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetShortArrayElements(reinterpret_cast<jshortArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to get short primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetCharArrayElements(reinterpret_cast<jcharArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to get char primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetIntArrayElements(reinterpret_cast<jintArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to get int primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetLongArrayElements(reinterpret_cast<jlongArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to get long primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetFloatArrayElements(reinterpret_cast<jfloatArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to get float primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetDoubleArrayElements(reinterpret_cast<jdoubleArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to get double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), &is_copy),
+ nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to get boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), &is_copy),
+ nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to get boolean primitive array elements with an object of type java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void ReleasePrimitiveArrayElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jbooleanArray array = env_->NewBooleanArray(10);
+ ASSERT_TRUE(array != nullptr);
+ jboolean is_copy;
+ jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
+ ASSERT_TRUE(elements != nullptr);
+ env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
+ reinterpret_cast<jbyte*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to release byte primitive array elements with an object of type boolean[]");
+ env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
+ reinterpret_cast<jshort*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to release short primitive array elements with an object of type boolean[]");
+ env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
+ reinterpret_cast<jchar*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to release char primitive array elements with an object of type boolean[]");
+ env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
+ reinterpret_cast<jint*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to release int primitive array elements with an object of type boolean[]");
+ env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
+ reinterpret_cast<jlong*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to release long primitive array elements with an object of type boolean[]");
+ env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
+ reinterpret_cast<jfloat*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to release float primitive array elements with an object of type boolean[]");
+ env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
+ reinterpret_cast<jdouble*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to release double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to release boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
+ jni_abort_catcher.Check(
+ check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to release boolean primitive array elements with an object of type "
+ "java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetReleasePrimitiveArrayCriticalOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jobject object = env_->NewStringUTF("Test String");
+ jboolean is_copy;
+ void* elements = env_->GetPrimitiveArrayCritical(reinterpret_cast<jarray>(object), &is_copy);
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "expected primitive array, given java.lang.String");
+ env_->ReleasePrimitiveArrayCritical(reinterpret_cast<jarray>(object), elements, 0);
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "expected primitive array, given java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetPrimitiveArrayRegionElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+ constexpr size_t kLength = 10;
+ jbooleanArray array = env_->NewBooleanArray(kLength);
+ ASSERT_TRUE(array != nullptr);
+ jboolean elements[kLength];
+ env_->GetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+ reinterpret_cast<jbyte*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to get region of byte primitive array elements with an object of type boolean[]");
+ env_->GetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+ reinterpret_cast<jshort*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to get region of short primitive array elements with an object of type boolean[]");
+ env_->GetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+ reinterpret_cast<jchar*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to get region of char primitive array elements with an object of type boolean[]");
+ env_->GetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+ reinterpret_cast<jint*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to get region of int primitive array elements with an object of type boolean[]");
+ env_->GetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+ reinterpret_cast<jlong*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to get region of long primitive array elements with an object of type boolean[]");
+ env_->GetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+ reinterpret_cast<jfloat*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to get region of float primitive array elements with an object of type boolean[]");
+ env_->GetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+ reinterpret_cast<jdouble*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to get region of double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to get region of boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to get region of boolean primitive array elements with an object of type "
+ "java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void SetPrimitiveArrayRegionElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+ constexpr size_t kLength = 10;
+ jbooleanArray array = env_->NewBooleanArray(kLength);
+ ASSERT_TRUE(array != nullptr);
+ jboolean elements[kLength];
+ env_->SetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+ reinterpret_cast<jbyte*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to set region of byte primitive array elements with an object of type boolean[]");
+ env_->SetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+ reinterpret_cast<jshort*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to set region of short primitive array elements with an object of type boolean[]");
+ env_->SetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+ reinterpret_cast<jchar*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to set region of char primitive array elements with an object of type boolean[]");
+ env_->SetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+ reinterpret_cast<jint*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to set region of int primitive array elements with an object of type boolean[]");
+ env_->SetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+ reinterpret_cast<jlong*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to set region of long primitive array elements with an object of type boolean[]");
+ env_->SetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+ reinterpret_cast<jfloat*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to set region of float primitive array elements with an object of type boolean[]");
+ env_->SetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+ reinterpret_cast<jdouble*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to set region of double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to set region of boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to set region of boolean primitive array elements with an object of type "
+ "java.lang.String");
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void NewObjectArrayBadArguments(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jclass element_class = env_->FindClass("java/lang/String");
+ ASSERT_NE(element_class, nullptr);
+
+ env_->NewObjectArray(-1, element_class, nullptr);
+ jni_abort_catcher.Check(check_jni ? "negative jsize: -1" : "negative array length: -1");
+
+ env_->NewObjectArray(std::numeric_limits<jint>::min(), element_class, nullptr);
+ jni_abort_catcher.Check(check_jni ? "negative jsize: -2147483648"
+ : "negative array length: -2147483648");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
JavaVMExt* vm_;
JNIEnv* env_;
jclass aioobe_;
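
Each helper added above follows the same toggle idiom: SetCheckJniEnabled() flips check-JNI and returns the previous setting, CheckJniAbortCatcher traps the expected abort text, the expected message is chosen according to whether -Xcheck:jni or the unchecked implementation reports the error, and the old setting is restored and verified at the end. A minimal distillation using the FindClass(nullptr) case and the two messages quoted above (ExampleBadArgumentCheck is a placeholder name, not part of the change):

void ExampleBadArgumentCheck(JavaVMExt* vm, JNIEnv* env, bool check_jni) {
  bool old_check_jni = vm->SetCheckJniEnabled(check_jni);  // returns the previous value
  CheckJniAbortCatcher catcher;                            // captures the abort message
  env->FindClass(nullptr);                                 // deliberately bad argument
  catcher.Check(check_jni ? "non-nullable const char* was NULL"  // -Xcheck:jni message
                          : "name == null");                     // unchecked-path message
  EXPECT_EQ(check_jni, vm->SetCheckJniEnabled(old_check_jni));   // restore and verify
}
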
@@ -125,48 +621,8 @@
}
TEST_F(JniInternalTest, FindClass) {
- // Reference types...
- ExpectClassFound("java/lang/String");
- // ...for arrays too, where you must include "L;".
- ExpectClassFound("[Ljava/lang/String;");
- // Primitive arrays are okay too, if the primitive type is valid.
- ExpectClassFound("[C");
-
- {
- CheckJniAbortCatcher check_jni_abort_catcher;
- env_->FindClass(nullptr);
- check_jni_abort_catcher.Check("name == null");
-
- // We support . as well as / for compatibility, if -Xcheck:jni is off.
- ExpectClassFound("java.lang.String");
- check_jni_abort_catcher.Check("illegal class name 'java.lang.String'");
- ExpectClassNotFound("Ljava.lang.String;");
- check_jni_abort_catcher.Check("illegal class name 'Ljava.lang.String;'");
- ExpectClassFound("[Ljava.lang.String;");
- check_jni_abort_catcher.Check("illegal class name '[Ljava.lang.String;'");
- ExpectClassNotFound("[java.lang.String");
- check_jni_abort_catcher.Check("illegal class name '[java.lang.String'");
-
- // You can't include the "L;" in a JNI class descriptor.
- ExpectClassNotFound("Ljava/lang/String;");
- check_jni_abort_catcher.Check("illegal class name 'Ljava/lang/String;'");
-
- // But you must include it for an array of any reference type.
- ExpectClassNotFound("[java/lang/String");
- check_jni_abort_catcher.Check("illegal class name '[java/lang/String'");
-
- ExpectClassNotFound("[K");
- check_jni_abort_catcher.Check("illegal class name '[K'");
-
- // Void arrays aren't allowed.
- ExpectClassNotFound("[V");
- check_jni_abort_catcher.Check("illegal class name '[V'");
- }
-
- // But primitive types aren't allowed...
- ExpectClassNotFound("C");
- ExpectClassNotFound("V");
- ExpectClassNotFound("K");
+ FindClassTest(false);
+ FindClassTest(true);
}
TEST_F(JniInternalTest, GetFieldID) {
@@ -208,16 +664,8 @@
ExpectException(jlnsfe);
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- fid = env_->GetFieldID(nullptr, "count", "I");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("java_class == null");
- fid = env_->GetFieldID(c, nullptr, "I");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("name == null");
- fid = env_->GetFieldID(c, "count", nullptr);
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("sig == null");
+ GetFieldIdBadArgumentTest(false);
+ GetFieldIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, GetStaticFieldID) {
@@ -253,16 +701,8 @@
ExpectException(jlnsfe);
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- fid = env_->GetStaticFieldID(nullptr, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("java_class == null");
- fid = env_->GetStaticFieldID(c, nullptr, "Ljava/util/Comparator;");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("name == null");
- fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", nullptr);
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("sig == null");
+ GetStaticFieldIdBadArgumentTest(false);
+ GetStaticFieldIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, GetMethodID) {
@@ -302,16 +742,8 @@
EXPECT_FALSE(env_->ExceptionCheck());
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("java_class == null");
- method = env_->GetMethodID(jlnsme, nullptr, "(Ljava/lang/String;)V");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("name == null");
- method = env_->GetMethodID(jlnsme, "<init>", nullptr);
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("sig == null");
+ GetMethodIdBadArgumentTest(false);
+ GetMethodIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, GetStaticMethodID) {
@@ -340,16 +772,8 @@
EXPECT_FALSE(env_->ExceptionCheck());
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- method = env_->GetStaticMethodID(nullptr, "valueOf", "(I)Ljava/lang/String;");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("java_class == null");
- method = env_->GetStaticMethodID(jlstring, nullptr, "(I)Ljava/lang/String;");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("name == null");
- method = env_->GetStaticMethodID(jlstring, "valueOf", nullptr);
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("sig == null");
+ GetStaticMethodIdBadArgumentTest(false);
+ GetStaticMethodIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) {
@@ -370,13 +794,8 @@
ASSERT_EQ(4, env_->GetIntField(s, fid2));
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- field = env_->ToReflectedField(c, nullptr, JNI_FALSE);
- EXPECT_EQ(field, nullptr);
- check_jni_abort_catcher.Check("fid == null");
- fid2 = env_->FromReflectedField(nullptr);
- ASSERT_EQ(fid2, nullptr);
- check_jni_abort_catcher.Check("jlr_field == null");
+ GetFromReflectedField_ToReflectedFieldBadArgumentTest(false);
+ GetFromReflectedField_ToReflectedFieldBadArgumentTest(true);
}
TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
@@ -417,13 +836,8 @@
ASSERT_EQ(4, env_->CallIntMethod(s, mid2));
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- method = env_->ToReflectedMethod(c, nullptr, JNI_FALSE);
- EXPECT_EQ(method, nullptr);
- check_jni_abort_catcher.Check("mid == null");
- mid2 = env_->FromReflectedMethod(method);
- ASSERT_EQ(mid2, nullptr);
- check_jni_abort_catcher.Check("jlr_method == null");
+ GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(false);
+ GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(true);
}
static void BogusMethod() {
@@ -498,23 +912,11 @@
}
EXPECT_FALSE(env_->ExceptionCheck());
- // Passing a class of null is a failure.
- {
- JNINativeMethod methods[] = { };
- EXPECT_EQ(env_->RegisterNatives(nullptr, methods, 0), JNI_ERR);
- check_jni_abort_catcher.Check("java_class == null");
- }
-
- // Passing methods as null is a failure.
- EXPECT_EQ(env_->RegisterNatives(jlobject, nullptr, 1), JNI_ERR);
- check_jni_abort_catcher.Check("methods == null");
-
- // Unregisters null is a failure.
- EXPECT_EQ(env_->UnregisterNatives(nullptr), JNI_ERR);
- check_jni_abort_catcher.Check("java_class == null");
-
// Unregistering a class with no natives is a warning.
EXPECT_EQ(env_->UnregisterNatives(jlnsme), JNI_OK);
+
+ RegisterAndUnregisterNativesBadArguments(false, &check_jni_abort_catcher);
+ RegisterAndUnregisterNativesBadArguments(true, &check_jni_abort_catcher);
}
#define EXPECT_PRIMITIVE_ARRAY(new_fn, \
@@ -528,6 +930,7 @@
\
{ \
CheckJniAbortCatcher jni_abort_catcher; \
+ down_cast<JNIEnvExt*>(env_)->SetCheckJniEnabled(false); \
/* Allocate an negative sized array and check it has the right failure type. */ \
EXPECT_EQ(env_->new_fn(-1), nullptr); \
jni_abort_catcher.Check("negative array length: -1"); \
@@ -550,6 +953,7 @@
jni_abort_catcher.Check("buf == null"); \
env_->set_region_fn(a, 0, size, nullptr); \
jni_abort_catcher.Check("buf == null"); \
+ down_cast<JNIEnvExt*>(env_)->SetCheckJniEnabled(true); \
} \
/* Allocate an array and check it has the right type and length. */ \
scalar_type ## Array a = env_->new_fn(size); \
@@ -654,189 +1058,28 @@
}
TEST_F(JniInternalTest, GetPrimitiveArrayElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- jbooleanArray array = env_->NewBooleanArray(10);
- jboolean is_copy;
- EXPECT_EQ(env_->GetByteArrayElements(reinterpret_cast<jbyteArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get byte primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetShortArrayElements(reinterpret_cast<jshortArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get short primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetCharArrayElements(reinterpret_cast<jcharArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get char primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetIntArrayElements(reinterpret_cast<jintArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get int primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetLongArrayElements(reinterpret_cast<jlongArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get long primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetFloatArrayElements(reinterpret_cast<jfloatArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get float primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetDoubleArrayElements(reinterpret_cast<jdoubleArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), &is_copy),
- nullptr);
- jni_abort_catcher.Check(
- "attempt to get boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), &is_copy),
- nullptr);
- jni_abort_catcher.Check(
- "attempt to get boolean primitive array elements with an object of type java.lang.String");
+ GetPrimitiveArrayElementsOfWrongType(false);
+ GetPrimitiveArrayElementsOfWrongType(true);
}
TEST_F(JniInternalTest, ReleasePrimitiveArrayElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- jbooleanArray array = env_->NewBooleanArray(10);
- ASSERT_TRUE(array != nullptr);
- jboolean is_copy;
- jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
- ASSERT_TRUE(elements != nullptr);
- env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
- reinterpret_cast<jbyte*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release byte primitive array elements with an object of type boolean[]");
- env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
- reinterpret_cast<jshort*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release short primitive array elements with an object of type boolean[]");
- env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
- reinterpret_cast<jchar*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release char primitive array elements with an object of type boolean[]");
- env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
- reinterpret_cast<jint*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release int primitive array elements with an object of type boolean[]");
- env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
- reinterpret_cast<jlong*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release long primitive array elements with an object of type boolean[]");
- env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
- reinterpret_cast<jfloat*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release float primitive array elements with an object of type boolean[]");
- env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
- reinterpret_cast<jdouble*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
- jni_abort_catcher.Check(
- "attempt to release boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
- jni_abort_catcher.Check(
- "attempt to release boolean primitive array elements with an object of type "
- "java.lang.String");
+ ReleasePrimitiveArrayElementsOfWrongType(false);
+ ReleasePrimitiveArrayElementsOfWrongType(true);
}
+
TEST_F(JniInternalTest, GetReleasePrimitiveArrayCriticalOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- jobject object = env_->NewStringUTF("Test String");
- jboolean is_copy;
- void* elements = env_->GetPrimitiveArrayCritical(reinterpret_cast<jarray>(object), &is_copy);
- jni_abort_catcher.Check("expected primitive array, given java.lang.String");
- env_->ReleasePrimitiveArrayCritical(reinterpret_cast<jarray>(object), elements, 0);
- jni_abort_catcher.Check("expected primitive array, given java.lang.String");
+ GetReleasePrimitiveArrayCriticalOfWrongType(false);
+ GetReleasePrimitiveArrayCriticalOfWrongType(true);
}
TEST_F(JniInternalTest, GetPrimitiveArrayRegionElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- constexpr size_t kLength = 10;
- jbooleanArray array = env_->NewBooleanArray(kLength);
- ASSERT_TRUE(array != nullptr);
- jboolean elements[kLength];
- env_->GetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
- reinterpret_cast<jbyte*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of byte primitive array elements with an object of type boolean[]");
- env_->GetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
- reinterpret_cast<jshort*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of short primitive array elements with an object of type boolean[]");
- env_->GetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
- reinterpret_cast<jchar*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of char primitive array elements with an object of type boolean[]");
- env_->GetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
- reinterpret_cast<jint*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of int primitive array elements with an object of type boolean[]");
- env_->GetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
- reinterpret_cast<jlong*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of long primitive array elements with an object of type boolean[]");
- env_->GetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
- reinterpret_cast<jfloat*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of float primitive array elements with an object of type boolean[]");
- env_->GetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
- reinterpret_cast<jdouble*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of boolean primitive array elements with an object of type "
- "java.lang.String");
+ GetPrimitiveArrayRegionElementsOfWrongType(false);
+ GetPrimitiveArrayRegionElementsOfWrongType(true);
}
TEST_F(JniInternalTest, SetPrimitiveArrayRegionElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- constexpr size_t kLength = 10;
- jbooleanArray array = env_->NewBooleanArray(kLength);
- ASSERT_TRUE(array != nullptr);
- jboolean elements[kLength];
- env_->SetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
- reinterpret_cast<jbyte*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of byte primitive array elements with an object of type boolean[]");
- env_->SetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
- reinterpret_cast<jshort*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of short primitive array elements with an object of type boolean[]");
- env_->SetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
- reinterpret_cast<jchar*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of char primitive array elements with an object of type boolean[]");
- env_->SetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
- reinterpret_cast<jint*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of int primitive array elements with an object of type boolean[]");
- env_->SetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
- reinterpret_cast<jlong*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of long primitive array elements with an object of type boolean[]");
- env_->SetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
- reinterpret_cast<jfloat*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of float primitive array elements with an object of type boolean[]");
- env_->SetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
- reinterpret_cast<jdouble*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of boolean primitive array elements with an object of type "
- "java.lang.String");
+ SetPrimitiveArrayRegionElementsOfWrongType(false);
+ SetPrimitiveArrayRegionElementsOfWrongType(true);
}
TEST_F(JniInternalTest, NewObjectArray) {
@@ -857,12 +1100,8 @@
EXPECT_TRUE(env_->IsSameObject(env_->GetObjectArrayElement(a, 0), nullptr));
// Negative array length checks.
- CheckJniAbortCatcher jni_abort_catcher;
- env_->NewObjectArray(-1, element_class, nullptr);
- jni_abort_catcher.Check("negative array length: -1");
-
- env_->NewObjectArray(std::numeric_limits<jint>::min(), element_class, nullptr);
- jni_abort_catcher.Check("negative array length: -2147483648");
+ NewObjectArrayBadArguments(false);
+ NewObjectArrayBadArguments(true);
}
TEST_F(JniInternalTest, NewObjectArrayWithPrimitiveClasses) {
@@ -872,6 +1111,7 @@
};
ASSERT_EQ(strlen(primitive_descriptors), arraysize(primitive_names));
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
CheckJniAbortCatcher jni_abort_catcher;
for (size_t i = 0; i < strlen(primitive_descriptors); ++i) {
env_->NewObjectArray(0, nullptr, nullptr);
@@ -881,6 +1121,16 @@
std::string error_msg(StringPrintf("not an object type: %s", primitive_names[i]));
jni_abort_catcher.Check(error_msg.c_str());
}
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ for (size_t i = 0; i < strlen(primitive_descriptors); ++i) {
+ env_->NewObjectArray(0, nullptr, nullptr);
+ jni_abort_catcher.Check("NewObjectArray received NULL jclass");
+ jclass primitive_class = GetPrimitiveClass(primitive_descriptors[i]);
+ env_->NewObjectArray(1, primitive_class, nullptr);
+ std::string error_msg(StringPrintf("not an object type: %s", primitive_names[i]));
+ jni_abort_catcher.Check(error_msg.c_str());
+ }
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
@@ -940,8 +1190,13 @@
// Null as class should fail.
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(env_->GetSuperclass(nullptr), nullptr);
jni_abort_catcher.Check("java_class == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(env_->GetSuperclass(nullptr), nullptr);
+ jni_abort_catcher.Check("GetSuperclass received NULL jclass");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, IsAssignableFrom) {
@@ -950,15 +1205,42 @@
jclass string_class = env_->FindClass("java/lang/String");
ASSERT_NE(string_class, nullptr);
- ASSERT_TRUE(env_->IsAssignableFrom(object_class, string_class));
- ASSERT_FALSE(env_->IsAssignableFrom(string_class, object_class));
+ // A superclass is assignable from an instance of its
+ // subclass but not vice versa.
+ ASSERT_TRUE(env_->IsAssignableFrom(string_class, object_class));
+ ASSERT_FALSE(env_->IsAssignableFrom(object_class, string_class));
+
+ jclass charsequence_interface = env_->FindClass("java/lang/CharSequence");
+ ASSERT_NE(charsequence_interface, nullptr);
+
+ // An interface is assignable from an instance of an implementing
+ // class but not vice versa.
+ ASSERT_TRUE(env_->IsAssignableFrom(string_class, charsequence_interface));
+ ASSERT_FALSE(env_->IsAssignableFrom(charsequence_interface, string_class));
+
+ // Check that arrays are covariant.
+ jclass string_array_class = env_->FindClass("[Ljava/lang/String;");
+ ASSERT_NE(string_array_class, nullptr);
+ jclass object_array_class = env_->FindClass("[Ljava/lang/Object;");
+ ASSERT_NE(object_array_class, nullptr);
+ ASSERT_TRUE(env_->IsAssignableFrom(string_array_class, object_array_class));
+ ASSERT_FALSE(env_->IsAssignableFrom(object_array_class, string_array_class));
+
+ // Primitive types are tested in 004-JniTest.
// Null as either class should fail.
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(env_->IsAssignableFrom(nullptr, string_class), JNI_FALSE);
jni_abort_catcher.Check("java_class1 == null");
EXPECT_EQ(env_->IsAssignableFrom(object_class, nullptr), JNI_FALSE);
jni_abort_catcher.Check("java_class2 == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(env_->IsAssignableFrom(nullptr, string_class), JNI_FALSE);
+ jni_abort_catcher.Check("IsAssignableFrom received NULL jclass");
+ EXPECT_EQ(env_->IsAssignableFrom(object_class, nullptr), JNI_FALSE);
+ jni_abort_catcher.Check("IsAssignableFrom received NULL jclass");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
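
The swapped assertions above encode the JNI direction of IsAssignableFrom(clazz1, clazz2): it asks whether an object of clazz1 can be cast to clazz2, so String is assignable to Object and to CharSequence, never the reverse. The same direction in ordinary C++ terms, as a compile-time sketch:

    #include <type_traits>

    struct Base {};
    struct Derived : Base {};

    static_assert(std::is_convertible<Derived*, Base*>::value,
                  "a subclass reference converts to its superclass");
    static_assert(!std::is_convertible<Base*, Derived*>::value,
                  "the reverse conversion does not hold");

    int main() { return 0; }
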
TEST_F(JniInternalTest, GetObjectRefType) {
@@ -1043,10 +1325,17 @@
TEST_F(JniInternalTest, NewStringNegativeLength) {
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
env_->NewString(nullptr, -1);
jni_abort_catcher.Check("char_count < 0: -1");
env_->NewString(nullptr, std::numeric_limits<jint>::min());
jni_abort_catcher.Check("char_count < 0: -2147483648");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ env_->NewString(nullptr, -1);
+ jni_abort_catcher.Check("negative jsize: -1");
+ env_->NewString(nullptr, std::numeric_limits<jint>::min());
+ jni_abort_catcher.Check("negative jsize: -2147483648");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, GetStringLength_GetStringUTFLength) {
@@ -1104,10 +1393,17 @@
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
// Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
{
CheckJniAbortCatcher check_jni_abort_catcher;
EXPECT_EQ(env_->GetStringUTFChars(nullptr, nullptr), nullptr);
- check_jni_abort_catcher.Check("GetStringUTFChars received null jstring");
+ }
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(env_->GetStringUTFChars(nullptr, nullptr), nullptr);
+ check_jni_abort_catcher.Check("GetStringUTFChars received NULL jstring");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
jstring s = env_->NewStringUTF("hello");
@@ -1202,10 +1498,17 @@
// Null as array should fail.
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(nullptr, env_->GetObjectArrayElement(nullptr, 0));
jni_abort_catcher.Check("java_array == null");
env_->SetObjectArrayElement(nullptr, 0, nullptr);
jni_abort_catcher.Check("java_array == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(nullptr, env_->GetObjectArrayElement(nullptr, 0));
+ jni_abort_catcher.Check("jarray was NULL");
+ env_->SetObjectArrayElement(nullptr, 0, nullptr);
+ jni_abort_catcher.Check("jarray was NULL");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
#define EXPECT_STATIC_PRIMITIVE_FIELD(type, field_name, sig, value1, value2) \
@@ -1217,15 +1520,28 @@
env_->SetStatic ## type ## Field(c, fid, value2); \
EXPECT_EQ(value2, env_->GetStatic ## type ## Field(c, fid)); \
\
+ bool old_check_jni = vm_->SetCheckJniEnabled(false); \
+ { \
+ CheckJniAbortCatcher jni_abort_catcher; \
+ env_->GetStatic ## type ## Field(nullptr, fid); \
+ env_->SetStatic ## type ## Field(nullptr, fid, value1); \
+ } \
CheckJniAbortCatcher jni_abort_catcher; \
- env_->GetStatic ## type ## Field(nullptr, fid); \
- jni_abort_catcher.Check("received null jclass"); \
- env_->SetStatic ## type ## Field(nullptr, fid, value1); \
- jni_abort_catcher.Check("received null jclass"); \
env_->GetStatic ## type ## Field(c, nullptr); \
jni_abort_catcher.Check("fid == null"); \
env_->SetStatic ## type ## Field(c, nullptr, value1); \
jni_abort_catcher.Check("fid == null"); \
+ \
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true)); \
+ env_->GetStatic ## type ## Field(nullptr, fid); \
+ jni_abort_catcher.Check("received NULL jclass"); \
+ env_->SetStatic ## type ## Field(nullptr, fid, value1); \
+ jni_abort_catcher.Check("received NULL jclass"); \
+ env_->GetStatic ## type ## Field(c, nullptr); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ env_->SetStatic ## type ## Field(c, nullptr, value1); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni)); \
} while (false)
#define EXPECT_PRIMITIVE_FIELD(instance, type, field_name, sig, value1, value2) \
@@ -1237,6 +1553,7 @@
env_->Set ## type ## Field(instance, fid, value2); \
EXPECT_EQ(value2, env_->Get ## type ## Field(instance, fid)); \
\
+ bool old_check_jni = vm_->SetCheckJniEnabled(false); \
CheckJniAbortCatcher jni_abort_catcher; \
env_->Get ## type ## Field(nullptr, fid); \
jni_abort_catcher.Check("obj == null"); \
@@ -1246,6 +1563,16 @@
jni_abort_catcher.Check("fid == null"); \
env_->Set ## type ## Field(instance, nullptr, value1); \
jni_abort_catcher.Check("fid == null"); \
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true)); \
+ env_->Get ## type ## Field(nullptr, fid); \
+ jni_abort_catcher.Check("field operation on NULL object:"); \
+ env_->Set ## type ## Field(nullptr, fid, value1); \
+ jni_abort_catcher.Check("field operation on NULL object:"); \
+ env_->Get ## type ## Field(instance, nullptr); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ env_->Set ## type ## Field(instance, nullptr, value1); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni)); \
} while (false)
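
Both field-test macros keep growing but still end in "} while (false)"; that wrapper is the usual way to make a multi-statement macro behave like a single statement at the call site. A small illustration with a hypothetical CHECK_NONZERO macro (not part of the test suite):

    #include <cstdio>

    #define CHECK_NONZERO(x)            \
      do {                              \
        if ((x) == 0) {                 \
          std::printf("zero value\n");  \
        }                               \
      } while (false)

    int main(int argc, char**) {
      if (argc > 1)
        CHECK_NONZERO(argc - 1);  // expands to one statement, so the trailing ';' fits
      else
        std::printf("no arguments\n");
      return 0;
    }
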
@@ -1337,12 +1664,17 @@
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
{
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->DeleteLocalRef(s);
+ }
CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
env_->DeleteLocalRef(s);
-
- std::string expected(StringPrintf("native code passing in reference to "
- "invalid local reference: %p", s));
+ std::string expected(StringPrintf("jobject is an invalid local reference: %p", s));
check_jni_abort_catcher.Check(expected.c_str());
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
s = env_->NewStringUTF("");
@@ -1433,12 +1765,17 @@
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
{
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->DeleteGlobalRef(o);
+ }
CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
env_->DeleteGlobalRef(o);
-
- std::string expected(StringPrintf("native code passing in reference to "
- "invalid global reference: %p", o));
+ std::string expected(StringPrintf("jobject is an invalid global reference: %p", o));
check_jni_abort_catcher.Check(expected.c_str());
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
jobject o1 = env_->NewGlobalRef(s);
@@ -1478,12 +1815,17 @@
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
{
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->DeleteWeakGlobalRef(o);
+ }
CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
env_->DeleteWeakGlobalRef(o);
-
- std::string expected(StringPrintf("native code passing in reference to "
- "invalid weak global reference: %p", o));
+ std::string expected(StringPrintf("jobject is an invalid weak global reference: %p", o));
check_jni_abort_catcher.Check(expected.c_str());
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
jobject o1 = env_->NewWeakGlobalRef(s);
@@ -1502,8 +1844,6 @@
}
TEST_F(JniInternalTest, Throw) {
- EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
-
jclass exception_class = env_->FindClass("java/lang/RuntimeException");
ASSERT_TRUE(exception_class != nullptr);
jthrowable exception = reinterpret_cast<jthrowable>(env_->AllocObject(exception_class));
@@ -1514,11 +1854,18 @@
jthrowable thrown_exception = env_->ExceptionOccurred();
env_->ExceptionClear();
EXPECT_TRUE(env_->IsSameObject(exception, thrown_exception));
+
+ // Bad argument.
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
+ check_jni_abort_catcher.Check("Throw received NULL jthrowable");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, ThrowNew) {
- EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
-
jclass exception_class = env_->FindClass("java/lang/RuntimeException");
ASSERT_TRUE(exception_class != nullptr);
@@ -1535,6 +1882,16 @@
thrown_exception = env_->ExceptionOccurred();
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, exception_class));
+
+ // Bad argument.
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_EQ(JNI_ERR, env_->ThrowNew(nullptr, nullptr));
+ check_jni_abort_catcher.Check("c == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(JNI_ERR, env_->ThrowNew(nullptr, nullptr));
+ check_jni_abort_catcher.Check("ThrowNew received NULL jclass");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, NewDirectBuffer_GetDirectBufferAddress_GetDirectBufferCapacity) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index ab86eaa..e585412 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -65,7 +65,7 @@
kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
// Count in higher bits.
kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift,
- kThinLockCountMask = (1 << kThinLockCountShift) - 1,
+ kThinLockCountMask = (1 << kThinLockCountSize) - 1,
kThinLockMaxCount = kThinLockCountMask,
// State in the highest bits.
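
The lock_word.h change is a one-token fix: the thin-lock count mask must be derived from the count field's width (kThinLockCountSize), not from its shift, otherwise the derived kThinLockMaxCount overstates what the field can hold. A sketch of the arithmetic with assumed field widths (16-bit owner, 14-bit count; not the authoritative LockWord layout):

    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint32_t kOwnerSize = 16;   // assumed owner-field width
      constexpr uint32_t kOwnerShift = 0;
      constexpr uint32_t kCountSize = 14;   // assumed count-field width
      constexpr uint32_t kCountShift = kOwnerSize + kOwnerShift;
      constexpr uint32_t kMaskFromSize = (1u << kCountSize) - 1;    // fixed line: 0x3fff
      constexpr uint32_t kMaskFromShift = (1u << kCountShift) - 1;  // old line: 0xffff
      // The max-count constant is defined as the mask itself, so the old formula
      // advertised a maximum count that cannot fit in the 14-bit field.
      std::printf("max count with size-based mask  = %u\n", kMaskFromSize);
      std::printf("max count with shift-based mask = %u\n", kMaskFromShift);
      return 0;
    }
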
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1074253..c281b22 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -130,8 +130,67 @@
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif
+// Return true if the address range is contained in a single /proc/self/map entry.
+static bool CheckOverlapping(uintptr_t begin,
+ uintptr_t end,
+ std::string* error_msg) {
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+ if (map.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to build process map");
+ return false;
+ }
+ for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+ if ((begin >= it->start && begin < it->end) // start of new within old
+ && (end > it->start && end <= it->end)) { // end of new within old
+ return true;
+ }
+ }
+ std::string maps;
+ ReadFileToString("/proc/self/maps", &maps);
+ *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
+ "any existing map:\n%s\n",
+ begin, end, maps.c_str());
+ return false;
+}
+
+// Return true if the address range does not conflict with any /proc/self/maps entry.
+static bool CheckNonOverlapping(uintptr_t begin,
+ uintptr_t end,
+ std::string* error_msg) {
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+ if (map.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to build process map");
+ return false;
+ }
+ for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+ if ((begin >= it->start && begin < it->end) // start of new within old
+ || (end > it->start && end < it->end) // end of new within old
+ || (begin <= it->start && end > it->end)) { // start/end of new includes all of old
+ std::ostringstream map_info;
+ map_info << std::make_pair(it, map->end());
+ *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
+ "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
+ begin, end,
+ static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
+ it->name.c_str(),
+ map_info.str().c_str());
+ return false;
+ }
+ }
+ return true;
+}
+
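
CheckOverlapping and CheckNonOverlapping answer two different interval questions: the first requires the requested range to sit entirely inside one existing map entry, the second rejects any intersection at all. The standalone sketch below (hypothetical MapEntry instead of BacktraceMap) uses the canonical half-open overlap test, which is marginally stricter than the clause-by-clause check in the hunk above:

    #include <cstdint>
    #include <vector>

    struct MapEntry { uintptr_t start, end; };  // half-open [start, end)

    bool ContainedInOne(const std::vector<MapEntry>& maps, uintptr_t begin, uintptr_t end) {
      for (const MapEntry& m : maps) {
        if (begin >= m.start && begin < m.end && end > m.start && end <= m.end) {
          return true;  // both endpoints fall inside the same entry
        }
      }
      return false;
    }

    bool IntersectsNone(const std::vector<MapEntry>& maps, uintptr_t begin, uintptr_t end) {
      for (const MapEntry& m : maps) {
        if (begin < m.end && end > m.start) {
          return false;  // any overlap at all disqualifies the request
        }
      }
      return true;
    }

    int main() {
      std::vector<MapEntry> maps = {{0x1000, 0x3000}, {0x5000, 0x6000}};
      bool ok1 = ContainedInOne(maps, 0x1000, 0x2000);  // true: inside the first entry
      bool ok2 = IntersectsNone(maps, 0x3000, 0x5000);  // true: falls in the gap
      bool ok3 = IntersectsNone(maps, 0x2000, 0x4000);  // false: straddles the first entry
      return (ok1 && ok2 && !ok3) ? 0 : 1;
    }
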
+// CheckMapRequest validates a non-MAP_FAILED mmap result against the
+// expected address, calling munmap and reporting the reason in
+// error_msg if validation fails.
+//
+// If expected_ptr is nullptr, nothing is checked beyond the fact that
+// actual_ptr is not MAP_FAILED. If expected_ptr is non-null, we check
+// that actual_ptr == expected_ptr and, if not, report the conflicting
+// mapping in error_msg when one is found, or a generic error otherwise.
static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
- std::ostringstream* error_msg) {
+ std::string* error_msg) {
// Handled first by caller for more specific error messages.
CHECK(actual_ptr != MAP_FAILED);
@@ -139,6 +198,10 @@
return true;
}
+ uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
+ uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+ uintptr_t limit = expected + byte_count;
+
if (expected_ptr == actual_ptr) {
return true;
}
@@ -149,40 +212,19 @@
PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
}
- uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
- uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
- uintptr_t limit = expected + byte_count;
-
- std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
- if (!map->Build()) {
- *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
- "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
-
+ if (!CheckNonOverlapping(expected, limit, error_msg)) {
return false;
}
- for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
- if ((expected >= it->start && expected < it->end) // start of new within old
- || (limit > it->start && limit < it->end) // end of new within old
- || (expected <= it->start && limit > it->end)) { // start/end of new includes all of old
- *error_msg
- << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
- "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
- expected, limit,
- static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
- it->name.c_str())
- << std::make_pair(it, map->end());
- return false;
- }
- }
- *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
- "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
+
+ *error_msg = StringPrintf("Failed to mmap at expected address, mapped at "
+ "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
return false;
}
-MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
if (byte_count == 0) {
- return new MemMap(name, nullptr, 0, nullptr, 0, prot);
+ return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
@@ -222,11 +264,11 @@
// 4GB.
if (low_4gb && (
// Start out of bounds.
- (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
+ (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
// End out of bounds. For simplicity, this will fail for the last page of memory.
- (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
+ (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
*error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
- expected, expected + page_aligned_byte_count);
+ expected_ptr, expected_ptr + page_aligned_byte_count);
return nullptr;
}
#endif
@@ -238,7 +280,7 @@
#if USE_ART_LOW_4G_ALLOCATOR
// MAP_32BIT only available on x86_64.
void* actual = MAP_FAILED;
- if (low_4gb && expected == nullptr) {
+ if (low_4gb && expected_ptr == nullptr) {
bool first_run = true;
for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -294,18 +336,18 @@
saved_errno = ENOMEM;
}
} else {
- actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+ actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
saved_errno = errno;
}
#else
#if defined(__LP64__)
- if (low_4gb && expected == nullptr) {
+ if (low_4gb && expected_ptr == nullptr) {
flags |= MAP_32BIT;
}
#endif
- void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+ void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
saved_errno = errno;
#endif
@@ -314,44 +356,51 @@
ReadFileToString("/proc/self/maps", &maps);
*error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
- expected, page_aligned_byte_count, prot, flags, fd.get(),
+ expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
strerror(saved_errno), maps.c_str());
return nullptr;
}
std::ostringstream check_map_request_error_msg;
- if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
- *error_msg = check_map_request_error_msg.str();
+ if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
return nullptr;
}
return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
- page_aligned_byte_count, prot);
+ page_aligned_byte_count, prot, false);
}
-MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
+ uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+ uintptr_t limit = expected + byte_count;
if (reuse) {
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
- CHECK(expected != nullptr);
+ CHECK(expected_ptr != nullptr);
+ if (!CheckOverlapping(expected, limit, error_msg)) {
+ return nullptr;
+ }
flags |= MAP_FIXED;
} else {
CHECK_EQ(0, flags & MAP_FIXED);
+ if (expected_ptr != nullptr && !CheckNonOverlapping(expected, limit, error_msg)) {
+ return nullptr;
+ }
}
if (byte_count == 0) {
- return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
+ return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
}
// Adjust 'offset' to be page-aligned as required by mmap.
int page_offset = start % kPageSize;
off_t page_aligned_offset = start - page_offset;
// Adjust 'byte_count' to be page-aligned as we will map this anyway.
size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
- // The 'expected' is modified (if specified, ie non-null) to be page aligned to the file but not
- // necessarily to virtual memory. mmap will page align 'expected' for us.
- byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);
+  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
+  // but not necessarily to virtual memory. mmap will page align 'expected_ptr' for us.
+ byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
page_aligned_byte_count,
@@ -373,21 +422,22 @@
return nullptr;
}
std::ostringstream check_map_request_error_msg;
- if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
- *error_msg = check_map_request_error_msg.str();
+ if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
return nullptr;
}
return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
- prot);
+ prot, reuse);
}
MemMap::~MemMap() {
if (base_begin_ == nullptr && base_size_ == 0) {
return;
}
- int result = munmap(base_begin_, base_size_);
- if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ if (!reuse_) {
+ int result = munmap(base_begin_, base_size_);
+ if (result == -1) {
+ PLOG(FATAL) << "munmap failed";
+ }
}
// Remove it from maps_.
@@ -405,9 +455,9 @@
}
MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
- size_t base_size, int prot)
+ size_t base_size, int prot, bool reuse)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
- prot_(prot) {
+ prot_(prot), reuse_(reuse) {
if (size_ == 0) {
CHECK(begin_ == nullptr);
CHECK(base_begin_ == nullptr);
@@ -437,7 +487,7 @@
byte* new_base_end = new_end;
DCHECK_LE(new_base_end, old_base_end);
if (new_base_end == old_base_end) {
- return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
+ return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
}
size_ = new_end - reinterpret_cast<byte*>(begin_);
base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
@@ -489,7 +539,7 @@
maps.c_str());
return nullptr;
}
- return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
+ return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
void MemMap::MadviseDontNeedAndZero() {
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index defa6a5..872c63b 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -73,7 +73,9 @@
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative. This version allows
- // requesting a specific address for the base of the mapping.
+ // requesting a specific address for the base of the
+ // mapping. "reuse" allows us to create a view into an existing
+ // mapping where we do not take ownership of the memory.
//
// On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
@@ -134,7 +136,7 @@
private:
MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
- int prot) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
static void DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -145,7 +147,7 @@
static MemMap* GetLargestMemMapAt(void* address)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
- std::string name_;
+ const std::string name_;
byte* const begin_; // Start of data.
size_t size_; // Length of data.
@@ -153,6 +155,11 @@
size_t base_size_; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
int prot_; // Protection of the map.
+ // When reuse_ is true, this is just a view of an existing mapping
+ // and we do not take ownership and are not responsible for
+ // unmapping.
+ const bool reuse_;
+
#if USE_ART_LOW_4G_ALLOCATOR
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
#endif
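
The new reuse_ flag changes ownership, not mapping: a MemMap built with reuse == true is only a view into pages someone else reserved, so its destructor must leave them mapped. A self-contained ownership sketch (hypothetical Mapping type, not the real MemMap):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    class Mapping {
     public:
      Mapping(void* base, size_t size, bool reuse) : base_(base), size_(size), reuse_(reuse) {}
      ~Mapping() {
        if (!reuse_ && base_ != nullptr) {
          munmap(base_, size_);  // owner: release the pages
        }
        // reuse_ == true: just a view, the reservation's owner will unmap it
      }
     private:
      void* base_;
      size_t size_;
      bool reuse_;
    };

    int main() {
      size_t size = 4096;
      void* pages = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(pages != MAP_FAILED);
      {
        Mapping view(pages, size, /*reuse=*/true);  // destroyed here, pages stay mapped
      }
      Mapping owner(pages, size, /*reuse=*/false);  // unmaps on destruction
      return 0;
    }
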
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index 849ab1c..bab2e86 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -56,14 +56,31 @@
return ComputeInternalPointer<T>(offset);
}
+ // Load a single bit in the region. The bit at offset 0 is the least
+ // significant bit in the first byte.
+ bool LoadBit(uintptr_t bit_offset) const {
+ uint8_t bit_mask;
+ uint8_t byte = *ComputeBitPointer(bit_offset, &bit_mask);
+ return byte & bit_mask;
+ }
+
+ void StoreBit(uintptr_t bit_offset, bool value) const {
+ uint8_t bit_mask;
+ uint8_t* byte = ComputeBitPointer(bit_offset, &bit_mask);
+ if (value) {
+ *byte |= bit_mask;
+ } else {
+ *byte &= ~bit_mask;
+ }
+ }
+
void CopyFrom(size_t offset, const MemoryRegion& from) const;
// Compute a sub memory region based on an existing one.
- void Subregion(const MemoryRegion& from, uintptr_t offset, uintptr_t size) {
- CHECK_GE(from.size(), size);
- CHECK_LE(offset, from.size() - size);
- pointer_ = reinterpret_cast<void*>(from.start() + offset);
- size_ = size;
+ MemoryRegion Subregion(uintptr_t offset, uintptr_t size) const {
+ CHECK_GE(this->size(), size);
+ CHECK_LE(offset, this->size() - size);
+ return MemoryRegion(reinterpret_cast<void*>(start() + offset), size);
}
// Compute an extended memory region based on an existing one.
@@ -90,8 +107,6 @@
void* pointer_;
size_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryRegion);
};
} // namespace art
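
LoadBit and StoreBit address bits LSB-first within each byte: bit offset 0 is the least significant bit of the first byte. A standalone sketch of that addressing, with a local ComputeBitPointer helper standing in for the one MemoryRegion already provides:

    #include <cassert>
    #include <cstdint>

    static uint8_t* ComputeBitPointer(uint8_t* base, uintptr_t bit_offset, uint8_t* mask) {
      *mask = static_cast<uint8_t>(1u << (bit_offset % 8));
      return base + bit_offset / 8;
    }

    static void StoreBit(uint8_t* base, uintptr_t bit_offset, bool value) {
      uint8_t mask;
      uint8_t* byte = ComputeBitPointer(base, bit_offset, &mask);
      if (value) {
        *byte |= mask;
      } else {
        *byte &= static_cast<uint8_t>(~mask);
      }
    }

    static bool LoadBit(uint8_t* base, uintptr_t bit_offset) {
      uint8_t mask;
      uint8_t* byte = ComputeBitPointer(base, bit_offset, &mask);
      return (*byte & mask) != 0;
    }

    int main() {
      uint8_t buffer[2] = {0, 0};
      StoreBit(buffer, 0, true);   // LSB of byte 0
      StoreBit(buffer, 9, true);   // bit 1 of byte 1
      assert(buffer[0] == 0x01 && buffer[1] == 0x02);
      assert(LoadBit(buffer, 9) && !LoadBit(buffer, 8));
      StoreBit(buffer, 0, false);
      assert(buffer[0] == 0x00);
      return 0;
    }
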
diff --git a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h
index 42a6089..3a5056a 100644
--- a/runtime/method_helper-inl.h
+++ b/runtime/method_helper-inl.h
@@ -19,13 +19,16 @@
#include "method_helper.h"
+#include "class_linker.h"
+#include "mirror/object_array.h"
#include "runtime.h"
+#include "thread-inl.h"
namespace art {
inline mirror::Class* MethodHelper::GetClassFromTypeIdx(uint16_t type_idx, bool resolve) {
mirror::ArtMethod* method = GetMethod();
- mirror::Class* type = method->GetDexCacheResolvedTypes()->Get(type_idx);
+ mirror::Class* type = method->GetDexCacheResolvedType(type_idx);
if (type == nullptr && resolve) {
type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
@@ -33,6 +36,15 @@
return type;
}
+inline mirror::Class* MethodHelper::GetReturnType(bool resolve) {
+ mirror::ArtMethod* method = GetMethod();
+ const DexFile* dex_file = method->GetDexFile();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
+ const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ uint16_t return_type_idx = proto_id.return_type_idx_;
+ return GetClassFromTypeIdx(return_type_idx, resolve);
+}
+
inline mirror::String* MethodHelper::ResolveString(uint32_t string_idx) {
mirror::ArtMethod* method = GetMethod();
mirror::String* s = method->GetDexCacheStrings()->Get(string_idx);
diff --git a/runtime/method_helper.cc b/runtime/method_helper.cc
index 4b1b1da..1bd2f90 100644
--- a/runtime/method_helper.cc
+++ b/runtime/method_helper.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "class_linker.h"
#include "dex_file-inl.h"
@@ -53,6 +53,32 @@
return dex_file->GetMethodSignature(mid) == other_dex_file->GetMethodSignature(other_mid);
}
+bool MethodHelper::HasSameSignatureWithDifferentClassLoaders(MethodHelper* other) {
+ if (UNLIKELY(GetReturnType() != other->GetReturnType())) {
+ return false;
+ }
+ const DexFile::TypeList* types = method_->GetParameterTypeList();
+ const DexFile::TypeList* other_types = other->method_->GetParameterTypeList();
+ if (types == nullptr) {
+ return (other_types == nullptr) || (other_types->Size() == 0);
+ } else if (UNLIKELY(other_types == nullptr)) {
+ return types->Size() == 0;
+ }
+ uint32_t num_types = types->Size();
+ if (UNLIKELY(num_types != other_types->Size())) {
+ return false;
+ }
+ for (uint32_t i = 0; i < num_types; ++i) {
+ mirror::Class* param_type = GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
+ mirror::Class* other_param_type =
+ other->GetClassFromTypeIdx(other_types->GetTypeItem(i).type_idx_);
+ if (UNLIKELY(param_type != other_param_type)) {
+ return false;
+ }
+ }
+ return true;
+}
+
uint32_t MethodHelper::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = GetMethod();
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index 012695e..62465be 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -75,14 +75,7 @@
// May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this has caused a
// large number of bugs at call sites.
- mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = GetMethod();
- const DexFile* dex_file = method->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
- const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
- uint16_t return_type_idx = proto_id.return_type_idx_;
- return GetClassFromTypeIdx(return_type_idx, resolve);
- }
+ mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// "1 +" because the first in Args is the receiver.
@@ -115,31 +108,7 @@
bool HasSameNameAndSignature(MethodHelper* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasSameSignatureWithDifferentClassLoaders(MethodHelper* other)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(GetReturnType() != other->GetReturnType())) {
- return false;
- }
- const DexFile::TypeList* types = method_->GetParameterTypeList();
- const DexFile::TypeList* other_types = other->method_->GetParameterTypeList();
- if (types == nullptr) {
- return (other_types == nullptr) || (other_types->Size() == 0);
- } else if (UNLIKELY(other_types == nullptr)) {
- return types->Size() == 0;
- }
- uint32_t num_types = types->Size();
- if (UNLIKELY(num_types != other_types->Size())) {
- return false;
- }
- for (uint32_t i = 0; i < num_types; ++i) {
- mirror::Class* param_type = GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
- mirror::Class* other_param_type =
- other->GetClassFromTypeIdx(other_types->GetTypeItem(i).type_idx_);
- if (UNLIKELY(param_type != other_param_type)) {
- return false;
- }
- }
- return true;
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* GetClassFromTypeIdx(uint16_t type_idx, bool resolve = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index f3c8250..2c0ea36 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -166,8 +166,8 @@
template<class T>
inline void PrimitiveArray<T>::VisitRoots(RootCallback* callback, void* arg) {
- if (array_class_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&array_class_), arg, 0, kRootStickyClass);
+ if (!array_class_.IsNull()) {
+ array_class_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
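
This and the following mirror/ hunks all migrate raw static Class* roots to GcRoot<Class>, so null checks become IsNull(), reads go through Read() (where the read barrier hooks in), and root visiting goes through VisitRoot(). A much-simplified stand-in showing only the shape of that API (the real gc_root.h version takes a RootCallback and a read-barrier template option):

    #include <cassert>
    #include <cstddef>

    template <typename T>
    class GcRootSketch {
     public:
      GcRootSketch() : ref_(nullptr) {}
      explicit GcRootSketch(T* ref) : ref_(ref) {}
      bool IsNull() const { return ref_ == nullptr; }
      T* Read() const { return ref_; }  // real version applies the read barrier here
      template <typename Visitor>
      void VisitRoot(Visitor& visitor) { visitor(&ref_); }  // lets the GC relocate the root
     private:
      T* ref_;
    };

    struct Klass { int id; };

    int main() {
      static GcRootSketch<Klass> array_class;      // mirrors PrimitiveArray::array_class_
      Klass k{7};
      assert(array_class.IsNull());
      array_class = GcRootSketch<Klass>(&k);       // SetArrayClass
      assert(!array_class.IsNull() && array_class.Read()->id == 7);
      array_class = GcRootSketch<Klass>(nullptr);  // ResetArrayClass
      assert(array_class.IsNull());
      return 0;
    }
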
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 63f9860..f54af85 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -124,7 +124,7 @@
art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
}
-template <typename T> Class* PrimitiveArray<T>::array_class_ = NULL;
+template <typename T> GcRoot<Class> PrimitiveArray<T>::array_class_;
// Explicitly instantiate all the primitive array types.
template class PrimitiveArray<uint8_t>; // BooleanArray
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 6588b57..7af88d6 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,10 +17,10 @@
#ifndef ART_RUNTIME_MIRROR_ARRAY_H_
#define ART_RUNTIME_MIRROR_ARRAY_H_
+#include "gc_root.h"
#include "gc/allocator_type.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -159,27 +159,26 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetArrayClass(Class* array_class) {
- CHECK(array_class_ == nullptr);
+ CHECK(array_class_.IsNull());
CHECK(array_class != nullptr);
- array_class_ = array_class;
+ array_class_ = GcRoot<Class>(array_class);
}
static Class* GetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(array_class_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &array_class_);
+ DCHECK(!array_class_.IsNull());
+ return array_class_.Read();
}
static void ResetArrayClass() {
- CHECK(array_class_ != nullptr);
- array_class_ = nullptr;
+ CHECK(!array_class_.IsNull());
+ array_class_ = GcRoot<Class>(nullptr);
}
static void VisitRoots(RootCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- static Class* array_class_;
+ static GcRoot<Class> array_class_;
DISALLOW_IMPLICIT_CONSTRUCTORS(PrimitiveArray);
};
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index da21dfe..3c7c6ce 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -29,7 +29,7 @@
namespace mirror {
// TODO: Get global references for these
-Class* ArtField::java_lang_reflect_ArtField_ = NULL;
+GcRoot<Class> ArtField::java_lang_reflect_ArtField_;
ArtField* ArtField::FromReflectedField(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_field) {
@@ -40,14 +40,14 @@
}
void ArtField::SetClass(Class* java_lang_reflect_ArtField) {
- CHECK(java_lang_reflect_ArtField_ == NULL);
+ CHECK(java_lang_reflect_ArtField_.IsNull());
CHECK(java_lang_reflect_ArtField != NULL);
- java_lang_reflect_ArtField_ = java_lang_reflect_ArtField;
+ java_lang_reflect_ArtField_ = GcRoot<Class>(java_lang_reflect_ArtField);
}
void ArtField::ResetClass() {
- CHECK(java_lang_reflect_ArtField_ != NULL);
- java_lang_reflect_ArtField_ = NULL;
+ CHECK(!java_lang_reflect_ArtField_.IsNull());
+ java_lang_reflect_ArtField_ = GcRoot<Class>(nullptr);
}
void ArtField::SetOffset(MemberOffset num_bytes) {
@@ -64,9 +64,8 @@
}
void ArtField::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_reflect_ArtField_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_reflect_ArtField_), arg, 0,
- kRootStickyClass);
+ if (!java_lang_reflect_ArtField_.IsNull()) {
+ java_lang_reflect_ArtField_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index 741c6eb..f3dfa15 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -19,11 +19,12 @@
#include <jni.h>
+#include "gc_root.h"
#include "modifiers.h"
#include "object.h"
#include "object_callbacks.h"
#include "primitive.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
namespace art {
@@ -135,9 +136,8 @@
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetJavaLangReflectArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_reflect_ArtField_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kReadBarrierOption>(
- &java_lang_reflect_ArtField_);
+ DCHECK(!java_lang_reflect_ArtField_.IsNull());
+ return java_lang_reflect_ArtField_.Read<kReadBarrierOption>();
}
static void SetClass(Class* java_lang_reflect_ArtField);
@@ -180,7 +180,7 @@
// Offset of field within an instance or in the Class' static fields
uint32_t offset_;
- static Class* java_lang_reflect_ArtField_;
+ static GcRoot<Class> java_lang_reflect_ArtField_;
friend struct art::ArtFieldOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtField);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 01b05a6..0dd1588 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -19,6 +19,8 @@
#include "art_method.h"
+#include "art_field.h"
+#include "class.h"
#include "class_linker.h"
#include "dex_cache.h"
#include "dex_file.h"
@@ -41,9 +43,8 @@
template<ReadBarrierOption kReadBarrierOption>
inline Class* ArtMethod::GetJavaLangReflectArtMethod() {
- DCHECK(java_lang_reflect_ArtMethod_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kReadBarrierOption>(
- &java_lang_reflect_ArtMethod_);
+ DCHECK(!java_lang_reflect_ArtMethod_.IsNull());
+ return java_lang_reflect_ArtMethod_.Read<kReadBarrierOption>();
}
inline Class* ArtMethod::GetDeclaringClass() {
@@ -88,11 +89,60 @@
OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_));
}
+inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index) {
+ ArtMethod* method = GetDexCacheResolvedMethods()->Get(method_index);
+ if (method != nullptr && !method->GetDeclaringClass()->IsErroneous()) {
+ return method;
+ } else {
+ return nullptr;
+ }
+}
+
+inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method) {
+ GetDexCacheResolvedMethods()->Set<false>(method_idx, new_method);
+}
+
+inline bool ArtMethod::HasDexCacheResolvedMethods() {
+ return GetDexCacheResolvedMethods() != nullptr;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache) {
+ return GetDexCacheResolvedMethods() == other_cache;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other) {
+ return GetDexCacheResolvedMethods() == other->GetDexCacheResolvedMethods();
+}
+
+
inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() {
return GetFieldObject<ObjectArray<Class>>(
OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_));
}
+template <bool kWithCheck>
+inline Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index) {
+ Class* klass;
+ if (kWithCheck) {
+ klass = GetDexCacheResolvedTypes()->Get(type_index);
+ } else {
+ klass = GetDexCacheResolvedTypes()->GetWithoutChecks(type_index);
+ }
+ return (klass != nullptr && !klass->IsErroneous()) ? klass : nullptr;
+}
+
+inline bool ArtMethod::HasDexCacheResolvedTypes() {
+ return GetDexCacheResolvedTypes() != nullptr;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache) {
+ return GetDexCacheResolvedTypes() == other_cache;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedTypes(ArtMethod* other) {
+ return GetDexCacheResolvedTypes() == other->GetDexCacheResolvedTypes();
+}
+
inline uint32_t ArtMethod::GetCodeSize() {
DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
const void* code = EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode());
@@ -397,7 +447,7 @@
inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx) {
mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- return method->GetDexCacheResolvedTypes()->Get(type_idx) != nullptr;
+ return method->GetDexCacheResolvedType(type_idx) != nullptr;
}
inline int32_t ArtMethod::GetLineNumFromDexPC(uint32_t dex_pc) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 167f848..8eacb1c 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -47,7 +47,7 @@
#endif
// TODO: get global references for these
-Class* ArtMethod::java_lang_reflect_ArtMethod_ = NULL;
+GcRoot<Class> ArtMethod::java_lang_reflect_ArtMethod_;
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
@@ -60,9 +60,8 @@
void ArtMethod::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_reflect_ArtMethod_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_reflect_ArtMethod_), arg, 0,
- kRootStickyClass);
+ if (!java_lang_reflect_ArtMethod_.IsNull()) {
+ java_lang_reflect_ArtMethod_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
@@ -80,14 +79,14 @@
}
void ArtMethod::SetClass(Class* java_lang_reflect_ArtMethod) {
- CHECK(java_lang_reflect_ArtMethod_ == NULL);
+ CHECK(java_lang_reflect_ArtMethod_.IsNull());
CHECK(java_lang_reflect_ArtMethod != NULL);
- java_lang_reflect_ArtMethod_ = java_lang_reflect_ArtMethod;
+ java_lang_reflect_ArtMethod_ = GcRoot<Class>(java_lang_reflect_ArtMethod);
}
void ArtMethod::ResetClass() {
- CHECK(java_lang_reflect_ArtMethod_ != NULL);
- java_lang_reflect_ArtMethod_ = NULL;
+ CHECK(!java_lang_reflect_ArtMethod_.IsNull());
+ java_lang_reflect_ArtMethod_ = GcRoot<Class>(nullptr);
}
void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
@@ -130,12 +129,11 @@
Class* declaring_class = GetDeclaringClass();
Class* super_class = declaring_class->GetSuperClass();
uint16_t method_index = GetMethodIndex();
- ObjectArray<ArtMethod>* super_class_vtable = super_class->GetVTable();
ArtMethod* result = NULL;
// Did this method override a super class method? If so load the result from the super class'
// vtable
- if (super_class_vtable != NULL && method_index < super_class_vtable->GetLength()) {
- result = super_class_vtable->Get(method_index);
+ if (super_class->HasVTable() && method_index < super_class->GetVTableLength()) {
+ result = super_class->GetVTableEntry(method_index);
} else {
// Method didn't override superclass method so search interfaces
if (IsProxyMethod()) {
@@ -159,12 +157,12 @@
}
}
}
-#ifndef NDEBUG
- StackHandleScope<2> hs(Thread::Current());
- MethodHelper result_mh(hs.NewHandle(result));
- MethodHelper this_mh(hs.NewHandle(this));
- DCHECK(result == NULL || this_mh.HasSameNameAndSignature(&result_mh));
-#endif
+ if (kIsDebugBuild) {
+ StackHandleScope<2> hs(Thread::Current());
+ MethodHelper result_mh(hs.NewHandle(result));
+ MethodHelper this_mh(hs.NewHandle(this));
+ DCHECK(result == nullptr || this_mh.HasSameNameAndSignature(&result_mh));
+ }
return result;
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 081bee1..4ebceff 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_ART_METHOD_H_
#include "dex_file.h"
+#include "gc_root.h"
#include "invoke_type.h"
#include "modifiers.h"
#include "object.h"
@@ -215,13 +216,25 @@
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
}
- ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedMethods(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<Class>* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template <bool kWithCheck = true>
+ Class* GetDexCacheResolvedType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find the method that this method overrides
ArtMethod* FindOverriddenMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -514,9 +527,13 @@
// ifTable.
uint32_t method_index_;
- static Class* java_lang_reflect_ArtMethod_;
+ static GcRoot<Class> java_lang_reflect_ArtMethod_;
private:
+ ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ObjectArray<Class>* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 329a984..c3754d7 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -161,6 +161,37 @@
CHECK(method == GetImTable()->Get(i));
}
+inline bool Class::HasVTable() {
+ return (GetVTable() != nullptr) || ShouldHaveEmbeddedImtAndVTable();
+}
+
+inline int32_t Class::GetVTableLength() {
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ return GetEmbeddedVTableLength();
+ }
+ return (GetVTable() != nullptr) ? GetVTable()->GetLength() : 0;
+}
+
+inline ArtMethod* Class::GetVTableEntry(uint32_t i) {
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ return GetEmbeddedVTableEntry(i);
+ }
+ return (GetVTable() != nullptr) ? GetVTable()->Get(i) : nullptr;
+}
+
+inline int32_t Class::GetEmbeddedVTableLength() {
+ return GetField32(EmbeddedVTableLengthOffset());
+}
+
+inline void Class::SetEmbeddedVTableLength(int32_t len) {
+ SetField32<false>(EmbeddedVTableLengthOffset(), len);
+}
+
+inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i) {
+ uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry);
+ return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset));
+}
+
inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) {
uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry);
SetFieldObject<false>(MemberOffset(offset), method);
@@ -340,12 +371,12 @@
DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsMiranda());
// The argument method may from a super class.
// Use the index to a potentially overridden one for this instance's class.
- return GetVTable()->Get(method->GetMethodIndex());
+ return GetVTableEntry(method->GetMethodIndex());
}
inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method) {
DCHECK(!method->GetDeclaringClass()->IsInterface());
- return GetSuperClass()->GetVTable()->Get(method->GetMethodIndex());
+ return GetSuperClass()->GetVTableEntry(method->GetMethodIndex());
}
inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method) {
@@ -534,13 +565,19 @@
if (has_embedded_tables) {
uint32_t embedded_imt_size = kImtSize * sizeof(ImTableEntry);
uint32_t embedded_vtable_size = num_vtable_entries * sizeof(VTableEntry);
- size += embedded_imt_size + embedded_vtable_size;
+ size += embedded_imt_size +
+ sizeof(int32_t) /* vtable len */ +
+ embedded_vtable_size;
}
// Space used by reference statics.
size += num_ref_static_fields * sizeof(HeapReference<Object>);
// Possible pad for alignment.
- if (((size & 7) != 0) && (num_64bit_static_fields > 0) && (num_32bit_static_fields == 0)) {
+ if (((size & 7) != 0) && (num_64bit_static_fields > 0)) {
size += sizeof(uint32_t);
+ if (num_32bit_static_fields != 0) {
+ // Shuffle one 32 bit static field forward.
+ num_32bit_static_fields--;
+ }
}
// Space used for primitive static fields.
size += (num_32bit_static_fields * sizeof(uint32_t)) +
@@ -574,7 +611,10 @@
pos += sizeof(ImTableEntry);
}
- count = ((GetVTable() != NULL) ? GetVTable()->GetLength() : 0);
+ // Skip vtable length.
+ pos += sizeof(int32_t);
+
+ count = GetEmbeddedVTableLength();
for (size_t i = 0; i < count; ++i) {
MemberOffset offset = MemberOffset(pos);
visitor(this, offset, true);
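
With the embedded tables, a class object is laid out as the Class fields, then the IMT, then a 32-bit vtable length, then the vtable entries; both the new offset helpers and the reference visitor above have to step over the length slot. A small offset-arithmetic sketch with assumed sizes (the real values come from sizeof(Class), kImtSize, and the entry types):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kClassSize = 512;               // sizeof(mirror::Class), assumed
      const uint32_t kImtSize = 64;                  // number of IMT slots, assumed
      const uint32_t kEntrySize = sizeof(uint32_t);  // one heap reference per entry, assumed

      uint32_t imt_offset = kClassSize;
      uint32_t vtable_length_offset = imt_offset + kImtSize * kEntrySize;
      uint32_t vtable_offset = vtable_length_offset + sizeof(int32_t);

      // The i-th embedded vtable entry, as GetEmbeddedVTableEntry computes it.
      uint32_t i = 3;
      std::printf("imt=%u len=%u vtable=%u entry[%u]=%u\n",
                  imt_offset, vtable_length_offset, vtable_offset, i,
                  vtable_offset + i * kEntrySize);
      return 0;
    }
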
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index fadf80e..f29ba73 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -36,24 +36,24 @@
namespace art {
namespace mirror {
-Class* Class::java_lang_Class_ = nullptr;
+GcRoot<Class> Class::java_lang_Class_;
void Class::SetClassClass(Class* java_lang_Class) {
- CHECK(java_lang_Class_ == nullptr)
- << ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(&java_lang_Class_)
+ CHECK(java_lang_Class_.IsNull())
+ << java_lang_Class_.Read()
<< " " << java_lang_Class;
CHECK(java_lang_Class != nullptr);
- java_lang_Class_ = java_lang_Class;
+ java_lang_Class_ = GcRoot<Class>(java_lang_Class);
}
void Class::ResetClass() {
- CHECK(java_lang_Class_ != nullptr);
- java_lang_Class_ = nullptr;
+ CHECK(!java_lang_Class_.IsNull());
+ java_lang_Class_ = GcRoot<Class>(nullptr);
}
void Class::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_Class_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_Class_), arg, 0, kRootStickyClass);
+ if (!java_lang_Class_.IsNull()) {
+ java_lang_Class_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
@@ -827,39 +827,67 @@
}
table = GetVTableDuringLinking();
- CHECK(table != nullptr);
+ CHECK(table != nullptr) << PrettyClass(this);
+ SetEmbeddedVTableLength(table->GetLength());
for (int32_t i = 0; i < table->GetLength(); i++) {
SetEmbeddedVTableEntry(i, table->Get(i));
}
+
+ SetImTable(nullptr);
+  // Keep java.lang.Object's vtable around, since it is easier for array
+  // classes to reuse it during their linking.
+ if (!IsObjectClass()) {
+ SetVTable(nullptr);
+ }
}
+// The pre-fence visitor for Class::CopyOf().
+class CopyClassVisitor {
+ public:
+ explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig,
+ size_t new_length, size_t copy_bytes)
+ : self_(self), orig_(orig), new_length_(new_length),
+ copy_bytes_(copy_bytes) {
+ }
+
+ void operator()(Object* obj, size_t usable_size) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(usable_size);
+ mirror::Class* new_class_obj = obj->AsClass();
+ mirror::Object::CopyObject(self_, new_class_obj, orig_->Get(), copy_bytes_);
+ new_class_obj->SetStatus(Class::kStatusResolving, self_);
+ new_class_obj->PopulateEmbeddedImtAndVTable();
+ new_class_obj->SetClassSize(new_length_);
+ }
+
+ private:
+ Thread* const self_;
+ Handle<mirror::Class>* const orig_;
+ const size_t new_length_;
+ const size_t copy_bytes_;
+ DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
+};
+
Class* Class::CopyOf(Thread* self, int32_t new_length) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_this(hs.NewHandle(this));
gc::Heap* heap = Runtime::Current()->GetHeap();
- InitializeClassVisitor visitor(new_length);
+ // The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
+ // to skip copying the tail part that we will overwrite here.
+ CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class));
mirror::Object* new_class =
- kMovingClasses ? heap->AllocObject<true>(self, java_lang_Class_, new_length, visitor)
- : heap->AllocNonMovableObject<true>(self, java_lang_Class_, new_length, visitor);
+ kMovingClasses
+ ? heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor)
+ : heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
if (UNLIKELY(new_class == nullptr)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
return NULL;
}
- mirror::Class* new_class_obj = new_class->AsClass();
- memcpy(new_class_obj, h_this.Get(), sizeof(Class));
-
- new_class_obj->SetStatus(kStatusResolving, self);
- new_class_obj->PopulateEmbeddedImtAndVTable();
- // Correct some fields.
- new_class_obj->SetLockWord(LockWord(), false);
- new_class_obj->SetClassSize(new_length);
-
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class_obj);
- return new_class_obj;
+ return new_class->AsClass();
}
} // namespace mirror
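
CopyClassVisitor moves the copy, status reset, and vtable population into the allocation's pre-fence visitor, so the new class object is fully formed before it becomes visible to anyone else. A toy sketch of the pre-fence-visitor idea (AllocWithVisitor and Obj are hypothetical, not the ART heap API):

    #include <cassert>
    #include <cstring>

    struct Obj { int status; char payload[16]; };

    template <typename Visitor>
    Obj* AllocWithVisitor(Visitor visitor) {
      Obj* obj = new Obj();
      visitor(obj);  // runs before the pointer escapes ("pre-fence")
      return obj;    // a real allocator would publish the object only after this
    }

    int main() {
      Obj original{1, "hello"};
      Obj* copy = AllocWithVisitor([&](Obj* fresh) {
        std::memcpy(fresh->payload, original.payload, sizeof(fresh->payload));
        fresh->status = original.status;  // fix up fields after the raw copy
      });
      assert(copy->status == 1);
      delete copy;
      return 0;
    }
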
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 648bdde..519685a 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_CLASS_H_
#include "dex_file.h"
+#include "gc_root.h"
#include "gc/allocator_type.h"
#include "invoke_type.h"
#include "modifiers.h"
@@ -25,7 +26,7 @@
#include "object_array.h"
#include "object_callbacks.h"
#include "primitive.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
/*
* A magic value for refOffsets. Ignore the bits and walk the super
@@ -448,8 +449,14 @@
bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return !IsPrimitive() && GetSuperClass() == NULL;
}
+
+ bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
+ }
+
bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || ((IsAbstract()) && IsArrayClass());
+ return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
+ ((IsAbstract()) && IsArrayClass());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -692,18 +699,34 @@
return MemberOffset(sizeof(Class));
}
- static MemberOffset EmbeddedVTableOffset() {
+ static MemberOffset EmbeddedVTableLengthOffset() {
return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry));
}
+ static MemberOffset EmbeddedVTableOffset() {
+ return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry) + sizeof(int32_t));
+ }
+
bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return IsInstantiable();
}
+ bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
ArtMethod* GetEmbeddedImTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* GetVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* GetEmbeddedVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PopulateEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -920,9 +943,8 @@
}
static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_Class_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_Class_);
+ DCHECK(!java_lang_Class_.IsNull());
+ return java_lang_Class_.Read();
}
// Can't call this SetClass or else gets called instead of Object::SetClass in places.
@@ -1140,7 +1162,7 @@
uint32_t fields_[0];
// java.lang.Class
- static Class* java_lang_Class_;
+ static GcRoot<Class> java_lang_Class_;
friend struct art::ClassOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(Class);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 08cff99..d3fcb55 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -19,6 +19,8 @@
#include "dex_cache.h"
+#include "base/logging.h"
+#include "mirror/class.h"
#include "runtime.h"
namespace art {
@@ -41,6 +43,12 @@
}
}
+inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
+ // TODO default transaction support.
+ DCHECK(resolved == nullptr || !resolved->IsErroneous());
+ GetResolvedTypes()->Set(type_idx, resolved);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index bfd603a..3c947ab 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -17,7 +17,9 @@
#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_
+#include "art_field.h"
#include "art_method.h"
+#include "class.h"
#include "object.h"
#include "object_array.h"
@@ -30,9 +32,6 @@
namespace mirror {
-class ArtField;
-class ArtMethod;
-class Class;
class String;
// C++ mirror of java.lang.DexCache.
@@ -103,11 +102,8 @@
return GetResolvedTypes()->Get(type_idx);
}
- void SetResolvedType(uint32_t type_idx, Class* resolved) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO default transaction support.
- GetResolvedTypes()->Set(type_idx, resolved);
- }
+ void SetResolvedType(uint32_t type_idx, Class* resolved)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ArtMethod* GetResolvedMethod(uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -118,7 +114,12 @@
ArtField* GetResolvedField(uint32_t field_idx) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetResolvedFields()->Get(field_idx);
+ ArtField* field = GetResolvedFields()->Get(field_idx);
+ if (UNLIKELY(field == nullptr || field->GetDeclaringClass()->IsErroneous())) {
+ return nullptr;
+ } else {
+ return field;
+ }
}
void SetResolvedField(uint32_t field_idx, ArtField* resolved) ALWAYS_INLINE
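
GetResolvedField here, like the new GetDexCacheResolvedMethod and GetDexCacheResolvedType accessors earlier in the change, treats a cached entry whose class is erroneous as if it were never resolved, so callers fall back to full resolution instead of reusing a broken class. A minimal sketch of that filtering with hypothetical Field/Klass types:

    #include <cassert>
    #include <vector>

    struct Klass { bool erroneous; };
    struct Field { Klass* declaring_class; };

    Field* GetResolvedField(const std::vector<Field*>& cache, size_t idx) {
      Field* field = cache[idx];
      if (field == nullptr || field->declaring_class->erroneous) {
        return nullptr;  // force the caller to go through full resolution again
      }
      return field;
    }

    int main() {
      Klass good{false}, bad{true};
      Field f_good{&good}, f_bad{&bad};
      std::vector<Field*> cache = {&f_good, &f_bad, nullptr};
      assert(GetResolvedField(cache, 0) == &f_good);
      assert(GetResolvedField(cache, 1) == nullptr);  // declaring class failed to verify
      assert(GetResolvedField(cache, 2) == nullptr);  // never resolved
      return 0;
    }
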
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 961bc64..3543654 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -65,8 +65,8 @@
Object* const dest_obj_;
};
-static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src, size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
+ size_t num_bytes) {
// Copy instance data. We assume memcpy copies by words.
// TODO: expose and use move32.
byte* src_bytes = reinterpret_cast<byte*>(src);
@@ -107,7 +107,7 @@
void operator()(Object* obj, size_t usable_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(usable_size);
- CopyObject(self_, obj, orig_->Get(), num_bytes_);
+ Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
private:
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 4fae470..a6b6227 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -370,6 +370,13 @@
// Generate an identity hash code.
static int32_t GenerateIdentityHashCode();
+ // A utility function that copies an object in a read barrier and
+ // write barrier-aware way. This is internally used by Clone() and
+ // Class::CopyOf().
+ static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
+ size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// The Class representing the type of the object.
HeapReference<Class> klass_;
// Monitor and hash code information.
@@ -386,6 +393,8 @@
friend class art::ImageWriter;
friend class art::Monitor;
friend struct art::ObjectOffsets; // for verifying offset information
+ friend class CopyObjectVisitor; // for CopyObject().
+ friend class CopyClassVisitor; // for CopyObject().
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index a7ea6c9..da3c36c 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -73,7 +73,12 @@
}
};
-// Keep the assembly code in sync
+// Keep constants in sync.
+TEST_F(ObjectTest, Constants) {
+ EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::HeapReference<mirror::Object>));
+}
+
+// Keep the assembly code constants in sync.
TEST_F(ObjectTest, AsmConstants) {
EXPECT_EQ(CLASS_OFFSET, Object::ClassOffset().Int32Value());
EXPECT_EQ(LOCK_WORD_OFFSET, Object::MonitorOffset().Int32Value());
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 077cd4b..c36bd98 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -19,23 +19,22 @@
namespace art {
namespace mirror {
-Class* Reference::java_lang_ref_Reference_ = nullptr;
+GcRoot<Class> Reference::java_lang_ref_Reference_;
void Reference::SetClass(Class* java_lang_ref_Reference) {
- CHECK(java_lang_ref_Reference_ == nullptr);
+ CHECK(java_lang_ref_Reference_.IsNull());
CHECK(java_lang_ref_Reference != nullptr);
- java_lang_ref_Reference_ = java_lang_ref_Reference;
+ java_lang_ref_Reference_ = GcRoot<Class>(java_lang_ref_Reference);
}
void Reference::ResetClass() {
- CHECK(java_lang_ref_Reference_ != nullptr);
- java_lang_ref_Reference_ = nullptr;
+ CHECK(!java_lang_ref_Reference_.IsNull());
+ java_lang_ref_Reference_ = GcRoot<Class>(nullptr);
}
void Reference::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_ref_Reference_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_ref_Reference_),
- arg, 0, kRootStickyClass);
+ if (!java_lang_ref_Reference_.IsNull()) {
+ java_lang_ref_Reference_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 07d47d3..7345448 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -18,9 +18,10 @@
#define ART_RUNTIME_MIRROR_REFERENCE_H_
#include "class.h"
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
#include "thread.h"
namespace art {
@@ -94,9 +95,8 @@
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_ref_Reference_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kReadBarrierOption>(
- &java_lang_ref_Reference_);
+ DCHECK(!java_lang_ref_Reference_.IsNull());
+ return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
static void SetClass(Class* klass);
static void ResetClass(void);
@@ -114,7 +114,7 @@
HeapReference<Reference> queue_next_; // Note this is Java volatile:
HeapReference<Object> referent_; // Note this is Java volatile:
- static Class* java_lang_ref_Reference_;
+ static GcRoot<Class> java_lang_ref_Reference_;
friend struct art::ReferenceOffsets; // for verifying offset information
friend class gc::ReferenceProcessor;
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index b1de2b6..1eb20f7 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -26,17 +26,17 @@
namespace art {
namespace mirror {
-Class* StackTraceElement::java_lang_StackTraceElement_ = NULL;
+GcRoot<Class> StackTraceElement::java_lang_StackTraceElement_;
void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
- CHECK(java_lang_StackTraceElement_ == NULL);
+ CHECK(java_lang_StackTraceElement_.IsNull());
CHECK(java_lang_StackTraceElement != NULL);
- java_lang_StackTraceElement_ = java_lang_StackTraceElement;
+ java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
}
void StackTraceElement::ResetClass() {
- CHECK(java_lang_StackTraceElement_ != NULL);
- java_lang_StackTraceElement_ = NULL;
+ CHECK(!java_lang_StackTraceElement_.IsNull());
+ java_lang_StackTraceElement_ = GcRoot<Class>(nullptr);
}
StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
@@ -68,9 +68,8 @@
}
void StackTraceElement::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_StackTraceElement_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_StackTraceElement_), arg, 0,
- kRootStickyClass);
+ if (!java_lang_StackTraceElement_.IsNull()) {
+ java_lang_StackTraceElement_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 52b0927..70acd1c 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -17,9 +17,9 @@
#ifndef ART_RUNTIME_MIRROR_STACK_TRACE_ELEMENT_H_
#define ART_RUNTIME_MIRROR_STACK_TRACE_ELEMENT_H_
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -57,9 +57,8 @@
static void VisitRoots(RootCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_StackTraceElement_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_StackTraceElement_);
+ DCHECK(!java_lang_StackTraceElement_.IsNull());
+ return java_lang_StackTraceElement_.Read();
}
private:
@@ -74,7 +73,7 @@
int32_t line_number)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Class* java_lang_StackTraceElement_;
+ static GcRoot<Class> java_lang_StackTraceElement_;
friend struct art::StackTraceElementOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(StackTraceElement);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 5c57dce..e81e431 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -31,7 +31,7 @@
namespace mirror {
// TODO: get global references for these
-Class* String::java_lang_String_ = NULL;
+GcRoot<Class> String::java_lang_String_;
int32_t String::FastIndexOf(int32_t ch, int32_t start) {
int32_t count = GetLength();
@@ -52,14 +52,14 @@
}
void String::SetClass(Class* java_lang_String) {
- CHECK(java_lang_String_ == NULL);
+ CHECK(java_lang_String_.IsNull());
CHECK(java_lang_String != NULL);
- java_lang_String_ = java_lang_String;
+ java_lang_String_ = GcRoot<Class>(java_lang_String);
}
void String::ResetClass() {
- CHECK(java_lang_String_ != NULL);
- java_lang_String_ = NULL;
+ CHECK(!java_lang_String_.IsNull());
+ java_lang_String_ = GcRoot<Class>(nullptr);
}
int32_t String::GetHashCode() {
@@ -233,8 +233,8 @@
}
void String::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_String_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_String_), arg, 0, kRootStickyClass);
+ if (!java_lang_String_.IsNull()) {
+ java_lang_String_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 46bdd59..66a5dd8 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -19,9 +19,9 @@
#include <gtest/gtest.h>
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -111,9 +111,8 @@
int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_String_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_String_);
+ DCHECK(!java_lang_String_.IsNull());
+ return java_lang_String_.Read();
}
static void SetClass(Class* java_lang_String);
@@ -160,7 +159,7 @@
int32_t offset_;
- static Class* java_lang_String_;
+ static GcRoot<Class> java_lang_String_;
friend struct art::StringOffsets; // for verifying offset information
FRIEND_TEST(ObjectTest, StringLength); // for SetOffset and SetCount
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 1c3f1ed..93ed4d4 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -30,7 +30,7 @@
namespace art {
namespace mirror {
-Class* Throwable::java_lang_Throwable_ = NULL;
+GcRoot<Class> Throwable::java_lang_Throwable_;
void Throwable::SetDetailMessage(String* new_detail_message) {
if (Runtime::Current()->IsActiveTransaction()) {
@@ -127,19 +127,19 @@
}
void Throwable::SetClass(Class* java_lang_Throwable) {
- CHECK(java_lang_Throwable_ == NULL);
+ CHECK(java_lang_Throwable_.IsNull());
CHECK(java_lang_Throwable != NULL);
- java_lang_Throwable_ = java_lang_Throwable;
+ java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
}
void Throwable::ResetClass() {
- CHECK(java_lang_Throwable_ != NULL);
- java_lang_Throwable_ = NULL;
+ CHECK(!java_lang_Throwable_.IsNull());
+ java_lang_Throwable_ = GcRoot<Class>(nullptr);
}
void Throwable::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_Throwable_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_Throwable_), arg, 0, kRootStickyClass);
+ if (!java_lang_Throwable_.IsNull()) {
+ java_lang_Throwable_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index cf54ad6..f90812d 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -17,9 +17,9 @@
#ifndef ART_RUNTIME_MIRROR_THROWABLE_H_
#define ART_RUNTIME_MIRROR_THROWABLE_H_
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
#include "string.h"
namespace art {
@@ -47,9 +47,8 @@
bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetJavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_Throwable_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_Throwable_);
+ DCHECK(!java_lang_Throwable_.IsNull());
+ return java_lang_Throwable_.Read();
}
static void SetClass(Class* java_lang_Throwable);
@@ -72,7 +71,7 @@
HeapReference<Object> stack_trace_;
HeapReference<Object> suppressed_exceptions_;
- static Class* java_lang_Throwable_;
+ static GcRoot<Class> java_lang_Throwable_;
friend struct art::ThrowableOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(Throwable);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 4b26eda..433c1b2 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -84,7 +84,7 @@
num_waiters_(0),
owner_(owner),
lock_count_(0),
- obj_(obj),
+ obj_(GcRoot<mirror::Object>(obj)),
wait_set_(NULL),
hash_code_(hash_code),
locking_method_(NULL),
@@ -107,7 +107,7 @@
num_waiters_(0),
owner_(owner),
lock_count_(0),
- obj_(obj),
+ obj_(GcRoot<mirror::Object>(obj)),
wait_set_(NULL),
hash_code_(hash_code),
locking_method_(NULL),
@@ -165,7 +165,9 @@
bool success = GetObject()->CasLockWordWeakSequentiallyConsistent(lw, fat);
// Lock profiling.
if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
- locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_);
+ // Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
+ // abort.
+ locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_, false);
}
return success;
}
@@ -223,7 +225,7 @@
}
void Monitor::SetObject(mirror::Object* object) {
- obj_ = object;
+ obj_ = GcRoot<mirror::Object>(object);
}
void Monitor::Lock(Thread* self) {
@@ -634,7 +636,7 @@
}
// The monitor is deflated, mark the object as nullptr so that we know to delete it during the
// next GC.
- monitor->obj_ = nullptr;
+ monitor->obj_ = GcRoot<mirror::Object>(nullptr);
}
return true;
}
@@ -680,6 +682,8 @@
Thread* owner;
{
ScopedThreadStateChange tsc(self, kBlocked);
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
}
if (owner != nullptr) {
@@ -745,10 +749,10 @@
contention_count++;
Runtime* runtime = Runtime::Current();
if (contention_count <= runtime->GetMaxSpinsBeforeThinkLockInflation()) {
- // TODO: Consider switch thread state to kBlocked when we are yielding.
+ // TODO: Consider switching the thread state to kBlocked when we are yielding.
// Use sched_yield instead of NanoSleep since NanoSleep can wait much longer than the
// parameter you pass in. This can cause thread suspension to take excessively long
- // make long pauses. See b/16307460.
+ // and make long pauses. See b/16307460.
sched_yield();
} else {
contention_count = 0;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 0d0ad0b..26d43c9 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -26,8 +26,9 @@
#include "atomic.h"
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
#include "thread_state.h"
namespace art {
@@ -95,7 +96,7 @@
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(&obj_);
+ return obj_.Read<kReadBarrierOption>();
}
void SetObject(mirror::Object* object);
@@ -197,7 +198,7 @@
// What object are we part of. This is a weak root. Do not access
// this directly, use GetObject() to read it so it will be guarded
// by a read barrier.
- mirror::Object* obj_;
+ GcRoot<mirror::Object> obj_;
// Threads currently waiting on this monitor.
Thread* wait_set_ GUARDED_BY(monitor_lock_);
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index 440a6be..4964aa0 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -52,7 +52,7 @@
monitor_chunks_.StoreRelaxed(new_backing);
capacity_ = new_capacity;
old_chunk_arrays_.push_back(old_backing);
- LOG(INFO) << "Resizing to capacity " << capacity_;
+ VLOG(monitor) << "Resizing to capacity " << capacity_;
}
}
@@ -64,7 +64,7 @@
CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);
// Add the chunk.
- *(monitor_chunks_.LoadRelaxed()+num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
+ *(monitor_chunks_.LoadRelaxed() + num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
num_chunks_++;
// Set up the free list
@@ -96,7 +96,7 @@
// Enough space, or need to resize?
if (first_free_ == nullptr) {
- LOG(INFO) << "Allocating a new chunk.";
+ VLOG(monitor) << "Allocating a new chunk.";
AllocateChunk();
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 440d3d0..c3304e6 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -275,8 +275,96 @@
}
}
-static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
+// Java: dalvik.system.DexFile.UP_TO_DATE
+static const jbyte kUpToDate = 0;
+// Java: dalvik.system.DexFile.DEXOPT_NEEDED
+static const jbyte kPatchoatNeeded = 1;
+// Java: dalvik.system.DexFile.PATCHOAT_NEEDED
+static const jbyte kDexoptNeeded = 2;
+
+template <const bool kVerboseLogging, const bool kReasonLogging>
+static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char* filename,
+ InstructionSet target_instruction_set) {
+ std::string error_msg;
+ std::unique_ptr<const OatFile> oat_file(OatFile::Open(oat_filename, oat_filename, nullptr,
+ false, &error_msg));
+ if (oat_file.get() == nullptr) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << oat_filename
+ << "' for file location '" << filename << "': " << error_msg;
+ }
+ error_msg.clear();
+ return kDexoptNeeded;
+ }
+ bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
+ uint32_t location_checksum = 0;
+ const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, nullptr,
+ kReasonLogging);
+ if (oat_dex_file != nullptr) {
+    // If it's not possible to read the classes.dex, assume up-to-date as we won't be able to
+ // compile it anyway.
+ if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded found precompiled stripped file: "
+ << filename << " for " << oat_filename << ": " << error_msg;
+ }
+ if (ClassLinker::VerifyOatChecksums(oat_file.get(), target_instruction_set, &error_msg)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is up-to-date for " << filename;
+ }
+ return kUpToDate;
+ } else if (should_relocate_if_possible &&
+ ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " needs to be relocated for " << filename;
+ }
+ return kPatchoatNeeded;
+ } else {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is out of date for " << filename;
+ }
+ return kDexoptNeeded;
+ }
+ // If we get here the file is out of date and we should use the system one to relocate.
+ } else {
+ if (ClassLinker::VerifyOatAndDexFileChecksums(oat_file.get(), filename, location_checksum,
+ target_instruction_set, &error_msg)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is up-to-date for " << filename;
+ }
+ return kUpToDate;
+ } else if (location_checksum == oat_dex_file->GetDexFileLocationChecksum()
+ && should_relocate_if_possible
+ && ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " needs to be relocated for " << filename;
+ }
+ return kPatchoatNeeded;
+ } else {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is out of date for " << filename;
+ }
+ return kDexoptNeeded;
+ }
+ }
+ } else {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " does not contain " << filename;
+ }
+ return kDexoptNeeded;
+ }
+}
+
+static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
const char* pkgname, const char* instruction_set, const jboolean defer) {
+ // TODO disable this logging.
const bool kVerboseLogging = false; // Spammy logging.
const bool kReasonLogging = true; // Logging of reason for returning JNI_TRUE.
@@ -285,7 +373,7 @@
ScopedLocalRef<jclass> fnfe(env, env->FindClass("java/io/FileNotFoundException"));
const char* message = (filename == nullptr) ? "<empty file name>" : filename;
env->ThrowNew(fnfe.get(), message);
- return JNI_FALSE;
+ return kUpToDate;
}
// Always treat elements of the bootclasspath as up-to-date. The
@@ -301,78 +389,45 @@
if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded ignoring boot class path file: " << filename;
}
- return JNI_FALSE;
+ return kUpToDate;
}
}
- const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
-
- // Check if we have an odex file next to the dex file.
- std::string odex_filename(DexFilenameToOdexFilename(filename, kRuntimeISA));
- std::string error_msg;
- std::unique_ptr<const OatFile> oat_file(OatFile::Open(odex_filename, odex_filename, NULL, false,
- &error_msg));
- if (oat_file.get() == nullptr) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << filename
- << "': " << error_msg;
- }
- error_msg.clear();
- } else {
- const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, NULL,
- kReasonLogging);
- if (oat_dex_file != nullptr) {
- uint32_t location_checksum;
- // If its not possible to read the classes.dex assume up-to-date as we won't be able to
- // compile it anyway.
- if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded ignoring precompiled stripped file: "
- << filename << ": " << error_msg;
- }
- return JNI_FALSE;
- }
- if (ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename, location_checksum,
- target_instruction_set,
- &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded precompiled file " << odex_filename
- << " has an up-to-date checksum compared to " << filename;
- }
- return JNI_FALSE;
- } else {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded found precompiled file " << odex_filename
- << " with an out-of-date checksum compared to " << filename
- << ": " << error_msg;
- }
- error_msg.clear();
- }
- }
- }
+ bool force_system_only = false;
+ bool require_system_version = false;
// Check the profile file. We need to rerun dex2oat if the profile has changed significantly
// since the last time, or it's new.
// If the 'defer' argument is true then this will be retried later. In this case we
// need to make sure that the profile file copy is not made so that we will get the
// same result second time.
+ std::string profile_file;
+ std::string prev_profile_file;
+ bool should_copy_profile = false;
if (Runtime::Current()->GetProfilerOptions().IsEnabled() && (pkgname != nullptr)) {
- const std::string profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
+ profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
+ std::string("/") + pkgname;
- const std::string prev_profile_file = profile_file + std::string("@old");
+ prev_profile_file = profile_file + std::string("@old");
struct stat profstat, prevstat;
int e1 = stat(profile_file.c_str(), &profstat);
+ int e1_errno = errno;
int e2 = stat(prev_profile_file.c_str(), &prevstat);
+ int e2_errno = errno;
if (e1 < 0) {
- // No profile file, need to run dex2oat
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded profile file " << profile_file << " doesn't exist";
+ if (e1_errno != EACCES) {
+ // No profile file, need to run dex2oat, unless we find a file in system
+ if (kReasonLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeededInternal profile file " << profile_file << " doesn't exist. "
+ << "Will check odex to see if we can find a working version.";
+ }
+ // Force it to only accept system files/files with versions in system.
+ require_system_version = true;
+ } else {
+        LOG(INFO) << "DexFile_isDexOptNeededInternal received EACCES trying to stat profile file "
+ << profile_file;
}
- return JNI_TRUE;
- }
-
- if (e2 == 0) {
+ } else if (e2 == 0) {
// There is a previous profile file. Check if the profile has changed significantly.
// A change in profile is considered significant if X% (change_thr property) of the top K%
// (compile_thr property) samples has changed.
@@ -384,7 +439,7 @@
bool old_ok = old_profile.LoadFile(prev_profile_file);
if (!new_ok || !old_ok) {
if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded Ignoring invalid profiles: "
+ LOG(INFO) << "DexFile_isDexOptNeededInternal Ignoring invalid profiles: "
<< (new_ok ? "" : profile_file) << " " << (old_ok ? "" : prev_profile_file);
}
} else {
@@ -393,7 +448,7 @@
old_profile.GetTopKSamples(old_top_k, top_k_threshold);
if (new_top_k.empty()) {
if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded empty profile: " << profile_file;
+ LOG(INFO) << "DexFile_isDexOptNeededInternal empty profile: " << profile_file;
}
// If the new topK is empty we shouldn't optimize so we leave the change_percent at 0.0.
} else {
@@ -405,7 +460,7 @@
if (kVerboseLogging) {
std::set<std::string>::iterator end = diff.end();
for (std::set<std::string>::iterator it = diff.begin(); it != end; it++) {
- LOG(INFO) << "DexFile_isDexOptNeeded new in topK: " << *it;
+ LOG(INFO) << "DexFile_isDexOptNeededInternal new in topK: " << *it;
}
}
}
@@ -413,67 +468,85 @@
if (change_percent > change_threshold) {
if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded size of new profile file " << profile_file <<
+ LOG(INFO) << "DexFile_isDexOptNeededInternal size of new profile file " << profile_file <<
" is significantly different from old profile file " << prev_profile_file << " (top "
<< top_k_threshold << "% samples changed in proportion of " << change_percent << "%)";
}
- if (!defer) {
- CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
- }
- return JNI_TRUE;
+ should_copy_profile = !defer;
+ // Force us to only accept system files.
+ force_system_only = true;
}
- } else {
+ } else if (e2_errno == ENOENT) {
// Previous profile does not exist. Make a copy of the current one.
if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded previous profile doesn't exist: " << prev_profile_file;
+ LOG(INFO) << "DexFile_isDexOptNeededInternal previous profile doesn't exist: " << prev_profile_file;
}
- if (!defer) {
- CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
- }
+ should_copy_profile = !defer;
+ } else {
+ PLOG(INFO) << "Unable to stat previous profile file " << prev_profile_file;
}
}
- // Check if we have an oat file in the cache
- const std::string cache_dir(GetDalvikCacheOrDie(instruction_set));
- const std::string cache_location(
- GetDalvikCacheFilenameOrDie(filename, cache_dir.c_str()));
- oat_file.reset(OatFile::Open(cache_location, filename, NULL, false, &error_msg));
- if (oat_file.get() == nullptr) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " does not exist for " << filename << ": " << error_msg;
+ const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
+
+ // Get the filename for odex file next to the dex file.
+ std::string odex_filename(DexFilenameToOdexFilename(filename, target_instruction_set));
+ // Get the filename for the dalvik-cache file
+ std::string cache_dir;
+ bool have_android_data = false;
+ bool dalvik_cache_exists = false;
+ GetDalvikCache(instruction_set, false, &cache_dir, &have_android_data, &dalvik_cache_exists);
+ std::string cache_filename; // was cache_location
+ bool have_cache_filename = false;
+ if (dalvik_cache_exists) {
+ std::string error_msg;
+ have_cache_filename = GetDalvikCacheFilename(filename, cache_dir.c_str(), &cache_filename,
+ &error_msg);
+ if (!have_cache_filename && kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeededInternal failed to find cache file for dex file " << filename
+ << ": " << error_msg;
}
- return JNI_TRUE;
}
- uint32_t location_checksum;
- if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
- if (kReasonLogging) {
- LOG(ERROR) << "DexFile_isDexOptNeeded failed to compute checksum of " << filename
- << " (error " << error_msg << ")";
+ bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
+
+ jbyte dalvik_cache_decision = -1;
+  // Let's try the cache first (since we want to load from there, as that's where the relocated
+ // versions will be).
+ if (have_cache_filename && !force_system_only) {
+ // We can use the dalvik-cache if we find a good file.
+ dalvik_cache_decision =
+ IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(cache_filename, filename,
+ target_instruction_set);
+ // We will only return DexOptNeeded if both the cache and system return it.
+ if (dalvik_cache_decision != kDexoptNeeded && !require_system_version) {
+ CHECK(!(dalvik_cache_decision == kPatchoatNeeded && !should_relocate_if_possible))
+ << "May not return PatchoatNeeded when patching is disabled.";
+ return dalvik_cache_decision;
}
- return JNI_TRUE;
+    // We couldn't find a usable one in the cache. We should now try the system.
}
- if (!ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename, location_checksum,
- target_instruction_set, &error_msg)) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " has out-of-date checksum compared to " << filename
- << " (error " << error_msg << ")";
- }
- return JNI_TRUE;
+ jbyte system_decision =
+ IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(odex_filename, filename,
+ target_instruction_set);
+ CHECK(!(system_decision == kPatchoatNeeded && !should_relocate_if_possible))
+ << "May not return PatchoatNeeded when patching is disabled.";
+
+ if (require_system_version && system_decision == kPatchoatNeeded
+ && dalvik_cache_decision == kUpToDate) {
+ // We have a version from system relocated to the cache. Return it.
+ return dalvik_cache_decision;
}
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " is up-to-date for " << filename;
+ if (should_copy_profile && system_decision == kDexoptNeeded) {
+ CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
}
- CHECK(error_msg.empty()) << error_msg;
- return JNI_FALSE;
+
+ return system_decision;
}
-static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
+static jbyte DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
jstring javaPkgname, jstring javaInstructionSet, jboolean defer) {
ScopedUtfChars filename(env, javaFilename);
NullableScopedUtfChars pkgname(env, javaPkgname);
@@ -487,8 +560,8 @@
static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
const char* instruction_set = GetInstructionSetString(kRuntimeISA);
ScopedUtfChars filename(env, javaFilename);
- return IsDexOptNeededInternal(env, filename.c_str(), nullptr /* pkgname */,
- instruction_set, false /* defer */);
+ return kUpToDate != IsDexOptNeededInternal(env, filename.c_str(), nullptr /* pkgname */,
+ instruction_set, false /* defer */);
}
@@ -497,7 +570,7 @@
NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;J)Ljava/lang/Class;"),
NATIVE_METHOD(DexFile, getClassNameList, "(J)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
- NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)Z"),
+ NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)B"),
NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)J"),
};
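Condensed, the isDexOptNeededInternal rewrite above decides in this order: prefer the dalvik-cache copy, fall back to the system odex, and defer the profile copy until the final answer is known. A sketch restating those hunks, with logging, the CHECKs, and the template parameters dropped for brevity:

    jbyte dalvik_cache_decision = -1;
    if (have_cache_filename && !force_system_only) {
      dalvik_cache_decision = IsDexOptNeededForFile(cache_filename, filename, target_instruction_set);
      // A usable (or merely relocatable) cache file wins unless a system version is required.
      if (dalvik_cache_decision != kDexoptNeeded && !require_system_version) {
        return dalvik_cache_decision;
      }
    }
    jbyte system_decision = IsDexOptNeededForFile(odex_filename, filename, target_instruction_set);
    // A system file that was already relocated into the cache is reported as up to date.
    if (require_system_version && system_decision == kPatchoatNeeded &&
        dalvik_cache_decision == kUpToDate) {
      return dalvik_cache_decision;
    }
    if (should_copy_profile && system_decision == kDexoptNeeded) {
      CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
    }
    return system_decision;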
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index b0b64aa..a9ef8fc 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -167,7 +167,7 @@
}
static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
- return Runtime::Current()->GetJavaVM()->check_jni ? JNI_TRUE : JNI_FALSE;
+ return Runtime::Current()->GetJavaVM()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sdk_version) {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index cf31064..5f718ba 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -35,7 +35,12 @@
// Suspend thread to build stack trace.
soa.Self()->TransitionFromRunnableToSuspended(kNative);
bool timed_out;
- Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ Thread* thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
+ thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ }
if (thread != nullptr) {
// Must be runnable to create returned array.
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 820bd04..df6055d 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -17,6 +17,7 @@
#include <stdlib.h>
#include "debugger.h"
+#include "java_vm_ext.h"
#include "jni_internal.h"
#include "JNIHelp.h"
#include "thread-inl.h"
@@ -47,7 +48,7 @@
}
static void EnableDebugFeatures(uint32_t debug_flags) {
- // Must match values in dalvik.system.Zygote.
+ // Must match values in com.android.internal.os.Zygote.
enum {
DEBUG_ENABLE_DEBUGGER = 1,
DEBUG_ENABLE_CHECKJNI = 1 << 1,
@@ -59,7 +60,7 @@
if ((debug_flags & DEBUG_ENABLE_CHECKJNI) != 0) {
Runtime* runtime = Runtime::Current();
JavaVMExt* vm = runtime->GetJavaVM();
- if (!vm->check_jni) {
+ if (!vm->IsCheckJniEnabled()) {
LOG(INFO) << "Late-enabling -Xcheck:jni";
vm->SetCheckJniEnabled(true);
// There's only one thread running at this point, so only one JNIEnv to fix up.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index e577c2c..124bdf5 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -71,7 +71,10 @@
jthrowable cnfe = reinterpret_cast<jthrowable>(env->NewObject(WellKnownClasses::java_lang_ClassNotFoundException,
WellKnownClasses::java_lang_ClassNotFoundException_init,
javaName, cause.get()));
- env->Throw(cnfe);
+ if (cnfe != nullptr) {
+ // Make sure allocation didn't fail with an OOME.
+ env->Throw(cnfe);
+ }
return nullptr;
}
if (initialize) {
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 51cd5b8..c1c6c26 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -15,6 +15,7 @@
*/
#include "dex_file.h"
+#include "jni_internal.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 496a1b2..a85eec7 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -38,11 +38,15 @@
}
static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
+ LOG(INFO) << "System.exit called, status: " << status;
Runtime::Current()->CallExitHook(status);
exit(status);
}
-static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader, jstring javaLdLibraryPath) {
+static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
+ jstring javaLdLibraryPath) {
+ // TODO: returns NULL on success or an error message describing the failure on failure. This
+ // should be refactored in terms of suppressed exceptions.
ScopedUtfChars filename(env, javaFilename);
if (filename.c_str() == NULL) {
return NULL;
@@ -63,14 +67,10 @@
}
}
- std::string detail;
+ std::string error_msg;
{
- ScopedObjectAccess soa(env);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> classLoader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, &detail);
+ bool success = vm->LoadNativeLibrary(env, filename.c_str(), javaLoader, &error_msg);
if (success) {
return nullptr;
}
@@ -78,7 +78,7 @@
// Don't let a pending exception from JNI_OnLoad cause a CheckJNI issue with NewStringUTF.
env->ExceptionClear();
- return env->NewStringUTF(detail.c_str());
+ return env->NewStringUTF(error_msg.c_str());
}
static jlong Runtime_maxMemory(JNIEnv*, jclass) {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index bae67f2..8f83f96 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -116,18 +116,25 @@
static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
ScopedUtfChars name(env, java_name);
+ Thread* self;
{
ScopedObjectAccess soa(env);
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
soa.Self()->SetThreadName(name.c_str());
return;
}
+ self = soa.Self();
}
// Suspend thread to avoid it from killing itself while we set its name. We don't just hold the
// thread list lock to avoid this, as setting the thread name causes mutator to lock/unlock
// in the DDMS send code.
bool timed_out;
- Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ Thread* thread;
+ {
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ }
if (thread != NULL) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index f2b8a03..fefddae 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -62,25 +62,28 @@
*/
static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName, jint index) {
ScopedUtfChars name(env, javaName);
- if (name.c_str() == NULL) {
- return NULL;
+ if (name.c_str() == nullptr) {
+ return nullptr;
}
const std::vector<const DexFile*>& path = Runtime::Current()->GetClassLinker()->GetBootClassPath();
if (index < 0 || size_t(index) >= path.size()) {
- return NULL;
+ return nullptr;
}
const DexFile* dex_file = path[index];
- const std::string& location(dex_file->GetLocation());
+
+ // For multidex locations, e.g., x.jar:classes2.dex, we want to look into x.jar.
+ const std::string& location(dex_file->GetBaseLocation());
+
std::string error_msg;
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(location.c_str(), &error_msg));
if (zip_archive.get() == nullptr) {
LOG(WARNING) << "Failed to open zip archive '" << location << "': " << error_msg;
- return NULL;
+ return nullptr;
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(name.c_str(), &error_msg));
- if (zip_entry.get() == NULL) {
- return NULL;
+ if (zip_entry.get() == nullptr) {
+ return nullptr;
}
std::string url;
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index 163ae20..8b2aecb 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -16,6 +16,7 @@
#include "base/logging.h"
#include "debugger.h"
+#include "jni_internal.h"
#include "scoped_fast_native_object_access.h"
#include "ScopedPrimitiveArray.h"
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index e17e60a..45ef9ae 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -61,7 +61,12 @@
}
// Suspend thread to build stack trace.
- Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ Thread* thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ }
if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native_bridge.cc b/runtime/native_bridge.cc
new file mode 100644
index 0000000..d0b516b
--- /dev/null
+++ b/runtime/native_bridge.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "native_bridge.h"
+
+#include <dlfcn.h>
+#include <stdio.h>
+#include "jni.h"
+
+#include "base/mutex.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "thread.h"
+
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+#endif
+
+
+namespace art {
+
+// The symbol name exposed by native-bridge with the type of NativeBridgeCallbacks.
+static constexpr const char* kNativeBridgeInterfaceSymbol = "NativeBridgeItf";
+
+// The library name we are supposed to load.
+static std::string native_bridge_library_string = "";
+
+// Whether a native bridge is available (loaded and ready).
+static bool available = false;
+// Whether we have already initialized (or tried to).
+static bool initialized = false;
+
+struct NativeBridgeCallbacks;
+static NativeBridgeCallbacks* callbacks = nullptr;
+
+// ART interfaces to native-bridge.
+struct NativeBridgeArtCallbacks {
+ // Get shorty of a Java method. The shorty is supposed to be persistent in memory.
+ //
+ // Parameters:
+ // env [IN] pointer to JNIenv.
+ // mid [IN] Java methodID.
+ // Returns:
+ // short descriptor for method.
+ const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
+
+ // Get number of native methods for specified class.
+ //
+ // Parameters:
+ // env [IN] pointer to JNIenv.
+ // clazz [IN] Java class object.
+ // Returns:
+ // number of native methods.
+ uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
+
+  // Get at most 'method_count' native methods for the specified class 'clazz'. Results are output
+ // via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the method shorty.
+ //
+ // Parameters:
+ // env [IN] pointer to JNIenv.
+ // clazz [IN] Java class object.
+ // methods [OUT] array of method with the name, shorty, and fnPtr.
+ // method_count [IN] max number of elements in methods.
+ // Returns:
+  //   number of methods it actually wrote to 'methods'.
+ uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count);
+};
+
+// Native-bridge interfaces to ART
+struct NativeBridgeCallbacks {
+ // Initialize native-bridge. Native-bridge's internal implementation must ensure MT safety and
+ // that the native-bridge is initialized only once. Thus it is OK to call this interface for an
+ // already initialized native-bridge.
+ //
+ // Parameters:
+ // art_cbs [IN] the pointer to NativeBridgeArtCallbacks.
+ // Returns:
+ // true iff initialization was successful.
+ bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
+
+ // Load a shared library that is supported by the native-bridge.
+ //
+ // Parameters:
+ // libpath [IN] path to the shared library
+  //   flag [IN] the standard RTLD_XXX defined in bionic dlfcn.h
+ // Returns:
+  //   The opaque handle of the shared library if successful, otherwise NULL
+ void* (*loadLibrary)(const char* libpath, int flag);
+
+  // Get a native-bridge trampoline for the specified native method. The trampoline has the same
+  // signature as the native method.
+ //
+ // Parameters:
+ // handle [IN] the handle returned from loadLibrary
+ // shorty [IN] short descriptor of native method
+ // len [IN] length of shorty
+ // Returns:
+ // address of trampoline if successful, otherwise NULL
+ void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
+
+ // Check whether native library is valid and is for an ABI that is supported by native-bridge.
+ //
+ // Parameters:
+ // libpath [IN] path to the shared library
+ // Returns:
+ // TRUE if library is supported by native-bridge, FALSE otherwise
+ bool (*isSupported)(const char* libpath);
+};
+
+static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<1> scope(soa.Self());
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ MethodHelper mh(scope.NewHandle(m));
+ return mh.GetShorty();
+}
+
+static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+ if (clazz == nullptr)
+ return 0;
+
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t native_method_count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ return native_method_count;
+}
+
+static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count) {
+ if ((clazz == nullptr) || (methods == nullptr)) {
+ return 0;
+ }
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ return count;
+}
+
+static NativeBridgeArtCallbacks NativeBridgeArtItf = {
+ GetMethodShorty,
+ GetNativeMethodCount,
+ GetNativeMethods
+};
+
+void SetNativeBridgeLibraryString(const std::string& nb_library_string) {
+  // This is called when the runtime starts and nothing is running concurrently,
+ // so we don't need a lock here.
+
+ native_bridge_library_string = nb_library_string;
+
+ if (native_bridge_library_string.empty()) {
+ initialized = true;
+ available = false;
+ }
+}
+
+static bool NativeBridgeInitialize() {
+ // TODO: Missing annotalysis static lock ordering of DEFAULT_MUTEX_ACQUIRED, place lock into
+ // global order or remove.
+ static Mutex lock("native bridge lock");
+ MutexLock mu(Thread::Current(), lock);
+
+ if (initialized) {
+ // Somebody did it before.
+ return available;
+ }
+
+ available = false;
+
+ void* handle = dlopen(native_bridge_library_string.c_str(), RTLD_LAZY);
+ if (handle != nullptr) {
+ callbacks = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
+ kNativeBridgeInterfaceSymbol));
+
+ if (callbacks != nullptr) {
+ available = callbacks->initialize(&NativeBridgeArtItf);
+ }
+
+ if (!available) {
+ dlclose(handle);
+ }
+ }
+
+ initialized = true;
+
+ return available;
+}
+
+void* NativeBridgeLoadLibrary(const char* libpath, int flag) {
+ if (NativeBridgeInitialize()) {
+ return callbacks->loadLibrary(libpath, flag);
+ }
+ return nullptr;
+}
+
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty,
+ uint32_t len) {
+ if (NativeBridgeInitialize()) {
+ return callbacks->getTrampoline(handle, name, shorty, len);
+ }
+ return nullptr;
+}
+
+bool NativeBridgeIsSupported(const char* libpath) {
+ if (NativeBridgeInitialize()) {
+ return callbacks->isSupported(libpath);
+ }
+ return false;
+}
+
+}; // namespace art
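On the other side of the dlsym() handshake above, a bridge library only needs to export a NativeBridgeCallbacks instance under the NativeBridgeItf symbol. A hypothetical stub bridge, assuming the struct definitions are mirrored into the bridge's own sources (this patch does not export them in a header):

    // Hypothetical stand-alone bridge library, built into its own .so.
    static bool StubInitialize(NativeBridgeArtCallbacks* art_cbs) {
      return art_cbs != nullptr;  // A real bridge would stash the ART callbacks here.
    }
    static void* StubLoadLibrary(const char* libpath, int flag) {
      return dlopen(libpath, flag);  // Trivial bridge: no translation, plain dlopen.
    }
    static void* StubGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len) {
      return dlsym(handle, name);  // No trampoline generation, direct symbol lookup.
    }
    static bool StubIsSupported(const char* libpath) {
      return false;  // Claim nothing, so the runtime keeps using the host loader.
    }

    // The runtime dlopen()s the configured library and dlsym()s this symbol.
    NativeBridgeCallbacks NativeBridgeItf = {
      StubInitialize,
      StubLoadLibrary,
      StubGetTrampoline,
      StubIsSupported
    };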
diff --git a/runtime/native_bridge.h b/runtime/native_bridge.h
new file mode 100644
index 0000000..be647fc
--- /dev/null
+++ b/runtime/native_bridge.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_BRIDGE_H_
+#define ART_RUNTIME_NATIVE_BRIDGE_H_
+
+#include <string>
+
+namespace art {
+
+// Initialize the native bridge, if any. Should be called by Runtime::Init(). An empty string
+// signals that we do not want to load a native bridge.
+void SetNativeBridgeLibraryString(const std::string& native_bridge_library_string);
+
+// Load a shared library that is supported by the native-bridge.
+void* NativeBridgeLoadLibrary(const char* libpath, int flag);
+
+// Get a native-bridge trampoline for specified native method.
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
+
+// True if native library is valid and is for an ABI that is supported by native-bridge.
+bool NativeBridgeIsSupported(const char* libpath);
+
+}; // namespace art
+
+#endif // ART_RUNTIME_NATIVE_BRIDGE_H_
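From the caller's point of view, the header above boils down to a load-time decision plus a per-method lookup. A hedged sketch of a caller, presumably the library-loading path seen earlier in this patch; the symbol name and shorty below are illustrative only:

    // Route libraries built for a foreign ABI through the bridge.
    void* handle = nullptr;
    const bool via_bridge = NativeBridgeIsSupported(path);
    if (via_bridge) {
      handle = NativeBridgeLoadLibrary(path, RTLD_LAZY);
    } else {
      handle = dlopen(path, RTLD_LAZY);
    }
    // For a native method in a bridged library, ask the bridge for a callable
    // trampoline; the shorty ("V" here) and its length describe the signature.
    void* fn = via_bridge
        ? NativeBridgeGetTrampoline(handle, "Java_Foo_bar", "V", 1)
        : dlsym(handle, "Java_Foo_bar");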
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 65498de..e9ad353 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -32,6 +32,11 @@
void ClassRejected(ClassReference ref) OVERRIDE {}
+ // This is only used by compilers which need to be able to run without relocation even when it
+ // would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
+ // to disable the relocation since both deal with writing out the images directly.
+ bool IsRelocationPossible() OVERRIDE { return false; }
+
private:
DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks);
};
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 1421baf..0a8c35b 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -23,7 +23,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '3', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '3', '8', '\0' };
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
@@ -67,6 +67,8 @@
const SafeMap<std::string, std::string>* variable_data) {
memcpy(magic_, kOatMagic, sizeof(kOatMagic));
memcpy(version_, kOatVersion, sizeof(kOatVersion));
+ executable_offset_ = 0;
+ image_patch_delta_ = 0;
adler32_checksum_ = adler32(0L, Z_NULL, 0);
@@ -98,7 +100,6 @@
UpdateChecksum(&key_value_store_, key_value_store_size_);
}
- executable_offset_ = 0;
interpreter_to_interpreter_bridge_offset_ = 0;
interpreter_to_compiled_code_bridge_offset_ = 0;
jni_dlsym_lookup_offset_ = 0;
@@ -118,6 +119,12 @@
if (memcmp(version_, kOatVersion, sizeof(kOatVersion)) != 0) {
return false;
}
+ if (!IsAligned<kPageSize>(executable_offset_)) {
+ return false;
+ }
+ if (!IsAligned<kPageSize>(image_patch_delta_)) {
+ return false;
+ }
return true;
}
@@ -355,6 +362,26 @@
UpdateChecksum(&quick_to_interpreter_bridge_offset_, sizeof(offset));
}
+int32_t OatHeader::GetImagePatchDelta() const {
+ CHECK(IsValid());
+ return image_patch_delta_;
+}
+
+void OatHeader::RelocateOat(off_t delta) {
+ CHECK(IsValid());
+ CHECK_ALIGNED(delta, kPageSize);
+ image_patch_delta_ += delta;
+ if (image_file_location_oat_data_begin_ != 0) {
+ image_file_location_oat_data_begin_ += delta;
+ }
+}
+
+void OatHeader::SetImagePatchDelta(int32_t off) {
+ CHECK(IsValid());
+ CHECK_ALIGNED(off, kPageSize);
+ image_patch_delta_ = off;
+}
+
uint32_t OatHeader::GetImageFileLocationOatChecksum() const {
CHECK(IsValid());
return image_file_location_oat_checksum_;
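The new image_patch_delta_ field is plain offset bookkeeping: RelocateOat() shifts both the stored delta and (when set) the recorded image oat data begin by a page-aligned amount. A hypothetical helper, only to spell out the arithmetic:

    // Illustrative only: shift an already-mapped oat header by a page-aligned delta.
    void ApplyRelocation(OatHeader* header, off_t delta) {  // e.g. delta = 4096
      const int32_t before = header->GetImagePatchDelta();
      header->RelocateOat(delta);  // CHECKs page alignment, then adds delta.
      CHECK_EQ(header->GetImagePatchDelta(), before + static_cast<int32_t>(delta));
    }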
diff --git a/runtime/oat.h b/runtime/oat.h
index fbed596..6d5fefe 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -88,6 +88,10 @@
uint32_t GetQuickToInterpreterBridgeOffset() const;
void SetQuickToInterpreterBridgeOffset(uint32_t offset);
+ int32_t GetImagePatchDelta() const;
+ void RelocateOat(off_t delta);
+ void SetImagePatchDelta(int32_t off);
+
InstructionSet GetInstructionSet() const;
const InstructionSetFeatures& GetInstructionSetFeatures() const;
uint32_t GetImageFileLocationOatChecksum() const;
@@ -129,6 +133,9 @@
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
+ // The amount that the image this oat is associated with has been patched.
+ int32_t image_patch_delta_;
+
uint32_t image_file_location_oat_checksum_;
uint32_t image_file_location_oat_data_begin_;
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9cefcb6..971daf8 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -18,12 +18,12 @@
#include <dlfcn.h>
#include <sstream>
+#include <string.h>
#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "elf_file.h"
-#include "implicit_check_options.h"
#include "oat.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
@@ -80,36 +80,7 @@
}
ret.reset(OpenElfFile(file.get(), location, requested_base, false, executable, error_msg));
}
-
- if (ret.get() == nullptr) {
- return nullptr;
- }
-
- // Embedded options check. Right now only implicit checks.
- // TODO: Refactor to somewhere else?
- const char* implicit_checks_value = ret->GetOatHeader().
- GetStoreValueByKey(ImplicitCheckOptions::kImplicitChecksOatHeaderKey);
-
- if (implicit_checks_value == nullptr) {
- *error_msg = "Did not find implicit checks value.";
- return nullptr;
- }
-
- bool explicit_null_checks, explicit_so_checks, explicit_suspend_checks;
- if (ImplicitCheckOptions::Parse(implicit_checks_value, &explicit_null_checks,
- &explicit_so_checks, &explicit_suspend_checks)) {
- // Check whether the runtime agrees with the recorded checks.
- if (ImplicitCheckOptions::CheckRuntimeSupport(executable, explicit_null_checks,
- explicit_so_checks, explicit_suspend_checks,
- error_msg)) {
- return ret.release();
- } else {
- return nullptr;
- }
- } else {
- *error_msg = "Failed parsing implicit check options.";
- return nullptr;
- }
+ return ret.release();
}
OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::string* error_msg) {
@@ -117,6 +88,11 @@
return OpenElfFile(file, location, NULL, true, false, error_msg);
}
+OatFile* OatFile::OpenReadable(File* file, const std::string& location, std::string* error_msg) {
+ CheckLocation(location);
+ return OpenElfFile(file, location, NULL, false, false, error_msg);
+}
+
OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
const std::string& location,
byte* requested_base,
@@ -145,7 +121,8 @@
}
OatFile::OatFile(const std::string& location)
- : location_(location), begin_(NULL), end_(NULL), dlopen_handle_(NULL) {
+ : location_(location), begin_(NULL), end_(NULL), dlopen_handle_(NULL),
+ secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -325,12 +302,12 @@
return false;
}
+ // Create the OatDexFile and add it to the owning map indexed by the dex file location.
OatDexFile* oat_dex_file = new OatDexFile(this,
dex_file_location,
dex_file_checksum,
dex_file_pointer,
methods_offsets_pointer);
- // Use a StringPiece backed by the oat_dex_file's internal std::string as the key.
StringPiece key(oat_dex_file->GetDexFileLocation());
oat_dex_files_.Put(key, oat_dex_file);
}
@@ -354,30 +331,79 @@
const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
const uint32_t* dex_location_checksum,
bool warn_if_not_found) const {
- Table::const_iterator it = oat_dex_files_.find(dex_location);
- if (it != oat_dex_files_.end()) {
- const OatFile::OatDexFile* oat_dex_file = it->second;
- if (dex_location_checksum == NULL ||
- oat_dex_file->GetDexFileLocationChecksum() == *dex_location_checksum) {
- return oat_dex_file;
+ // NOTE: We assume here that the canonical location for a given dex_location never
+ // changes. If it does (i.e. some symlink used by the filename changes) we may return
+ // an incorrect OatDexFile. As long as we have a checksum to check, we shall return
+ // an identical file or fail; otherwise we may see some unpredictable failures.
+
+ // TODO: Additional analysis of usage patterns to see if this can be simplified
+ // without any performance loss, for example by not doing the first lock-free lookup.
+
+ const OatFile::OatDexFile* oat_dex_file = nullptr;
+ StringPiece key(dex_location);
+ // Try to find the key cheaply in the oat_dex_files_ map which holds dex locations
+ // directly mentioned in the oat file and doesn't require locking.
+ auto primary_it = oat_dex_files_.find(key);
+ if (primary_it != oat_dex_files_.end()) {
+ oat_dex_file = primary_it->second;
+ DCHECK(oat_dex_file != nullptr);
+ } else {
+ // This dex_location is not one of the dex locations directly mentioned in the
+ // oat file. The correct lookup is via the canonical location but first see in
+ // the secondary_oat_dex_files_ whether we've looked up this location before.
+ MutexLock mu(Thread::Current(), secondary_lookup_lock_);
+ auto secondary_lb = secondary_oat_dex_files_.lower_bound(key);
+ if (secondary_lb != secondary_oat_dex_files_.end() && key == secondary_lb->first) {
+ oat_dex_file = secondary_lb->second; // May be nullptr.
+ } else {
+ // We haven't seen this dex_location before, we must check the canonical location.
+ if (UNLIKELY(oat_dex_files_by_canonical_location_.empty())) {
+ // Lazily fill in the oat_dex_files_by_canonical_location_.
+ for (const auto& entry : oat_dex_files_) {
+ const std::string& dex_location = entry.second->GetDexFileLocation();
+ string_cache_.emplace_back(DexFile::GetDexCanonicalLocation(dex_location.c_str()));
+ StringPiece canonical_location_key(string_cache_.back());
+ oat_dex_files_by_canonical_location_.Put(canonical_location_key, entry.second);
+ }
+ }
+ std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+ StringPiece canonical_key(dex_canonical_location);
+ auto canonical_it = oat_dex_files_by_canonical_location_.find(canonical_key);
+ if (canonical_it != oat_dex_files_by_canonical_location_.end()) {
+ oat_dex_file = canonical_it->second;
+ } // else keep nullptr.
+
+ // Copy the key to the string_cache_ and store the result in secondary map.
+ string_cache_.emplace_back(key.data(), key.length());
+ StringPiece key_copy(string_cache_.back());
+ secondary_oat_dex_files_.PutBefore(secondary_lb, key_copy, oat_dex_file);
}
}
+ if (oat_dex_file != nullptr &&
+ (dex_location_checksum == nullptr ||
+ oat_dex_file->GetDexFileLocationChecksum() == *dex_location_checksum)) {
+ return oat_dex_file;
+ }
if (warn_if_not_found) {
+ std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
std::string checksum("<unspecified>");
if (dex_location_checksum != NULL) {
checksum = StringPrintf("0x%08x", *dex_location_checksum);
}
LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location
+ << " ( canonical path " << dex_canonical_location << ")"
<< " with checksum " << checksum << " in OatFile " << GetLocation();
if (kIsDebugBuild) {
for (Table::const_iterator it = oat_dex_files_.begin(); it != oat_dex_files_.end(); ++it) {
LOG(WARNING) << "OatFile " << GetLocation()
<< " contains OatDexFile " << it->second->GetDexFileLocation()
+ << " (canonical path " << it->first << ")"
<< " with checksum 0x" << std::hex << it->second->GetDexFileLocationChecksum();
}
}
}
+
return NULL;
}
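A minimal, self-contained sketch of the two-level lookup pattern the new GetOatDexFile() adopts: a primary map built once and read without locking, plus a mutex-guarded secondary map that memoizes lookups by canonicalized location, including failed ones. All names below (TwoLevelIndex, Canonicalize) are illustrative stand-ins, not ART APIs.

#include <map>
#include <mutex>
#include <string>

class TwoLevelIndex {
 public:
  // Populate once, before any concurrent Find() calls.
  void AddPrimary(const std::string& location, int value) {
    primary_.emplace(location, value);
  }

  // Returns a pointer to the value, or nullptr if the location is unknown.
  const int* Find(const std::string& location) const {
    // 1) Cheap, lock-free lookup over locations named directly in the index.
    auto it = primary_.find(location);
    if (it != primary_.end()) {
      return &it->second;
    }
    // 2) Slow path: memoize the answer (even a negative one) under a lock.
    std::lock_guard<std::mutex> lock(secondary_lock_);
    auto lb = secondary_.lower_bound(location);
    if (lb != secondary_.end() && lb->first == location) {
      return lb->second;  // May be nullptr for a previously failed lookup.
    }
    const int* result = nullptr;
    auto canonical_it = primary_.find(Canonicalize(location));
    if (canonical_it != primary_.end()) {
      result = &canonical_it->second;
    }
    secondary_.emplace_hint(lb, location, result);
    return result;
  }

 private:
  static std::string Canonicalize(const std::string& location) {
    return location;  // Stand-in for DexFile::GetDexCanonicalLocation().
  }

  std::map<std::string, int> primary_;                   // Immutable after setup.
  mutable std::mutex secondary_lock_;
  mutable std::map<std::string, const int*> secondary_;  // Memoized slow-path results.
};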
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 44f4466..9710a2a 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -17,9 +17,11 @@
#ifndef ART_RUNTIME_OAT_FILE_H_
#define ART_RUNTIME_OAT_FILE_H_
+#include <list>
#include <string>
#include <vector>
+#include "base/mutex.h"
#include "base/stringpiece.h"
#include "dex_file.h"
#include "invoke_type.h"
@@ -52,6 +54,8 @@
// ImageWriter which wants to open a writable version from an existing
// file descriptor for patching.
static OatFile* OpenWritable(File* file, const std::string& location, std::string* error_msg);
+ // Opens an oat file from an already opened File. Maps it PROT_READ, MAP_PRIVATE.
+ static OatFile* OpenReadable(File* file, const std::string& location, std::string* error_msg);
// Open an oat file backed by a std::vector with the given location.
static OatFile* OpenMemory(std::vector<uint8_t>& oat_contents,
@@ -225,7 +229,8 @@
const OatDexFile* GetOatDexFile(const char* dex_location,
const uint32_t* const dex_location_checksum,
- bool exception_if_not_found = true) const;
+ bool exception_if_not_found = true) const
+ LOCKS_EXCLUDED(secondary_lookup_lock_);
std::vector<const OatDexFile*> GetOatDexFiles() const;
@@ -277,10 +282,38 @@
// dlopen handle during runtime.
void* dlopen_handle_;
- // NOTE: We use a StringPiece as the key type to avoid a memory allocation on every lookup
- // with a const char* key.
+ // NOTE: We use a StringPiece as the key type to avoid a memory allocation on every
+ // lookup with a const char* key. The StringPiece doesn't own its backing storage,
+ // therefore we're using the OatDexFile::dex_file_location_ as the backing storage
+ // for keys in oat_dex_files_ and the string_cache_ entries for the backing storage
+ // of keys in secondary_oat_dex_files_ and oat_dex_files_by_canonical_location_.
typedef SafeMap<StringPiece, const OatDexFile*> Table;
- Table oat_dex_files_;
+
+ // Map each plain dex file location retrieved from the oat file to its OatDexFile.
+ // This map doesn't change after it's constructed in Setup() and therefore doesn't
+ // need any locking and provides the cheapest dex file lookup for GetOatDexFile()
+ // for a very frequent use case. Never contains a nullptr value.
+ Table oat_dex_files_; // Owns the OatDexFile* values.
+
+ // Lock guarding all members needed for secondary lookup in GetOatDexFile().
+ mutable Mutex secondary_lookup_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ // If the primary oat_dex_files_ lookup fails, use a secondary map. This map stores
+ // the results of all previous secondary lookups, whether successful (non-null) or
+ // failed (null). If it doesn't contain an entry we need to calculate the canonical
+ // location and use oat_dex_files_by_canonical_location_.
+ mutable Table secondary_oat_dex_files_ GUARDED_BY(secondary_lookup_lock_);
+
+ // Map the canonical location to an OatDexFile. This lazily constructed map is used
+ // when we're doing the secondary lookup for a given location for the first time.
+ mutable Table oat_dex_files_by_canonical_location_ GUARDED_BY(secondary_lookup_lock_);
+
+ // Cache of strings. Contains the backing storage for keys in the secondary_oat_dex_files_
+ // and the lazily initialized oat_dex_files_by_canonical_location_.
+ // NOTE: We're keeping references to contained strings in form of StringPiece and adding
+ // new strings to the end. The adding of a new element must not touch any previously stored
+ // elements. std::list<> and std::deque<> satisfy this requirement, std::vector<> doesn't.
+ mutable std::list<std::string> string_cache_ GUARDED_BY(secondary_lookup_lock_);
friend class OatClass;
friend class OatDexFile;
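The std::list requirement in the comment above can be illustrated with a small stand-alone example (View is a hypothetical StringPiece stand-in): a view keeps a raw pointer into a stored std::string, so the container holding those strings must never relocate existing elements when it grows.

#include <cassert>
#include <cstddef>
#include <list>
#include <string>
#include <vector>

struct View {               // Minimal StringPiece stand-in: non-owning pointer + length.
  const char* data;
  std::size_t size;
};

int main() {
  std::list<std::string> cache;
  cache.emplace_back("plain/location.dex");
  View key{cache.back().data(), cache.back().size()};

  // Growing a std::list never moves existing nodes, so the view stays valid.
  for (int i = 0; i < 1000; ++i) {
    cache.emplace_back("another/location.dex");
  }
  assert(key.data == cache.front().data());

  // With std::vector the same pattern is unsafe: emplace_back may reallocate
  // and move the stored strings, leaving any previously taken view dangling.
  std::vector<std::string> unsafe_cache;
  unsafe_cache.emplace_back("plain/location.dex");
  const char* before = unsafe_cache.front().data();
  unsafe_cache.emplace_back("another/location.dex");  // May reallocate.
  (void)before;  // Dereferencing 'before' after a reallocation is undefined behavior.
  return 0;
}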
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 0e6f4d8..592deed 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -24,6 +24,8 @@
// For size_t.
#include <stdlib.h>
+#include "base/macros.h"
+
namespace art {
namespace mirror {
class Class;
@@ -57,8 +59,7 @@
// A callback for visiting an object in the heap.
typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
// A callback used for marking an object, returns the new address of the object if the object moved.
-typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
// A callback for verifying roots.
typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor, RootType root_type);
@@ -68,13 +69,12 @@
// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
-typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
// Returns true if the object in the heap reference is marked, if it is marked and has moved the
// callback updates the heap reference contain the new value.
typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
- void* arg) __attribute__((warn_unused_result));
+ void* arg) WARN_UNUSED;
typedef void (ProcessMarkStackCallback)(void* arg);
} // namespace art
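WARN_UNUSED is presumably a convenience macro from base/macros.h wrapping the attribute the old code spelled out; a minimal equivalent definition and its effect look like this (illustrative, not the exact ART definition):

#if defined(__GNUC__) || defined(__clang__)
#define WARN_UNUSED __attribute__((warn_unused_result))
#else
#define WARN_UNUSED
#endif

// With the attribute on the callback type, a caller that drops the returned
// (possibly moved) object pointer gets a compiler warning:
//   mirror::Object* moved = callback(obj, arg);   // OK
//   callback(obj, arg);                           // warning: ignoring return value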
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 577691c..12f9f33 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -177,6 +177,7 @@
}
// -Xcheck:jni is off by default for regular builds but on by default in debug builds.
check_jni_ = kIsDebugBuild;
+ force_copy_ = false;
heap_initial_size_ = gc::Heap::kDefaultInitialSize;
heap_maximum_size_ = gc::Heap::kDefaultMaximumSize;
@@ -221,6 +222,7 @@
compiler_callbacks_ = nullptr;
is_zygote_ = false;
+ must_relocate_ = kDefaultMustRelocate;
if (kPoisonHeapReferences) {
// kPoisonHeapReferences currently works only with the interpreter only.
// TODO: make it work with the compiler.
@@ -265,38 +267,6 @@
verify_ = true;
image_isa_ = kRuntimeISA;
- // Default to explicit checks. Switch off with -implicit-checks:.
- // or setprop dalvik.vm.implicit_checks check1,check2,...
-#ifdef HAVE_ANDROID_OS
- {
- char buf[PROP_VALUE_MAX];
- property_get("dalvik.vm.implicit_checks", buf, "null,stack");
- std::string checks(buf);
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- }
- }
- }
-#else
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
-#endif
-
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -312,6 +282,7 @@
Exit(0);
} else if (StartsWith(option, "-Xbootclasspath:")) {
boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data();
+ LOG(INFO) << "setting boot class path to " << boot_class_path_string_;
} else if (option == "-classpath" || option == "-cp") {
// TODO: support -Djava.class.path
i++;
@@ -330,6 +301,8 @@
}
} else if (StartsWith(option, "-Xcheck:jni")) {
check_jni_ = true;
+ } else if (StartsWith(option, "-Xjniopts:forcecopy")) {
+ force_copy_ = true;
} else if (StartsWith(option, "-Xrunjdwp:") || StartsWith(option, "-agentlib:jdwp=")) {
std::string tail(option.substr(option[1] == 'X' ? 10 : 15));
// TODO: move parsing logic out of Dbg
@@ -421,6 +394,7 @@
ignore_max_footprint_ = true;
} else if (option == "-XX:LowMemoryMode") {
low_memory_mode_ = true;
+ // TODO Might want to turn off must_relocate here.
} else if (option == "-XX:UseTLAB") {
use_tlab_ = true;
} else if (option == "-XX:EnableHSpaceCompactForOOM") {
@@ -439,6 +413,14 @@
reinterpret_cast<const char*>(options[i].second));
} else if (option == "-Xzygote") {
is_zygote_ = true;
+ } else if (StartsWith(option, "-Xpatchoat:")) {
+ if (!ParseStringAfterChar(option, ':', &patchoat_executable_)) {
+ return false;
+ }
+ } else if (option == "-Xrelocate") {
+ must_relocate_ = true;
+ } else if (option == "-Xnorelocate") {
+ must_relocate_ = false;
} else if (option == "-Xint") {
interpreter_only_ = true;
} else if (StartsWith(option, "-Xgc:")) {
@@ -589,54 +571,6 @@
if (!ParseUnsignedInteger(option, ':', &profiler_options_.max_stack_depth_)) {
return false;
}
- } else if (StartsWith(option, "-implicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- } else {
- return false;
- }
- }
- } else if (StartsWith(option, "-explicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = 0;
- } else if (val == "null") {
- explicit_checks_ |= kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ |= kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ |= kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else {
- return false;
- }
- }
} else if (StartsWith(option, "-Xcompiler:")) {
if (!ParseStringAfterChar(option, ':', &compiler_executable_)) {
return false;
@@ -665,6 +599,10 @@
Usage("Unknown -Xverify option %s\n", verify_mode.c_str());
return false;
}
+ } else if (StartsWith(option, "-XX:NativeBridge=")) {
+ if (!ParseStringAfterChar(option, '=', &native_bridge_library_string_)) {
+ return false;
+ }
} else if (StartsWith(option, "-ea") ||
StartsWith(option, "-da") ||
StartsWith(option, "-enableassertions") ||
@@ -678,7 +616,6 @@
StartsWith(option, "-Xint:") ||
StartsWith(option, "-Xdexopt:") ||
(option == "-Xnoquithandler") ||
- StartsWith(option, "-Xjniopts:") ||
StartsWith(option, "-Xjnigreflimit:") ||
(option == "-Xgenregmap") ||
(option == "-Xnogenregmap") ||
@@ -837,6 +774,8 @@
UsageMessage(stream, " -Xcompiler:filename\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
+ UsageMessage(stream, " -Xpatchoat:filename\n");
+ UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index b1de62a..c328ca7 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -44,9 +44,13 @@
std::string class_path_string_;
std::string image_;
bool check_jni_;
+ bool force_copy_;
std::string jni_trace_;
+ std::string native_bridge_library_string_;
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
+ bool must_relocate_;
+ std::string patchoat_executable_;
bool interpreter_only_;
bool is_explicit_gc_disabled_;
bool use_tlab_;
@@ -93,10 +97,6 @@
bool verify_;
InstructionSet image_isa_;
- static constexpr uint32_t kExplicitNullCheck = 1;
- static constexpr uint32_t kExplicitSuspendCheck = 2;
- static constexpr uint32_t kExplicitStackOverflowCheck = 4;
- uint32_t explicit_checks_;
// Whether or not we use homogeneous space compaction to avoid OOM errors. If enabled,
// the heap will attempt to create an extra space which enables compacting from a malloc space to
// another malloc space when we are about to throw OOM.
diff --git a/runtime/primitive.cc b/runtime/primitive.cc
index 16ca0fe..a639f93 100644
--- a/runtime/primitive.cc
+++ b/runtime/primitive.cc
@@ -30,6 +30,7 @@
"PrimDouble",
"PrimVoid",
};
+
std::ostream& operator<<(std::ostream& os, const Primitive::Type& type) {
int32_t int_type = static_cast<int32_t>(type);
if (type >= Primitive::kPrimNot && type <= Primitive::kPrimVoid) {
diff --git a/runtime/primitive.h b/runtime/primitive.h
index b436bd2..a36e9cb 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -21,12 +21,10 @@
#include "base/logging.h"
#include "base/macros.h"
-#include "mirror/object_reference.h"
namespace art {
-namespace mirror {
-class Object;
-} // namespace mirror
+
+static constexpr size_t kObjectReferenceSize = 4;
class Primitive {
public:
@@ -79,7 +77,7 @@
case kPrimFloat: return 4;
case kPrimLong:
case kPrimDouble: return 8;
- case kPrimNot: return sizeof(mirror::HeapReference<mirror::Object>);
+ case kPrimNot: return kObjectReferenceSize;
default:
LOG(FATAL) << "Invalid type " << static_cast<int>(type);
return 0;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index bd6656d..3081421 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -17,14 +17,14 @@
#include <jni.h>
#include <vector>
-#include "common_compiler_test.h"
+#include "common_runtime_test.h"
#include "field_helper.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
-class ProxyTest : public CommonCompilerTest {
+class ProxyTest : public CommonRuntimeTest {
public:
// Generate a proxy class with the given name and interfaces. This is a simplification from what
// libcore does to fit to our test needs. We do not check for duplicated interfaces or methods and
@@ -103,6 +103,12 @@
soa.Self()->AssertNoPendingException();
return proxyClass;
}
+
+ protected:
+ void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ options->push_back(std::make_pair(StringPrintf("-Ximage:%s", GetLibCoreOatFileName().c_str()),
+ nullptr));
+ }
};
// Creates a proxy class and check ClassHelper works correctly.
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 982553d..c4d51cb 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -48,7 +48,12 @@
kIntrinsicMinMaxFloat,
kIntrinsicMinMaxDouble,
kIntrinsicSqrt,
- kIntrinsicGet,
+ kIntrinsicCeil,
+ kIntrinsicFloor,
+ kIntrinsicRint,
+ kIntrinsicRoundFloat,
+ kIntrinsicRoundDouble,
+ kIntrinsicReferenceGet,
kIntrinsicCharAt,
kIntrinsicCompareTo,
kIntrinsicIsEmptyOrLength,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 6581f9b..41d6989 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -194,6 +194,10 @@
}
private:
+ static VRegKind GetVRegKind(uint16_t reg, const std::vector<int32_t>& kinds) {
+ return static_cast<VRegKind>(kinds.at(reg * 2));
+ }
+
bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
CHECK(code_item != nullptr);
@@ -210,9 +214,9 @@
&m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
m->GetAccessFlags(), false, true, true);
verifier.Verify();
- std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
+ const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
for (uint16_t reg = 0; reg < num_regs; ++reg) {
- VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
+ VRegKind kind = GetVRegKind(reg, kinds);
switch (kind) {
case kUndefined:
new_frame->SetVReg(reg, 0xEBADDE09);
@@ -224,6 +228,36 @@
new_frame->SetVRegReference(reg,
reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
break;
+ case kLongLoVReg:
+ if (GetVRegKind(reg + 1, kinds) == kLongHiVReg) {
+ // Treat it as a "long" register pair.
+ new_frame->SetVRegLong(reg, GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg));
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
+ case kLongHiVReg:
+ if (GetVRegKind(reg - 1, kinds) == kLongLoVReg) {
+ // Nothing to do: we treated it as a "long" register pair.
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
+ case kDoubleLoVReg:
+ if (GetVRegKind(reg + 1, kinds) == kDoubleHiVReg) {
+ // Treat it as a "double" register pair.
+ new_frame->SetVRegLong(reg, GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg));
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
+ case kDoubleHiVReg:
+ if (GetVRegKind(reg - 1, kinds) == kDoubleLoVReg) {
+ // Nothing to do: we treated it as a "double" register pair.
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
default:
new_frame->SetVReg(reg, GetVReg(m, reg, kind));
break;
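The verifier's DescribeVRegs() output stores each register's kind at index reg * 2 (that is what the GetVRegKind() helper above reads), and the new cases pair a *LoVReg with the following *HiVReg so a long or double is restored as one 64-bit shadow-frame slot. A small stand-alone illustration of that pairing walk, with simplified kinds:

#include <cstdint>
#include <iostream>
#include <vector>

enum VRegKind { kUndefined, kLongLoVReg, kLongHiVReg };

static VRegKind GetVRegKind(uint16_t reg, const std::vector<int32_t>& kinds) {
  return static_cast<VRegKind>(kinds.at(reg * 2));
}

int main() {
  // v0 is undefined; v1/v2 hold a single long (kinds interleave with payloads).
  std::vector<int32_t> kinds = {kUndefined, 0, kLongLoVReg, 0, kLongHiVReg, 0};
  for (uint16_t reg = 0; reg < 3; ++reg) {
    VRegKind kind = GetVRegKind(reg, kinds);
    if (kind == kLongLoVReg && GetVRegKind(reg + 1, kinds) == kLongHiVReg) {
      std::cout << "v" << reg << "/v" << (reg + 1) << ": restored as one 64-bit value\n";
    } else if (kind == kLongHiVReg) {
      std::cout << "v" << reg << ": already handled with its lo half\n";
    } else {
      std::cout << "v" << reg << ": restored as a 32-bit value\n";
    }
  }
  return 0;
}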
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index cd35863..70aba9b 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -24,7 +24,6 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
-#include "read_barrier.h"
#include "thread.h"
#include "utils.h"
@@ -46,14 +45,13 @@
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
<< "overflowed (" << max_size_ << " entries)";
}
- entries_.push_back(obj);
+ entries_.push_back(GcRoot<mirror::Object>(obj));
}
void ReferenceTable::Remove(mirror::Object* obj) {
// We iterate backwards on the assumption that references are LIFO.
for (int i = entries_.size() - 1; i >= 0; --i) {
- mirror::Object* entry =
- ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries_[i]);
+ mirror::Object* entry = entries_[i].Read();
if (entry == obj) {
entries_.erase(entries_.begin() + i);
return;
@@ -71,10 +69,12 @@
}
struct ObjectComparator {
- bool operator()(mirror::Object* obj1, mirror::Object* obj2)
+ bool operator()(GcRoot<mirror::Object> root1, GcRoot<mirror::Object> root2)
// TODO: enable analysis when analysis can work with the STL.
NO_THREAD_SAFETY_ANALYSIS {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ mirror::Object* obj1 = root1.Read<kWithoutReadBarrier>();
+ mirror::Object* obj2 = root2.Read<kWithoutReadBarrier>();
// Ensure null references and cleared jweaks appear at the end.
if (obj1 == NULL) {
return true;
@@ -163,8 +163,7 @@
}
os << " Last " << (count - first) << " entries (of " << count << "):\n";
for (int idx = count - 1; idx >= first; --idx) {
- mirror::Object* ref =
- ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[idx]);
+ mirror::Object* ref = entries[idx].Read();
if (ref == NULL) {
continue;
}
@@ -200,17 +199,17 @@
// Make a copy of the table and sort it.
Table sorted_entries;
for (size_t i = 0; i < entries.size(); ++i) {
- mirror::Object* entry =
- ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[i]);
- sorted_entries.push_back(entry);
+ mirror::Object* entry = entries[i].Read();
+ sorted_entries.push_back(GcRoot<mirror::Object>(entry));
}
std::sort(sorted_entries.begin(), sorted_entries.end(), ObjectComparator());
// Remove any uninteresting stuff from the list. The sort moved them all to the end.
- while (!sorted_entries.empty() && sorted_entries.back() == NULL) {
+ while (!sorted_entries.empty() && sorted_entries.back().IsNull()) {
sorted_entries.pop_back();
}
- while (!sorted_entries.empty() && sorted_entries.back() == kClearedJniWeakGlobal) {
+ while (!sorted_entries.empty() &&
+ sorted_entries.back().Read<kWithoutReadBarrier>() == kClearedJniWeakGlobal) {
sorted_entries.pop_back();
}
if (sorted_entries.empty()) {
@@ -222,8 +221,8 @@
size_t equiv = 0;
size_t identical = 0;
for (size_t idx = 1; idx < count; idx++) {
- mirror::Object* prev = sorted_entries[idx-1];
- mirror::Object* current = sorted_entries[idx];
+ mirror::Object* prev = sorted_entries[idx-1].Read<kWithoutReadBarrier>();
+ mirror::Object* current = sorted_entries[idx].Read<kWithoutReadBarrier>();
size_t element_count = GetElementCount(prev);
if (current == prev) {
// Same reference, added more than once.
@@ -238,13 +237,15 @@
}
}
// Handle the last entry.
- DumpSummaryLine(os, sorted_entries.back(), GetElementCount(sorted_entries.back()), identical, equiv);
+ DumpSummaryLine(os, sorted_entries.back().Read<kWithoutReadBarrier>(),
+ GetElementCount(sorted_entries.back().Read<kWithoutReadBarrier>()),
+ identical, equiv);
}
void ReferenceTable::VisitRoots(RootCallback* visitor, void* arg, uint32_t tid,
RootType root_type) {
- for (auto& ref : entries_) {
- visitor(&ref, arg, tid, root_type);
+ for (GcRoot<mirror::Object>& root : entries_) {
+ root.VisitRoot(visitor, arg, tid, root_type);
}
}
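The reference table now stores GcRoot<mirror::Object> instead of raw pointers, so every read funnels through one place where a read barrier can be applied and root visiting becomes a method on the root itself. A rough, illustrative wrapper showing the shape of that interface (not ART's actual GcRoot):

#include <cstdint>

struct Object {};  // Stand-in for mirror::Object.

typedef void RootVisitor(Object** root, void* arg, uint32_t tid, int root_type);

template <typename T>
class Root {
 public:
  Root() : ref_(nullptr) {}
  explicit Root(T* ref) : ref_(ref) {}

  // ART would apply a read barrier here before handing out the pointer.
  T* Read() const { return ref_; }

  bool IsNull() const { return ref_ == nullptr; }

  void VisitRoot(RootVisitor* visitor, void* arg, uint32_t tid, int root_type) {
    // The visitor may update ref_ in place if the referenced object moved.
    visitor(reinterpret_cast<Object**>(&ref_), arg, tid, root_type);
  }

 private:
  T* ref_;
};

// Usage mirrors the diff: entries_.push_back(Root<Object>(obj)); then entry.Read().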
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 1cd0999..8765442 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -23,6 +23,7 @@
#include <vector>
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
namespace art {
@@ -50,7 +51,7 @@
void VisitRoots(RootCallback* visitor, void* arg, uint32_t tid, RootType root_type);
private:
- typedef std::vector<mirror::Object*> Table;
+ typedef std::vector<GcRoot<mirror::Object>> Table;
static void Dump(std::ostream& os, Table& entries)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index d2877f9..db98e1f 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -17,7 +17,7 @@
#include "reference_table.h"
#include "common_runtime_test.h"
-#include "mirror/array.h"
+#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 0af4117..0169ccc 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -20,7 +20,7 @@
#include "common_throws.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -347,7 +347,7 @@
std::unique_ptr<uint32_t[]> large_arg_array_;
};
-static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
+static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::TypeList* params = m->GetParameterTypeList();
if (params == nullptr) {
@@ -375,11 +375,11 @@
self->ClearException();
++error_count;
} else if (!param_type->IsPrimitive()) {
- // TODO: check primitives are in range.
// TODO: There is a compaction bug here since GetClassFromTypeIdx can cause thread suspension,
// this is a hard to fix problem since the args can contain Object*, we need to save and
// restore them by using a visitor similar to the ones used in the trampoline entrypoints.
- mirror::Object* argument = reinterpret_cast<mirror::Object*>(args[i + offset]);
+ mirror::Object* argument =
+ (reinterpret_cast<StackReference<mirror::Object>*>(&args[i + offset]))->AsMirrorPtr();
if (argument != nullptr && !argument->InstanceOf(param_type)) {
LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of "
<< PrettyTypeOf(argument) << " as argument " << (i + 1)
@@ -388,13 +388,40 @@
}
} else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) {
offset++;
+ } else {
+ int32_t arg = static_cast<int32_t>(args[i + offset]);
+ if (param_type->IsPrimitiveBoolean()) {
+ if (arg != JNI_TRUE && arg != JNI_FALSE) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jboolean (0/1) but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ } else if (param_type->IsPrimitiveByte()) {
+ if (arg < -128 || arg > 127) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jbyte but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ } else if (param_type->IsPrimitiveChar()) {
+ if (args[i + offset] > 0xFFFF) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jchar but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ } else if (param_type->IsPrimitiveShort()) {
+ if (arg < -32768 || arg > 32767) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jshort but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ }
}
}
- if (error_count > 0) {
+ if (UNLIKELY(error_count > 0)) {
// TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort
// with an argument.
- JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
- PrettyMethod(h_m.Get()).c_str());
+ vm->JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
+ PrettyMethod(h_m.Get()).c_str());
}
}
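The primitive checks added above enforce the value ranges of the narrow JNI types, since jboolean, jbyte, jchar and jshort all arrive widened to 32 bits in the argument array. A compact stand-alone version of those range tests:

#include <cstdint>
#include <iostream>

static bool FitsJBoolean(int32_t v) { return v == 0 || v == 1; }         // JNI_FALSE/JNI_TRUE.
static bool FitsJByte(int32_t v)    { return v >= -128 && v <= 127; }
static bool FitsJChar(uint32_t v)   { return v <= 0xFFFF; }              // Unsigned 16-bit.
static bool FitsJShort(int32_t v)   { return v >= -32768 && v <= 32767; }

int main() {
  std::cout << FitsJBoolean(2) << "\n";      // 0: would trigger the JNI abort above.
  std::cout << FitsJByte(130) << "\n";       // 0: out of jbyte range.
  std::cout << FitsJChar(0x1F600) << "\n";   // 0: too wide for jchar.
  std::cout << FitsJShort(-32768) << "\n";   // 1: in range.
  return 0;
}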
@@ -411,7 +438,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
- CheckMethodArguments(method, args);
+ CheckMethodArguments(soa.Vm(), method, args);
}
method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}
@@ -567,11 +594,6 @@
return true;
}
-static std::string PrettyDescriptor(Primitive::Type type) {
- std::string descriptor_string(Primitive::Descriptor(type));
- return PrettyDescriptor(descriptor_string);
-}
-
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type srcType, Primitive::Type dstType,
const JValue& src, JValue* dst) {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 2c54c06..61370c6 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_REFLECTION_H_
#define ART_RUNTIME_REFLECTION_H_
+#include "base/mutex.h"
#include "jni.h"
#include "primitive.h"
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index f776bcd..ac9026b 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -41,32 +41,29 @@
inline mirror::ArtMethod* Runtime::GetResolutionMethod() {
CHECK(HasResolutionMethod());
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&resolution_method_);
+ return resolution_method_.Read();
}
inline mirror::ArtMethod* Runtime::GetImtConflictMethod() {
CHECK(HasImtConflictMethod());
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&imt_conflict_method_);
+ return imt_conflict_method_.Read();
}
inline mirror::ObjectArray<mirror::ArtMethod>* Runtime::GetDefaultImt()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(HasDefaultImt());
- return ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::ArtMethod>, kWithReadBarrier>(
- &default_imt_);
+ return default_imt_.Read();
}
inline mirror::ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(HasCalleeSaveMethod(type));
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(
- &callee_save_methods_[type]);
+ return callee_save_methods_[type].Read();
}
inline mirror::ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(
- &callee_save_methods_[type]);
+ return callee_save_methods_[type].Read();
}
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 0ddd2ae..d677729 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -63,6 +63,7 @@
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
+#include "native_bridge.h"
#include "parsed_options.h"
#include "oat_file.h"
#include "quick/quick_method_frame_info.h"
@@ -94,13 +95,10 @@
Runtime* Runtime::instance_ = NULL;
Runtime::Runtime()
- : pre_allocated_OutOfMemoryError_(nullptr),
- resolution_method_(nullptr),
- imt_conflict_method_(nullptr),
- default_imt_(nullptr),
- instruction_set_(kNone),
+ : instruction_set_(kNone),
compiler_callbacks_(nullptr),
is_zygote_(false),
+ must_relocate_(false),
is_concurrent_gc_enabled_(true),
is_explicit_gc_disabled_(false),
default_stack_size_(0),
@@ -141,20 +139,13 @@
suspend_handler_(nullptr),
stack_overflow_handler_(nullptr),
verify_(false),
- target_sdk_version_(0) {
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- callee_save_methods_[i] = nullptr;
- }
+ target_sdk_version_(0),
+ implicit_null_checks_(false),
+ implicit_so_checks_(false),
+ implicit_suspend_checks_(false) {
}
Runtime::~Runtime() {
- if (method_trace_ && Thread::Current() == nullptr) {
- // We need a current thread to shutdown method tracing: re-attach it now.
- JNIEnv* unused_env;
- if (GetJavaVM()->AttachCurrentThread(&unused_env, nullptr) != JNI_OK) {
- LOG(ERROR) << "Could not attach current thread before runtime shutdown.";
- }
- }
if (dump_gc_performance_on_shutdown_) {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
@@ -348,7 +339,7 @@
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> class_loader_class(
hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
CHECK(cl->EnsureInitialized(class_loader_class, true, true));
@@ -358,15 +349,12 @@
CHECK(getSystemClassLoader != NULL);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(down_cast<mirror::ClassLoader*>(result.GetL())));
- CHECK(class_loader.Get() != nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
ScopedLocalRef<jobject> system_class_loader(env,
- soa.AddLocalReference<jobject>(class_loader.Get()));
+ soa.AddLocalReference<jobject>(result.GetL()));
CHECK(system_class_loader.get() != nullptr);
- soa.Self()->SetClassLoaderOverride(class_loader.Get());
+ soa.Self()->SetClassLoaderOverride(system_class_loader.get());
Handle<mirror::Class> thread_class(
hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
@@ -377,11 +365,21 @@
CHECK(contextClassLoader != NULL);
// We can't run in a transaction yet.
- contextClassLoader->SetObject<false>(soa.Self()->GetPeer(), class_loader.Get());
+ contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
+ soa.Decode<mirror::ClassLoader*>(system_class_loader.get()));
return env->NewGlobalRef(system_class_loader.get());
}
+std::string Runtime::GetPatchoatExecutable() const {
+ if (!patchoat_executable_.empty()) {
+ return patchoat_executable_;
+ }
+ std::string patchoat_executable(GetAndroidRoot());
+ patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
+ return patchoat_executable;
+}
+
std::string Runtime::GetCompilerExecutable() const {
if (!compiler_executable_.empty()) {
return compiler_executable_;
@@ -554,6 +552,8 @@
properties_ = options->properties_;
compiler_callbacks_ = options->compiler_callbacks_;
+ patchoat_executable_ = options->patchoat_executable_;
+ must_relocate_ = options->must_relocate_;
is_zygote_ = options->is_zygote_;
is_explicit_gc_disabled_ = options->is_explicit_gc_disabled_;
@@ -581,41 +581,6 @@
GetInstrumentation()->ForceInterpretOnly();
}
- bool implicit_checks_supported = false;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- implicit_checks_supported = true;
- break;
- default:
- break;
- }
-
- if (!options->interpreter_only_ && implicit_checks_supported &&
- (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
- ParsedOptions::kExplicitNullCheck |
- ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler)) {
- fault_manager.Init();
-
- // These need to be in a specific order. The null point check handler must be
- // after the suspend check and stack overflow check handlers.
- if ((options->explicit_checks_ & ParsedOptions::kExplicitSuspendCheck) == 0) {
- suspend_handler_ = new SuspensionHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitStackOverflowCheck) == 0) {
- stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitNullCheck) == 0) {
- null_pointer_handler_ = new NullPointerHandler(&fault_manager);
- }
-
- if (kEnableJavaStackTraceHandler) {
- new JavaStackTraceHandler(&fault_manager);
- }
- }
-
heap_ = new gc::Heap(options->heap_initial_size_,
options->heap_growth_limit_,
options->heap_min_free_,
@@ -648,6 +613,43 @@
BlockSignals();
InitPlatformSignalHandlers();
+ // Change the implicit checks flags based on runtime architecture.
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ case kArm64:
+ case kX86_64:
+ implicit_null_checks_ = true;
+ implicit_so_checks_ = true;
+ break;
+ default:
+ // Keep the defaults.
+ break;
+ }
+
+ if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
+ fault_manager.Init();
+
+ // These need to be in a specific order. The null pointer check handler must be
+ // after the suspend check and stack overflow check handlers.
+ if (implicit_suspend_checks_) {
+ suspend_handler_ = new SuspensionHandler(&fault_manager);
+ }
+
+ if (implicit_so_checks_) {
+ stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
+ }
+
+ if (implicit_null_checks_) {
+ null_pointer_handler_ = new NullPointerHandler(&fault_manager);
+ }
+
+ if (kEnableJavaStackTraceHandler) {
+ new JavaStackTraceHandler(&fault_manager);
+ }
+ }
+
java_vm_ = new JavaVMExt(this, options.get());
Thread::Startup();
@@ -700,9 +702,12 @@
self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;",
"OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
"no stack available");
- pre_allocated_OutOfMemoryError_ = self->GetException(NULL);
+ pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
self->ClearException();
+ // Look for a native bridge.
+ SetNativeBridgeLibraryString(options->native_bridge_library_string_);
+
VLOG(startup) << "Runtime::Init exiting";
return true;
}
@@ -729,13 +734,9 @@
{
std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore"));
std::string reason;
- self->TransitionFromSuspendedToRunnable();
- StackHandleScope<1> hs(self);
- auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
- if (!instance_->java_vm_->LoadNativeLibrary(mapped_name, class_loader, &reason)) {
+ if (!instance_->java_vm_->LoadNativeLibrary(env, mapped_name, nullptr, &reason)) {
LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason;
}
- self->TransitionFromRunnableToSuspended(kNative);
}
// Initialize well known classes that may invoke runtime native methods.
@@ -912,8 +913,7 @@
}
mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryError() {
- mirror::Throwable* oome = ReadBarrier::BarrierForRoot<mirror::Throwable, kWithReadBarrier>(
- &pre_allocated_OutOfMemoryError_);
+ mirror::Throwable* oome = pre_allocated_OutOfMemoryError_.Read();
if (oome == NULL) {
LOG(ERROR) << "Failed to return pre-allocated OOME";
}
@@ -952,23 +952,21 @@
void Runtime::VisitNonThreadRoots(RootCallback* callback, void* arg) {
java_vm_->VisitRoots(callback, arg);
- if (pre_allocated_OutOfMemoryError_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&pre_allocated_OutOfMemoryError_), arg, 0,
- kRootVMInternal);
- DCHECK(pre_allocated_OutOfMemoryError_ != nullptr);
+ if (!pre_allocated_OutOfMemoryError_.IsNull()) {
+ pre_allocated_OutOfMemoryError_.VisitRoot(callback, arg, 0, kRootVMInternal);
+ DCHECK(!pre_allocated_OutOfMemoryError_.IsNull());
}
- callback(reinterpret_cast<mirror::Object**>(&resolution_method_), arg, 0, kRootVMInternal);
- DCHECK(resolution_method_ != nullptr);
+ resolution_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
+ DCHECK(!resolution_method_.IsNull());
if (HasImtConflictMethod()) {
- callback(reinterpret_cast<mirror::Object**>(&imt_conflict_method_), arg, 0, kRootVMInternal);
+ imt_conflict_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
}
if (HasDefaultImt()) {
- callback(reinterpret_cast<mirror::Object**>(&default_imt_), arg, 0, kRootVMInternal);
+ default_imt_.VisitRoot(callback, arg, 0, kRootVMInternal);
}
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- if (callee_save_methods_[i] != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&callee_save_methods_[i]), arg, 0,
- kRootVMInternal);
+ if (!callee_save_methods_[i].IsNull()) {
+ callee_save_methods_[i].VisitRoot(callback, arg, 0, kRootVMInternal);
}
}
{
@@ -1106,7 +1104,7 @@
void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) {
DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
- callee_save_methods_[type] = method;
+ callee_save_methods_[type] = GcRoot<mirror::ArtMethod>(method);
}
const std::vector<const DexFile*>& Runtime::GetCompileTimeClassPath(jobject class_loader) {
@@ -1222,37 +1220,6 @@
argv->push_back("--compiler-filter=interpret-only");
}
- argv->push_back("--runtime-arg");
- std::string checkstr = "-implicit-checks";
-
- int nchecks = 0;
- char checksep = ':';
-
- if (!ExplicitNullChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "null";
- ++nchecks;
- }
- if (!ExplicitSuspendChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "suspend";
- ++nchecks;
- }
-
- if (!ExplicitStackOverflowChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "stack";
- ++nchecks;
- }
-
- if (nchecks == 0) {
- checkstr += ":none";
- }
- argv->push_back(checkstr);
-
// Make the dex2oat instruction set match that of the launching runtime. If we have multiple
// architecture support, dex2oat may be compiled as a different instruction-set than that
// currently being executed.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index fccccbd..a85c2e4 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -21,10 +21,13 @@
#include <stdio.h>
#include <iosfwd>
+#include <set>
#include <string>
#include <utility>
#include <vector>
+#include "compiler_callbacks.h"
+#include "gc_root.h"
#include "instrumentation.h"
#include "instruction_set.h"
#include "jobject_comparator.h"
@@ -54,7 +57,6 @@
class MethodVerifier;
}
class ClassLinker;
-class CompilerCallbacks;
class DexFile;
class InternTable;
class JavaVMExt;
@@ -91,6 +93,18 @@
return compiler_callbacks_ != nullptr;
}
+ bool CanRelocate() const {
+ return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
+ }
+
+ bool ShouldRelocate() const {
+ return must_relocate_ && CanRelocate();
+ }
+
+ bool MustRelocateIfPossible() const {
+ return must_relocate_;
+ }
+
CompilerCallbacks* GetCompilerCallbacks() {
return compiler_callbacks_;
}
@@ -104,6 +118,7 @@
}
std::string GetCompilerExecutable() const;
+ std::string GetPatchoatExecutable() const;
const std::vector<std::string>& GetCompilerOptions() const {
return compiler_options_;
@@ -173,7 +188,7 @@
void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpLockHolders(std::ostream& os);
~Runtime();
@@ -268,11 +283,11 @@
mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasResolutionMethod() const {
- return resolution_method_ != nullptr;
+ return !resolution_method_.IsNull();
}
void SetResolutionMethod(mirror::ArtMethod* method) {
- resolution_method_ = method;
+ resolution_method_ = GcRoot<mirror::ArtMethod>(method);
}
mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -281,11 +296,11 @@
mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasImtConflictMethod() const {
- return imt_conflict_method_ != nullptr;
+ return !imt_conflict_method_.IsNull();
}
void SetImtConflictMethod(mirror::ArtMethod* method) {
- imt_conflict_method_ = method;
+ imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
}
mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -295,11 +310,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasDefaultImt() const {
- return default_imt_ != nullptr;
+ return !default_imt_.IsNull();
}
void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
- default_imt_ = imt;
+ default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt);
}
mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
@@ -314,7 +329,7 @@
};
bool HasCalleeSaveMethod(CalleeSaveType type) const {
- return callee_save_methods_[type] != NULL;
+ return !callee_save_methods_[type].IsNull();
}
mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
@@ -474,21 +489,23 @@
static constexpr int kProfileForground = 0;
static constexpr int kProfileBackgrouud = 1;
- mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];
- mirror::Throwable* pre_allocated_OutOfMemoryError_;
- mirror::ArtMethod* resolution_method_;
- mirror::ArtMethod* imt_conflict_method_;
- mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+ GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType];
+ GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
+ GcRoot<mirror::ArtMethod> resolution_method_;
+ GcRoot<mirror::ArtMethod> imt_conflict_method_;
+ GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;
InstructionSet instruction_set_;
QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
+ bool must_relocate_;
bool is_concurrent_gc_enabled_;
bool is_explicit_gc_disabled_;
std::string compiler_executable_;
+ std::string patchoat_executable_;
std::vector<std::string> compiler_options_;
std::vector<std::string> image_compiler_options_;
@@ -589,6 +606,11 @@
// Specifies target SDK version to allow workarounds for certain API levels.
int32_t target_sdk_version_;
+ // Implicit checks flags.
+ bool implicit_null_checks_; // NullPointer checks are implicit.
+ bool implicit_so_checks_; // StackOverflow checks are implicit.
+ bool implicit_suspend_checks_; // Thread suspension checks are implicit.
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 941fd0e..f7e238c 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -52,6 +52,7 @@
return *this;
}
+ allocator_type get_allocator() const { return map_.get_allocator(); }
key_compare key_comp() const { return map_.key_comp(); }
value_compare value_comp() const { return map_.value_comp(); }
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index d691623..ae3eaf2 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -18,7 +18,9 @@
#define ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
#include "base/casts.h"
-#include "jni_internal-inl.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext-inl.h"
+#include "read_barrier.h"
#include "thread-inl.h"
#include "verify_object.h"
@@ -113,6 +115,10 @@
return vm_;
}
+ bool ForceCopy() const {
+ return vm_->ForceCopy();
+ }
+
/*
* Add a local reference for an object to the indirect reference table associated with the
* current stack frame. When the native function returns, the reference will be discarded.
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 71e566e..2d0060e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -144,8 +144,8 @@
bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- if (cur_quick_frame_ != NULL) {
- DCHECK(context_ != NULL); // You can't reliably read registers without a context.
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
const void* code_pointer = m->GetQuickOatCodePointer();
DCHECK(code_pointer != nullptr);
@@ -158,14 +158,12 @@
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
uintptr_t ptr_val;
- bool success = false;
- bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64);
- if (is_float) {
- success = GetFPR(reg, &ptr_val);
- } else {
- success = GetGPR(reg, &ptr_val);
+ bool success = is_float ? GetFPR(reg, &ptr_val) : GetGPR(reg, &ptr_val);
+ if (!success) {
+ return false;
}
- if (success && target64) {
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ if (target64) {
bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
int64_t value_long = static_cast<int64_t>(ptr_val);
@@ -176,10 +174,11 @@
}
}
*val = ptr_val;
- return success;
+ return true;
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
*val = *GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
return true;
@@ -190,10 +189,64 @@
}
}
+bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ VRegKind kind_hi, uint64_t* val) const {
+ if (kind_lo == kLongLoVReg) {
+ DCHECK_EQ(kind_hi, kLongHiVReg);
+ } else if (kind_lo == kDoubleLoVReg) {
+ DCHECK_EQ(kind_hi, kDoubleHiVReg);
+ } else {
+ LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
+ }
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
+ DCHECK(m == GetMethod());
+ const void* code_pointer = m->GetQuickOatCodePointer();
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset_lo, vmap_offset_hi;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ bool is_float = (kind_lo == kDoubleLoVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
+ uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
+ uintptr_t ptr_val_lo, ptr_val_hi;
+ bool success = is_float ? GetFPR(reg_lo, &ptr_val_lo) : GetGPR(reg_lo, &ptr_val_lo);
+ success &= is_float ? GetFPR(reg_hi, &ptr_val_hi) : GetGPR(reg_hi, &ptr_val_hi);
+ if (!success) {
+ return false;
+ }
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ if (target64) {
+ int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
+ int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
+ ptr_val_lo = static_cast<uintptr_t>(value_long_lo & 0xFFFFFFFF);
+ ptr_val_hi = static_cast<uintptr_t>(value_long_hi >> 32);
+ }
+ *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
+ return true;
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *val = *reinterpret_cast<uint64_t*>(addr);
+ return true;
+ }
+ } else {
+ *val = cur_shadow_frame_->GetVRegLong(vreg);
+ return true;
+ }
+}
+
bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind) {
- if (cur_quick_frame_ != NULL) {
- DCHECK(context_ != NULL); // You can't reliably write registers without a context.
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
const void* code_pointer = m->GetQuickOatCodePointer();
DCHECK(code_pointer != nullptr);
@@ -205,7 +258,7 @@
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
- bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64);
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
// Deal with 32 or 64-bit wide registers in a way that builds on all targets.
if (target64) {
bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
@@ -234,11 +287,11 @@
}
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
- int offset = GetVRegOffset(code_item, frame_info.CoreSpillMask(), frame_info.FpSpillMask(),
- frame_info.FrameSizeInBytes(), vreg, kRuntimeISA);
- byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
- *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *addr = new_value;
return true;
}
} else {
@@ -247,6 +300,68 @@
}
}
+bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ VRegKind kind_lo, VRegKind kind_hi) {
+ if (kind_lo == kLongLoVReg) {
+ DCHECK_EQ(kind_hi, kLongHiVReg);
+ } else if (kind_lo == kDoubleLoVReg) {
+ DCHECK_EQ(kind_hi, kDoubleHiVReg);
+ } else {
+ LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
+ }
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
+ DCHECK(m == GetMethod());
+ const void* code_pointer = m->GetQuickOatCodePointer();
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset_lo, vmap_offset_hi;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ bool is_float = (kind_lo == kDoubleLoVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
+ uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
+ uintptr_t new_value_lo = static_cast<uintptr_t>(new_value & 0xFFFFFFFF);
+ uintptr_t new_value_hi = static_cast<uintptr_t>(new_value >> 32);
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
+ if (target64) {
+ uintptr_t old_reg_val_lo, old_reg_val_hi;
+ bool success = is_float ? GetFPR(reg_lo, &old_reg_val_lo) : GetGPR(reg_lo, &old_reg_val_lo);
+ success &= is_float ? GetFPR(reg_hi, &old_reg_val_hi) : GetGPR(reg_hi, &old_reg_val_hi);
+ if (!success) {
+ return false;
+ }
+ uint64_t new_vreg_portion_lo = static_cast<uint64_t>(new_value_lo);
+ uint64_t new_vreg_portion_hi = static_cast<uint64_t>(new_value_hi) << 32;
+ uint64_t old_reg_val_lo_as_wide = static_cast<uint64_t>(old_reg_val_lo);
+ uint64_t old_reg_val_hi_as_wide = static_cast<uint64_t>(old_reg_val_hi);
+ uint64_t mask_lo = static_cast<uint64_t>(0xffffffff) << 32;
+ uint64_t mask_hi = 0xffffffff;
+ new_value_lo = static_cast<uintptr_t>((old_reg_val_lo_as_wide & mask_lo) | new_vreg_portion_lo);
+ new_value_hi = static_cast<uintptr_t>((old_reg_val_hi_as_wide & mask_hi) | new_vreg_portion_hi);
+ }
+ bool success = is_float ? SetFPR(reg_lo, new_value_lo) : SetGPR(reg_lo, new_value_lo);
+ success &= is_float ? SetFPR(reg_hi, new_value_hi) : SetGPR(reg_hi, new_value_hi);
+ return success;
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *reinterpret_cast<uint64_t*>(addr) = new_value;
+ return true;
+ }
+ } else {
+ cur_shadow_frame_->SetVRegLong(vreg, new_value);
+ return true;
+ }
+}
+
uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine";
return context_->GetGPRAddress(reg);
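Editor's note on the 64-bit path in SetVRegPair above: each 32-bit vreg half is merged into one lane of a 64-bit physical register while the other lane is preserved. A minimal standalone sketch of that merge (plain C++, not the ART API; the helper names are made up for illustration):

#include <cassert>
#include <cstdint>

// Keep the upper 32 bits of the physical register, replace the lower 32 bits.
uint64_t MergeLowHalf(uint64_t old_reg, uint32_t new_lo) {
  return (old_reg & 0xffffffff00000000ULL) | new_lo;
}

// Keep the lower 32 bits of the physical register, replace the upper 32 bits.
uint64_t MergeHighHalf(uint64_t old_reg, uint32_t new_hi) {
  return (old_reg & 0x00000000ffffffffULL) | (static_cast<uint64_t>(new_hi) << 32);
}

int main() {
  uint64_t new_value = 0x1122334455667788ULL;
  uint32_t lo = static_cast<uint32_t>(new_value);        // 0x55667788
  uint32_t hi = static_cast<uint32_t>(new_value >> 32);  // 0x11223344
  assert(MergeLowHalf(0xAAAAAAAABBBBBBBBULL, lo) == 0xAAAAAAAA55667788ULL);
  assert(MergeHighHalf(0xCCCCCCCCDDDDDDDDULL, hi) == 0x11223344DDDDDDDDULL);
  return 0;
}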
diff --git a/runtime/stack.h b/runtime/stack.h
index ef498ef..578f569 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -568,9 +568,26 @@
return val;
}
+ bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
+ uint64_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ uint64_t GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ VRegKind kind_hi) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint64_t val;
+ bool success = GetVRegPair(m, vreg, kind_lo, kind_hi, &val);
+ CHECK(success) << "Failed to read vreg pair " << vreg
+ << " of kind [" << kind_lo << "," << kind_hi << "]";
+ return val;
+ }
+
bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ VRegKind kind_lo, VRegKind kind_hi)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uintptr_t* GetGPRAddress(uint32_t reg) const;
// This is a fast-path for getting/setting values in a quick frame.
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
new file mode 100644
index 0000000..7d3a48f
--- /dev/null
+++ b/runtime/stack_map.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_STACK_MAP_H_
+#define ART_RUNTIME_STACK_MAP_H_
+
+#include "base/bit_vector.h"
+#include "memory_region.h"
+
+namespace art {
+
+/**
+ * Classes in the following file are wrappers around stack map information backed
+ * by a MemoryRegion. As such, they read and write to the region; they don't have
+ * their own fields.
+ */
+
+/**
+ * Inline information for a specific PC. The information is of the form:
+ * [inlining_depth, [method_dex reference]+]
+ */
+class InlineInfo {
+ public:
+ explicit InlineInfo(MemoryRegion region) : region_(region) {}
+
+ uint8_t GetDepth() const {
+ return region_.Load<uint8_t>(kDepthOffset);
+ }
+
+ void SetDepth(uint8_t depth) {
+ region_.Store<uint8_t>(kDepthOffset, depth);
+ }
+
+ uint32_t GetMethodReferenceIndexAtDepth(uint8_t depth) const {
+ return region_.Load<uint32_t>(kFixedSize + depth * SingleEntrySize());
+ }
+
+ void SetMethodReferenceIndexAtDepth(uint8_t depth, uint32_t index) {
+ region_.Store<uint32_t>(kFixedSize + depth * SingleEntrySize(), index);
+ }
+
+ static size_t SingleEntrySize() {
+ return sizeof(uint32_t);
+ }
+
+ private:
+ static constexpr int kDepthOffset = 0;
+ static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
+
+ static constexpr uint32_t kNoInlineInfo = -1;
+
+ MemoryRegion region_;
+
+ template<typename T> friend class CodeInfo;
+ template<typename T> friend class StackMap;
+ template<typename T> friend class StackMapStream;
+};
+
+/**
+ * Information on dex register values for a specific PC. The information is
+ * of the form:
+ * [location_kind, register_value]+.
+ *
+ * The location_kind for a Dex register can either be:
+ * - Constant: register_value holds the constant,
+ * - Stack: register_value holds the stack offset,
+ * - Register: register_value holds the register number.
+ */
+class DexRegisterMap {
+ public:
+ explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
+
+ enum LocationKind {
+ kInStack,
+ kInRegister,
+ kConstant
+ };
+
+ LocationKind GetLocationKind(uint16_t register_index) const {
+ return region_.Load<LocationKind>(
+ kFixedSize + register_index * SingleEntrySize());
+ }
+
+ void SetRegisterInfo(uint16_t register_index, LocationKind kind, int32_t value) {
+ size_t entry = kFixedSize + register_index * SingleEntrySize();
+ region_.Store<LocationKind>(entry, kind);
+ region_.Store<int32_t>(entry + sizeof(LocationKind), value);
+ }
+
+ int32_t GetValue(uint16_t register_index) const {
+ return region_.Load<int32_t>(
+ kFixedSize + sizeof(LocationKind) + register_index * SingleEntrySize());
+ }
+
+ static size_t SingleEntrySize() {
+ return sizeof(LocationKind) + sizeof(int32_t);
+ }
+
+ private:
+ static constexpr int kFixedSize = 0;
+
+ MemoryRegion region_;
+
+ template <typename T> friend class CodeInfo;
+ template <typename T> friend class StackMapStream;
+};
+
+/**
+ * A Stack Map holds compilation information for a specific PC necessary for:
+ * - Mapping it to a dex PC,
+ * - Knowing which stack entries are objects,
+ * - Knowing which registers hold objects,
+ * - Knowing the inlining information,
+ * - Knowing the values of dex registers.
+ *
+ * The information is of the form:
+ * [dex_pc, native_pc, dex_register_map_offset, inlining_info_offset, register_mask, stack_mask].
+ *
+ * Note that register_mask is fixed size, but stack_mask is variable size, depending on the
+ * stack size of a method.
+ */
+template <typename T>
+class StackMap {
+ public:
+ explicit StackMap(MemoryRegion region) : region_(region) {}
+
+ uint32_t GetDexPc() const {
+ return region_.Load<uint32_t>(kDexPcOffset);
+ }
+
+ void SetDexPc(uint32_t dex_pc) {
+ region_.Store<uint32_t>(kDexPcOffset, dex_pc);
+ }
+
+ T GetNativePc() const {
+ return region_.Load<T>(kNativePcOffset);
+ }
+
+ void SetNativePc(T native_pc) {
+ return region_.Store<T>(kNativePcOffset, native_pc);
+ }
+
+ uint32_t GetDexRegisterMapOffset() const {
+ return region_.Load<uint32_t>(kDexRegisterMapOffsetOffset);
+ }
+
+ void SetDexRegisterMapOffset(uint32_t offset) {
+ return region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
+ }
+
+ uint32_t GetInlineDescriptorOffset() const {
+ return region_.Load<uint32_t>(kInlineDescriptorOffsetOffset);
+ }
+
+ void SetInlineDescriptorOffset(uint32_t offset) {
+ return region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
+ }
+
+ uint32_t GetRegisterMask() const {
+ return region_.Load<uint32_t>(kRegisterMaskOffset);
+ }
+
+ void SetRegisterMask(uint32_t mask) {
+ region_.Store<uint32_t>(kRegisterMaskOffset, mask);
+ }
+
+ MemoryRegion GetStackMask() const {
+ return region_.Subregion(kStackMaskOffset, StackMaskSize());
+ }
+
+ void SetStackMask(const BitVector& sp_map) {
+ MemoryRegion region = GetStackMask();
+ for (size_t i = 0; i < region.size_in_bits(); i++) {
+ region.StoreBit(i, sp_map.IsBitSet(i));
+ }
+ }
+
+ bool HasInlineInfo() const {
+ return GetInlineDescriptorOffset() != InlineInfo::kNoInlineInfo;
+ }
+
+ bool Equals(const StackMap& other) {
+ return region_.pointer() == other.region_.pointer()
+ && region_.size() == other.region_.size();
+ }
+
+ private:
+ static constexpr int kDexPcOffset = 0;
+ static constexpr int kNativePcOffset = kDexPcOffset + sizeof(uint32_t);
+ static constexpr int kDexRegisterMapOffsetOffset = kNativePcOffset + sizeof(T);
+ static constexpr int kInlineDescriptorOffsetOffset =
+ kDexRegisterMapOffsetOffset + sizeof(uint32_t);
+ static constexpr int kRegisterMaskOffset = kInlineDescriptorOffsetOffset + sizeof(uint32_t);
+ static constexpr int kFixedSize = kRegisterMaskOffset + sizeof(uint32_t);
+ static constexpr int kStackMaskOffset = kFixedSize;
+
+ size_t StackMaskSize() const { return region_.size() - kFixedSize; }
+
+ MemoryRegion region_;
+
+ template <typename U> friend class CodeInfo;
+ template <typename U> friend class StackMapStream;
+};
+
+
+/**
+ * Wrapper around all compiler information collected for a method.
+ * The information is of the form:
+ * [number_of_stack_maps, stack_mask_size, StackMap+, DexRegisterMap+, InlineInfo*].
+ */
+template <typename T>
+class CodeInfo {
+ public:
+ explicit CodeInfo(MemoryRegion region) : region_(region) {}
+
+ StackMap<T> GetStackMapAt(size_t i) const {
+ size_t size = StackMapSize();
+ return StackMap<T>(GetStackMaps().Subregion(i * size, size));
+ }
+
+ uint32_t GetStackMaskSize() const {
+ return region_.Load<uint32_t>(kStackMaskSizeOffset);
+ }
+
+ void SetStackMaskSize(uint32_t size) {
+ region_.Store<uint32_t>(kStackMaskSizeOffset, size);
+ }
+
+ size_t GetNumberOfStackMaps() const {
+ return region_.Load<uint32_t>(kNumberOfStackMapsOffset);
+ }
+
+ void SetNumberOfStackMaps(uint32_t number_of_stack_maps) {
+ region_.Store<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
+ }
+
+ size_t StackMapSize() const {
+ return StackMap<T>::kFixedSize + GetStackMaskSize();
+ }
+
+ DexRegisterMap GetDexRegisterMapOf(StackMap<T> stack_map, uint32_t number_of_dex_registers) {
+ uint32_t offset = stack_map.GetDexRegisterMapOffset();
+ return DexRegisterMap(region_.Subregion(offset,
+ DexRegisterMap::kFixedSize + number_of_dex_registers * DexRegisterMap::SingleEntrySize()));
+ }
+
+ InlineInfo GetInlineInfoOf(StackMap<T> stack_map) {
+ uint32_t offset = stack_map.GetInlineDescriptorOffset();
+ uint8_t depth = region_.Load<uint8_t>(offset);
+ return InlineInfo(region_.Subregion(offset,
+ InlineInfo::kFixedSize + depth * InlineInfo::SingleEntrySize()));
+ }
+
+ StackMap<T> GetStackMapForDexPc(uint32_t dex_pc) {
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap<T> stack_map = GetStackMapAt(i);
+ if (stack_map.GetDexPc() == dex_pc) {
+ return stack_map;
+ }
+ }
+ LOG(FATAL) << "Unreachable";
+ return StackMap<T>(MemoryRegion());
+ }
+
+ StackMap<T> GetStackMapForNativePc(T native_pc) {
+ // TODO: stack maps are sorted by native pc, we can do a binary search.
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap<T> stack_map = GetStackMapAt(i);
+ if (stack_map.GetNativePc() == native_pc) {
+ return stack_map;
+ }
+ }
+ LOG(FATAL) << "Unreachable";
+ return StackMap<T>(MemoryRegion());
+ }
+
+ private:
+ static constexpr int kNumberOfStackMapsOffset = 0;
+ static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t);
+ static constexpr int kFixedSize = kStackMaskSizeOffset + sizeof(uint32_t);
+
+ MemoryRegion GetStackMaps() const {
+ return region_.size() == 0
+ ? MemoryRegion()
+ : region_.Subregion(kFixedSize, StackMapSize() * GetNumberOfStackMaps());
+ }
+
+ MemoryRegion region_;
+ template<typename U> friend class StackMapStream;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_STACK_MAP_H_
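Editor's note: as a rough illustration of the layout documented in stack_map.h, here is a hedged, self-contained toy that encodes and walks a [number_of_stack_maps, stack_mask_size, entries...] buffer with plain memcpy. It deliberately uses a reduced two-field entry rather than the real StackMap fields, so it only demonstrates how a fixed-size header plus fixed-size records is addressed by index:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Toy record; real stack maps also carry map/inline offsets and masks.
struct ToyStackMap { uint32_t dex_pc; uint32_t native_pc; };

std::vector<uint8_t> Encode(const std::vector<ToyStackMap>& maps) {
  std::vector<uint8_t> buf(2 * sizeof(uint32_t) + maps.size() * sizeof(ToyStackMap));
  uint32_t n = static_cast<uint32_t>(maps.size());
  uint32_t stack_mask_size = 0;  // No stack mask in this toy format.
  std::memcpy(buf.data(), &n, sizeof(n));
  std::memcpy(buf.data() + 4, &stack_mask_size, sizeof(stack_mask_size));
  std::memcpy(buf.data() + 8, maps.data(), maps.size() * sizeof(ToyStackMap));
  return buf;
}

ToyStackMap GetStackMapForDexPc(const std::vector<uint8_t>& buf, uint32_t dex_pc) {
  uint32_t n;
  std::memcpy(&n, buf.data(), sizeof(n));
  for (uint32_t i = 0; i < n; ++i) {
    ToyStackMap map;
    std::memcpy(&map, buf.data() + 8 + i * sizeof(ToyStackMap), sizeof(map));
    if (map.dex_pc == dex_pc) return map;
  }
  return ToyStackMap{0, 0};  // Not found; the real code logs FATAL here.
}

int main() {
  std::vector<uint8_t> buf = Encode({{0, 0x10}, {3, 0x24}, {7, 0x40}});
  assert(GetStackMapForDexPc(buf, 3).native_pc == 0x24);
  return 0;
}

The real CodeInfo additionally resolves variable-size DexRegisterMap and InlineInfo records through the offsets stored in each stack map.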
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 38f1307..bd399e7 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -24,7 +24,7 @@
#include "base/casts.h"
#include "base/mutex-inl.h"
#include "gc/heap.h"
-#include "jni_internal.h"
+#include "jni_env_ext.h"
namespace art {
@@ -57,26 +57,24 @@
}
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
-#ifdef NDEBUG
- UNUSED(check_locks); // Keep GCC happy about unused parameters.
-#else
- CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
- if (check_locks) {
- bool bad_mutexes_held = false;
- for (int i = kLockLevelCount - 1; i >= 0; --i) {
- // We expect no locks except the mutator_lock_.
- if (i != kMutatorLock) {
- BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
- LOG(ERROR) << "holding \"" << held_mutex->GetName()
- << "\" at point where thread suspension is expected";
- bad_mutexes_held = true;
+ if (kIsDebugBuild) {
+ CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ if (check_locks) {
+ bool bad_mutexes_held = false;
+ for (int i = kLockLevelCount - 1; i >= 0; --i) {
+ // We expect no locks except the mutator_lock_ or thread list suspend thread lock.
+ if (i != kMutatorLock && i != kThreadListSuspendThreadLock) {
+ BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
+ if (held_mutex != NULL) {
+ LOG(ERROR) << "holding \"" << held_mutex->GetName()
+ << "\" at point where thread suspension is expected";
+ bad_mutexes_held = true;
+ }
}
}
+ CHECK(!bad_mutexes_held);
}
- CHECK(!bad_mutexes_held);
}
-#endif
}
inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
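Editor's note: the rewrite above swaps an #ifdef NDEBUG block for an if (kIsDebugBuild) guard, so the checks are always parsed and type-checked while the optimizer drops them from release builds. A standalone sketch of the idiom (the constant here is a local stand-in derived from NDEBUG, not ART's own definition):

#include <cstdio>

#ifndef NDEBUG
static constexpr bool kIsDebugBuild = true;
#else
static constexpr bool kIsDebugBuild = false;
#endif

void AssertNoLocksHeld(int locks_held) {
  // Always compiled, but folded away entirely when kIsDebugBuild is false.
  if (kIsDebugBuild) {
    if (locks_held != 0) {
      fprintf(stderr, "holding %d locks where none are expected\n", locks_held);
    }
  }
}

int main() {
  AssertNoLocksHeld(0);
  return 0;
}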
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9fa158d..8e6da74 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -76,6 +76,8 @@
bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
+const size_t Thread::kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
+ GetStackOverflowReservedBytes(kRuntimeISA);
static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
@@ -219,7 +221,7 @@
// It's likely that callers are trying to ensure they have at least a certain amount of
// stack space, so we should add our reserved space on top of what they requested, rather
// than implicitly take it away from them.
- stack_size += kRuntimeStackOverflowReservedBytes;
+ stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
} else {
// If we are going to use implicit stack checks, allocate space for the protected
// region at the bottom of the stack.
@@ -232,47 +234,95 @@
return stack_size;
}
+// Global variable to prevent the compiler optimizing away the page reads for the stack.
+byte dont_optimize_this;
+
// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_end_. Just below that
// is the StackOverflow reserved region used when creating the StackOverflow
// exception.
+//
+// There is a little complexity here that deserves a special mention. When running on the
+// host (glibc), the process's main thread's stack is allocated with a special flag
+// to prevent memory being allocated when it's not needed. This flag makes the
+// kernel only allocate memory for the stack by growing down in memory. Because we
+// want to put an mprotected region far away from that at the stack top, we need
+// to make sure the pages for the stack are mapped in before we call mprotect. We do
+// this by reading every page from the stack bottom (highest address) to the stack top.
+// We then madvise this away.
void Thread::InstallImplicitProtection(bool is_main_stack) {
byte* pregion = tlsPtr_.stack_end;
+ byte* stack_lowmem = tlsPtr_.stack_begin;
+ byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&pregion) &
+ ~(kPageSize - 1)); // Page containing current top of stack.
+ const bool running_on_intel = (kRuntimeISA == kX86) || (kRuntimeISA == kX86_64);
+
+ if (running_on_intel) {
+ // On Intel, we need to map in the main stack. This must be done by reading from the
+ // current stack pointer downwards as the stack is mapped using VM_GROWSDOWN
+ // in the kernel. Any access more than a page below the current SP will cause
+ // a segv.
+ if (is_main_stack) {
+ // First we need to unprotect the protected region because this may
+ // be called more than once for a particular stack and we will crash
+ // if we try to read the protected page.
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_READ);
+
+ // Read every page from the high address to the low.
+ for (byte* p = stack_top; p > stack_lowmem; p -= kPageSize) {
+ dont_optimize_this = *p;
+ }
+ }
+ }
+
+ // Check and place a marker word at the lowest usable address in the stack. This
+ // is used to prevent a double protection.
constexpr uint32_t kMarker = 0xdadadada;
uintptr_t *marker = reinterpret_cast<uintptr_t*>(pregion);
if (*marker == kMarker) {
- // The region has already been set up.
+ // The region has already been set up. But on the main stack on the host we have
+ // removed the protected region in order to read the stack memory. We need to put
+ // this back again.
+ if (is_main_stack && running_on_intel) {
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_NONE);
+ madvise(stack_lowmem, stack_top - stack_lowmem, MADV_DONTNEED);
+ }
return;
}
// Add marker so that we can detect a second attempt to do this.
*marker = kMarker;
- pregion -= kStackOverflowProtectedSize;
-
- // Touch the pages in the region to map them in. Otherwise mprotect fails. Only
- // need to do this on the main stack. We only need to touch one byte per page.
- if (is_main_stack) {
- byte* start = pregion;
- byte* end = pregion + kStackOverflowProtectedSize;
- while (start < end) {
- *start = static_cast<byte>(0);
- start += kPageSize;
+ if (!running_on_intel) {
+ // Running on !Intel, stacks are mapped cleanly. The protected region for the
+ // main stack just needs to be mapped in. We do this by writing one byte per page.
+ for (byte* p = pregion - kStackOverflowProtectedSize; p < pregion; p += kPageSize) {
+ *p = 0;
}
}
+ pregion -= kStackOverflowProtectedSize;
+
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
+
if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. Reason:"
- << strerror(errno);
+ << strerror(errno) << kStackOverflowProtectedSize;
}
// Tell the kernel that we won't be needing these pages any more.
+ // NB. madvise will probably write zeroes into the memory (on linux it does).
if (is_main_stack) {
- madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ if (running_on_intel) {
+ // On the host, it's the whole stack (minus a page to prevent overwrite of stack top).
+ madvise(stack_lowmem, stack_top - stack_lowmem - kPageSize, MADV_DONTNEED);
+ } else {
+ // On Android, just the protected region.
+ madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ }
}
}
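Editor's note: a minimal, self-contained sketch of the protection technique used above, applied to an anonymous mapping rather than a real thread stack (assumed example; the sizes are placeholders for kStackOverflowProtectedSize): touch one byte per page so the pages are backed, mprotect the lowest pages as a guard, then madvise the touched pages away.

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t size = 64 * page;
  const size_t guard = 4 * page;  // Stand-in for kStackOverflowProtectedSize.
  void* raw = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) { perror("mmap"); return 1; }
  char* base = static_cast<char*>(raw);
  // Touch one byte per page, highest address first, to force the pages in.
  volatile char sink = 0;
  for (char* p = base + size - page; p >= base; p -= page) {
    sink = *p;
  }
  (void)sink;
  // Install the guard: any access to these pages now faults with SIGSEGV.
  if (mprotect(base, guard, PROT_NONE) != 0) { perror("mprotect"); return 1; }
  // Tell the kernel the touched pages are not needed right now.
  madvise(base + guard, size - guard, MADV_DONTNEED);
  printf("guard of %zu bytes installed at %p\n", guard, static_cast<void*>(base));
  munmap(base, size);
  return 0;
}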
@@ -488,7 +538,7 @@
tlsPtr_.stack_begin = reinterpret_cast<byte*>(read_stack_base);
tlsPtr_.stack_size = read_stack_size;
- if (read_stack_size <= kRuntimeStackOverflowReservedBytes) {
+ if (read_stack_size <= GetStackOverflowReservedBytes(kRuntimeISA)) {
LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
<< " bytes)";
}
@@ -533,13 +583,17 @@
// Install the protected region if we are doing implicit overflow checks.
if (implicit_stack_check) {
if (is_main_thread) {
- // The main thread has a 16K protected region at the bottom. We need
+ size_t guardsize;
+ pthread_attr_t attributes;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, &guardsize), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), "guard size query");
+ // The main thread might have a protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
- constexpr uint32_t kDelta = 16 * KB;
- tlsPtr_.stack_begin += kDelta;
- tlsPtr_.stack_end += kDelta;
- tlsPtr_.stack_size -= kDelta;
+ tlsPtr_.stack_begin += guardsize;
+ tlsPtr_.stack_end += guardsize;
+ tlsPtr_.stack_size -= guardsize;
}
InstallImplicitProtection(is_main_thread);
}
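Editor's note: for reference, querying the default pthread guard size that now replaces the hard-coded 16 KB delta looks roughly like this (standalone sketch of the same pthread_attr_* calls):

#include <pthread.h>
#include <cstdio>

int main() {
  pthread_attr_t attributes;
  size_t guardsize = 0;
  if (pthread_attr_init(&attributes) != 0) return 1;
  // Default attributes carry the platform's default guard size (often one page).
  pthread_attr_getguardsize(&attributes, &guardsize);
  pthread_attr_destroy(&attributes);
  printf("default pthread guard size: %zu bytes\n", guardsize);
  return 0;
}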
@@ -1086,7 +1140,7 @@
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
mirror::Throwable* exception = GetException(nullptr);
- LOG(FATAL) << "Throwing new exception " << msg << " with unexpected pending exception: "
+ LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
<< exception->Dump();
}
}
@@ -1109,6 +1163,21 @@
Thread* self = this;
DCHECK_EQ(self, Thread::Current());
+ if (tlsPtr_.jni_env != nullptr) {
+ // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
+ tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, 0, kRootVMInternal);
+ // Release locally held global references which releasing may require the mutator lock.
+ if (tlsPtr_.jpeer != nullptr) {
+ // If pthread_create fails we don't have a jni env here.
+ tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
+ tlsPtr_.jpeer = nullptr;
+ }
+ if (tlsPtr_.class_loader_override != nullptr) {
+ tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
+ tlsPtr_.class_loader_override = nullptr;
+ }
+ }
+
if (tlsPtr_.opeer != nullptr) {
ScopedObjectAccess soa(self);
// We may need to call user-supplied managed code, do this before final clean-up.
@@ -1136,22 +1205,16 @@
ObjectLock<mirror::Object> locker(self, h_obj);
locker.NotifyAll();
}
+ tlsPtr_.opeer = nullptr;
}
- // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
- if (tlsPtr_.jni_env != nullptr) {
- tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, 0, kRootVMInternal);
- }
+ Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
}
Thread::~Thread() {
- if (tlsPtr_.jni_env != nullptr && tlsPtr_.jpeer != nullptr) {
- // If pthread_create fails we don't have a jni env here.
- tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
- tlsPtr_.jpeer = nullptr;
- }
- tlsPtr_.opeer = nullptr;
-
+ CHECK(tlsPtr_.class_loader_override == nullptr);
+ CHECK(tlsPtr_.jpeer == nullptr);
+ CHECK(tlsPtr_.opeer == nullptr);
bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run?
if (initialized) {
delete tlsPtr_.jni_env;
@@ -1183,7 +1246,7 @@
delete tlsPtr_.name;
delete tlsPtr_.stack_trace_sample;
- Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
+ Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
TearDownAlternateSignalStack();
}
@@ -1293,11 +1356,10 @@
result = kInvalidIndirectRefObject;
}
} else if (kind == kGlobal) {
- JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
- result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
+ result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
} else {
DCHECK_EQ(kind, kWeakGlobal);
- result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
+ result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
if (result == kClearedJniWeakGlobal) {
// This is a special case where it's okay to return nullptr.
return nullptr;
@@ -1305,7 +1367,8 @@
}
if (UNLIKELY(result == nullptr)) {
- JniAbortF(nullptr, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
+ tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
+ ToStr<IndirectRefKind>(kind).c_str(), obj);
}
return result;
}
@@ -1345,6 +1408,13 @@
}
}
+void Thread::SetClassLoaderOverride(jobject class_loader_override) {
+ if (tlsPtr_.class_loader_override != nullptr) {
+ GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
+ }
+ tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
+}
+
class CountStackDepthVisitor : public StackVisitor {
public:
explicit CountStackDepthVisitor(Thread* thread)
@@ -1879,6 +1949,8 @@
QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
+ QUICK_ENTRY_POINT_INFO(pA64Load)
+ QUICK_ENTRY_POINT_INFO(pA64Store)
#undef QUICK_ENTRY_POINT_INFO
os << offset;
@@ -1916,10 +1988,13 @@
return result;
}
+// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
+// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, Context* context)
+ CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0) {}
+ : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0),
+ abort_on_error_(abort_on_error) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
@@ -1930,16 +2005,17 @@
this_object_ = GetThisObject();
}
method_ = m;
- dex_pc_ = GetDexPc();
+ dex_pc_ = GetDexPc(abort_on_error_);
return false;
}
mirror::Object* this_object_;
mirror::ArtMethod* method_;
uint32_t dex_pc_;
+ const bool abort_on_error_;
};
-mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr);
+mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
@@ -1949,7 +2025,7 @@
ThrowLocation Thread::GetCurrentLocationForThrow() {
Context* context = GetLongJumpContext();
- CurrentMethodVisitor visitor(this, context);
+ CurrentMethodVisitor visitor(this, context, true);
visitor.WalkStack(false);
ReleaseLongJumpContext(context);
return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
@@ -2113,11 +2189,6 @@
const uint32_t tid_;
};
-void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
- VerifyObject(class_loader_override);
- tlsPtr_.class_loader_override = class_loader_override;
-}
-
void Thread::VisitRoots(RootCallback* visitor, void* arg) {
uint32_t thread_id = GetThreadId();
if (tlsPtr_.opeer != nullptr) {
@@ -2127,10 +2198,6 @@
visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg, thread_id, kRootNativeStack);
}
tlsPtr_.throw_location.VisitRoots(visitor, arg);
- if (tlsPtr_.class_loader_override != nullptr) {
- visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.class_loader_override), arg, thread_id,
- kRootNativeStack);
- }
if (tlsPtr_.monitor_enter_object != nullptr) {
visitor(&tlsPtr_.monitor_enter_object, arg, thread_id, kRootNativeStack);
}
@@ -2193,7 +2260,7 @@
if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
// However, we seem to have already extended to use the full stack.
LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
- << kRuntimeStackOverflowReservedBytes << ")?";
+ << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
DumpStack(LOG(ERROR));
LOG(FATAL) << "Recursive stack overflow.";
}
diff --git a/runtime/thread.h b/runtime/thread.h
index d08c2fc..c2b200b 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -104,8 +104,7 @@
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
- static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
- kRuntimeStackOverflowReservedBytes;
+ static const size_t kStackOverflowImplicitCheckSize;
// Creates a new native thread corresponding to the given managed peer.
// Used to implement Thread.start.
@@ -323,7 +322,9 @@
tlsPtr_.long_jump_context = context;
}
- mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
+ // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
+ // abort the runtime iff abort_on_error is true.
+ mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -430,12 +431,11 @@
tlsPtr_.wait_next = next;
}
- mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jobject GetClassLoaderOverride() {
return tlsPtr_.class_loader_override;
}
- void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClassLoaderOverride(jobject class_loader_override);
// Create the internal representation of a stack trace, that is more time
// and space efficient to compute than the StackTraceElement[].
@@ -551,6 +551,16 @@
return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
}
+ byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
+ if (implicit_overflow_check) {
+ // The interpreter needs the extra overflow bytes that stack_end does
+ // not include.
+ return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
+ } else {
+ return tlsPtr_.stack_end;
+ }
+ }
+
byte* GetStackEnd() const {
return tlsPtr_.stack_end;
}
@@ -567,7 +577,7 @@
// overflow region.
tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
} else {
- tlsPtr_.stack_end = tlsPtr_.stack_begin + kRuntimeStackOverflowReservedBytes;
+ tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
}
}
@@ -1029,7 +1039,7 @@
// Needed to get the right ClassLoader in JNI_OnLoad, but also
// useful for testing.
- mirror::ClassLoader* class_loader_override;
+ jobject class_loader_override;
// Thread local, lazily allocated, long jump context. Used to deliver exceptions.
Context* long_jump_context;
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index ee66ccc..9aacb30 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -32,11 +32,18 @@
}
}
+// The default SIGSTKSZ on Linux is 8K. If we do any logging in a signal
+// handler, this is too small. We allocate 16K instead.
+static constexpr int kHostAltSigStackSize = 16*1024; // 16K signal stack.
+
void Thread::SetUpAlternateSignalStack() {
// Create and set an alternate signal stack.
+#ifdef HAVE_ANDROID_OS
+ LOG(FATAL) << "Invalid use of alternate signal stack on Android";
+#endif
stack_t ss;
- ss.ss_sp = new uint8_t[SIGSTKSZ];
- ss.ss_size = SIGSTKSZ;
+ ss.ss_sp = new uint8_t[kHostAltSigStackSize];
+ ss.ss_size = kHostAltSigStackSize;
ss.ss_flags = 0;
CHECK(ss.ss_sp != NULL);
SigAltStack(&ss, NULL);
@@ -56,7 +63,7 @@
// Tell the kernel to stop using it.
ss.ss_sp = NULL;
ss.ss_flags = SS_DISABLE;
- ss.ss_size = SIGSTKSZ; // Avoid ENOMEM failure with Mac OS' buggy libc.
+ ss.ss_size = kHostAltSigStackSize; // Avoid ENOMEM failure with Mac OS' buggy libc.
SigAltStack(&ss, NULL);
// Free it.
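Editor's note: a standalone sketch of the sigaltstack registration being resized here, using the same 16 KB figure (assumed example; error handling reduced to perror):

#include <csignal>
#include <cstdio>
#include <cstdlib>

int main() {
  constexpr int kAltStackSize = 16 * 1024;  // Larger than the default SIGSTKSZ.
  stack_t ss;
  ss.ss_sp = malloc(kAltStackSize);
  ss.ss_size = kAltStackSize;
  ss.ss_flags = 0;
  if (ss.ss_sp == nullptr || sigaltstack(&ss, nullptr) != 0) {
    perror("sigaltstack");
    return 1;
  }
  printf("alternate signal stack installed (%d bytes)\n", kAltStackSize);
  return 0;
}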
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index b649b62..5077a89 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -170,16 +170,7 @@
// individual thread requires polling. delay_us is the requested sleep and total_delay_us
// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
// subsequent sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us,
- bool holding_locks) {
- if (!holding_locks) {
- for (int i = kLockLevelCount - 1; i >= 0; --i) {
- BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
- LOG(FATAL) << "Holding " << held_mutex->GetName() << " while sleeping for thread suspension";
- }
- }
- }
+static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
useconds_t new_delay_us = (*delay_us) * 2;
CHECK_GE(new_delay_us, *delay_us);
if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
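Editor's note: the backoff in ThreadSuspendSleep follows a simple double-and-cap pattern. A hedged standalone sketch of that pattern (the iteration count and starting delay here are illustrative, not the runtime's values):

#include <unistd.h>
#include <cstdio>

int main() {
  useconds_t delay_us = 100;
  useconds_t total_delay_us = 0;
  for (int attempt = 0; attempt < 10; ++attempt) {
    useconds_t new_delay_us = delay_us * 2;
    if (new_delay_us < 500000) {  // Never sleep longer than 0.5 s per step.
      delay_us = new_delay_us;
    }
    usleep(delay_us);
    total_delay_us += delay_us;
  }
  printf("slept %u us in total\n", static_cast<unsigned>(total_delay_us));
  return 0;
}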
@@ -244,7 +235,7 @@
useconds_t total_delay_us = 0;
do {
useconds_t delay_us = 100;
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, true);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
} while (!thread->IsSuspended());
// Shouldn't need to wait for longer than 1000 microseconds.
constexpr useconds_t kLongWaitThresholdUS = 1000;
@@ -303,16 +294,19 @@
void ThreadList::SuspendAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " SuspendAll starting...";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll starting...";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll starting...";
+ }
ATRACE_BEGIN("Suspending mutator threads");
uint64_t start_time = NanoTime();
Locks::mutator_lock_->AssertNotHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking) {
+ if (kDebugLocking && self != nullptr) {
CHECK_NE(self->GetState(), kRunnable);
}
{
@@ -353,14 +347,21 @@
ATRACE_END();
ATRACE_BEGIN("Mutator threads suspended");
- VLOG(threads) << *self << " SuspendAll complete";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll complete";
+ }
}
void ThreadList::ResumeAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " ResumeAll starting";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll starting";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll starting";
+ }
ATRACE_END();
ATRACE_BEGIN("Resuming mutator threads");
@@ -386,11 +387,20 @@
// Broadcast a notification to all suspended threads, some or all of
// which may choose to wake up. No need to wait for them.
- VLOG(threads) << *self << " ResumeAll waking others";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll waking others";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll waking others";
+ }
Thread::resume_cond_->Broadcast(self);
}
ATRACE_END();
- VLOG(threads) << *self << " ResumeAll complete";
+
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll complete";
+ }
}
void ThreadList::Resume(Thread* thread, bool for_debugger) {
@@ -444,6 +454,11 @@
while (true) {
Thread* thread;
{
+ // Note: this will transition to runnable and potentially suspend. We ensure only one thread
+ // is requesting another suspend, to avoid deadlock, by requiring this function be called
+ // holding Locks::thread_list_suspend_thread_lock_. It's important that this thread suspend
+ // rather than request thread suspension, to avoid potential cycles of threads requesting
+ // suspension of each other.
ScopedObjectAccess soa(self);
MutexLock mu(self, *Locks::thread_list_lock_);
thread = Thread::FromManagedThread(soa, peer);
@@ -483,7 +498,7 @@
}
// Release locks and come out of runnable state.
}
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
}
}
@@ -502,9 +517,14 @@
CHECK_NE(thread_id, kInvalidThreadId);
while (true) {
{
- Thread* thread = NULL;
+ // Note: this will transition to runnable and potentially suspend. We ensure only one thread
+ // is requesting another suspend, to avoid deadlock, by requiring this function be called
+ // holding Locks::thread_list_suspend_thread_lock_. It's important that this thread suspend
+ // rather than request thread suspension, to avoid potential cycles of threads requesting
+ // suspension of each other.
ScopedObjectAccess soa(self);
MutexLock mu(self, *Locks::thread_list_lock_);
+ Thread* thread = nullptr;
for (const auto& it : list_) {
if (it->GetThreadId() == thread_id) {
thread = it;
@@ -550,7 +570,7 @@
}
// Release locks and come out of runnable state.
}
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
}
}
@@ -781,6 +801,8 @@
void ThreadList::Unregister(Thread* self) {
DCHECK_EQ(self, Thread::Current());
+ CHECK_NE(self->GetState(), kRunnable);
+ Locks::mutator_lock_->AssertNotHeld(self);
VLOG(threads) << "ThreadList::Unregister() " << *self;
@@ -795,14 +817,18 @@
// Note: deliberately not using MutexLock that could hold a stale self pointer.
Locks::thread_list_lock_->ExclusiveLock(self);
CHECK(Contains(self));
- // Note: we don't take the thread_suspend_count_lock_ here as to be suspending a thread other
- // than yourself you need to hold the thread_list_lock_ (see Thread::ModifySuspendCount).
+ Locks::thread_suspend_count_lock_->ExclusiveLock(self);
+ bool removed = false;
if (!self->IsSuspended()) {
list_.remove(self);
+ removed = true;
+ }
+ Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+ if (removed) {
delete self;
self = nullptr;
}
- Locks::thread_list_lock_->ExclusiveUnlock(self);
}
// Release the thread ID after the thread is finished and deleted to avoid cases where we can
// temporarily have multiple threads with the same thread id. When this occurs, it causes
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index d46987a..1b67ac0 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,6 +68,7 @@
// is set to true.
static Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -77,6 +78,7 @@
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index f51b8c4..ca5e150 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -178,29 +178,29 @@
(clock_source_ == kTraceClockSourceDual);
}
-static void MeasureClockOverhead(Trace* trace) {
- if (trace->UseThreadCpuClock()) {
+void Trace::MeasureClockOverhead() {
+ if (UseThreadCpuClock()) {
Thread::Current()->GetCpuMicroTime();
}
- if (trace->UseWallClock()) {
+ if (UseWallClock()) {
MicroTime();
}
}
// Compute an average time taken to measure clocks.
-static uint32_t GetClockOverheadNanoSeconds(Trace* trace) {
+uint32_t Trace::GetClockOverheadNanoSeconds() {
Thread* self = Thread::Current();
uint64_t start = self->GetCpuMicroTime();
for (int i = 4000; i > 0; i--) {
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
}
uint64_t elapsed_us = self->GetCpuMicroTime() - start;
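Editor's note: the overhead measurement above times a large batch of clock reads and averages. A hedged standalone sketch of the same idea using std::chrono instead of the thread CPU clock (the divisor mirrors the 4000 x 8 unrolled calls, not ART's exact arithmetic):

#include <chrono>
#include <cstdio>

int main() {
  constexpr int kIterations = 4000 * 8;
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < kIterations; ++i) {
    (void)std::chrono::steady_clock::now();  // The call whose cost we measure.
  }
  auto elapsed = std::chrono::steady_clock::now() - start;
  auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
  printf("~%lld ns per clock read\n", static_cast<long long>(ns / kIterations));
  return 0;
}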
@@ -444,7 +444,8 @@
Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
: trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
- buffer_size_(buffer_size), start_time_(MicroTime()), cur_offset_(0), overflow_(false) {
+ buffer_size_(buffer_size), start_time_(MicroTime()),
+ clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
// Set up the beginning of the trace.
uint16_t trace_version = GetTraceVersion(clock_source_);
memset(buf_.get(), 0, kTraceHeaderLength);
@@ -480,7 +481,6 @@
uint64_t elapsed = MicroTime() - start_time_;
size_t final_offset = cur_offset_.LoadRelaxed();
- uint32_t clock_overhead_ns = GetClockOverheadNanoSeconds(this);
if ((flags_ & kTraceCountAllocs) != 0) {
Runtime::Current()->SetStatsEnabled(false);
@@ -506,7 +506,7 @@
os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
os << StringPrintf("num-method-calls=%zd\n", num_records);
- os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns);
+ os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
if ((flags_ & kTraceCountAllocs) != 0) {
os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
diff --git a/runtime/trace.h b/runtime/trace.h
index d7836b8..45a02da 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -68,6 +68,8 @@
bool UseWallClock();
bool UseThreadCpuClock();
+ void MeasureClockOverhead();
+ uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<mirror::ArtMethod*>* stack_trace)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -155,6 +157,9 @@
// Time trace was created.
const uint64_t start_time_;
+ // Clock overhead.
+ const uint32_t clock_overhead_ns_;
+
// Offset into buf_.
AtomicInteger cur_offset_;
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 8b1ad39..48d6cdf 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -281,6 +281,11 @@
return result;
}
+std::string PrettyDescriptor(Primitive::Type type) {
+ std::string descriptor_string(Primitive::Descriptor(type));
+ return PrettyDescriptor(descriptor_string);
+}
+
std::string PrettyField(mirror::ArtField* f, bool with_type) {
if (f == NULL) {
return "null";
@@ -1154,22 +1159,55 @@
}
const char* GetAndroidData() {
+ std::string error_msg;
+ const char* dir = GetAndroidDataSafe(&error_msg);
+ if (dir != nullptr) {
+ return dir;
+ } else {
+ LOG(FATAL) << error_msg;
+ return "";
+ }
+}
+
+const char* GetAndroidDataSafe(std::string* error_msg) {
const char* android_data = getenv("ANDROID_DATA");
if (android_data == NULL) {
if (OS::DirectoryExists("/data")) {
android_data = "/data";
} else {
- LOG(FATAL) << "ANDROID_DATA not set and /data does not exist";
- return "";
+ *error_msg = "ANDROID_DATA not set and /data does not exist";
+ return nullptr;
}
}
if (!OS::DirectoryExists(android_data)) {
- LOG(FATAL) << "Failed to find ANDROID_DATA directory " << android_data;
- return "";
+ *error_msg = StringPrintf("Failed to find ANDROID_DATA directory %s", android_data);
+ return nullptr;
}
return android_data;
}
+void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists) {
+ CHECK(subdir != nullptr);
+ std::string error_msg;
+ const char* android_data = GetAndroidDataSafe(&error_msg);
+ if (android_data == nullptr) {
+ *have_android_data = false;
+ *dalvik_cache_exists = false;
+ return;
+ } else {
+ *have_android_data = true;
+ }
+ const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
+ *dalvik_cache = dalvik_cache_root + subdir;
+ *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
+ if (create_if_absent && !*dalvik_cache_exists && strcmp(android_data, "/data") != 0) {
+ // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
+ *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
+ (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
+ }
+}
+
std::string GetDalvikCacheOrDie(const char* subdir, const bool create_if_absent) {
CHECK(subdir != nullptr);
const char* android_data = GetAndroidData();
@@ -1196,17 +1234,29 @@
return dalvik_cache;
}
-std::string GetDalvikCacheFilenameOrDie(const char* location, const char* cache_location) {
+bool GetDalvikCacheFilename(const char* location, const char* cache_location,
+ std::string* filename, std::string* error_msg) {
if (location[0] != '/') {
- LOG(FATAL) << "Expected path in location to be absolute: "<< location;
+ *error_msg = StringPrintf("Expected path in location to be absolute: %s", location);
+ return false;
}
std::string cache_file(&location[1]); // skip leading slash
- if (!EndsWith(location, ".dex") && !EndsWith(location, ".art")) {
+ if (!EndsWith(location, ".dex") && !EndsWith(location, ".art") && !EndsWith(location, ".oat")) {
cache_file += "/";
cache_file += DexFile::kClassesDex;
}
std::replace(cache_file.begin(), cache_file.end(), '/', '@');
- return StringPrintf("%s/%s", cache_location, cache_file.c_str());
+ *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
+ return true;
+}
+
+std::string GetDalvikCacheFilenameOrDie(const char* location, const char* cache_location) {
+ std::string ret;
+ std::string error_msg;
+ if (!GetDalvikCacheFilename(location, cache_location, &ret, &error_msg)) {
+ LOG(FATAL) << error_msg;
+ }
+ return ret;
}
static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
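Editor's note: the filename transformation implemented in GetDalvikCacheFilename reduces to dropping the leading slash and flattening the path with '@'. A standalone sketch (the helper name is made up; the output matches the boot.oat expectation added to utils_test.cc below):

#include <algorithm>
#include <cstdio>
#include <string>

std::string ToDalvikCacheName(const std::string& location, const std::string& cache_dir) {
  std::string cache_file = location.substr(1);  // Skip the leading '/'.
  std::replace(cache_file.begin(), cache_file.end(), '/', '@');
  return cache_dir + "/" + cache_file;
}

int main() {
  printf("%s\n", ToDalvikCacheName("/system/framework/boot.oat", "/foo").c_str());
  // Prints: /foo/system@framework@boot.oat
  return 0;
}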
@@ -1309,4 +1359,29 @@
return true;
}
+void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
+ size_t encoded_size = UnsignedLeb128Size(data);
+ size_t cur_index = dst->size();
+ dst->resize(dst->size() + encoded_size);
+ uint8_t* write_pos = &((*dst)[cur_index]);
+ uint8_t* write_pos_after = EncodeUnsignedLeb128(write_pos, data);
+ DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+}
+
+void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* dst) {
+ size_t encoded_size = SignedLeb128Size(data);
+ size_t cur_index = dst->size();
+ dst->resize(dst->size() + encoded_size);
+ uint8_t* write_pos = &((*dst)[cur_index]);
+ uint8_t* write_pos_after = EncodeSignedLeb128(write_pos, data);
+ DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+}
+
+void PushWord(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+ buf->push_back((data >> 16) & 0xff);
+ buf->push_back((data >> 24) & 0xff);
+}
+
} // namespace art
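Editor's note: for context on the new EncodeUnsignedLeb128 overload, unsigned LEB128 packs 7 payload bits per byte with the top bit marking continuation. A standalone sketch of that encoding into a std::vector<uint8_t> (the real helper delegates to the pointer-based encoder in leb128.h; this version is self-contained for illustration):

#include <cstdint>
#include <cstdio>
#include <vector>

void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
  do {
    uint8_t byte = data & 0x7f;  // Low 7 bits of the remaining value.
    data >>= 7;
    if (data != 0) byte |= 0x80;  // More bytes follow.
    dst->push_back(byte);
  } while (data != 0);
}

int main() {
  std::vector<uint8_t> buf;
  EncodeUnsignedLeb128(624485, &buf);  // Classic example: encodes as E5 8E 26.
  for (uint8_t b : buf) printf("%02X ", b);
  printf("\n");
  return 0;
}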
diff --git a/runtime/utils.h b/runtime/utils.h
index 2cb3af7..f6773be 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -24,13 +24,10 @@
#include <vector>
#include "base/logging.h"
+#include "base/mutex.h"
#include "globals.h"
#include "instruction_set.h"
-#include "base/mutex.h"
-
-#ifdef HAVE_ANDROID_OS
-#include "cutils/properties.h"
-#endif
+#include "primitive.h"
namespace art {
@@ -167,8 +164,7 @@
// For rounding integers.
template<typename T>
-static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
@@ -178,8 +174,7 @@
}
template<typename T>
-static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
@@ -188,7 +183,7 @@
// For aligning pointers.
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignDown(T* x, uintptr_t n) {
@@ -196,7 +191,7 @@
}
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignUp(T* x, uintptr_t n) {
@@ -281,6 +276,7 @@
std::string PrettyDescriptor(const std::string& descriptor);
std::string PrettyDescriptor(mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+std::string PrettyDescriptor(Primitive::Type type);
// Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
// "int a.b.C.f" (depending on the value of 'with_type').
@@ -441,11 +437,22 @@
// Find $ANDROID_DATA, /data, or abort.
const char* GetAndroidData();
+// Find $ANDROID_DATA, /data, or return nullptr.
+const char* GetAndroidDataSafe(std::string* error_msg);
// Returns the dalvik-cache location, or dies trying. subdir will be
// appended to the cache location.
std::string GetDalvikCacheOrDie(const char* subdir, bool create_if_absent = true);
+// Find the dalvik cache for subdir, storing the result in the dalvik_cache argument.
+// have_android_data will be set to true if we have an ANDROID_DATA that exists,
+// dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
+void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists);
+// On success, returns true and stores in filename the absolute dalvik-cache path for a DexFile
+// or OatFile, rooted at cache_location. On failure, returns false and sets error_msg.
+bool GetDalvikCacheFilename(const char* file_location, const char* cache_location,
+ std::string* filename, std::string* error_msg);
// Returns the absolute dalvik-cache path for a DexFile or OatFile, or
// dies trying. The path returned will be rooted at cache_location.
std::string GetDalvikCacheFilenameOrDie(const char* file_location,
@@ -488,6 +495,11 @@
}
};
+void PushWord(std::vector<uint8_t>* buf, int32_t data);
+
+void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* buf);
+void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* buf);
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 7cd5980..d6c90e1 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -350,6 +350,8 @@
GetDalvikCacheFilenameOrDie("/system/framework/core.jar", "/foo").c_str());
EXPECT_STREQ("/foo/system@framework@boot.art",
GetDalvikCacheFilenameOrDie("/system/framework/boot.art", "/foo").c_str());
+ EXPECT_STREQ("/foo/system@framework@boot.oat",
+ GetDalvikCacheFilenameOrDie("/system/framework/boot.oat", "/foo").c_str());
}
TEST_F(UtilsTest, GetSystemImageFilename) {
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 62ecf4b..d4fe106 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -66,9 +66,9 @@
return !failure_messages_.empty();
}
-inline const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
+inline RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
DCHECK(!HasFailures());
- const RegType& result = ResolveClassAndCheckAccess(class_idx);
+ RegType& result = ResolveClassAndCheckAccess(class_idx);
DCHECK(!HasFailures());
return result;
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index a1b86e0..329b4dc 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -30,7 +30,7 @@
#include "indenter.h"
#include "intern_table.h"
#include "leb128.h"
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
@@ -1175,7 +1175,7 @@
// If this is a constructor for a class other than java.lang.Object, mark the first ("this")
// argument as uninitialized. This restricts field access until the superclass constructor is
// called.
- const RegType& declaring_class = GetDeclaringClass();
+ RegType& declaring_class = GetDeclaringClass();
if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
reg_line->SetRegisterType(arg_start + cur_arg,
reg_types_.UninitializedThisArgument(declaring_class));
@@ -1207,7 +1207,7 @@
// it's effectively considered initialized the instant we reach here (in the sense that we
// can return without doing anything or call virtual methods).
{
- const RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
+ RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
if (!reg_type.IsNonZeroReferenceTypes()) {
DCHECK(HasFailures());
return false;
@@ -1241,8 +1241,8 @@
return false;
}
- const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
- const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
+ RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
+ RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
cur_arg++;
break;
@@ -1536,7 +1536,7 @@
* This statement can only appear as the first instruction in an exception handler. We verify
* that as part of extracting the exception type from the catch block list.
*/
- const RegType& res_type = GetCaughtExceptionType();
+ RegType& res_type = GetCaughtExceptionType();
work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
break;
}
@@ -1550,7 +1550,7 @@
case Instruction::RETURN:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory1Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
<< return_type;
@@ -1558,7 +1558,7 @@
// Compilers may generate synthetic functions that write byte values into boolean fields.
// Also, it may use integer values for boolean, byte, short, and character return types.
const uint32_t vregA = inst->VRegA_11x();
- const RegType& src_type = work_line_->GetRegisterType(vregA);
+ RegType& src_type = work_line_->GetRegisterType(vregA);
bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
((return_type.IsBoolean() || return_type.IsByte() ||
return_type.IsShort() || return_type.IsChar()) &&
@@ -1575,7 +1575,7 @@
case Instruction::RETURN_WIDE:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory2Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
} else {
@@ -1590,7 +1590,7 @@
break;
case Instruction::RETURN_OBJECT:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
} else {
@@ -1598,7 +1598,7 @@
DCHECK(!return_type.IsZero());
DCHECK(!return_type.IsUninitializedReference());
const uint32_t vregA = inst->VRegA_11x();
- const RegType& reg_type = work_line_->GetRegisterType(vregA);
+ RegType& reg_type = work_line_->GetRegisterType(vregA);
// Disallow returning uninitialized values and verify that the reference in vAA is an
// instance of the "return_type"
if (reg_type.IsUninitializedTypes()) {
@@ -1645,29 +1645,29 @@
/* could be long or double; resolved upon use */
case Instruction::CONST_WIDE_16: {
int64_t val = static_cast<int16_t>(inst->VRegB_21s());
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
break;
}
case Instruction::CONST_WIDE_32: {
int64_t val = static_cast<int32_t>(inst->VRegB_31i());
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
break;
}
case Instruction::CONST_WIDE: {
int64_t val = inst->VRegB_51l();
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
break;
}
case Instruction::CONST_WIDE_HIGH16: {
int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
break;
}
@@ -1680,7 +1680,7 @@
case Instruction::CONST_CLASS: {
// Get type from instruction if unresolved then we need an access check
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
- const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
// Register holds class, ie its type is class, on error it will hold Conflict.
work_line_->SetRegisterType(inst->VRegA_21c(),
res_type.IsConflict() ? res_type
@@ -1726,8 +1726,17 @@
*/
const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
- const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
+ // If this is a primitive type, fail HARD.
+ mirror::Class* klass = (*dex_cache_)->GetResolvedType(type_idx);
+ if (klass != nullptr && klass->IsPrimitive()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
+ << dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
+ << GetDeclaringClass();
+ break;
+ }
+
DCHECK_NE(failures_.size(), 0U);
if (!is_checkcast) {
work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
@@ -1736,7 +1745,7 @@
}
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
- const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+ RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
if (!res_type.IsNonZeroReferenceTypes()) {
if (is_checkcast) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1759,18 +1768,20 @@
break;
}
case Instruction::ARRAY_LENGTH: {
- const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+ RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
if (res_type.IsReferenceTypes()) {
if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // ie not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
} else {
work_line_->SetRegisterType(inst->VRegA_12x(), reg_types_.Integer());
}
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
}
break;
}
case Instruction::NEW_INSTANCE: {
- const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
if (res_type.IsConflict()) {
DCHECK_NE(failures_.size(), 0U);
break; // bad class
@@ -1782,7 +1793,7 @@
<< "new-instance on primitive, interface or abstract class" << res_type;
// Soft failure so carry on to set register type.
}
- const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
+ RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
// Any registers holding previous allocations from this address that have not yet been
// initialized must be marked invalid.
work_line_->MarkUninitRefsAsInvalid(uninit_type);
@@ -1835,7 +1846,7 @@
work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::THROW: {
- const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+ RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
<< "thrown class " << res_type << " not instanceof Throwable";
@@ -1856,14 +1867,14 @@
case Instruction::FILL_ARRAY_DATA: {
/* Similar to the verification done for APUT */
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
/* array_type can be null if the reg type is Zero */
if (!array_type.IsZero()) {
if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
<< array_type;
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type,
+ RegType& component_type = reg_types_.GetComponentType(array_type,
class_loader_->Get());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
@@ -1891,8 +1902,8 @@
}
case Instruction::IF_EQ:
case Instruction::IF_NE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
bool mismatch = false;
if (reg_type1.IsZero()) { // zero then integral or reference expected
mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1911,8 +1922,8 @@
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
<< reg_type2 << ") must be integral";
@@ -1921,7 +1932,7 @@
}
case Instruction::IF_EQZ:
case Instruction::IF_NEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-eqz/if-nez";
@@ -1967,11 +1978,12 @@
// type is assignable to the original then allow optimization. This check is performed to
// ensure that subsequent merges don't lose type information - such as becoming an
// interface from a class that would lose information relevant to field checks.
- const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
- const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+ RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+ RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
if (!orig_type.Equals(cast_type) &&
!cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
+ cast_type.HasClass() && // Could be conflict type, make sure it has a class.
!cast_type.GetClass()->IsInterface() &&
(orig_type.IsZero() ||
orig_type.IsStrictlyAssignableFrom(cast_type.Merge(orig_type, &reg_types_)))) {
@@ -2022,7 +2034,7 @@
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2171,7 +2183,7 @@
inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range,
is_super);
- const RegType* return_type = nullptr;
+ RegType* return_type = nullptr;
if (called_method != nullptr) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -2227,7 +2239,7 @@
* allowing the latter only if the "this" argument is the same as the "this" argument to
* this method (which implies that we're in a constructor ourselves).
*/
- const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsConflict()) // failure.
break;
@@ -2238,7 +2250,7 @@
}
/* must be in same class or in superclass */
- // const RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
+ // RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
// TODO: re-enable constructor type verification
// if (this_super_klass.IsConflict()) {
// Unknown super class, fail so we re-check at runtime.
@@ -2259,7 +2271,7 @@
*/
work_line_->MarkRefsAsInitialized(this_type);
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
return_type_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2285,7 +2297,7 @@
} else {
descriptor = called_method->GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2313,7 +2325,7 @@
/* Get the type of the "this" arg, which should either be a sub-interface of called
* interface or Object (see comments in RegType::JoinClass).
*/
- const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsZero()) {
/* null pointer always passes (and always fails at runtime) */
} else {
@@ -2343,7 +2355,7 @@
} else {
descriptor = abs_method->GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2609,7 +2621,7 @@
mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != NULL) {
const char* descriptor = called_method->GetReturnTypeDescriptor();
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2763,12 +2775,30 @@
* "try" block when they throw, control transfers out of the method.)
*/
if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
- bool within_catch_all = false;
+ bool has_catch_all_handler = false;
CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
+ // Need the linker to try and resolve the handled class to check if it's Throwable.
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+
for (; iterator.HasNext(); iterator.Next()) {
- if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
- within_catch_all = true;
+ uint16_t handler_type_idx = iterator.GetHandlerTypeIndex();
+ if (handler_type_idx == DexFile::kDexNoIndex16) {
+ has_catch_all_handler = true;
+ } else {
+ // It is also a catch-all if it is java.lang.Throwable.
+ mirror::Class* klass = linker->ResolveType(*dex_file_, handler_type_idx, *dex_cache_,
+ *class_loader_);
+ if (klass != nullptr) {
+ if (klass == mirror::Throwable::GetJavaLangThrowable()) {
+ has_catch_all_handler = true;
+ }
+ } else {
+ // Clear exception.
+ Thread* self = Thread::Current();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ }
}
/*
* Merge registers into the "catch" block. We want to use the "savedRegs" rather than
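The loop above now treats an explicit java.lang.Throwable handler the same as a <catch-all> entry. A minimal standalone sketch of that predicate, not part of the patch; the helper name is hypothetical, but the calls it uses (ResolveType, GetJavaLangThrowable, kDexNoIndex16) are the ones appearing in the hunk:

// Sketch only: mirrors the catch-all test added above.
static bool IsCatchAllHandler(uint16_t handler_type_idx,
                              ClassLinker* linker,
                              const DexFile& dex_file,
                              Handle<mirror::DexCache> dex_cache,
                              Handle<mirror::ClassLoader> class_loader)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (handler_type_idx == DexFile::kDexNoIndex16) {
    return true;  // Explicit <catch-all> entry in the handler list.
  }
  mirror::Class* klass = linker->ResolveType(dex_file, handler_type_idx, dex_cache, class_loader);
  if (klass == nullptr) {
    // Resolution failed; clear the pending exception, as the verifier does above.
    Thread* self = Thread::Current();
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    return false;
  }
  // A handler for java.lang.Throwable catches everything as well.
  return klass == mirror::Throwable::GetJavaLangThrowable();
}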
@@ -2784,7 +2814,7 @@
* If the monitor stack depth is nonzero, there must be a "catch all" handler for this
* instruction. This does apply to monitor-exit because of async exception handling.
*/
- if (work_line_->MonitorStackDepth() > 0 && !within_catch_all) {
+ if (work_line_->MonitorStackDepth() > 0 && !has_catch_all_handler) {
/*
* The state in work_line reflects the post-execution state. If the current instruction is a
* monitor-enter and the monitor stack was empty, we don't need a catch-all (if it throws,
@@ -2875,11 +2905,11 @@
return true;
} // NOLINT(readability/fn_size)
-const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
+RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- const RegType& referrer = GetDeclaringClass();
+ RegType& referrer = GetDeclaringClass();
mirror::Class* klass = (*dex_cache_)->GetResolvedType(class_idx);
- const RegType& result =
+ RegType& result =
klass != NULL ? reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes())
: reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
@@ -2902,8 +2932,8 @@
return result;
}
-const RegType& MethodVerifier::GetCaughtExceptionType() {
- const RegType* common_super = NULL;
+RegType& MethodVerifier::GetCaughtExceptionType() {
+ RegType* common_super = NULL;
if (code_item_->tries_size_ != 0) {
const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2914,7 +2944,7 @@
if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
common_super = &reg_types_.JavaLangThrowable(false);
} else {
- const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
+ RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
if (exception.IsUnresolvedTypes()) {
// We don't know enough about the type. Fail here and let runtime handle it.
@@ -2949,7 +2979,7 @@
mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
MethodType method_type) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
- const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
std::string append(" in attempt to access method ");
append += dex_file_->GetMethodName(method_id);
@@ -2960,7 +2990,7 @@
return NULL; // Can't resolve Class so no more to do here
}
mirror::Class* klass = klass_type.GetClass();
- const RegType& referrer = GetDeclaringClass();
+ RegType& referrer = GetDeclaringClass();
mirror::ArtMethod* res_method = (*dex_cache_)->GetResolvedMethod(dex_method_idx);
if (res_method == NULL) {
const char* name = dex_file_->GetMethodName(method_id);
@@ -3067,7 +3097,7 @@
* rigorous check here (which is okay since we have to do it at runtime).
*/
if (method_type != METHOD_STATIC) {
- const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
CHECK(have_pending_hard_failure_);
return nullptr;
@@ -3081,14 +3111,14 @@
} else {
// Check whether the name of the called method is "<init>"
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "init") != 0) {
+ if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "<init>") != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
return nullptr;
}
}
}
if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
- const RegType* res_method_class;
+ RegType* res_method_class;
if (res_method != nullptr) {
mirror::Class* klass = res_method->GetDeclaringClass();
res_method_class = &reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
@@ -3129,12 +3159,12 @@
return nullptr;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
+ RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
arg[sig_registers];
if (reg_type.IsIntegralTypes()) {
- const RegType& src_type = work_line_->GetRegisterType(get_reg);
+ RegType& src_type = work_line_->GetRegisterType(get_reg);
if (!src_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
<< " but expected " << reg_type;
@@ -3217,7 +3247,7 @@
// has a vtable entry for the target method.
if (is_super) {
DCHECK(method_type == METHOD_VIRTUAL);
- const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
+ RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
if (super.IsUnresolvedTypes()) {
Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
<< PrettyMethod(dex_method_idx_, *dex_file_)
@@ -3225,7 +3255,7 @@
return nullptr;
}
mirror::Class* super_klass = super.GetClass();
- if (res_method->GetMethodIndex() >= super_klass->GetVTable()->GetLength()) {
+ if (res_method->GetMethodIndex() >= super_klass->GetVTableLength()) {
Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from "
<< PrettyMethod(dex_method_idx_, *dex_file_)
<< " to super " << super
@@ -3245,25 +3275,26 @@
RegisterLine* reg_line, bool is_range) {
DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
if (!actual_arg_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
}
- mirror::ObjectArray<mirror::ArtMethod>* vtable = nullptr;
mirror::Class* klass = actual_arg_type.GetClass();
+ mirror::Class* dispatch_class;
if (klass->IsInterface()) {
// Derive Object.class from Class.class.getSuperclass().
mirror::Class* object_klass = klass->GetClass()->GetSuperClass();
CHECK(object_klass->IsObjectClass());
- vtable = object_klass->GetVTable();
+ dispatch_class = object_klass;
} else {
- vtable = klass->GetVTable();
+ dispatch_class = klass;
}
- CHECK(vtable != nullptr) << PrettyDescriptor(klass);
+ CHECK(dispatch_class->HasVTable()) << PrettyDescriptor(dispatch_class);
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK_LT(static_cast<int32_t>(vtable_index), vtable->GetLength()) << PrettyDescriptor(klass);
- mirror::ArtMethod* res_method = vtable->Get(vtable_index);
+ CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength())
+ << PrettyDescriptor(klass);
+ mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
CHECK(!Thread::Current()->IsExceptionPending());
return res_method;
}
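The quick-invoke path above no longer materializes the vtable as an ObjectArray and instead asks the class directly. A sketch of the lookup pattern, not part of the patch; the helper name is hypothetical and the accessors (HasVTable, GetVTableLength, GetVTableEntry) are assumed to behave as used in the hunk:

// Sketch: resolve a quickened vtable index against a receiver class.
static mirror::ArtMethod* LookupVTableEntry(mirror::Class* receiver_klass, uint16_t vtable_index)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Interface receivers dispatch through java.lang.Object's vtable, as in the hunk above.
  mirror::Class* dispatch_class = receiver_klass->IsInterface()
      ? receiver_klass->GetClass()->GetSuperClass()  // Class.class.getSuperclass() == Object.class
      : receiver_klass;
  CHECK(dispatch_class->HasVTable()) << PrettyDescriptor(dispatch_class);
  CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength());
  return dispatch_class->GetVTableEntry(vtable_index);
}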
@@ -3282,7 +3313,7 @@
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
- const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
return NULL;
}
@@ -3306,7 +3337,7 @@
}
if (!actual_arg_type.IsZero()) {
mirror::Class* klass = res_method->GetDeclaringClass();
- const RegType& res_method_class =
+ RegType& res_method_class =
reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
klass->CannotBeAssignedFromOtherTypes());
if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
@@ -3342,7 +3373,7 @@
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+ RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
@@ -3370,7 +3401,7 @@
DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
type_idx = inst->VRegB_3rc();
}
- const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) { // bad class
DCHECK_NE(failures_.size(), 0U);
} else {
@@ -3381,12 +3412,12 @@
/* make sure "size" register is valid type */
work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
/* set register type to array class */
- const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
- const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
+ RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
uint32_t arg[5];
if (!is_range) {
@@ -3400,19 +3431,19 @@
}
}
// filled-array result goes into "result" register
- const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetResultRegisterType(precise_type);
}
}
}
void MethodVerifier::VerifyAGet(const Instruction* inst,
- const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ RegType& insn_type, bool is_primitive) {
+ RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array class; this code path will fail at runtime. Infer a merge-able type from the
// instruction type. TODO: have a proper notion of bottom here.
@@ -3428,7 +3459,7 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
} else {
/* verify the class */
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+ RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
if (!component_type.IsReferenceTypes() && !is_primitive) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
<< " source for aget-object";
@@ -3455,12 +3486,12 @@
}
}
-void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+void MethodVerifier::VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
const uint32_t vregA) {
// Primitive assignability rules are weaker than regular assignability rules.
bool instruction_compatible;
bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(vregA);
+ RegType& value_type = work_line_->GetRegisterType(vregA);
if (target_type.IsIntegralTypes()) {
instruction_compatible = target_type.Equals(insn_type);
value_compatible = value_type.IsIntegralTypes();
@@ -3469,10 +3500,12 @@
value_compatible = value_type.IsFloatTypes();
} else if (target_type.IsLong()) {
instruction_compatible = insn_type.IsLong();
- value_compatible = value_type.IsLongTypes();
+ RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
} else if (target_type.IsDouble()) {
instruction_compatible = insn_type.IsLong(); // no put-double, so expect put-long
- value_compatible = value_type.IsDoubleTypes();
+ RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
} else {
instruction_compatible = false; // reference with primitive store
value_compatible = false; // unused
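Wide values occupy an adjacent register pair, so the put is only value-compatible when vregA holds the low half and vregA + 1 holds its matching high half. Restated as a standalone snippet, illustration only, with the names taken from the hunk above:

// Illustration only: both halves of the wide pair are validated.
RegType& value_lo = work_line_->GetRegisterType(vregA);
RegType& value_hi = work_line_->GetRegisterType(vregA + 1);
bool long_value_ok = value_lo.IsLongTypes() && value_lo.CheckWidePair(value_hi);
bool double_value_ok = value_lo.IsDoubleTypes() && value_lo.CheckWidePair(value_hi);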
@@ -3493,19 +3526,19 @@
}
void MethodVerifier::VerifyAPut(const Instruction* inst,
- const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ RegType& insn_type, bool is_primitive) {
+ RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array type; this code path will fail at runtime. Infer a merge-able type from the
// instruction type.
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+ RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
const uint32_t vregA = inst->VRegA_23x();
if (is_primitive) {
VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3527,7 +3560,7 @@
mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) { // bad class
AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3559,10 +3592,10 @@
return field;
}
-mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
+mirror::ArtField* MethodVerifier::GetInstanceField(RegType& obj_type, int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) {
AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3596,7 +3629,7 @@
return field;
} else {
mirror::Class* klass = field->GetDeclaringClass();
- const RegType& field_klass =
+ RegType& field_klass =
reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
klass, klass->CannotBeAssignedFromOtherTypes());
if (obj_type.IsUninitializedTypes() &&
@@ -3621,17 +3654,17 @@
}
}
-void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISGet(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const RegType* field_type = nullptr;
+ RegType* field_type = nullptr;
if (field != NULL) {
Thread* self = Thread::Current();
mirror::Class* field_type_class;
@@ -3687,17 +3720,17 @@
}
}
-void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISPut(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const RegType* field_type = nullptr;
+ RegType* field_type = nullptr;
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3749,7 +3782,7 @@
inst->Opcode() == Instruction::IPUT_QUICK ||
inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
- const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
if (!object_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
return nullptr;
@@ -3764,7 +3797,7 @@
return f;
}
-void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3779,7 +3812,7 @@
FieldHelper fh(h_field);
field_type_class = fh.GetType(can_load_classes_);
}
- const RegType* field_type;
+ RegType* field_type;
if (field_type_class != nullptr) {
field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
@@ -3824,7 +3857,7 @@
}
}
-void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3834,7 +3867,7 @@
}
const char* descriptor = field->GetTypeDescriptor();
mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
- const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3847,7 +3880,7 @@
// Primitive field assignability rules are weaker than regular assignability rules
bool instruction_compatible;
bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(vregA);
+ RegType& value_type = work_line_->GetRegisterType(vregA);
if (field_type.IsIntegralTypes()) {
instruction_compatible = insn_type.IsIntegralTypes();
value_compatible = value_type.IsIntegralTypes();
@@ -3965,7 +3998,7 @@
return &insn_flags_[work_insn_idx_];
}
-const RegType& MethodVerifier::GetMethodReturnType() {
+RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
if (mirror_method_ != NULL) {
Thread* self = Thread::Current();
@@ -3995,7 +4028,7 @@
return *return_type_;
}
-const RegType& MethodVerifier::GetDeclaringClass() {
+RegType& MethodVerifier::GetDeclaringClass() {
if (declaring_class_ == NULL) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* descriptor
@@ -4016,7 +4049,7 @@
DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
std::vector<int32_t> result;
for (size_t i = 0; i < line->NumRegs(); ++i) {
- const RegType& type = line->GetRegisterType(i);
+ RegType& type = line->GetRegisterType(i);
if (type.IsConstant()) {
result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
result.push_back(type.ConstantValue());
@@ -4056,7 +4089,7 @@
return result;
}
-const RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
+RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
if (precise) {
// Precise constant type.
return reg_types_.FromCat1Const(value, true);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 757c419..e63a90c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -230,7 +230,7 @@
bool HasCheckCasts() const;
bool HasVirtualOrInterfaceInvokes() const;
bool HasFailures() const;
- const RegType& ResolveCheckedClass(uint32_t class_idx)
+ RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -471,34 +471,34 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Helper to perform verification on puts of primitive type.
- void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+ void VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of instruction is used. is_primitive is false for an
// aget-object.
- void VerifyAGet(const Instruction* inst, const RegType& insn_type,
+ void VerifyAGet(const Instruction* inst, RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aput instruction.
- void VerifyAPut(const Instruction* inst, const RegType& insn_type,
+ void VerifyAPut(const Instruction* inst, RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
- mirror::ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
+ mirror::ArtField* GetInstanceField(RegType& obj_type, int field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget or sget instruction.
- void VerifyISGet(const Instruction* inst, const RegType& insn_type,
+ void VerifyISGet(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput or sput instruction.
- void VerifyISPut(const Instruction* inst, const RegType& insn_type,
+ void VerifyISPut(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -508,18 +508,18 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget-quick instruction.
- void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+ void VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput-quick instruction.
- void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+ void VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
// access the resolved class.
- const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
+ RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -527,7 +527,7 @@
* address, determine the Join of all exceptions that can land here. Fails if no matching
* exception handler can be found or if the Join of exception types fails.
*/
- const RegType& GetCaughtExceptionType()
+ RegType& GetCaughtExceptionType()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -613,14 +613,14 @@
}
// Return the register type for the method.
- const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get a type representing the declaring class of the method.
- const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
InstructionFlags* CurrentInsnFlags();
- const RegType& DetermineCat1Constant(int32_t value, bool precise)
+ RegType& DetermineCat1Constant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
RegTypeCache reg_types_;
@@ -641,7 +641,7 @@
// Its object representation if known.
mirror::ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
const uint32_t method_access_flags_; // Method's access flags.
- const RegType* return_type_; // Lazily computed return type of the method.
+ RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
@@ -649,7 +649,7 @@
Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
- const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
+ RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
std::unique_ptr<InstructionFlags[]> insn_flags_;
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index f0729e4..6422cdf 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -81,7 +81,7 @@
: PrimitiveType(klass, descriptor, cache_id) {
}
-std::string PreciseConstType::Dump() const {
+std::string PreciseConstType::Dump() {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -98,47 +98,47 @@
return result.str();
}
-std::string BooleanType::Dump() const {
+std::string BooleanType::Dump() {
return "Boolean";
}
-std::string ConflictType::Dump() const {
+std::string ConflictType::Dump() {
return "Conflict";
}
-std::string ByteType::Dump() const {
+std::string ByteType::Dump() {
return "Byte";
}
-std::string ShortType::Dump() const {
+std::string ShortType::Dump() {
return "Short";
}
-std::string CharType::Dump() const {
+std::string CharType::Dump() {
return "Char";
}
-std::string FloatType::Dump() const {
+std::string FloatType::Dump() {
return "Float";
}
-std::string LongLoType::Dump() const {
+std::string LongLoType::Dump() {
return "Long (Low Half)";
}
-std::string LongHiType::Dump() const {
+std::string LongHiType::Dump() {
return "Long (High Half)";
}
-std::string DoubleLoType::Dump() const {
+std::string DoubleLoType::Dump() {
return "Double (Low Half)";
}
-std::string DoubleHiType::Dump() const {
+std::string DoubleHiType::Dump() {
return "Double (High Half)";
}
-std::string IntegerType::Dump() const {
+std::string IntegerType::Dump() {
return "Integer";
}
@@ -361,7 +361,7 @@
}
}
-std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return "Undefined";
}
@@ -391,7 +391,7 @@
DCHECK(klass->IsInstantiable());
}
-std::string UnresolvedMergedType::Dump() const {
+std::string UnresolvedMergedType::Dump() {
std::stringstream result;
std::set<uint16_t> types = GetMergedTypes();
result << "UnresolvedMergedReferences(";
@@ -405,59 +405,59 @@
return result.str();
}
-std::string UnresolvedSuperClass::Dump() const {
+std::string UnresolvedSuperClass::Dump() {
std::stringstream result;
uint16_t super_type_id = GetUnresolvedSuperClassChildId();
result << "UnresolvedSuperClass(" << reg_type_cache_->GetFromId(super_type_id).Dump() << ")";
return result.str();
}
-std::string UnresolvedReferenceType::Dump() const {
+std::string UnresolvedReferenceType::Dump() {
std::stringstream result;
result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor());
return result.str();
}
-std::string UnresolvedUninitializedRefType::Dump() const {
+std::string UnresolvedUninitializedRefType::Dump() {
std::stringstream result;
result << "Unresolved And Uninitialized Reference" << ": " << PrettyDescriptor(GetDescriptor());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UnresolvedUninitializedThisRefType::Dump() const {
+std::string UnresolvedUninitializedThisRefType::Dump() {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
return result.str();
}
-std::string ReferenceType::Dump() const {
+std::string ReferenceType::Dump() {
std::stringstream result;
result << "Reference" << ": " << PrettyDescriptor(GetClass());
return result.str();
}
-std::string PreciseReferenceType::Dump() const {
+std::string PreciseReferenceType::Dump() {
std::stringstream result;
result << "Precise Reference" << ": "<< PrettyDescriptor(GetClass());
return result.str();
}
-std::string UninitializedReferenceType::Dump() const {
+std::string UninitializedReferenceType::Dump() {
std::stringstream result;
result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UninitializedThisReferenceType::Dump() const {
+std::string UninitializedThisReferenceType::Dump() {
std::stringstream result;
result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
result << "Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string ImpreciseConstType::Dump() const {
+std::string ImpreciseConstType::Dump() {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -472,7 +472,7 @@
}
return result.str();
}
-std::string PreciseConstLoType::Dump() const {
+std::string PreciseConstLoType::Dump() {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -486,7 +486,7 @@
return result.str();
}
-std::string ImpreciseConstLoType::Dump() const {
+std::string ImpreciseConstLoType::Dump() {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -500,7 +500,7 @@
return result.str();
}
-std::string PreciseConstHiType::Dump() const {
+std::string PreciseConstHiType::Dump() {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Precise ";
@@ -513,7 +513,7 @@
return result.str();
}
-std::string ImpreciseConstHiType::Dump() const {
+std::string ImpreciseConstHiType::Dump() {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Imprecise ";
@@ -530,7 +530,7 @@
: RegType(NULL, "", cache_id), constant_(constant) {
}
-const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+RegType& UndefinedType::Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (incoming_type.IsUndefined()) {
return *this; // Undefined MERGE Undefined => Undefined
@@ -538,7 +538,7 @@
return reg_types->Conflict();
}
-const RegType& RegType::HighHalf(RegTypeCache* cache) const {
+RegType& RegType::HighHalf(RegTypeCache* cache) const {
DCHECK(IsLowHalf());
if (IsLongLo()) {
return cache->LongHi();
@@ -586,12 +586,10 @@
}
std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
- const RegType& _left(reg_type_cache_->GetFromId(refs.first));
- RegType& __left(const_cast<RegType&>(_left));
- UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
+ RegType& _left(reg_type_cache_->GetFromId(refs.first));
+ UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&_left);
- RegType& _right(
- const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
+ RegType& _right(reg_type_cache_->GetFromId(refs.second));
UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
std::set<uint16_t> types;
@@ -614,7 +612,7 @@
return types;
}
-const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
+RegType& RegType::GetSuperClass(RegTypeCache* cache) {
if (!IsUnresolvedTypes()) {
mirror::Class* super_klass = GetClass()->GetSuperClass();
if (super_klass != NULL) {
@@ -635,7 +633,7 @@
}
}
-bool RegType::CanAccess(const RegType& other) const {
+bool RegType::CanAccess(RegType& other) {
if (Equals(other)) {
return true; // Trivial accessibility.
} else {
@@ -651,7 +649,7 @@
}
}
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) {
if ((access_flags & kAccPublic) != 0) {
return true;
}
@@ -662,7 +660,7 @@
}
}
-bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
// Primitive arrays will always resolve
DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -675,11 +673,11 @@
}
}
-bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return IsReference() && GetClass()->IsObjectClass();
}
-bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
return descriptor_[0] == '[';
} else if (HasClass()) {
@@ -689,7 +687,7 @@
}
}
-bool RegType::IsJavaLangObjectArray() const {
+bool RegType::IsJavaLangObjectArray() {
if (HasClass()) {
mirror::Class* type = GetClass();
return type->IsArrayClass() && type->GetComponentType()->IsObjectClass();
@@ -697,7 +695,7 @@
return false;
}
-bool RegType::IsInstantiableTypes() const {
+bool RegType::IsInstantiableTypes() {
return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
}
@@ -705,7 +703,7 @@
: ConstantType(constat, cache_id) {
}
-static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+static bool AssignableFrom(RegType& lhs, RegType& rhs, bool strict)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (lhs.Equals(rhs)) {
return true;
@@ -753,11 +751,11 @@
}
}
-bool RegType::IsAssignableFrom(const RegType& src) const {
+bool RegType::IsAssignableFrom(RegType& src) {
return AssignableFrom(*this, src, false);
}
-bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+bool RegType::IsStrictlyAssignableFrom(RegType& src) {
return AssignableFrom(*this, src, true);
}
@@ -775,11 +773,11 @@
}
}
-static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
+static RegType& SelectNonConstant(RegType& a, RegType& b) {
return a.IsConstantTypes() ? b : a;
}
-const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
+RegType& RegType::Merge(RegType& incoming_type, RegTypeCache* reg_types) {
DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller
if (IsConflict()) {
return *this; // Conflict MERGE * => Conflict
@@ -958,16 +956,16 @@
void RegType::CheckInvariants() const {
if (IsConstant() || IsConstantLo() || IsConstantHi()) {
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
- if (klass_ != NULL) {
+ if (!klass_.IsNull()) {
CHECK(!descriptor_.empty()) << *this;
}
}
void RegType::VisitRoots(RootCallback* callback, void* arg) {
- if (klass_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
+ if (!klass_.IsNull()) {
+ klass_.VisitRoot(callback, arg, 0, kRootUnknown);
}
}
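klass_ is now held as a GcRoot<mirror::Class> so a moving collector can relocate the class through VisitRoots. A minimal sketch of the member pattern, not part of the patch; the class name is hypothetical and only the GcRoot calls that appear in this change (IsNull, Read, VisitRoot, the explicit constructor) are used:

// Sketch of the GcRoot<> member pattern used above.
class TypeHolder {
 public:
  explicit TypeHolder(mirror::Class* klass) : klass_(GcRoot<mirror::Class>(klass)) {}

  mirror::Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(!klass_.IsNull());
    return klass_.Read();  // Reads through the root, yielding the possibly-moved object.
  }

  void VisitRoots(RootCallback* callback, void* arg) {
    if (!klass_.IsNull()) {
      klass_.VisitRoot(callback, arg, 0, kRootUnknown);  // Lets the GC update the reference.
    }
  }

 private:
  GcRoot<mirror::Class> klass_;
};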
@@ -978,36 +976,37 @@
void UnresolvedUninitializedThisRefType::CheckInvariants() const {
CHECK_EQ(GetAllocationPc(), 0U) << *this;
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedUninitializedRefType::CheckInvariants() const {
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedMergedType::CheckInvariants() const {
// Unresolved merged types: merged types should be defined.
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
CHECK_NE(merged_types_.first, 0U) << *this;
CHECK_NE(merged_types_.second, 0U) << *this;
}
void UnresolvedReferenceType::CheckInvariants() const {
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedSuperClass::CheckInvariants() const {
// Unresolved merged types: merged types should be defined.
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
CHECK_NE(unresolved_child_id_, 0U) << *this;
}
std::ostream& operator<<(std::ostream& os, const RegType& rhs) {
- os << rhs.Dump();
+ RegType& rhs_non_const = const_cast<RegType&>(rhs);
+ os << rhs_non_const.Dump();
return os;
}
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e985f3a..1682d4e 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -25,6 +25,8 @@
#include "jni.h"
#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
#include "globals.h"
#include "object_callbacks.h"
#include "primitive.h"
@@ -107,7 +109,7 @@
return IsLowHalf();
}
// Check this is the low half, and that type_h is its matching high-half.
- inline bool CheckWidePair(const RegType& type_h) const {
+ inline bool CheckWidePair(RegType& type_h) const {
if (IsLowHalf()) {
return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
(IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
@@ -119,7 +121,7 @@
return false;
}
// The high half that corresponds to this low half
- const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsConstantBoolean() const {
return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
@@ -198,55 +200,54 @@
virtual bool HasClass() const {
return false;
}
- bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Primitive::Type GetPrimitiveType() const;
- bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsInstantiableTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::string& GetDescriptor() const {
DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
return descriptor_;
}
- mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!IsUnresolvedReference());
- DCHECK(klass_ != NULL) << Dump();
+ DCHECK(!klass_.IsNull()) << Dump();
DCHECK(HasClass());
- return klass_;
+ return klass_.Read();
}
uint16_t GetId() const {
return cache_id_;
}
- const RegType& GetSuperClass(RegTypeCache* cache) const
+ RegType& GetSuperClass(RegTypeCache* cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Can this type access other?
- bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CanAccess(RegType& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type access a member with the given properties?
- bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
+ bool CanAccessMember(mirror::Class* klass, uint32_t access_flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src?
// Note: Object and interface types may always be assigned to one another, see comment on
// ClassJoin.
- bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
// an interface from an Object.
- bool IsStrictlyAssignableFrom(const RegType& src) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsStrictlyAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Are these RegTypes the same?
- bool Equals(const RegType& other) const {
+ bool Equals(RegType& other) const {
return GetId() == other.GetId();
}
// Compute the merge of this register from one edge (path) with incoming_type from another.
- virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -275,7 +276,7 @@
protected:
RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ : descriptor_(descriptor), klass_(GcRoot<mirror::Class>(klass)), cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -285,7 +286,7 @@
const std::string descriptor_;
- mirror::Class* klass_; // Non-const only due to moving classes.
+ GcRoot<mirror::Class> klass_;
const uint16_t cache_id_;
friend class RegTypeCache;
@@ -301,7 +302,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static ConflictType* GetInstance();
@@ -331,7 +332,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static UndefinedType* GetInstance();
@@ -350,7 +351,7 @@
: RegType(klass, descriptor, cache_id) {
}
- virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static UndefinedType* instance_;
@@ -373,7 +374,7 @@
bool IsInteger() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -392,7 +393,7 @@
bool IsBoolean() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -412,7 +413,7 @@
bool IsByte() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -431,7 +432,7 @@
bool IsShort() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -450,7 +451,7 @@
bool IsChar() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -469,7 +470,7 @@
bool IsFloat() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -491,7 +492,7 @@
class LongLoType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongLo() const {
return true;
}
@@ -513,7 +514,7 @@
class LongHiType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongHi() const {
return true;
}
@@ -532,7 +533,7 @@
class DoubleLoType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDoubleLo() const {
return true;
}
@@ -554,7 +555,7 @@
class DoubleHiType : public Cat2Type {
public:
- std::string Dump() const;
+ std::string Dump();
virtual bool IsDoubleHi() const {
return true;
}
@@ -621,7 +622,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstLoType : public ConstantType {
@@ -633,7 +634,7 @@
bool IsPreciseConstantLo() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstHiType : public ConstantType {
@@ -645,7 +646,7 @@
bool IsPreciseConstantHi() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstType : public ConstantType {
@@ -655,7 +656,7 @@
bool IsImpreciseConstant() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstLoType : public ConstantType {
@@ -666,7 +667,7 @@
bool IsImpreciseConstantLo() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstHiType : public ConstantType {
@@ -677,7 +678,7 @@
bool IsImpreciseConstantHi() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
@@ -718,7 +719,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
@@ -737,7 +738,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -762,7 +763,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -782,7 +783,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -807,7 +808,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass and only an object of that
@@ -829,7 +830,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of unresolved types.
@@ -857,7 +858,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -883,7 +884,7 @@
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -918,7 +919,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fc9e5c9..fdf96a8 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -24,14 +24,14 @@
namespace art {
namespace verifier {
-inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
+inline RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
RegType* result = entries_[id];
DCHECK(result != NULL);
return *result;
}
-inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+inline ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
// We only expect 0 to be a precise constant.
DCHECK(value != 0 || precise);
if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 91fba4d..c0e4351 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -65,8 +65,8 @@
DCHECK_EQ(entries_.size(), primitive_count_);
}
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
DCHECK(RegTypeCache::primitive_initialized_);
if (descriptor[1] == '\0') {
switch (descriptor[0]) {
@@ -97,7 +97,7 @@
}
};
-const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
+RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
CHECK(RegTypeCache::primitive_initialized_);
switch (prim_type) {
case Primitive::kPrimBoolean:
@@ -156,8 +156,8 @@
return klass;
}
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
// Try looking up the class in the cache first.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
if (MatchDescriptor(i, descriptor, precise)) {
@@ -185,7 +185,7 @@
} else {
entry = new ReferenceType(klass, descriptor, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
} else { // Class not resolved.
// We tried loading the class and failed, this might get an exception raised
@@ -198,7 +198,7 @@
}
if (IsValidDescriptor(descriptor)) {
RegType* entry = new UnresolvedReferenceType(descriptor, entries_.size());
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
} else {
// The descriptor is broken return the unknown type as there's nothing sensible that
@@ -208,7 +208,7 @@
}
}
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
DCHECK(klass != nullptr);
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
@@ -218,7 +218,7 @@
// Look for the reference in the list of entries to have.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if (cur_entry->klass_ == klass && MatchingPrecisionForClass(cur_entry, precise)) {
+ if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
return *cur_entry;
}
}
@@ -229,7 +229,7 @@
} else {
entry = new ReferenceType(klass, descriptor, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
}
@@ -311,17 +311,15 @@
}
}
-const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
+RegType& RegTypeCache::FromUnresolvedMerge(RegType& left, RegType& right) {
std::set<uint16_t> types;
if (left.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(left));
- types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ types = (down_cast<UnresolvedMergedType*>(&left))->GetMergedTypes();
} else {
types.insert(left.GetId());
}
if (right.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(right));
- std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&right))->GetMergedTypes();
types.insert(right_types.begin(), right_types.end());
} else {
types.insert(right.GetId());
@@ -339,7 +337,7 @@
}
// Create entry.
RegType* entry = new UnresolvedMergedType(left.GetId(), right.GetId(), this, entries_.size());
- entries_.push_back(entry);
+ AddEntry(entry);
if (kIsDebugBuild) {
UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry);
std::set<uint16_t> check_types = tmp_entry->GetMergedTypes();
@@ -348,7 +346,7 @@
return *entry;
}
-const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
+RegType& RegTypeCache::FromUnresolvedSuperClass(RegType& child) {
// Check if entry already exists.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
@@ -363,11 +361,11 @@
}
}
RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+UninitializedType& RegTypeCache::Uninitialized(RegType& type, uint32_t allocation_pc) {
UninitializedType* entry = NULL;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -393,11 +391,11 @@
}
entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
+RegType& RegTypeCache::FromUninitialized(RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
@@ -435,48 +433,48 @@
return Conflict();
}
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ImpreciseConstType& RegTypeCache::ByteConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+ImpreciseConstType& RegTypeCache::ByteConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::CharConstant() {
+ImpreciseConstType& RegTypeCache::CharConstant() {
int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
- const ConstantType& result = FromCat1Const(jchar_max, false);
+ ConstantType& result = FromCat1Const(jchar_max, false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::ShortConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+ImpreciseConstType& RegTypeCache::ShortConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::IntConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+ImpreciseConstType& RegTypeCache::IntConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::PosByteConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+ImpreciseConstType& RegTypeCache::PosByteConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::PosShortConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
+ImpreciseConstType& RegTypeCache::PosShortConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+UninitializedType& RegTypeCache::UninitializedThisArgument(RegType& type) {
UninitializedType* entry;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -498,14 +496,14 @@
}
entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
+ if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
cur_entry->IsPreciseConstant() == precise &&
(down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
return *down_cast<ConstantType*>(cur_entry);
@@ -517,11 +515,11 @@
} else {
entry = new ImpreciseConstType(value, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
@@ -535,11 +533,11 @@
} else {
entry = new ImpreciseConstLoType(value, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
@@ -553,11 +551,11 @@
} else {
entry = new ImpreciseConstHiType(value, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
+RegType& RegTypeCache::GetComponentType(RegType& array, mirror::ClassLoader* loader) {
if (!array.IsArrayTypes()) {
return Conflict();
} else if (array.IsUnresolvedTypes()) {
@@ -566,8 +564,15 @@
return FromDescriptor(loader, component.c_str(), false);
} else {
mirror::Class* klass = array.GetClass()->GetComponentType();
- return FromClass(klass->GetDescriptor().c_str(), klass,
- klass->CannotBeAssignedFromOtherTypes());
+ if (klass->IsErroneous()) {
+ // Arrays may have erroneous component types, use unresolved in that case.
+ // We assume that the primitive classes are not erroneous, so we know it is a
+ // reference type.
+ return FromDescriptor(loader, klass->GetDescriptor().c_str(), false);
+ } else {
+ return FromClass(klass->GetDescriptor().c_str(), klass,
+ klass->CannotBeAssignedFromOtherTypes());
+ }
}
}
@@ -586,5 +591,9 @@
}
}
+void RegTypeCache::AddEntry(RegType* new_entry) {
+ entries_.push_back(new_entry);
+}
+
} // namespace verifier
} // namespace art
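
Aside on the AddEntry() hunks above: every creation path in reg_type_cache.cc now funnels new entries through one AddEntry() helper instead of calling entries_.push_back() directly. Below is a minimal sketch of that pattern only; SimpleRegType/SimpleRegTypeCache and the unique_ptr ownership are illustrative assumptions, not the real ART classes (the actual cache stores raw RegType*).

#include <cstdint>
#include <memory>
#include <vector>

// Toy stand-ins for RegType/RegTypeCache; only the insertion path matters here.
class SimpleRegType {
 public:
  explicit SimpleRegType(uint16_t id) : id_(id) {}
  uint16_t GetId() const { return id_; }
 private:
  uint16_t id_;
};

class SimpleRegTypeCache {
 public:
  SimpleRegType& GetFromId(uint16_t id) { return *entries_[id]; }

  SimpleRegType& NewEntry() {
    auto entry = std::make_unique<SimpleRegType>(static_cast<uint16_t>(entries_.size()));
    SimpleRegType& ref = *entry;
    AddEntry(std::move(entry));  // single choke point, as in the diff
    return ref;
  }

 private:
  // Centralising the push keeps any future bookkeeping (duplicate checks,
  // root visiting, statistics, ...) in one place instead of at every caller.
  void AddEntry(std::unique_ptr<SimpleRegType> new_entry) {
    entries_.push_back(std::move(new_entry));
  }

  std::vector<std::unique_ptr<SimpleRegType>> entries_;
};
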
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 70d5f07..d46cf2c 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -49,99 +49,99 @@
}
}
static void ShutDown();
- const art::verifier::RegType& GetFromId(uint16_t id) const;
- const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ RegType& GetFromId(uint16_t id) const;
+ RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
+ RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat1Const(int32_t value, bool precise)
+ ConstantType& FromCat1Const(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
+ ConstantType& FromCat2ConstLo(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
+ ConstantType& FromCat2ConstHi(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
+ RegType& FromUnresolvedMerge(RegType& left, RegType& right)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUnresolvedSuperClass(const RegType& child)
+ RegType& FromUnresolvedSuperClass(RegType& child)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// String is final and therefore always precise.
return From(NULL, "Ljava/lang/String;", true);
}
- const RegType& JavaLangThrowable(bool precise)
+ RegType& JavaLangThrowable(bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Throwable;", precise);
}
- const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
- const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(1, true);
}
size_t GetCacheSize() {
return entries_.size();
}
- const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return *BooleanType::GetInstance();
}
- const RegType& Byte() {
+ RegType& Byte() {
return *ByteType::GetInstance();
}
- const RegType& Char() {
+ RegType& Char() {
return *CharType::GetInstance();
}
- const RegType& Short() {
+ RegType& Short() {
return *ShortType::GetInstance();
}
- const RegType& Integer() {
+ RegType& Integer() {
return *IntegerType::GetInstance();
}
- const RegType& Float() {
+ RegType& Float() {
return *FloatType::GetInstance();
}
- const RegType& LongLo() {
+ RegType& LongLo() {
return *LongLoType::GetInstance();
}
- const RegType& LongHi() {
+ RegType& LongHi() {
return *LongHiType::GetInstance();
}
- const RegType& DoubleLo() {
+ RegType& DoubleLo() {
return *DoubleLoType::GetInstance();
}
- const RegType& DoubleHi() {
+ RegType& DoubleHi() {
return *DoubleHiType::GetInstance();
}
- const RegType& Undefined() {
+ RegType& Undefined() {
return *UndefinedType::GetInstance();
}
- const RegType& Conflict() {
+ RegType& Conflict() {
return *ConflictType::GetInstance();
}
- const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Class;", precise);
}
- const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Object;", precise);
}
- const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+ UninitializedType& Uninitialized(RegType& type, uint32_t allocation_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
- const UninitializedType& UninitializedThisArgument(const RegType& type)
+ UninitializedType& UninitializedThisArgument(RegType& type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUninitialized(const RegType& uninit_type)
+ RegType& FromUninitialized(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
+ ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetComponentType(RegType& array, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
+ RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -151,9 +151,11 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+ ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AddEntry(RegType* new_entry);
+
template <class Type>
static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 9dc0df1..e27558a 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -33,21 +33,21 @@
// Tests creating primitive types types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
- const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
- const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
- const RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
+ RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
+ RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
+ RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
+ RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
EXPECT_TRUE(ref_type_const_0.Equals(ref_type_const_1));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_2));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_3));
- const RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
- const RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
+ RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
+ RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
EXPECT_TRUE(ref_type_const_wide_0.Equals(ref_type_const_wide_1));
- const RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
- const RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
- const RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
+ RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
+ RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
+ RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
EXPECT_TRUE(ref_type_const_wide_2.Equals(ref_type_const_wide_3));
EXPECT_FALSE(ref_type_const_wide_2.Equals(ref_type_const_wide_4));
}
@@ -56,11 +56,11 @@
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
int64_t val = static_cast<int32_t>(1234);
- const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- const RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
- const RegType& long_lo = cache.LongLo();
- const RegType& long_hi = cache.LongHi();
+ RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
+ RegType& long_lo = cache.LongLo();
+ RegType& long_hi = cache.LongHi();
// Check sanity of types.
EXPECT_TRUE(precise_lo.IsLowHalf());
EXPECT_FALSE(precise_hi.IsLowHalf());
@@ -80,7 +80,7 @@
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& bool_reg_type = cache.Boolean();
+ RegType& bool_reg_type = cache.Boolean();
EXPECT_FALSE(bool_reg_type.IsUndefined());
EXPECT_FALSE(bool_reg_type.IsConflict());
EXPECT_FALSE(bool_reg_type.IsZero());
@@ -112,7 +112,7 @@
EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
- const RegType& byte_reg_type = cache.Byte();
+ RegType& byte_reg_type = cache.Byte();
EXPECT_FALSE(byte_reg_type.IsUndefined());
EXPECT_FALSE(byte_reg_type.IsConflict());
EXPECT_FALSE(byte_reg_type.IsZero());
@@ -144,7 +144,7 @@
EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
- const RegType& char_reg_type = cache.Char();
+ RegType& char_reg_type = cache.Char();
EXPECT_FALSE(char_reg_type.IsUndefined());
EXPECT_FALSE(char_reg_type.IsConflict());
EXPECT_FALSE(char_reg_type.IsZero());
@@ -176,7 +176,7 @@
EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
- const RegType& short_reg_type = cache.Short();
+ RegType& short_reg_type = cache.Short();
EXPECT_FALSE(short_reg_type.IsUndefined());
EXPECT_FALSE(short_reg_type.IsConflict());
EXPECT_FALSE(short_reg_type.IsZero());
@@ -208,7 +208,7 @@
EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
- const RegType& int_reg_type = cache.Integer();
+ RegType& int_reg_type = cache.Integer();
EXPECT_FALSE(int_reg_type.IsUndefined());
EXPECT_FALSE(int_reg_type.IsConflict());
EXPECT_FALSE(int_reg_type.IsZero());
@@ -240,7 +240,7 @@
EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
- const RegType& long_reg_type = cache.LongLo();
+ RegType& long_reg_type = cache.LongLo();
EXPECT_FALSE(long_reg_type.IsUndefined());
EXPECT_FALSE(long_reg_type.IsConflict());
EXPECT_FALSE(long_reg_type.IsZero());
@@ -272,7 +272,7 @@
EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
- const RegType& float_reg_type = cache.Float();
+ RegType& float_reg_type = cache.Float();
EXPECT_FALSE(float_reg_type.IsUndefined());
EXPECT_FALSE(float_reg_type.IsConflict());
EXPECT_FALSE(float_reg_type.IsZero());
@@ -304,7 +304,7 @@
EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
- const RegType& double_reg_type = cache.DoubleLo();
+ RegType& double_reg_type = cache.DoubleLo();
EXPECT_FALSE(double_reg_type.IsUndefined());
EXPECT_FALSE(double_reg_type.IsConflict());
EXPECT_FALSE(double_reg_type.IsZero());
@@ -344,9 +344,9 @@
// match the one that is imprecise.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& imprecise_obj = cache.JavaLangObject(false);
- const RegType& precise_obj = cache.JavaLangObject(true);
- const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ RegType& imprecise_obj = cache.JavaLangObject(false);
+ RegType& precise_obj = cache.JavaLangObject(true);
+ RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,14 +359,14 @@
// a hit second time.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
- const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
- const RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
+ RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
}
@@ -375,21 +375,21 @@
// Tests creating types uninitialized types from unresolved types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type));
// Create an uninitialized type of this unresolved type
- const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
+ RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_TRUE(unresolved_unintialised.IsUninitializedTypes());
EXPECT_TRUE(unresolved_unintialised.IsNonZeroReferenceTypes());
// Create an uninitialized type of this unresolved type with different PC
- const RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
+ RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_FALSE(unresolved_unintialised.Equals(ref_type_unresolved_unintialised_1));
// Create an uninitialized type of this unresolved type with the same PC
- const RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
+ RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.Equals(unresolved_unintialised_2));
}
@@ -397,12 +397,12 @@
// Tests types for proper Dump messages.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
- const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
- const RegType& resolved_ref = cache.JavaLangString();
- const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
- const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
- const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
+ RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+ RegType& resolved_ref = cache.JavaLangString();
+ RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
+ RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
+ RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -422,16 +422,16 @@
// The JavaLangObject method instead of FromDescriptor. String class is final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type = cache.JavaLangString();
- const RegType& ref_type_2 = cache.JavaLangString();
- const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+ RegType& ref_type = cache.JavaLangString();
+ RegType& ref_type_2 = cache.JavaLangString();
+ RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
EXPECT_TRUE(ref_type.IsPreciseReference());
// Create an uninitialized type out of this:
- const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
+ RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
}
@@ -442,9 +442,9 @@
// The JavaLangObject method instead of FromDescriptor. Object Class in not final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type = cache.JavaLangObject(true);
- const RegType& ref_type_2 = cache.JavaLangObject(true);
- const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ RegType& ref_type = cache.JavaLangObject(true);
+ RegType& ref_type_2 = cache.JavaLangObject(true);
+ RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -455,20 +455,19 @@
// String and object , LUB is object.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- const RegType& string = cache_new.JavaLangString();
- const RegType& Object = cache_new.JavaLangObject(true);
+ RegType& string = cache_new.JavaLangString();
+ RegType& Object = cache_new.JavaLangObject(true);
EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
// Merge two unresolved types.
- const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+ RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
- const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
+ RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
EXPECT_TRUE(merged.IsUnresolvedMergedReference());
- RegType& merged_nonconst = const_cast<RegType&>(merged);
- std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
+ std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged))->GetMergedTypes();
EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
}
@@ -479,27 +478,27 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& float_type = cache_new.Float();
- const RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
- const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
+ RegType& float_type = cache_new.Float();
+ RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
+ RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
{
// float MERGE precise cst => float.
- const RegType& merged = float_type.Merge(precise_cst, &cache_new);
+ RegType& merged = float_type.Merge(precise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// precise cst MERGE float => float.
- const RegType& merged = precise_cst.Merge(float_type, &cache_new);
+ RegType& merged = precise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// float MERGE imprecise cst => float.
- const RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
+ RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// imprecise cst MERGE float => float.
- const RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
+ RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
}
@@ -510,50 +509,50 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& long_lo_type = cache_new.LongLo();
- const RegType& long_hi_type = cache_new.LongHi();
- const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ RegType& long_lo_type = cache_new.LongLo();
+ RegType& long_hi_type = cache_new.LongHi();
+ RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
+ RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
+ RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// lo MERGE imprecise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// imprecise cst lo MERGE lo => lo.
- const RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
+ RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
+ RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
+ RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// hi MERGE imprecise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// imprecise cst hi MERGE hi => hi.
- const RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
+ RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
}
@@ -564,50 +563,50 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& double_lo_type = cache_new.DoubleLo();
- const RegType& double_hi_type = cache_new.DoubleHi();
- const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ RegType& double_lo_type = cache_new.DoubleLo();
+ RegType& double_hi_type = cache_new.DoubleHi();
+ RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
+ RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
+ RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// lo MERGE imprecise cst lo => lo.
- const RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// imprecise cst lo MERGE lo => lo.
- const RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
+ RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
+ RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
+ RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// hi MERGE imprecise cst hi => hi.
- const RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// imprecise cst hi MERGE hi => hi.
- const RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
+ RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
}
@@ -616,8 +615,8 @@
// Tests creating primitive types types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
- const RegType& precise_const = cache_new.FromCat1Const(10, true);
+ RegType& imprecise_const = cache_new.FromCat1Const(10, false);
+ RegType& precise_const = cache_new.FromCat1Const(10, true);
EXPECT_TRUE(imprecise_const.IsImpreciseConstant());
EXPECT_TRUE(precise_const.IsPreciseConstant());
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 0989cd0..378c6d3 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,7 +25,7 @@
namespace art {
namespace verifier {
-inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
// The register index was validated during the static pass, so we don't need to check it here.
DCHECK_LT(vsrc, num_regs_);
return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 556056c..4d67cfb 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -36,7 +36,7 @@
return true;
}
-bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
+bool RegisterLine::SetRegisterType(uint32_t vdst, RegType& new_type) {
DCHECK_LT(vdst, num_regs_);
if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
@@ -53,8 +53,8 @@
return true;
}
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
- const RegType& new_type2) {
+bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, RegType& new_type1,
+ RegType& new_type2) {
DCHECK_LT(vdst + 1, num_regs_);
if (!new_type1.CheckWidePair(new_type2)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
@@ -75,21 +75,21 @@
result_[1] = result_[0];
}
-void RegisterLine::SetResultRegisterType(const RegType& new_type) {
+void RegisterLine::SetResultRegisterType(RegType& new_type) {
DCHECK(!new_type.IsLowHalf());
DCHECK(!new_type.IsHighHalf());
result_[0] = new_type.GetId();
result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
}
-void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
- const RegType& new_type2) {
+void RegisterLine::SetResultRegisterTypeWide(RegType& new_type1,
+ RegType& new_type2) {
DCHECK(new_type1.CheckWidePair(new_type2));
result_[0] = new_type1.GetId();
result_[1] = new_type2.GetId();
}
-const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
if (args_count < 1) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
@@ -97,7 +97,7 @@
}
/* Get the element type of the array held in vsrc */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- const RegType& this_type = GetRegisterType(this_reg);
+ RegType& this_type = GetRegisterType(this_reg);
if (!this_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
<< this_reg << " (type=" << this_type << ")";
@@ -107,9 +107,9 @@
}
bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
- const RegType& check_type) {
+ RegType& check_type) {
// Verify the src register type against the check type refining the type of the register
- const RegType& src_type = GetRegisterType(vsrc);
+ RegType& src_type = GetRegisterType(vsrc);
if (!(check_type.IsAssignableFrom(src_type))) {
enum VerifyError fail_type;
if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
@@ -125,7 +125,7 @@
return false;
}
if (check_type.IsLowHalf()) {
- const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -139,17 +139,17 @@
return true;
}
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
- const RegType& check_type2) {
+bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1,
+ RegType& check_type2) {
DCHECK(check_type1.CheckWidePair(check_type2));
// Verify the src register type against the check type refining the type of the register
- const RegType& src_type = GetRegisterType(vsrc);
+ RegType& src_type = GetRegisterType(vsrc);
if (!check_type1.IsAssignableFrom(src_type)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
<< " but expected " << check_type1;
return false;
}
- const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -162,9 +162,9 @@
return true;
}
-void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(RegType& uninit_type) {
DCHECK(uninit_type.IsUninitializedTypes());
- const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+ RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
@@ -200,7 +200,7 @@
}
}
-std::string RegisterLine::Dump() const {
+std::string RegisterLine::Dump() {
std::string result;
for (size_t i = 0; i < num_regs_; i++) {
result += StringPrintf("%zd:[", i);
@@ -213,7 +213,7 @@
return result;
}
-void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(RegType& uninit_type) {
for (size_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
@@ -224,7 +224,7 @@
void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
- const RegType& type = GetRegisterType(vsrc);
+ RegType& type = GetRegisterType(vsrc);
if (!SetRegisterType(vdst, type)) {
return;
}
@@ -238,8 +238,8 @@
}
void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
- const RegType& type_l = GetRegisterType(vsrc);
- const RegType& type_h = GetRegisterType(vsrc + 1);
+ RegType& type_l = GetRegisterType(vsrc);
+ RegType& type_h = GetRegisterType(vsrc + 1);
if (!type_l.CheckWidePair(type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
@@ -250,7 +250,7 @@
}
void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
- const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
if ((!is_reference && !type.IsCategory1Types()) ||
(is_reference && !type.IsReferenceTypes())) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
@@ -267,8 +267,8 @@
* register to another register, and reset the result register.
*/
void RegisterLine::CopyResultRegister2(uint32_t vdst) {
- const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
- const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+ RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
if (!type_l.IsCategory2Types()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "copyRes2 v" << vdst << "<- result0" << " type=" << type_l;
@@ -281,40 +281,40 @@
}
void RegisterLine::CheckUnaryOp(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type) {
+ RegType& dst_type,
+ RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1, const RegType& src_type2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1, RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2) {
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckBinaryOp(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2,
bool check_boolean_op) {
const uint32_t vregB = inst->VRegB_23x();
const uint32_t vregC = inst->VRegC_23x();
@@ -333,9 +333,9 @@
}
void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
@@ -343,8 +343,8 @@
}
void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type) {
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
VerifyRegisterType(inst->VRegC_23x(), int_type)) {
SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
@@ -352,8 +352,8 @@
}
void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type1,
- const RegType& src_type2, bool check_boolean_op) {
+ RegType& dst_type, RegType& src_type1,
+ RegType& src_type2, bool check_boolean_op) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterType(vregA, src_type1) &&
@@ -371,9 +371,9 @@
}
void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
@@ -383,8 +383,8 @@
}
void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type) {
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
@@ -394,7 +394,7 @@
}
void RegisterLine::CheckLiteralOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type,
+ RegType& dst_type, RegType& src_type,
bool check_boolean_op, bool is_lit16) {
const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
@@ -413,7 +413,7 @@
}
void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
- const RegType& reg_type = GetRegisterType(reg_idx);
+ RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
} else if (monitors_.size() >= 32) {
@@ -425,7 +425,7 @@
}
void RegisterLine::PopMonitor(uint32_t reg_idx) {
- const RegType& reg_type = GetRegisterType(reg_idx);
+ RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
} else if (monitors_.empty()) {
@@ -460,9 +460,9 @@
DCHECK(incoming_line != nullptr);
for (size_t idx = 0; idx < num_regs_; idx++) {
if (line_[idx] != incoming_line->line_[idx]) {
- const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
- const RegType& cur_type = GetRegisterType(idx);
- const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+ RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
+ RegType& cur_type = GetRegisterType(idx);
+ RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
changed = changed || !cur_type.Equals(new_type);
line_[idx] = new_type.GetId();
}
@@ -508,7 +508,8 @@
std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- os << rhs.Dump();
+ RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
+ os << rhs_non_const.Dump();
return os;
}
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 57c7517..b0018d2 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -81,26 +81,26 @@
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
// The register index was validated during the static pass, so we don't need to check it here.
- bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+ bool SetRegisterType(uint32_t vdst, RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+ bool SetRegisterTypeWide(uint32_t vdst, RegType& new_type1, RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/* Set the type of the "result" register. */
- void SetResultRegisterType(const RegType& new_type)
+ void SetResultRegisterType(RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
+ void SetResultRegisterTypeWide(RegType& new_type1, RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the type of register vsrc.
- const RegType& GetRegisterType(uint32_t vsrc) const;
+ RegType& GetRegisterType(uint32_t vsrc) const;
- bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+ bool VerifyRegisterType(uint32_t vsrc, RegType& check_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+ bool VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1, RegType& check_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +110,7 @@
reg_to_lock_depths_ = src->reg_to_lock_depths_;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +126,7 @@
* to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
* the new ones at the same time).
*/
- void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+ void MarkUninitRefsAsInvalid(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -134,7 +134,7 @@
* reference type. This is called when an appropriate constructor is invoked -- all copies of
* the reference must be marked as initialized.
*/
- void MarkRefsAsInitialized(const RegType& uninit_type)
+ void MarkRefsAsInitialized(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -173,30 +173,30 @@
* The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
* versions. We just need to make sure vA is >= 1 and then return vC.
*/
- const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+ RegType& GetInvocationThis(const Instruction* inst, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
* "dst_type" is stored into vA, and "src_type" is verified against vB.
*/
- void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
- const RegType& src_type)
+ void CheckUnaryOp(const Instruction* inst, RegType& dst_type,
+ RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1, const RegType& src_type2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1, RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpToWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpFromWide(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2)
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -205,19 +205,19 @@
* against vB/vC.
*/
void CheckBinaryOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type, RegType& src_type1, RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type)
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -225,20 +225,20 @@
* are verified against vA/vB, then "dst_type" is stored into vA.
*/
void CheckBinaryOp2addr(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type)
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -248,7 +248,7 @@
* If "check_boolean_op" is set, we use the constant value in vC.
*/
void CheckLiteralOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type,
+ RegType& dst_type, RegType& src_type,
bool check_boolean_op, bool is_lit16)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
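
The register_line hunks above remove const from every RegType& parameter and from Dump(), which is what forces the const_cast now used in operator<< for RegisterLine. A minimal, self-contained C++ sketch of that pattern (illustrative names, not part of the patch):

#include <iostream>
#include <string>

// Stand-in for RegisterLine: Dump() is deliberately non-const, mirroring the
// signature change made by this patch.
class Line {
 public:
  std::string Dump() { return "v0: Integer, v1: Reference"; }
};

// Stream operators conventionally take a const reference, so const must be
// cast away before the non-const Dump() can be called -- the same workaround
// the patch applies in register_line.cc.
std::ostream& operator<<(std::ostream& os, const Line& rhs) {
  Line& rhs_non_const = const_cast<Line&>(rhs);
  os << rhs_non_const.Dump();
  return os;
}

int main() {
  Line line;
  std::cout << line << std::endl;  // prints the dumped register types
  return 0;
}
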
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index fdc6e3f..3a6a72b 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -47,6 +47,8 @@
jclass WellKnownClasses::java_lang_ThreadGroup;
jclass WellKnownClasses::java_lang_Throwable;
jclass WellKnownClasses::java_nio_DirectByteBuffer;
+jclass WellKnownClasses::java_util_Collections;
+jclass WellKnownClasses::libcore_util_EmptyArray;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer;
@@ -150,6 +152,8 @@
java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
java_lang_Throwable = CacheClass(env, "java/lang/Throwable");
java_nio_DirectByteBuffer = CacheClass(env, "java/nio/DirectByteBuffer");
+ java_util_Collections = CacheClass(env, "java/util/Collections");
+ libcore_util_EmptyArray = CacheClass(env, "libcore/util/EmptyArray");
org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index f6c2930..7639f50 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -60,7 +60,9 @@
static jclass java_lang_ThreadGroup;
static jclass java_lang_Thread$UncaughtExceptionHandler;
static jclass java_lang_Throwable;
+ static jclass java_util_Collections;
static jclass java_nio_DirectByteBuffer;
+ static jclass libcore_util_EmptyArray;
static jclass org_apache_harmony_dalvik_ddmc_Chunk;
static jclass org_apache_harmony_dalvik_ddmc_DdmServer;
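
The two new well-known classes, java.util.Collections and libcore.util.EmptyArray, are cached the same way as the existing entries. Below is a hedged sketch of the usual JNI caching pattern (find the class once, then pin it with a global reference so the jclass survives the local reference frame); the helper name and error handling are illustrative and may not match the exact CacheClass implementation:

#include <jni.h>

// Look up a class by descriptor and promote it to a global reference so the
// cached jclass stays valid across JNI calls and threads.
static jclass CacheClassSketch(JNIEnv* env, const char* descriptor) {
  jclass local = env->FindClass(descriptor);
  if (local == nullptr) {
    return nullptr;  // leaves the pending ClassNotFoundException for the caller
  }
  jclass global = reinterpret_cast<jclass>(env->NewGlobalRef(local));
  env->DeleteLocalRef(local);
  return global;
}

// Usage mirroring the additions above:
//   java_util_Collections   = CacheClassSketch(env, "java/util/Collections");
//   libcore_util_EmptyArray = CacheClassSketch(env, "libcore/util/EmptyArray");
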
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
index 8e25339..d86735d 100644
--- a/sigchainlib/Android.mk
+++ b/sigchainlib/Android.mk
@@ -23,8 +23,23 @@
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
LOCAL_SRC_FILES := sigchain.cc
+LOCAL_CLANG = $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
LOCAL_SHARED_LIBRARIES := liblog libdl
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
include $(BUILD_SHARED_LIBRARY)
+
+# Build host library.
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+LOCAL_MODULE_TAGS := optional
+LOCAL_IS_HOST_MODULE := true
+LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+LOCAL_CLANG = $(ART_HOST_CLANG)
+LOCAL_SRC_FILES := sigchain.cc
+LOCAL_MODULE:= libsigchain
+LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+LOCAL_LDLIBS = -ldl
+LOCAL_MULTILIB := both
+include $(BUILD_HOST_SHARED_LIBRARY)
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 5a5805f..6f93083 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -14,12 +14,22 @@
* limitations under the License.
*/
+#ifdef HAVE_ANDROID_OS
#include <android/log.h>
+#else
+#include <stdarg.h>
+#include <iostream>
+#endif
+
#include <dlfcn.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#if defined(__APPLE__)
+#define _NSIG NSIG
+#endif
+
namespace art {
class SignalAction {
@@ -67,7 +77,11 @@
va_list ap;
va_start(ap, format);
vsnprintf(buf, sizeof(buf), format, ap);
+#ifdef HAVE_ANDROID_OS
__android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
+#else
+ std::cout << buf << "\n";
+#endif
va_end(ap);
}
@@ -104,10 +118,16 @@
if ((action.sa_flags & SA_SIGINFO) == 0) {
if (action.sa_handler != NULL) {
action.sa_handler(sig);
+ } else {
+ signal(sig, SIG_DFL);
+ raise(sig);
}
} else {
if (action.sa_sigaction != NULL) {
action.sa_sigaction(sig, info, context);
+ } else {
+ signal(sig, SIG_DFL);
+ raise(sig);
}
}
}
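
The sigchain.cc hunk adds a fallback for the case where no user handler was claimed for a signal: restore the default disposition and re-raise, so the signal is not silently swallowed. A standalone restatement of that logic, with an illustrative function name and plain POSIX assumptions:

#include <signal.h>
#include <stddef.h>

// Call the previously saved handler if one is installed; otherwise fall back
// to the default action for the signal, as the patch does where it dispatches
// to the claimed handler.
static void InvokeOrDefault(int sig, siginfo_t* info, void* context,
                            const struct sigaction& saved) {
  if ((saved.sa_flags & SA_SIGINFO) == 0) {
    if (saved.sa_handler != NULL) {
      saved.sa_handler(sig);
      return;
    }
  } else {
    if (saved.sa_sigaction != NULL) {
      saved.sa_sigaction(sig, info, context);
      return;
    }
  }
  // No claimed handler in either form: default disposition, then re-raise.
  signal(sig, SIG_DFL);
  raise(sig);
}
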
diff --git a/test/001-nop/build b/test/000-nop/build
similarity index 100%
rename from test/001-nop/build
rename to test/000-nop/build
diff --git a/test/001-nop/expected.txt b/test/000-nop/expected.txt
similarity index 100%
rename from test/001-nop/expected.txt
rename to test/000-nop/expected.txt
diff --git a/test/001-nop/info.txt b/test/000-nop/info.txt
similarity index 100%
rename from test/001-nop/info.txt
rename to test/000-nop/info.txt
diff --git a/test/001-nop/run b/test/000-nop/run
similarity index 100%
rename from test/001-nop/run
rename to test/000-nop/run
diff --git a/test/001-HelloWorld/expected.txt b/test/001-HelloWorld/expected.txt
new file mode 100644
index 0000000..af5626b
--- /dev/null
+++ b/test/001-HelloWorld/expected.txt
@@ -0,0 +1 @@
+Hello, world!
diff --git a/test/001-HelloWorld/info.txt b/test/001-HelloWorld/info.txt
new file mode 100644
index 0000000..641dd9a
--- /dev/null
+++ b/test/001-HelloWorld/info.txt
@@ -0,0 +1 @@
+Imported from oat test. Print "Hello World."
diff --git a/test/HelloWorld/HelloWorld.java b/test/001-HelloWorld/src/Main.java
similarity index 90%
rename from test/HelloWorld/HelloWorld.java
rename to test/001-HelloWorld/src/Main.java
index c6861ce..1ef6289 100644
--- a/test/HelloWorld/HelloWorld.java
+++ b/test/001-HelloWorld/src/Main.java
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-class HelloWorld {
+public class Main {
public static void main(String[] args) {
- System.logI("Hello, world!");
+ System.out.println("Hello, world!");
}
}
diff --git a/test/001-Main/expected.txt b/test/001-Main/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/001-Main/expected.txt
diff --git a/test/001-Main/info.txt b/test/001-Main/info.txt
new file mode 100644
index 0000000..e4b33a0
--- /dev/null
+++ b/test/001-Main/info.txt
@@ -0,0 +1 @@
+Import of a previous oat test. Empty main, just test starting up the runtime.
diff --git a/test/HelloWorld/HelloWorld.java b/test/001-Main/src/Main.java
similarity index 85%
copy from test/HelloWorld/HelloWorld.java
copy to test/001-Main/src/Main.java
index c6861ce..3486866 100644
--- a/test/HelloWorld/HelloWorld.java
+++ b/test/001-Main/src/Main.java
@@ -14,8 +14,7 @@
* limitations under the License.
*/
-class HelloWorld {
- public static void main(String[] args) {
- System.logI("Hello, world!");
- }
+public class Main {
+ public static void main(String args[]) {
+ }
}
diff --git a/test/003-omnibus-opcodes/src/IntMath.java b/test/003-omnibus-opcodes/src/IntMath.java
index 2e2962a..ad540fd 100644
--- a/test/003-omnibus-opcodes/src/IntMath.java
+++ b/test/003-omnibus-opcodes/src/IntMath.java
@@ -335,8 +335,8 @@
special = (start+i) / 15;
break;
}
+ Main.assertTrue(normal == special);
}
- Main.assertTrue(normal == special);
}
}
diff --git a/test/004-InterfaceTest/expected.txt b/test/004-InterfaceTest/expected.txt
new file mode 100644
index 0000000..4854e24
--- /dev/null
+++ b/test/004-InterfaceTest/expected.txt
@@ -0,0 +1,2 @@
+test_virtual done
+test_interface done
diff --git a/test/004-InterfaceTest/info.txt b/test/004-InterfaceTest/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-InterfaceTest/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/InterfaceTest/InterfaceTest.java b/test/004-InterfaceTest/src/Main.java
similarity index 91%
rename from test/InterfaceTest/InterfaceTest.java
rename to test/004-InterfaceTest/src/Main.java
index ed18eb3..9ebac59 100644
--- a/test/InterfaceTest/InterfaceTest.java
+++ b/test/004-InterfaceTest/src/Main.java
@@ -17,7 +17,7 @@
import java.util.Map;
import java.util.HashMap;
-class InterfaceTest {
+public class Main {
public static long test_virtual(HashMap map) {
Integer intobj = new Integer(0);
@@ -44,10 +44,10 @@
public static void main(String[] args) {
HashMap hashmap = new HashMap();
long elapsed = test_virtual(hashmap);
- System.logI("virtual map put: " + elapsed);
+ System.out.println("test_virtual done");
hashmap.clear();
elapsed = test_interface(hashmap);
- System.logI("interface map put: " + elapsed);
+ System.out.println("test_interface done");
}
}
diff --git a/test/004-JniTest/expected.txt b/test/004-JniTest/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/004-JniTest/expected.txt
diff --git a/test/004-JniTest/info.txt b/test/004-JniTest/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-JniTest/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
similarity index 84%
rename from test/JniTest/jni_test.cc
rename to test/004-JniTest/jni_test.cc
index 36cad72..554712a 100644
--- a/test/JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -42,7 +42,7 @@
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
- jclass clazz = env->FindClass("JniTest");
+ jclass clazz = env->FindClass("Main");
assert(clazz != NULL);
assert(!env->ExceptionCheck());
@@ -56,7 +56,7 @@
}
// http://b/10994325
-extern "C" JNIEXPORT void JNICALL Java_JniTest_testFindClassOnAttachedNativeThread(JNIEnv*,
+extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread(JNIEnv*,
jclass) {
pthread_t pthread;
int pthread_create_result = pthread_create(&pthread,
@@ -76,7 +76,7 @@
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
- jclass clazz = env->FindClass("JniTest");
+ jclass clazz = env->FindClass("Main");
assert(clazz != NULL);
assert(!env->ExceptionCheck());
@@ -91,7 +91,7 @@
return NULL;
}
-extern "C" JNIEXPORT void JNICALL Java_JniTest_testFindFieldOnAttachedNativeThreadNative(JNIEnv*,
+extern "C" JNIEXPORT void JNICALL Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv*,
jclass) {
pthread_t pthread;
int pthread_create_result = pthread_create(&pthread,
@@ -111,7 +111,7 @@
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
- jclass clazz = env->FindClass("JniTest");
+ jclass clazz = env->FindClass("Main");
assert(clazz != NULL);
assert(!env->ExceptionCheck());
@@ -151,7 +151,7 @@
}
// http://b/15539150
-extern "C" JNIEXPORT void JNICALL Java_JniTest_testReflectFieldGetFromAttachedNativeThreadNative(
+extern "C" JNIEXPORT void JNICALL Java_Main_testReflectFieldGetFromAttachedNativeThreadNative(
JNIEnv*, jclass) {
pthread_t pthread;
int pthread_create_result = pthread_create(&pthread,
@@ -165,22 +165,22 @@
// http://b/11243757
-extern "C" JNIEXPORT void JNICALL Java_JniTest_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
+extern "C" JNIEXPORT void JNICALL Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
jclass) {
- jclass super_class = env->FindClass("JniTest$testCallStaticVoidMethodOnSubClass_SuperClass");
+ jclass super_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SuperClass");
assert(super_class != NULL);
jmethodID execute = env->GetStaticMethodID(super_class, "execute", "()V");
assert(execute != NULL);
- jclass sub_class = env->FindClass("JniTest$testCallStaticVoidMethodOnSubClass_SubClass");
+ jclass sub_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SubClass");
assert(sub_class != NULL);
env->CallStaticVoidMethod(sub_class, execute);
}
-extern "C" JNIEXPORT jobject JNICALL Java_JniTest_testGetMirandaMethodNative(JNIEnv* env, jclass) {
- jclass abstract_class = env->FindClass("JniTest$testGetMirandaMethod_MirandaAbstract");
+extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass) {
+ jclass abstract_class = env->FindClass("Main$testGetMirandaMethod_MirandaAbstract");
assert(abstract_class != NULL);
jmethodID miranda_method = env->GetMethodID(abstract_class, "inInterface", "()Z");
assert(miranda_method != NULL);
@@ -188,7 +188,7 @@
}
// https://code.google.com/p/android/issues/detail?id=63055
-extern "C" void JNICALL Java_JniTest_testZeroLengthByteBuffers(JNIEnv* env, jclass) {
+extern "C" void JNICALL Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass) {
std::vector<uint8_t> buffer(1);
jobject byte_buffer = env->NewDirectByteBuffer(&buffer[0], 0);
assert(byte_buffer != NULL);
@@ -201,7 +201,7 @@
constexpr size_t kByteReturnSize = 7;
jbyte byte_returns[kByteReturnSize] = { 0, 1, 2, 127, -1, -2, -128 };
-extern "C" jbyte JNICALL Java_JniTest_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
+extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
jbyte b3, jbyte b4, jbyte b5, jbyte b6,
jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
// We use b1 to drive the output.
@@ -226,7 +226,7 @@
static_cast<jshort>(0x8000) };
// The weird static_cast is because short int is only guaranteed down to -32767, not Java's -32768.
-extern "C" jshort JNICALL Java_JniTest_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
+extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
jshort s3, jshort s4, jshort s5, jshort s6,
jshort s7, jshort s8, jshort s9, jshort s10) {
// We use s1 to drive the output.
@@ -246,7 +246,7 @@
return short_returns[s1];
}
-extern "C" jboolean JNICALL Java_JniTest_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
+extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
jboolean b2, jboolean b3, jboolean b4,
jboolean b5, jboolean b6, jboolean b7,
jboolean b8, jboolean b9, jboolean b10) {
@@ -268,7 +268,7 @@
constexpr size_t kCharReturnSize = 8;
jchar char_returns[kCharReturnSize] = { 0, 1, 2, 127, 255, 256, 15000, 34000 };
-extern "C" jchar JNICALL Java_JniTest_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
+extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
jchar c3, jchar c4, jchar c5, jchar c6,
jchar c7, jchar c8, jchar c9, jchar c10) {
// We use c1 to drive the output.
@@ -286,3 +286,8 @@
return char_returns[c1];
}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_nativeIsAssignableFrom(JNIEnv* env, jclass,
+ jclass from, jclass to) {
+ return env->IsAssignableFrom(from, to);
+}
diff --git a/test/JniTest/JniTest.java b/test/004-JniTest/src/Main.java
similarity index 88%
rename from test/JniTest/JniTest.java
rename to test/004-JniTest/src/Main.java
index 33418a9..ae133be 100644
--- a/test/JniTest/JniTest.java
+++ b/test/004-JniTest/src/Main.java
@@ -16,7 +16,7 @@
import java.lang.reflect.Method;
-class JniTest {
+public class Main {
public static void main(String[] args) {
System.loadLibrary("arttest");
testFindClassOnAttachedNativeThread();
@@ -29,6 +29,7 @@
testShortMethod();
testBooleanMethod();
testCharMethod();
+ testIsAssignableFromOnPrimitiveTypes();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -151,4 +152,19 @@
}
}
}
+
+ // http://b/16531674
+ private static void testIsAssignableFromOnPrimitiveTypes() {
+ if (!nativeIsAssignableFrom(int.class, Integer.TYPE)) {
+ System.out.println("IsAssignableFrom(int.class, Integer.TYPE) returned false, expected true");
+ throw new AssertionError();
+ }
+
+ if (!nativeIsAssignableFrom(Integer.TYPE, int.class)) {
+ System.out.println("IsAssignableFrom(Integer.TYPE, int.class) returned false, expected true");
+ throw new AssertionError();
+ }
+ }
+
+ native static boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
}
diff --git a/test/004-NativeAllocations/expected.txt b/test/004-NativeAllocations/expected.txt
new file mode 100644
index 0000000..f75da10
--- /dev/null
+++ b/test/004-NativeAllocations/expected.txt
@@ -0,0 +1 @@
+Test complete
diff --git a/test/004-NativeAllocations/info.txt b/test/004-NativeAllocations/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-NativeAllocations/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/NativeAllocations/NativeAllocations.java b/test/004-NativeAllocations/src/Main.java
similarity index 98%
rename from test/NativeAllocations/NativeAllocations.java
rename to test/004-NativeAllocations/src/Main.java
index 9423b91..483c667 100644
--- a/test/NativeAllocations/NativeAllocations.java
+++ b/test/004-NativeAllocations/src/Main.java
@@ -16,7 +16,7 @@
import java.lang.reflect.*;
-class NativeAllocations {
+public class Main {
static Object nativeLock = new Object();
static int nativeBytes = 0;
static Object runtime;
diff --git a/test/004-ReferenceMap/expected.txt b/test/004-ReferenceMap/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/004-ReferenceMap/expected.txt
diff --git a/test/004-ReferenceMap/info.txt b/test/004-ReferenceMap/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-ReferenceMap/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/ReferenceMap/ReferenceMap.java b/test/004-ReferenceMap/src/Main.java
similarity index 91%
rename from test/ReferenceMap/ReferenceMap.java
rename to test/004-ReferenceMap/src/Main.java
index c746b68..f9a5498 100644
--- a/test/ReferenceMap/ReferenceMap.java
+++ b/test/004-ReferenceMap/src/Main.java
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-public class ReferenceMap {
- public ReferenceMap() {
+public class Main {
+ public Main() {
}
Object f() {
@@ -41,7 +41,7 @@
}
public static void main(String[] args) {
- ReferenceMap rm = new ReferenceMap();
+ Main rm = new Main();
rm.f();
}
}
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
similarity index 99%
rename from test/ReferenceMap/stack_walk_refmap_jni.cc
rename to test/004-ReferenceMap/stack_walk_refmap_jni.cc
index e5a1786..7929554 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -271,7 +271,7 @@
// 0x0032 - 0x0033 reg=2 y Ljava/lang/Object;
// 0x0000 - 0x0033 reg=8 this LReferenceMap;
-extern "C" JNIEXPORT jint JNICALL Java_ReferenceMap_refmap(JNIEnv*, jobject, jint count) {
+extern "C" JNIEXPORT jint JNICALL Java_Main_refmap(JNIEnv*, jobject, jint count) {
// Visitor
ScopedObjectAccess soa(Thread::Current());
ReferenceMap2Visitor mapper(soa.Self());
diff --git a/test/004-SignalTest/expected.txt b/test/004-SignalTest/expected.txt
new file mode 100644
index 0000000..fd5ec00
--- /dev/null
+++ b/test/004-SignalTest/expected.txt
@@ -0,0 +1,5 @@
+init signal test
+Caught NullPointerException
+Caught StackOverflowError
+signal caught
+Signal test OK
diff --git a/test/004-SignalTest/info.txt b/test/004-SignalTest/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-SignalTest/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
similarity index 88%
rename from test/SignalTest/signaltest.cc
rename to test/004-SignalTest/signaltest.cc
index dfe3197..a2dd664 100644
--- a/test/SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -41,7 +41,7 @@
static struct sigaction oldaction;
-extern "C" JNIEXPORT void JNICALL Java_SignalTest_initSignalTest(JNIEnv*, jclass) {
+extern "C" JNIEXPORT void JNICALL Java_Main_initSignalTest(JNIEnv*, jclass) {
struct sigaction action;
action.sa_sigaction = signalhandler;
sigemptyset(&action.sa_mask);
@@ -53,7 +53,7 @@
sigaction(SIGSEGV, &action, &oldaction);
}
-extern "C" JNIEXPORT void JNICALL Java_SignalTest_terminateSignalTest(JNIEnv*, jclass) {
+extern "C" JNIEXPORT void JNICALL Java_Main_terminateSignalTest(JNIEnv*, jclass) {
sigaction(SIGSEGV, &oldaction, nullptr);
}
@@ -61,7 +61,7 @@
// to nullptr.
char *p = nullptr;
-extern "C" JNIEXPORT jint JNICALL Java_SignalTest_testSignal(JNIEnv*, jclass) {
+extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {
#ifdef __arm__
// On ARM we cause a real SEGV.
*p = 'a';
diff --git a/test/SignalTest/SignalTest.java b/test/004-SignalTest/src/Main.java
similarity index 98%
rename from test/SignalTest/SignalTest.java
rename to test/004-SignalTest/src/Main.java
index 7f15aea..0391592 100644
--- a/test/SignalTest/SignalTest.java
+++ b/test/004-SignalTest/src/Main.java
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-class SignalTest {
+public class Main {
private static native void initSignalTest();
private static native void terminateSignalTest();
private static native int testSignal();
diff --git a/test/004-StackWalk/expected.txt b/test/004-StackWalk/expected.txt
new file mode 100644
index 0000000..bde0024
--- /dev/null
+++ b/test/004-StackWalk/expected.txt
@@ -0,0 +1,4 @@
+1st call
+172001234567891011121314151617181920652310201919
+2nd call
+172001234567891011121314151617181920652310201919
diff --git a/test/004-StackWalk/info.txt b/test/004-StackWalk/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-StackWalk/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/StackWalk/StackWalk.java b/test/004-StackWalk/src/Main.java
similarity index 92%
rename from test/StackWalk/StackWalk.java
rename to test/004-StackWalk/src/Main.java
index f7c78ff..1e2a91b 100644
--- a/test/StackWalk/StackWalk.java
+++ b/test/004-StackWalk/src/Main.java
@@ -1,5 +1,5 @@
-public class StackWalk {
- public StackWalk() {
+public class Main {
+ public Main() {
}
int f() {
@@ -76,18 +76,18 @@
s4 = s18 = s19;
s += s4;
s += s18;
- refmap(0);
+ stackmap(0);
return s;
}
- native int refmap(int x);
+ native int stackmap(int x);
static {
System.loadLibrary("arttest");
}
public static void main(String[] args) {
- StackWalk st = new StackWalk();
+ Main st = new Main();
st.f();
}
}
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
similarity index 93%
rename from test/StackWalk/stack_walk_jni.cc
rename to test/004-StackWalk/stack_walk_jni.cc
index e404f6a..30a0d59 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -95,13 +95,12 @@
CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
}
}
- LOG(INFO) << reinterpret_cast<const void*>(reg_bitmap);
return true;
}
};
-extern "C" JNIEXPORT jint JNICALL Java_StackWalk_refmap(JNIEnv*, jobject, jint count) {
+extern "C" JNIEXPORT jint JNICALL Java_Main_stackmap(JNIEnv*, jobject, jint count) {
ScopedObjectAccess soa(Thread::Current());
CHECK_EQ(count, 0);
gJava_StackWalk_refmap_calls++;
@@ -113,7 +112,7 @@
return count + 1;
}
-extern "C" JNIEXPORT jint JNICALL Java_StackWalk2_refmap2(JNIEnv*, jobject, jint count) {
+extern "C" JNIEXPORT jint JNICALL Java_Main_refmap2(JNIEnv*, jobject, jint count) {
ScopedObjectAccess soa(Thread::Current());
gJava_StackWalk_refmap_calls++;
diff --git a/test/004-ThreadStress/check b/test/004-ThreadStress/check
new file mode 100755
index 0000000..ffbb8cf
--- /dev/null
+++ b/test/004-ThreadStress/check
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Only compare the last line.
+tail -n 1 "$2" | diff --strip-trailing-cr -q "$1" - >/dev/null
\ No newline at end of file
diff --git a/test/004-ThreadStress/expected.txt b/test/004-ThreadStress/expected.txt
new file mode 100644
index 0000000..a26fb4f
--- /dev/null
+++ b/test/004-ThreadStress/expected.txt
@@ -0,0 +1 @@
+Finishing worker
diff --git a/test/004-ThreadStress/info.txt b/test/004-ThreadStress/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-ThreadStress/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/ThreadStress/ThreadStress.java b/test/004-ThreadStress/src/Main.java
similarity index 89%
rename from test/ThreadStress/ThreadStress.java
rename to test/004-ThreadStress/src/Main.java
index 5dccc68..0c1c97d 100644
--- a/test/ThreadStress/ThreadStress.java
+++ b/test/004-ThreadStress/src/Main.java
@@ -14,9 +14,7 @@
* limitations under the License.
*/
-import android.system.ErrnoException;
-import android.system.Os;
-import android.system.OsConstants;
+import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -26,7 +24,7 @@
// Run on host with:
// javac ThreadTest.java && java ThreadStress && rm *.class
-class ThreadStress implements Runnable {
+public class Main implements Runnable {
public static final boolean DEBUG = false;
@@ -80,7 +78,7 @@
// Fill in the Operation[] array for each thread by laying
// down references to operation according to their desired
// frequency.
- final ThreadStress[] threadStresses = new ThreadStress[numberOfThreads];
+ final Main[] threadStresses = new Main[numberOfThreads];
for (int t = 0; t < threadStresses.length; t++) {
Operation[] operations = new Operation[operationsPerThread];
int o = 0;
@@ -98,7 +96,7 @@
}
// Randomize the operation order
Collections.shuffle(Arrays.asList(operations));
- threadStresses[t] = new ThreadStress(lock, t, operations);
+ threadStresses[t] = new Main(lock, t, operations);
}
// Enable to dump operation counts per thread to make sure its
@@ -129,9 +127,9 @@
// operationsPerThread.
Thread[] runners = new Thread[numberOfThreads];
for (int r = 0; r < runners.length; r++) {
- final ThreadStress ts = threadStresses[r];
+ final Main ts = threadStresses[r];
runners[r] = new Thread("Runner thread " + r) {
- final ThreadStress threadStress = ts;
+ final Main threadStress = ts;
public void run() {
int id = threadStress.id;
System.out.println("Starting worker for " + id);
@@ -146,7 +144,7 @@
+ (operationsPerThread - threadStress.nextOperation)
+ " operations remaining.");
}
- System.out.println("Finishing worker for " + id);
+ System.out.println("Finishing worker");
}
};
}
@@ -179,7 +177,7 @@
private int nextOperation;
- private ThreadStress(Object lock, int id, Operation[] operations) {
+ private Main(Object lock, int id, Operation[] operations) {
this.lock = lock;
this.id = id;
this.operations = operations;
@@ -204,8 +202,8 @@
}
case SIGQUIT: {
try {
- Os.kill(Os.getpid(), OsConstants.SIGQUIT);
- } catch (ErrnoException ex) {
+ SIGQUIT();
+ } catch (Exception ex) {
}
}
case SLEEP: {
@@ -267,4 +265,17 @@
}
}
}
+
+ private static void SIGQUIT() throws Exception {
+ Class<?> osClass = Class.forName("android.system.Os");
+ Method getpid = osClass.getDeclaredMethod("getpid");
+ int pid = (Integer)getpid.invoke(null);
+
+ Class<?> osConstants = Class.forName("android.system.OsConstants");
+ Field sigquitField = osConstants.getDeclaredField("SIGQUIT");
+ int sigquit = (Integer)sigquitField.get(null);
+
+ Method kill = osClass.getDeclaredMethod("kill", int.class, int.class);
+ kill.invoke(null, pid, sigquit);
+ }
}
diff --git a/test/004-UnsafeTest/expected.txt b/test/004-UnsafeTest/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/004-UnsafeTest/expected.txt
diff --git a/test/004-UnsafeTest/info.txt b/test/004-UnsafeTest/info.txt
new file mode 100644
index 0000000..00b0d9a
--- /dev/null
+++ b/test/004-UnsafeTest/info.txt
@@ -0,0 +1 @@
+Imported from oat tests.
diff --git a/test/UnsafeTest/UnsafeTest.java b/test/004-UnsafeTest/src/Main.java
similarity index 95%
rename from test/UnsafeTest/UnsafeTest.java
rename to test/004-UnsafeTest/src/Main.java
index 9e2ff87..8c8ad16 100644
--- a/test/UnsafeTest/UnsafeTest.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -17,21 +17,21 @@
import java.lang.reflect.Field;
import sun.misc.Unsafe;
-public class UnsafeTest {
+public class Main {
static {
System.loadLibrary("arttest");
}
private static void check(int actual, int expected, String msg) {
if (actual != expected) {
- System.logE(msg + " : " + actual + " != " + expected);
+ System.out.println(msg + " : " + actual + " != " + expected);
System.exit(-1);
}
}
private static void check(long actual, long expected, String msg) {
if (actual != expected) {
- System.logE(msg + " : " + actual + " != " + expected);
+ System.out.println(msg + " : " + actual + " != " + expected);
System.exit(-1);
}
}
diff --git a/test/UnsafeTest/unsafe_test.cc b/test/004-UnsafeTest/unsafe_test.cc
similarity index 85%
rename from test/UnsafeTest/unsafe_test.cc
rename to test/004-UnsafeTest/unsafe_test.cc
index e36ee14..ca0e39e 100644
--- a/test/UnsafeTest/unsafe_test.cc
+++ b/test/004-UnsafeTest/unsafe_test.cc
@@ -24,14 +24,14 @@
namespace art {
-extern "C" JNIEXPORT jint JNICALL Java_UnsafeTest_vmArrayBaseOffset(JNIEnv* env, jclass, jobject classObj) {
+extern "C" JNIEXPORT jint JNICALL Java_Main_vmArrayBaseOffset(JNIEnv* env, jclass, jobject classObj) {
ScopedObjectAccess soa(env);
mirror::Class* klass = soa.Decode<mirror::Class*>(classObj);
return mirror::Array::DataOffset(
Primitive::ComponentSize(klass->GetComponentType()->GetPrimitiveType())).Int32Value();
}
-extern "C" JNIEXPORT jint JNICALL Java_UnsafeTest_vmArrayIndexScale(JNIEnv* env, jclass, jobject classObj) {
+extern "C" JNIEXPORT jint JNICALL Java_Main_vmArrayIndexScale(JNIEnv* env, jclass, jobject classObj) {
ScopedObjectAccess soa(env);
mirror::Class* klass = soa.Decode<mirror::Class*>(classObj);
return Primitive::ComponentSize(klass->GetComponentType()->GetPrimitiveType());
diff --git a/test/004-annotations/build b/test/005-annotations/build
similarity index 100%
rename from test/004-annotations/build
rename to test/005-annotations/build
diff --git a/test/004-annotations/expected.txt b/test/005-annotations/expected.txt
similarity index 100%
rename from test/004-annotations/expected.txt
rename to test/005-annotations/expected.txt
diff --git a/test/004-annotations/info.txt b/test/005-annotations/info.txt
similarity index 100%
rename from test/004-annotations/info.txt
rename to test/005-annotations/info.txt
diff --git a/test/004-annotations/src/Main.java b/test/005-annotations/src/Main.java
similarity index 100%
rename from test/004-annotations/src/Main.java
rename to test/005-annotations/src/Main.java
diff --git a/test/004-annotations/src/android/test/AnnoSimplePackage1.java b/test/005-annotations/src/android/test/AnnoSimplePackage1.java
similarity index 100%
rename from test/004-annotations/src/android/test/AnnoSimplePackage1.java
rename to test/005-annotations/src/android/test/AnnoSimplePackage1.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoArrayField.java b/test/005-annotations/src/android/test/anno/AnnoArrayField.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoArrayField.java
rename to test/005-annotations/src/android/test/anno/AnnoArrayField.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoFancyConstructor.java b/test/005-annotations/src/android/test/anno/AnnoFancyConstructor.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoFancyConstructor.java
rename to test/005-annotations/src/android/test/anno/AnnoFancyConstructor.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoFancyField.java b/test/005-annotations/src/android/test/anno/AnnoFancyField.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoFancyField.java
rename to test/005-annotations/src/android/test/anno/AnnoFancyField.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoFancyMethod.java b/test/005-annotations/src/android/test/anno/AnnoFancyMethod.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoFancyMethod.java
rename to test/005-annotations/src/android/test/anno/AnnoFancyMethod.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoFancyParameter.java b/test/005-annotations/src/android/test/anno/AnnoFancyParameter.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoFancyParameter.java
rename to test/005-annotations/src/android/test/anno/AnnoFancyParameter.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoFancyType.java b/test/005-annotations/src/android/test/anno/AnnoFancyType.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoFancyType.java
rename to test/005-annotations/src/android/test/anno/AnnoFancyType.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleConstructor.java b/test/005-annotations/src/android/test/anno/AnnoSimpleConstructor.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleConstructor.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleConstructor.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleField.java b/test/005-annotations/src/android/test/anno/AnnoSimpleField.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleField.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleField.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleLocalVariable.java b/test/005-annotations/src/android/test/anno/AnnoSimpleLocalVariable.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleLocalVariable.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleLocalVariable.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleMethod.java b/test/005-annotations/src/android/test/anno/AnnoSimpleMethod.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleMethod.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleMethod.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimplePackage.java b/test/005-annotations/src/android/test/anno/AnnoSimplePackage.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimplePackage.java
rename to test/005-annotations/src/android/test/anno/AnnoSimplePackage.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleParameter.java b/test/005-annotations/src/android/test/anno/AnnoSimpleParameter.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleParameter.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleParameter.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleType.java b/test/005-annotations/src/android/test/anno/AnnoSimpleType.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleType.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleType.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleType2.java b/test/005-annotations/src/android/test/anno/AnnoSimpleType2.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleType2.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleType2.java
diff --git a/test/004-annotations/src/android/test/anno/AnnoSimpleTypeInvis.java b/test/005-annotations/src/android/test/anno/AnnoSimpleTypeInvis.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/AnnoSimpleTypeInvis.java
rename to test/005-annotations/src/android/test/anno/AnnoSimpleTypeInvis.java
diff --git a/test/004-annotations/src/android/test/anno/ExportedProperty.java b/test/005-annotations/src/android/test/anno/ExportedProperty.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/ExportedProperty.java
rename to test/005-annotations/src/android/test/anno/ExportedProperty.java
diff --git a/test/004-annotations/src/android/test/anno/FullyNoted.java b/test/005-annotations/src/android/test/anno/FullyNoted.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/FullyNoted.java
rename to test/005-annotations/src/android/test/anno/FullyNoted.java
diff --git a/test/004-annotations/src/android/test/anno/INoted.java b/test/005-annotations/src/android/test/anno/INoted.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/INoted.java
rename to test/005-annotations/src/android/test/anno/INoted.java
diff --git a/test/004-annotations/src/android/test/anno/IntToString.java b/test/005-annotations/src/android/test/anno/IntToString.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/IntToString.java
rename to test/005-annotations/src/android/test/anno/IntToString.java
diff --git a/test/004-annotations/src/android/test/anno/MissingAnnotation.java b/test/005-annotations/src/android/test/anno/MissingAnnotation.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/MissingAnnotation.java
rename to test/005-annotations/src/android/test/anno/MissingAnnotation.java
diff --git a/test/004-annotations/src/android/test/anno/SimplyNoted.java b/test/005-annotations/src/android/test/anno/SimplyNoted.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/SimplyNoted.java
rename to test/005-annotations/src/android/test/anno/SimplyNoted.java
diff --git a/test/004-annotations/src/android/test/anno/SomeClass.java b/test/005-annotations/src/android/test/anno/SomeClass.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/SomeClass.java
rename to test/005-annotations/src/android/test/anno/SomeClass.java
diff --git a/test/004-annotations/src/android/test/anno/SubNoted.java b/test/005-annotations/src/android/test/anno/SubNoted.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/SubNoted.java
rename to test/005-annotations/src/android/test/anno/SubNoted.java
diff --git a/test/004-annotations/src/android/test/anno/TestAnnotations.java b/test/005-annotations/src/android/test/anno/TestAnnotations.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/TestAnnotations.java
rename to test/005-annotations/src/android/test/anno/TestAnnotations.java
diff --git a/test/004-annotations/src/android/test/anno/package-info.java b/test/005-annotations/src/android/test/anno/package-info.java
similarity index 100%
rename from test/004-annotations/src/android/test/anno/package-info.java
rename to test/005-annotations/src/android/test/anno/package-info.java
diff --git a/test/004-annotations/src/android/test/package-info.java b/test/005-annotations/src/android/test/package-info.java
similarity index 100%
rename from test/004-annotations/src/android/test/package-info.java
rename to test/005-annotations/src/android/test/package-info.java
diff --git a/test/005-args/expected.txt b/test/006-args/expected.txt
similarity index 100%
rename from test/005-args/expected.txt
rename to test/006-args/expected.txt
diff --git a/test/005-args/info.txt b/test/006-args/info.txt
similarity index 100%
rename from test/005-args/info.txt
rename to test/006-args/info.txt
diff --git a/test/005-args/src/ArgsTest.java b/test/006-args/src/ArgsTest.java
similarity index 100%
rename from test/005-args/src/ArgsTest.java
rename to test/006-args/src/ArgsTest.java
diff --git a/test/005-args/src/Main.java b/test/006-args/src/Main.java
similarity index 100%
rename from test/005-args/src/Main.java
rename to test/006-args/src/Main.java
diff --git a/test/006-count10/expected.txt b/test/007-count10/expected.txt
similarity index 100%
rename from test/006-count10/expected.txt
rename to test/007-count10/expected.txt
diff --git a/test/006-count10/info.txt b/test/007-count10/info.txt
similarity index 100%
rename from test/006-count10/info.txt
rename to test/007-count10/info.txt
diff --git a/test/006-count10/src/Main.java b/test/007-count10/src/Main.java
similarity index 100%
rename from test/006-count10/src/Main.java
rename to test/007-count10/src/Main.java
diff --git a/test/007-exceptions/expected.txt b/test/008-exceptions/expected.txt
similarity index 100%
rename from test/007-exceptions/expected.txt
rename to test/008-exceptions/expected.txt
diff --git a/test/007-exceptions/info.txt b/test/008-exceptions/info.txt
similarity index 100%
rename from test/007-exceptions/info.txt
rename to test/008-exceptions/info.txt
diff --git a/test/007-exceptions/src/Main.java b/test/008-exceptions/src/Main.java
similarity index 100%
rename from test/007-exceptions/src/Main.java
rename to test/008-exceptions/src/Main.java
diff --git a/test/008-instanceof/expected.txt b/test/008-instanceof/expected.txt
deleted file mode 100644
index 77fd0cb..0000000
--- a/test/008-instanceof/expected.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-iface1.mFloaty = 5.0 wahoo
-aa.mFloaty = 5.0 wahoo
-bb.mWhoami = ImplB!
-aaOkay (false) = false
-bbOkay (true) = true
-Caught a ClassCastException (expected)
diff --git a/test/008-instanceof/src/Iface1.java b/test/008-instanceof/src/Iface1.java
deleted file mode 100644
index d7f5376..0000000
--- a/test/008-instanceof/src/Iface1.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2005 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Test stuff.
- */
-public interface Iface1 {
-
- public int iFunc1(int ii);
-
- public float mFloaty = 5.0f;
-
- public String mWahoo = new String("wahoo");
-}
diff --git a/test/008-instanceof/src/Iface2.java b/test/008-instanceof/src/Iface2.java
deleted file mode 100644
index 2b33c39..0000000
--- a/test/008-instanceof/src/Iface2.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2006 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Another interface.
- */
-public interface Iface2 {
-
- public int iFunc2(int ii);
-}
diff --git a/test/008-instanceof/src/Iface2Sub1.java b/test/008-instanceof/src/Iface2Sub1.java
deleted file mode 100644
index bcff8ab..0000000
--- a/test/008-instanceof/src/Iface2Sub1.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2006 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Another interface.
- */
-public interface Iface2Sub1 extends Iface2, Cloneable {
-
- //public int iFunc2(int ii);
-}
diff --git a/test/008-instanceof/src/ImplA.java b/test/008-instanceof/src/ImplA.java
deleted file mode 100644
index 27364f2..0000000
--- a/test/008-instanceof/src/ImplA.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2006 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Blah.
- */
-public class ImplA implements Iface1, Iface2 {
-
- public int iFunc1(int ii) {
- return ii+1;
- }
- public int iFunc2(int ii) {
- return ii+2;
- }
-}
diff --git a/test/008-instanceof/src/ImplB.java b/test/008-instanceof/src/ImplB.java
deleted file mode 100644
index 8b05702..0000000
--- a/test/008-instanceof/src/ImplB.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2006 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Blah.
- */
-public class ImplB implements Iface1, Iface2 {
-
- public int iFunc1(int ii) {
- return ii+10;
- }
- public int iFunc2(int ii) {
- return ii+20;
- }
-
- public static String mWhoami = new String("ImplB!");
-}
diff --git a/test/008-instanceof/src/ImplBSub.java b/test/008-instanceof/src/ImplBSub.java
deleted file mode 100644
index a94ae4d..0000000
--- a/test/008-instanceof/src/ImplBSub.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2006 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Interface test.
- */
-public class ImplBSub extends ImplB implements /*Iface2,*/ Iface2Sub1 {
-
- public int iFunc1(int ii) {
- return ii+100;
- }
- public int iFunc2(int ii) {
- return ii+200;
- }
-}
diff --git a/test/008-instanceof/src/Main.java b/test/008-instanceof/src/Main.java
deleted file mode 100644
index 77f3f51..0000000
--- a/test/008-instanceof/src/Main.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Test instanceof
- */
-public class Main {
- public static void main(String args[]) {
- Iface1 face1;
- ImplA aa = new ImplA();
- ImplBSub bb = new ImplBSub();
- boolean aaOkay, bbOkay;
-
- face1 = aa;
- face1 = bb;
-
- System.out.println("iface1.mFloaty = " + face1.mFloaty + " " + face1.mWahoo);
- System.out.println("aa.mFloaty = " + aa.mFloaty + " " + aa.mWahoo);
- System.out.println("bb.mWhoami = " + bb.mWhoami);
-
- aaOkay = face1 instanceof ImplA;
- System.out.print("aaOkay (false) = ");
- System.out.println(aaOkay);
- bbOkay = face1 instanceof ImplB;
- System.out.print("bbOkay (true) = ");
- System.out.println(bbOkay);
-
- bb = (ImplBSub) face1;
- try {
- aa = (ImplA) face1;
- }
- catch (ClassCastException cce) {
- System.out.println("Caught a ClassCastException (expected)");
- }
- }
-}
diff --git a/test/009-instanceof/expected.txt b/test/009-instanceof/expected.txt
new file mode 100644
index 0000000..967c0bf
--- /dev/null
+++ b/test/009-instanceof/expected.txt
@@ -0,0 +1,11 @@
+iface1.mFloaty = 5.0 wahoo
+aa.mFloaty = 5.0 wahoo
+bb.mWhoami = ImplB!
+aaOkay (false) = false
+bbOkay (true) = true
+Caught a ClassCastException (expected)
+instanceof Serializable = true
+instanceof Cloneable = true
+instanceof Runnable = false
+aaOkay (false) = false
+bbOkay (true) = true
diff --git a/test/008-instanceof/info.txt b/test/009-instanceof/info.txt
similarity index 100%
rename from test/008-instanceof/info.txt
rename to test/009-instanceof/info.txt
diff --git a/test/009-instanceof2/src/Iface1.java b/test/009-instanceof/src/Iface1.java
similarity index 100%
rename from test/009-instanceof2/src/Iface1.java
rename to test/009-instanceof/src/Iface1.java
diff --git a/test/009-instanceof2/src/Iface2.java b/test/009-instanceof/src/Iface2.java
similarity index 100%
rename from test/009-instanceof2/src/Iface2.java
rename to test/009-instanceof/src/Iface2.java
diff --git a/test/009-instanceof2/src/Iface2Sub1.java b/test/009-instanceof/src/Iface2Sub1.java
similarity index 100%
rename from test/009-instanceof2/src/Iface2Sub1.java
rename to test/009-instanceof/src/Iface2Sub1.java
diff --git a/test/009-instanceof2/src/ImplA.java b/test/009-instanceof/src/ImplA.java
similarity index 100%
rename from test/009-instanceof2/src/ImplA.java
rename to test/009-instanceof/src/ImplA.java
diff --git a/test/009-instanceof2/src/ImplB.java b/test/009-instanceof/src/ImplB.java
similarity index 100%
rename from test/009-instanceof2/src/ImplB.java
rename to test/009-instanceof/src/ImplB.java
diff --git a/test/009-instanceof2/src/ImplBSub.java b/test/009-instanceof/src/ImplBSub.java
similarity index 100%
rename from test/009-instanceof2/src/ImplBSub.java
rename to test/009-instanceof/src/ImplBSub.java
diff --git a/test/009-instanceof2/src/Main.java b/test/009-instanceof/src/Main.java
similarity index 66%
rename from test/009-instanceof2/src/Main.java
rename to test/009-instanceof/src/Main.java
index 15a1e50..807ae69 100644
--- a/test/009-instanceof2/src/Main.java
+++ b/test/009-instanceof/src/Main.java
@@ -19,10 +19,36 @@
*/
public class Main {
public static void main(String args[]) {
+ Iface1 face1;
+ ImplA aa = new ImplA();
+ ImplBSub bb = new ImplBSub();
+ boolean aaOkay, bbOkay;
+
+ face1 = aa;
+ face1 = bb;
+
+ System.out.println("iface1.mFloaty = " + face1.mFloaty + " " + face1.mWahoo);
+ System.out.println("aa.mFloaty = " + aa.mFloaty + " " + aa.mWahoo);
+ System.out.println("bb.mWhoami = " + bb.mWhoami);
+
+ aaOkay = face1 instanceof ImplA;
+ System.out.print("aaOkay (false) = ");
+ System.out.println(aaOkay);
+ bbOkay = face1 instanceof ImplB;
+ System.out.print("bbOkay (true) = ");
+ System.out.println(bbOkay);
+
+ bb = (ImplBSub) face1;
+ try {
+ aa = (ImplA) face1;
+ }
+ catch (ClassCastException cce) {
+ System.out.println("Caught a ClassCastException (expected)");
+ }
+
Iface1[] faceArray;
ImplA[] aaArray = new ImplA[5];
ImplBSub[] bbArray = new ImplBSub[5];
- boolean aaOkay, bbOkay;
faceArray = aaArray;
faceArray = bbArray;
diff --git a/test/009-instanceof2/expected.txt b/test/009-instanceof2/expected.txt
deleted file mode 100644
index 74ad202..0000000
--- a/test/009-instanceof2/expected.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-instanceof Serializable = true
-instanceof Cloneable = true
-instanceof Runnable = false
-aaOkay (false) = false
-bbOkay (true) = true
diff --git a/test/009-instanceof2/info.txt b/test/009-instanceof2/info.txt
deleted file mode 100644
index 08127da..0000000
--- a/test/009-instanceof2/info.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This is a miscellaneous test that was imported into the new-at-the-time
-runtime test framework. The test is intended to exercise basic features,
-and as such cannot be build on top of junit, since failure of such basic
-features might disrupt junit.
-
-TODO: Real description goes here.
diff --git a/test/015-switch/expected.txt b/test/015-switch/expected.txt
index ca3b518..91b4714 100644
--- a/test/015-switch/expected.txt
+++ b/test/015-switch/expected.txt
@@ -8,3 +8,9 @@
CORRECT (default only)
CORRECT big sparse / first
CORRECT big sparse / last
+default
+254
+255
+256
+257
+default
diff --git a/test/015-switch/src/Main.java b/test/015-switch/src/Main.java
index 7198e2b..dd97a8c 100644
--- a/test/015-switch/src/Main.java
+++ b/test/015-switch/src/Main.java
@@ -101,5 +101,15 @@
case 100: System.out.print("CORRECT big sparse / last\n"); break;
default: System.out.print("blah!\n"); break;
}
+
+ for (a = 253; a <= 258; a++) {
+ switch (a) {
+ case 254: System.out.println("254"); break;
+ case 255: System.out.println("255"); break;
+ case 256: System.out.println("256"); break;
+ case 257: System.out.println("257"); break;
+ default: System.out.println("default"); break;
+ }
+ }
}
}
diff --git a/test/018-stack-overflow/expected.txt b/test/018-stack-overflow/expected.txt
index 98b45b7..cc10c0c 100644
--- a/test/018-stack-overflow/expected.txt
+++ b/test/018-stack-overflow/expected.txt
@@ -1,3 +1,10 @@
-caught SOE in testSelfRecursion
+libartd run.
+caught SOE3 in testSelfRecursion
+caught SOE10 in testSelfRecursion
+caught SOE in testMutualRecursion
+SOE test done
+libart run.
+caught SOE3 in testSelfRecursion
+caught SOE10 in testSelfRecursion
caught SOE in testMutualRecursion
SOE test done
diff --git a/test/018-stack-overflow/run b/test/018-stack-overflow/run
new file mode 100755
index 0000000..1a71a1a
--- /dev/null
+++ b/test/018-stack-overflow/run
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run normal. This will be the debug build.
+echo "libartd run."
+${RUN} "${@}"
+
+# Run non-debug.
+echo "libart run."
+${RUN} "${@/#libartd.so/libart.so}"
diff --git a/test/018-stack-overflow/src/Main.java b/test/018-stack-overflow/src/Main.java
index 41adabc..0961226 100644
--- a/test/018-stack-overflow/src/Main.java
+++ b/test/018-stack-overflow/src/Main.java
@@ -25,16 +25,38 @@
}
private static void testSelfRecursion() {
+// try {
+// stackOverflowTestSub0();
+// }
+// catch (StackOverflowError soe) {
+// System.out.println("caught SOE0 in testSelfRecursion");
+// }
try {
- stackOverflowTestSub(0.0, 0.0, 0.0);
+ stackOverflowTestSub3(0.0, 1.0, 2.0);
}
catch (StackOverflowError soe) {
- System.out.println("caught SOE in testSelfRecursion");
+ System.out.println("caught SOE3 in testSelfRecursion");
+ }
+ try {
+ stackOverflowTestSub10(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0);
+ }
+ catch (StackOverflowError soe) {
+ System.out.println("caught SOE10 in testSelfRecursion");
}
}
- private static void stackOverflowTestSub(double pad1, double pad2, double pad3) {
- stackOverflowTestSub(pad1, pad2, pad3);
+ private static void stackOverflowTestSub0() {
+ stackOverflowTestSub0();
+ }
+
+ private static void stackOverflowTestSub3(double pad1, double pad2, double pad3) {
+ stackOverflowTestSub3(pad1, pad2, pad3);
+ }
+
+ private static void stackOverflowTestSub10(double pad1, double pad2, double pad3, double pad4,
+ double pad5, double pad6, double pad7, double pad8,
+ double pad9, double pad10) {
+ stackOverflowTestSub10(pad1, pad2, pad3, pad4, pad5, pad6, pad7, pad8, pad9, pad10);
}
private static void testMutualRecursion() {
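The rewritten test probes the stack limit with recursive helpers of different parameter counts, so each variant consumes a different frame size before StackOverflowError is raised. A minimal standalone sketch of the same probing idea, with hypothetical names and no ties to the test harness:

public class SoeProbe {
    private static int depth = 0;

    // Each call adds one frame; the double parameters pad the frame,
    // in the spirit of the test's Sub3/Sub10 variants.
    private static void recurse(double p1, double p2, double p3) {
        depth++;
        recurse(p1, p2, p3);
    }

    public static void main(String[] args) {
        try {
            recurse(0.0, 1.0, 2.0);
        } catch (StackOverflowError soe) {
            // StackOverflowError is an Error, but it is catchable; the test relies on this.
            System.out.println("caught StackOverflowError after " + depth + " frames");
        }
    }
}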
diff --git a/test/046-reflect/expected.txt b/test/046-reflect/expected.txt
index ecb3599..fa053fb 100644
--- a/test/046-reflect/expected.txt
+++ b/test/046-reflect/expected.txt
@@ -123,3 +123,17 @@
fields are .equals
methods are unique
methods are .equals
+type1 is a ParameterizedType
+type2 is a ParameterizedType
+type3 is a ParameterizedType
+type1(java.util.Set<java.lang.String>) equals type2(java.util.Set<java.lang.String>)
+type1(java.util.Set<java.lang.String>) equals type3(java.util.Set<java.lang.String>)
+type1(java.util.Set<java.lang.String>) hashCode equals type2(java.util.Set<java.lang.String>) hashCode
+type1(java.util.Set<java.lang.String>) hashCode equals type3(java.util.Set<java.lang.String>) hashCode
+type1 is a GenericArrayType
+type2 is a GenericArrayType
+type3 is a GenericArrayType
+type1(T[]) equals type2(T[])
+type1(T[]) equals type3(T[])
+type1(T[]) hashCode equals type2(T[]) hashCode
+type1(T[]) hashCode equals type3(T[]) hashCode
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 3e6d700..11eb773 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -18,8 +18,10 @@
import java.io.IOException;
import java.util.Collections;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
+import java.util.Set;
/**
* Reflection test.
@@ -579,6 +581,118 @@
}
}
+ public static void checkParametrizedTypeEqualsAndHashCode() {
+ Method method1;
+ Method method2;
+ Method method3;
+ try {
+ method1 = ParametrizedTypeTest.class.getDeclaredMethod("aMethod", Set.class);
+ method2 = ParametrizedTypeTest.class.getDeclaredMethod("aMethod", Set.class);
+ method3 = ParametrizedTypeTest.class.getDeclaredMethod("aMethodIdentical", Set.class);
+ } catch (NoSuchMethodException nsme) {
+ throw new RuntimeException(nsme);
+ }
+
+ List<Type> types1 = Arrays.asList(method1.getGenericParameterTypes());
+ List<Type> types2 = Arrays.asList(method2.getGenericParameterTypes());
+ List<Type> types3 = Arrays.asList(method3.getGenericParameterTypes());
+
+ Type type1 = types1.get(0);
+ Type type2 = types2.get(0);
+ Type type3 = types3.get(0);
+
+ if (type1 instanceof ParameterizedType) {
+ System.out.println("type1 is a ParameterizedType");
+ }
+ if (type2 instanceof ParameterizedType) {
+ System.out.println("type2 is a ParameterizedType");
+ }
+ if (type3 instanceof ParameterizedType) {
+ System.out.println("type3 is a ParameterizedType");
+ }
+
+ if (type1.equals(type2)) {
+ System.out.println("type1("+type1+") equals type2("+type2+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type2("+type2+")");
+ }
+
+ if (type1.equals(type3)) {
+ System.out.println("type1("+type1+") equals type3("+type3+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type3("+type3+")");
+ }
+ if (type1.hashCode() == type2.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type2("+type2+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type2("+type2+") hashCode");
+ }
+
+ if (type1.hashCode() == type3.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type3("+type3+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type3("+type3+") hashCode");
+ }
+ }
+
+ public static void checkGenericArrayTypeEqualsAndHashCode() {
+ Method method1;
+ Method method2;
+ Method method3;
+ try {
+ method1 = GenericArrayTypeTest.class.getDeclaredMethod("aMethod", Object[].class);
+ method2 = GenericArrayTypeTest.class.getDeclaredMethod("aMethod", Object[].class);
+ method3 = GenericArrayTypeTest.class.getDeclaredMethod("aMethodIdentical", Object[].class);
+ } catch (NoSuchMethodException nsme) {
+ throw new RuntimeException(nsme);
+ }
+
+ List<Type> types1 = Arrays.asList(method1.getGenericParameterTypes());
+ List<Type> types2 = Arrays.asList(method2.getGenericParameterTypes());
+ List<Type> types3 = Arrays.asList(method3.getGenericParameterTypes());
+
+ Type type1 = types1.get(0);
+ Type type2 = types2.get(0);
+ Type type3 = types3.get(0);
+
+ if (type1 instanceof GenericArrayType) {
+ System.out.println("type1 is a GenericArrayType");
+ }
+ if (type2 instanceof GenericArrayType) {
+ System.out.println("type2 is a GenericArrayType");
+ }
+ if (type3 instanceof GenericArrayType) {
+ System.out.println("type3 is a GenericArrayType");
+ }
+
+ if (type1.equals(type2)) {
+ System.out.println("type1("+type1+") equals type2("+type2+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type2("+type2+")");
+ }
+
+ if (type1.equals(type3)) {
+ System.out.println("type1("+type1+") equals type3("+type3+")");
+ } else {
+ System.out.println("type1("+type1+") does not equal type3("+type3+")");
+ }
+ if (type1.hashCode() == type2.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type2("+type2+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type2("+type2+") hashCode");
+ }
+
+ if (type1.hashCode() == type3.hashCode()) {
+ System.out.println("type1("+type1+") hashCode equals type3("+type3+") hashCode");
+ } else {
+ System.out.println(
+ "type1("+type1+") hashCode does not equal type3("+type3+") hashCode");
+ }
+ }
+
public static void main(String[] args) throws Exception {
Main test = new Main();
test.run();
@@ -589,6 +703,8 @@
checkClinitForMethods();
checkGeneric();
checkUnique();
+ checkParametrizedTypeEqualsAndHashCode();
+ checkGenericArrayTypeEqualsAndHashCode();
}
}
@@ -696,3 +812,13 @@
throw new UnsupportedOperationException();
}
}
+
+class ParametrizedTypeTest {
+ public void aMethod(Set<String> names) {}
+ public void aMethodIdentical(Set<String> names) {}
+}
+
+class GenericArrayTypeTest<T> {
+ public void aMethod(T[] names) {}
+ public void aMethodIdentical(T[] names) {}
+}
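The checks added above rely on two facts: Method.getGenericParameterTypes() preserves generic type information, and two methods declared with identical generic signatures must yield ParameterizedType (or GenericArrayType) instances that are equal and hash equally. A minimal sketch of those reflection calls, using hypothetical class and method names:

import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.Set;

public class GenericTypeDemo {
    // Two methods with byte-identical generic parameter types.
    static void first(Set<String> names) {}
    static void second(Set<String> names) {}

    public static void main(String[] args) throws NoSuchMethodException {
        Method m1 = GenericTypeDemo.class.getDeclaredMethod("first", Set.class);
        Method m2 = GenericTypeDemo.class.getDeclaredMethod("second", Set.class);

        // getGenericParameterTypes() keeps the type arguments, unlike
        // getParameterTypes(), which only reports the raw Set class.
        Type t1 = m1.getGenericParameterTypes()[0];
        Type t2 = m2.getGenericParameterTypes()[0];

        System.out.println(t1 instanceof ParameterizedType);  // true
        System.out.println(t1.equals(t2));                    // expected: true
        System.out.println(t1.hashCode() == t2.hashCode());   // expected: true
    }
}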
diff --git a/test/050-sync-test/src/Main.java b/test/050-sync-test/src/Main.java
index ec6d732..5364e2a 100644
--- a/test/050-sync-test/src/Main.java
+++ b/test/050-sync-test/src/Main.java
@@ -50,13 +50,14 @@
one = new CpuThread(1);
two = new CpuThread(2);
- one.start();
-
- try {
- Thread.sleep(100);
- } catch (InterruptedException ie) {
- System.out.println("INTERRUPT!");
- ie.printStackTrace();
+ synchronized (one) {
+ one.start();
+ try {
+ one.wait();
+ } catch (InterruptedException ie) {
+ System.out.println("INTERRUPT!");
+ ie.printStackTrace();
+ }
}
two.start();
@@ -101,6 +102,9 @@
//System.out.println(Thread.currentThread().getName());
synchronized (mSyncable) {
+ synchronized (this) {
+ this.notify();
+ }
for (int i = 0; i < 10; i++) {
output(mNumber);
}
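The hunk above replaces a fixed Thread.sleep(100) with a wait/notify handshake, so the second thread is only started once the first has really begun executing. A small standalone sketch of that handshake pattern, with hypothetical names; taking the monitor before start() is what prevents the notification from being lost:

public class StartHandshake {
    private static boolean workerStarted = false;

    public static void main(String[] args) throws InterruptedException {
        final Object lock = new Object();

        Thread worker = new Thread(() -> {
            synchronized (lock) {
                workerStarted = true;
                lock.notify();  // tell the launcher this thread is actually running
            }
            System.out.println("worker running");
        });

        synchronized (lock) {
            worker.start();
            // The loop guards against spurious wakeups; holding the lock before
            // start() guarantees the notify cannot arrive before the wait.
            while (!workerStarted) {
                lock.wait();
            }
        }
        worker.join();
    }
}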
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index 3ffe2f3..035690f 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -21,7 +21,7 @@
static void blowup(char[][] holder) {
try {
for (int i = 0; i < holder.length; ++i) {
- holder[i] = new char[128 * 1024];
+ holder[i] = new char[1024 * 1024];
}
} catch (OutOfMemoryError oome) {
ArrayMemEater.sawOome = true;
@@ -49,7 +49,7 @@
}
static boolean triggerArrayOOM() {
- ArrayMemEater.blowup(new char[1 * 1024 * 1024][]);
+ ArrayMemEater.blowup(new char[128 * 1024][]);
return ArrayMemEater.sawOome;
}
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 1c3c89e..56972ff 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -34,6 +34,11 @@
test_Math_max_F();
test_Math_min_D();
test_Math_max_D();
+ test_Math_ceil();
+ test_Math_floor();
+ test_Math_rint();
+ test_Math_round_D();
+ test_Math_round_F();
test_Short_reverseBytes();
test_Integer_reverseBytes();
test_Long_reverseBytes();
@@ -49,6 +54,11 @@
test_StrictMath_max_F();
test_StrictMath_min_D();
test_StrictMath_max_D();
+ test_StrictMath_ceil();
+ test_StrictMath_floor();
+ test_StrictMath_rint();
+ test_StrictMath_round_D();
+ test_StrictMath_round_F();
test_String_charAt();
test_String_compareTo();
test_String_indexOf();
@@ -66,25 +76,6 @@
test_Memory_pokeLong();
}
- /*
- * Determine if two floating point numbers are approximately equal.
- *
- * (Assumes that floating point is generally working, so we can't use
- * this for the first set of tests.)
- */
- static boolean approxEqual(float a, float b, float maxDelta) {
- if (a > b)
- return (a - b) < maxDelta;
- else
- return (b - a) < maxDelta;
- }
- static boolean approxEqual(double a, double b, double maxDelta) {
- if (a > b)
- return (a - b) < maxDelta;
- else
- return (b - a) < maxDelta;
- }
-
/**
* Will test inlining Thread.currentThread().
*/
@@ -340,39 +331,157 @@
}
public static void test_Math_min_F() {
- Assert.assertTrue(approxEqual(Math.min(0.0f, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.min(1.0f, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.min(0.0f, 1.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.min(0.0f, Float.MAX_VALUE), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.min(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE, 0.001f));
- Assert.assertTrue(approxEqual(Math.min(Float.MIN_VALUE, Float.MAX_VALUE), Float.MIN_VALUE, 0.001f));
+ Assert.assertTrue(Float.isNaN(Math.min(1.0f, Float.NaN)));
+ Assert.assertTrue(Float.isNaN(Math.min(Float.NaN, 1.0f)));
+ Assert.assertEquals(Math.min(-0.0f, 0.0f), -0.0f);
+ Assert.assertEquals(Math.min(0.0f, -0.0f), -0.0f);
+ Assert.assertEquals(Math.min(-0.0f, -0.0f), -0.0f);
+ Assert.assertEquals(Math.min(0.0f, 0.0f), 0.0f);
+ Assert.assertEquals(Math.min(1.0f, 0.0f), 0.0f);
+ Assert.assertEquals(Math.min(0.0f, 1.0f), 0.0f);
+ Assert.assertEquals(Math.min(0.0f, Float.MAX_VALUE), 0.0f);
+ Assert.assertEquals(Math.min(Float.MIN_VALUE, 0.0f), 0.0f);
+ Assert.assertEquals(Math.min(Float.MIN_VALUE, Float.MAX_VALUE), Float.MIN_VALUE);
}
public static void test_Math_max_F() {
- Assert.assertTrue(approxEqual(Math.max(0.0f, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.max(1.0f, 0.0f), 1.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.max(0.0f, 1.0f), 1.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.max(0.0f, Float.MAX_VALUE), Float.MAX_VALUE, 0.001f));
- Assert.assertTrue(approxEqual(Math.max(Float.MIN_VALUE, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(Math.max(Float.MIN_VALUE, Float.MAX_VALUE), Float.MAX_VALUE, 0.001f));
+ Assert.assertTrue(Float.isNaN(Math.max(1.0f, Float.NaN)));
+ Assert.assertTrue(Float.isNaN(Math.max(Float.NaN, 1.0f)));
+ Assert.assertEquals(Math.max(-0.0f, 0.0f), 0.0f);
+ Assert.assertEquals(Math.max(0.0f, -0.0f), 0.0f);
+ Assert.assertEquals(Math.max(-0.0f, -0.0f), -0.0f);
+ Assert.assertEquals(Math.max(0.0f, 0.0f), 0.0f);
+ Assert.assertEquals(Math.max(1.0f, 0.0f), 1.0f);
+ Assert.assertEquals(Math.max(0.0f, 1.0f), 1.0f);
+ Assert.assertEquals(Math.max(0.0f, Float.MAX_VALUE), Float.MAX_VALUE);
+ Assert.assertEquals(Math.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE);
+ Assert.assertEquals(Math.max(Float.MIN_VALUE, Float.MAX_VALUE), Float.MAX_VALUE);
}
public static void test_Math_min_D() {
- Assert.assertTrue(approxEqual(Math.min(0.0d, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.min(1.0d, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.min(0.0d, 1.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.min(0.0d, Double.MAX_VALUE), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.min(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE, 0.001d));
- Assert.assertTrue(approxEqual(Math.min(Double.MIN_VALUE, Double.MAX_VALUE), Double.MIN_VALUE, 0.001d));
+ Assert.assertTrue(Double.isNaN(Math.min(1.0d, Double.NaN)));
+ Assert.assertTrue(Double.isNaN(Math.min(Double.NaN, 1.0d)));
+ Assert.assertEquals(Math.min(-0.0d, 0.0d), -0.0d);
+ Assert.assertEquals(Math.min(0.0d, -0.0d), -0.0d);
+ Assert.assertEquals(Math.min(-0.0d, -0.0d), -0.0d);
+ Assert.assertEquals(Math.min(0.0d, 0.0d), 0.0d);
+ Assert.assertEquals(Math.min(1.0d, 0.0d), 0.0d);
+ Assert.assertEquals(Math.min(0.0d, 1.0d), 0.0d);
+ Assert.assertEquals(Math.min(0.0d, Double.MAX_VALUE), 0.0d);
+ Assert.assertEquals(Math.min(Double.MIN_VALUE, 0.0d), 0.0d);
+ Assert.assertEquals(Math.min(Double.MIN_VALUE, Double.MAX_VALUE), Double.MIN_VALUE);
}
public static void test_Math_max_D() {
- Assert.assertTrue(approxEqual(Math.max(0.0d, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.max(1.0d, 0.0d), 1.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.max(0.0d, 1.0d), 1.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.max(0.0d, Double.MAX_VALUE), Double.MAX_VALUE, 0.001d));
- Assert.assertTrue(approxEqual(Math.max(Double.MIN_VALUE, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE, 0.001d));
+ Assert.assertTrue(Double.isNaN(Math.max(1.0d, Double.NaN)));
+ Assert.assertTrue(Double.isNaN(Math.max(Double.NaN, 1.0d)));
+ Assert.assertEquals(Math.max(-0.0d, 0.0d), 0.0d);
+ Assert.assertEquals(Math.max(0.0d, -0.0d), 0.0d);
+ Assert.assertEquals(Math.max(-0.0d, -0.0d), -0.0d);
+ Assert.assertEquals(Math.max(0.0d, 0.0d), 0.0d);
+ Assert.assertEquals(Math.max(1.0d, 0.0d), 1.0d);
+ Assert.assertEquals(Math.max(0.0d, 1.0d), 1.0d);
+ Assert.assertEquals(Math.max(0.0d, Double.MAX_VALUE), Double.MAX_VALUE);
+ Assert.assertEquals(Math.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE);
+ Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
+ }
+
+ public static void test_Math_ceil() {
+ Assert.assertEquals(Math.ceil(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-0.9), -0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-0.5), -0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.1), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.5), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-2.9), -2.0d, 0.0);
+ Assert.assertEquals(Math.ceil(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.ceil(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(Math.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(Math.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_Math_floor() {
+ Assert.assertEquals(Math.floor(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(Math.floor(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+2.9), +2.0d, 0.0);
+ Assert.assertEquals(Math.floor(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.1), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.5), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.floor(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(Math.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(Math.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_Math_rint() {
+ Assert.assertEquals(Math.rint(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(Math.rint(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(Math.rint(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(Math.rint(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(Math.rint(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(Math.rint(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(Math.rint(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(Math.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(Math.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_Math_round_D() {
+ Assert.assertEquals(Math.round(+0.0d), (long)+0.0);
+ Assert.assertEquals(Math.round(-0.0d), (long)+0.0);
+ Assert.assertEquals(Math.round(2.0d), 2l);
+ Assert.assertEquals(Math.round(2.1d), 2l);
+ Assert.assertEquals(Math.round(2.5d), 3l);
+ Assert.assertEquals(Math.round(2.9d), 3l);
+ Assert.assertEquals(Math.round(3.0d), 3l);
+ Assert.assertEquals(Math.round(-2.0d), -2l);
+ Assert.assertEquals(Math.round(-2.1d), -2l);
+ Assert.assertEquals(Math.round(-2.5d), -2l);
+ Assert.assertEquals(Math.round(-2.9d), -3l);
+ Assert.assertEquals(Math.round(-3.0d), -3l);
+ Assert.assertEquals(Math.round(0.49999999999999994d), 1l);
+ Assert.assertEquals(Math.round(Double.NaN), (long)+0.0d);
+ Assert.assertEquals(Math.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+ Assert.assertEquals(Math.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+ Assert.assertEquals(Math.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+ Assert.assertEquals(Math.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+ }
+
+ public static void test_Math_round_F() {
+ Assert.assertEquals(Math.round(+0.0f), (int)+0.0);
+ Assert.assertEquals(Math.round(-0.0f), (int)+0.0);
+ Assert.assertEquals(Math.round(2.0f), 2);
+ Assert.assertEquals(Math.round(2.1f), 2);
+ Assert.assertEquals(Math.round(2.5f), 3);
+ Assert.assertEquals(Math.round(2.9f), 3);
+ Assert.assertEquals(Math.round(3.0f), 3);
+ Assert.assertEquals(Math.round(-2.0f), -2);
+ Assert.assertEquals(Math.round(-2.1f), -2);
+ Assert.assertEquals(Math.round(-2.5f), -2);
+ Assert.assertEquals(Math.round(-2.9f), -3);
+ Assert.assertEquals(Math.round(-3.0f), -3);
+ Assert.assertEquals(Math.round(Float.NaN), (int)+0.0f);
+ Assert.assertEquals(Math.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+ Assert.assertEquals(Math.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+ Assert.assertEquals(Math.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+ Assert.assertEquals(Math.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
}
public static void test_StrictMath_abs_I() {
@@ -431,39 +540,157 @@
}
public static void test_StrictMath_min_F() {
- Assert.assertTrue(approxEqual(StrictMath.min(0.0f, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.min(1.0f, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.min(0.0f, 1.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.min(0.0f, Float.MAX_VALUE), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.min(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.min(Float.MIN_VALUE, Float.MAX_VALUE), Float.MIN_VALUE, 0.001f));
+ Assert.assertTrue(Float.isNaN(StrictMath.min(1.0f, Float.NaN)));
+ Assert.assertTrue(Float.isNaN(StrictMath.min(Float.NaN, 1.0f)));
+ Assert.assertEquals(StrictMath.min(-0.0f, 0.0f), -0.0f);
+ Assert.assertEquals(StrictMath.min(0.0f, -0.0f), -0.0f);
+ Assert.assertEquals(StrictMath.min(-0.0f, -0.0f), -0.0f);
+ Assert.assertEquals(StrictMath.min(0.0f, 0.0f), 0.0f);
+ Assert.assertEquals(StrictMath.min(1.0f, 0.0f), 0.0f);
+ Assert.assertEquals(StrictMath.min(0.0f, 1.0f), 0.0f);
+ Assert.assertEquals(StrictMath.min(0.0f, Float.MAX_VALUE), 0.0f);
+ Assert.assertEquals(StrictMath.min(Float.MIN_VALUE, 0.0f), 0.0f);
+ Assert.assertEquals(StrictMath.min(Float.MIN_VALUE, Float.MAX_VALUE), Float.MIN_VALUE);
}
public static void test_StrictMath_max_F() {
- Assert.assertTrue(approxEqual(StrictMath.max(0.0f, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.max(1.0f, 0.0f), 1.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.max(0.0f, 1.0f), 1.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.max(0.0f, Float.MAX_VALUE), Float.MAX_VALUE, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.max(Float.MIN_VALUE, 0.0f), 0.0f, 0.001f));
- Assert.assertTrue(approxEqual(StrictMath.max(Float.MIN_VALUE, Float.MAX_VALUE), Float.MAX_VALUE, 0.001f));
+ Assert.assertTrue(Float.isNaN(StrictMath.max(1.0f, Float.NaN)));
+ Assert.assertTrue(Float.isNaN(StrictMath.max(Float.NaN, 1.0f)));
+ Assert.assertEquals(StrictMath.max(-0.0f, 0.0f), 0.0f);
+ Assert.assertEquals(StrictMath.max(0.0f, -0.0f), 0.0f);
+ Assert.assertEquals(StrictMath.max(-0.0f, -0.0f), -0.0f);
+ Assert.assertEquals(StrictMath.max(0.0f, 0.0f), 0.0f);
+ Assert.assertEquals(StrictMath.max(1.0f, 0.0f), 1.0f);
+ Assert.assertEquals(StrictMath.max(0.0f, 1.0f), 1.0f);
+ Assert.assertEquals(StrictMath.max(0.0f, Float.MAX_VALUE), Float.MAX_VALUE);
+ Assert.assertEquals(StrictMath.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE);
+ Assert.assertEquals(StrictMath.max(Float.MIN_VALUE, Float.MAX_VALUE), Float.MAX_VALUE);
}
public static void test_StrictMath_min_D() {
- Assert.assertTrue(approxEqual(StrictMath.min(0.0d, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.min(1.0d, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.min(0.0d, 1.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.min(0.0d, Double.MAX_VALUE), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.min(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.min(Double.MIN_VALUE, Double.MAX_VALUE), Double.MIN_VALUE, 0.001d));
+ Assert.assertTrue(Double.isNaN(StrictMath.min(1.0d, Double.NaN)));
+ Assert.assertTrue(Double.isNaN(StrictMath.min(Double.NaN, 1.0d)));
+ Assert.assertEquals(StrictMath.min(-0.0d, 0.0d), -0.0d);
+ Assert.assertEquals(StrictMath.min(0.0d, -0.0d), -0.0d);
+ Assert.assertEquals(StrictMath.min(-0.0d, -0.0d), -0.0d);
+ Assert.assertEquals(StrictMath.min(0.0d, 0.0d), 0.0d);
+ Assert.assertEquals(StrictMath.min(1.0d, 0.0d), 0.0d);
+ Assert.assertEquals(StrictMath.min(0.0d, 1.0d), 0.0d);
+ Assert.assertEquals(StrictMath.min(0.0d, Double.MAX_VALUE), 0.0d);
+ Assert.assertEquals(StrictMath.min(Double.MIN_VALUE, 0.0d), 0.0d);
+ Assert.assertEquals(StrictMath.min(Double.MIN_VALUE, Double.MAX_VALUE), Double.MIN_VALUE);
}
public static void test_StrictMath_max_D() {
- Assert.assertTrue(approxEqual(StrictMath.max(0.0d, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.max(1.0d, 0.0d), 1.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.max(0.0d, 1.0d), 1.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.max(0.0d, Double.MAX_VALUE), Double.MAX_VALUE, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.max(Double.MIN_VALUE, 0.0d), 0.0d, 0.001d));
- Assert.assertTrue(approxEqual(StrictMath.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE, 0.001d));
+ Assert.assertTrue(Double.isNaN(StrictMath.max(1.0d, Double.NaN)));
+ Assert.assertTrue(Double.isNaN(StrictMath.max(Double.NaN, 1.0d)));
+ Assert.assertEquals(StrictMath.max(-0.0d, 0.0d), 0.0d);
+ Assert.assertEquals(StrictMath.max(0.0d, -0.0d), 0.0d);
+ Assert.assertEquals(StrictMath.max(-0.0d, -0.0d), -0.0d);
+ Assert.assertEquals(StrictMath.max(0.0d, 0.0d), 0.0d);
+ Assert.assertEquals(StrictMath.max(1.0d, 0.0d), 1.0d);
+ Assert.assertEquals(StrictMath.max(0.0d, 1.0d), 1.0d);
+ Assert.assertEquals(StrictMath.max(0.0d, Double.MAX_VALUE), Double.MAX_VALUE);
+ Assert.assertEquals(StrictMath.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE);
+ Assert.assertEquals(StrictMath.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
+ }
+
+ public static void test_StrictMath_ceil() {
+ Assert.assertEquals(StrictMath.ceil(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-0.9), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-0.5), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(0.0), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.1), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.5), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-2.9), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.ceil(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(StrictMath.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(StrictMath.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_StrictMath_floor() {
+ Assert.assertEquals(StrictMath.floor(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+2.9), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.1), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.5), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.floor(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(StrictMath.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(StrictMath.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_StrictMath_rint() {
+ Assert.assertEquals(StrictMath.rint(+0.0), +0.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-0.0), -0.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.0), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.1), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.5), +2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+2.9), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(+3.0), +3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.0), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.1), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.5), -2.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-2.9), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(-3.0), -3.0d, 0.0);
+ Assert.assertEquals(StrictMath.rint(Double.NaN), Double.NaN, 0.0);
+ Assert.assertEquals(StrictMath.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+ Assert.assertEquals(StrictMath.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+ }
+
+ public static void test_StrictMath_round_D() {
+ Assert.assertEquals(StrictMath.round(+0.0d), (long)+0.0);
+ Assert.assertEquals(StrictMath.round(-0.0d), (long)+0.0);
+ Assert.assertEquals(StrictMath.round(2.0d), 2l);
+ Assert.assertEquals(StrictMath.round(2.1d), 2l);
+ Assert.assertEquals(StrictMath.round(2.5d), 3l);
+ Assert.assertEquals(StrictMath.round(2.9d), 3l);
+ Assert.assertEquals(StrictMath.round(3.0d), 3l);
+ Assert.assertEquals(StrictMath.round(-2.0d), -2l);
+ Assert.assertEquals(StrictMath.round(-2.1d), -2l);
+ Assert.assertEquals(StrictMath.round(-2.5d), -2l);
+ Assert.assertEquals(StrictMath.round(-2.9d), -3l);
+ Assert.assertEquals(StrictMath.round(-3.0d), -3l);
+ Assert.assertEquals(StrictMath.round(0.49999999999999994d), 1l);
+ Assert.assertEquals(StrictMath.round(Double.NaN), (long)+0.0d);
+ Assert.assertEquals(StrictMath.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+ Assert.assertEquals(StrictMath.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+ }
+
+ public static void test_StrictMath_round_F() {
+ Assert.assertEquals(StrictMath.round(+0.0f), (int)+0.0);
+ Assert.assertEquals(StrictMath.round(-0.0f), (int)+0.0);
+ Assert.assertEquals(StrictMath.round(2.0f), 2);
+ Assert.assertEquals(StrictMath.round(2.1f), 2);
+ Assert.assertEquals(StrictMath.round(2.5f), 3);
+ Assert.assertEquals(StrictMath.round(2.9f), 3);
+ Assert.assertEquals(StrictMath.round(3.0f), 3);
+ Assert.assertEquals(StrictMath.round(-2.0f), -2);
+ Assert.assertEquals(StrictMath.round(-2.1f), -2);
+ Assert.assertEquals(StrictMath.round(-2.5f), -2);
+ Assert.assertEquals(StrictMath.round(-2.9f), -3);
+ Assert.assertEquals(StrictMath.round(-3.0f), -3);
+ Assert.assertEquals(StrictMath.round(Float.NaN), (int)+0.0f);
+ Assert.assertEquals(StrictMath.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+ Assert.assertEquals(StrictMath.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+ Assert.assertEquals(StrictMath.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
}
public static void test_Float_floatToRawIntBits() {
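The rewritten min/max assertions drop the approxEqual tolerance because the interesting cases are exact: NaN must propagate through Math.min/Math.max, and -0.0 must order below +0.0 even though the two compare equal with ==. A short standalone check of those two properties (illustrative only, not taken from the test):

public class MinMaxEdgeCases {
    public static void main(String[] args) {
        // NaN is contagious: if either argument is NaN, the result is NaN.
        System.out.println(Float.isNaN(Math.min(1.0f, Float.NaN)));  // true
        System.out.println(Float.isNaN(Math.max(Float.NaN, 1.0f)));  // true

        // min/max treat -0.0f as smaller than +0.0f, even though
        // -0.0f == 0.0f under the numeric comparison operators.
        float min = Math.min(-0.0f, 0.0f);
        float max = Math.max(-0.0f, 0.0f);
        System.out.println(min == 0.0f);  // true: == cannot tell the zeros apart
        // The raw bit patterns do distinguish them: -0.0f has the sign bit set.
        System.out.println(Float.floatToRawIntBits(min) == Float.floatToRawIntBits(-0.0f));  // true
        System.out.println(Float.floatToRawIntBits(max) == Float.floatToRawIntBits(0.0f));   // true
    }
}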
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index 10406c7..f8d92cc 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -14,6 +14,7 @@
false
b13679511Test finishing
b16177324TestWrapper caught NPE as expected.
+b16230771TestWrapper caught NPE as expected.
largeFrame passes
largeFrameFloat passes
mulBy1Test passes
@@ -36,3 +37,4 @@
atomicLong passes
LiveFlags passes trip 3
LiveFlags passes trip 1
+minDoubleWith3ConstsTest passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 18bc674..c089c52 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -36,6 +36,7 @@
b5884080Test();
b13679511Test();
b16177324TestWrapper();
+ b16230771TestWrapper();
largeFrameTest();
largeFrameTestFloat();
mulBy1Test();
@@ -57,6 +58,21 @@
ManyFloatArgs();
atomicLong();
LiveFlags.test();
+ minDoubleWith3ConstsTest();
+ }
+
+ public static double minDouble(double a, double b, double c) {
+ return Math.min(Math.min(a, b), c);
+ }
+
+ public static void minDoubleWith3ConstsTest() {
+ double result = minDouble(1.2, 2.5, Double.NaN);
+ if (Double.isNaN(result)) {
+ System.out.println("minDoubleWith3ConstsTest passes");
+ } else {
+ System.out.println("minDoubleWith3ConstsTest fails: " + result +
+ " (expecting NaN)");
+ }
}
public static void atomicLong() {
@@ -927,6 +943,28 @@
System.out.println("Unexpectedly retrieved all values: " + v1 + ", " + v2 + ", " + v3);
}
+ static void b16230771TestWrapper() {
+ try {
+ b16230771Test();
+ } catch (NullPointerException expected) {
+ System.out.println("b16230771TestWrapper caught NPE as expected.");
+ }
+ }
+
+ static void b16230771Test() {
+ Integer[] array = { null };
+ for (Integer i : array) {
+ try {
+ int value = i; // Null check on unboxing should fail.
+ System.out.println("Unexpectedly retrieved value " + value);
+ } catch (NullPointerException e) {
+ int value = i; // Null check on unboxing should fail.
+ // The bug was a missing null check, so this would actually cause SIGSEGV.
+ System.out.println("Unexpectedly retrieved value " + value + " in NPE catch handler");
+ }
+ }
+ }
+
static double TooManyArgs(
long l00,
long l01,
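The b16230771 regression above turns on auto-unboxing: assigning a null Integer to an int compiles to a call to intValue() on a null reference, which must raise NullPointerException rather than crash the runtime. A minimal standalone sketch of that behavior, with a hypothetical class name:

public class NullUnboxing {
    public static void main(String[] args) {
        Integer boxed = null;
        try {
            // javac emits boxed.intValue() here, so the null check happens
            // at the unboxing site and must throw, not segfault.
            int value = boxed;
            System.out.println("unexpectedly got " + value);
        } catch (NullPointerException expected) {
            System.out.println("caught NPE from unboxing null, as expected");
        }
    }
}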
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index bed0689..1af4121 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -32,7 +32,7 @@
62 (class java.lang.Long)
14 (class java.lang.Short)
[public java.lang.String(), java.lang.String(int,int,char[]), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int)]
-[private final char[] java.lang.String.value, private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final char java.lang.String.REPLACEMENT_CHAR, private static final long java.lang.String.serialVersionUID]
+[private final char[] java.lang.String.value, private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final long java.lang.String.serialVersionUID, private static final char java.lang.String.REPLACEMENT_CHAR]
[void java.lang.String._getChars(int,int,char[],int), public char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public volatile int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native int java.lang.String.fastIndexOf(int,int), private char java.lang.String.foldCase(char), public static transient java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static transient java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
[]
[interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
diff --git a/test/114-ParallelGC/expected.txt b/test/114-ParallelGC/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/114-ParallelGC/expected.txt
diff --git a/test/114-ParallelGC/info.txt b/test/114-ParallelGC/info.txt
new file mode 100644
index 0000000..246b4e6
--- /dev/null
+++ b/test/114-ParallelGC/info.txt
@@ -0,0 +1 @@
+Imported from oat tests. Allocates and frees objects with multiple threads.
diff --git a/test/114-ParallelGC/src/Main.java b/test/114-ParallelGC/src/Main.java
new file mode 100644
index 0000000..8e2519d
--- /dev/null
+++ b/test/114-ParallelGC/src/Main.java
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class Main implements Runnable {
+
+ public final static long TIMEOUT_VALUE = 5; // Timeout in minutes.
+ public final static long MAX_SIZE = 1000; // Maximum size of array-list to allocate.
+
+ public static void main(String[] args) throws Exception {
+ Thread[] threads = new Thread[16];
+
+ // Use a cyclic system of synchronous queues to pass a boolean token around.
+ //
+ // The combinations are:
+ //
+ // Worker receives: true false false true
+ // Worker has OOM: false false true true
+ // |
+ // v
+ // Value to pass: true false false false
+ // Exit out of loop: false true true true
+ // Wait on in queue: true false false true
+ //
+ // Finally, the workers are supposed to wait on the barrier to synchronize the GC run.
+
+ CyclicBarrier barrier = new CyclicBarrier(threads.length);
+ List<SynchronousQueue<Boolean>> queues = new ArrayList<SynchronousQueue<Boolean>>(
+ threads.length);
+ for (int i = 0; i < threads.length; i++) {
+ queues.add(new SynchronousQueue<Boolean>());
+ }
+
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Main(i, queues.get(i), queues.get((i + 1) % threads.length),
+ barrier));
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ // Push off the cycle.
+ checkTimeout(queues.get(0).offer(Boolean.TRUE, TIMEOUT_VALUE, TimeUnit.MINUTES));
+
+ // Wait for the threads to finish.
+ for (Thread thread : threads) {
+ thread.join();
+ }
+
+ // Allocate objects to definitely run GC before quitting.
+ try {
+ for (int i = 0; i < 1000; i++) {
+ new ArrayList<Object>(i);
+ }
+ } catch (OutOfMemoryError oom) {
+ }
+ }
+
+ private static void checkTimeout(Object o) {
+ checkTimeout(o != null);
+ }
+
+ private static void checkTimeout(boolean b) {
+ if (!b) {
+ // Something went wrong.
+ System.out.println("Bad things happened, timeout.");
+ System.exit(1);
+ }
+ }
+
+ private final int id;
+ private final SynchronousQueue<Boolean> waitOn;
+ private final SynchronousQueue<Boolean> pushTo;
+ private final CyclicBarrier finalBarrier;
+
+ private Main(int id, SynchronousQueue<Boolean> waitOn, SynchronousQueue<Boolean> pushTo,
+ CyclicBarrier finalBarrier) {
+ this.id = id;
+ this.waitOn = waitOn;
+ this.pushTo = pushTo;
+ this.finalBarrier = finalBarrier;
+ }
+
+ public void run() {
+ try {
+ work();
+ } catch (Exception exc) {
+ // Any exception is bad.
+ exc.printStackTrace(System.err);
+ System.exit(1);
+ }
+ }
+
+ public void work() throws BrokenBarrierException, InterruptedException, TimeoutException {
+ ArrayList<Object> l = new ArrayList<Object>();
+
+ // Main loop.
+ for (int i = 0; ; i++) {
+ Boolean receivedB = waitOn.poll(TIMEOUT_VALUE, TimeUnit.MINUTES);
+ checkTimeout(receivedB);
+ boolean received = receivedB;
+
+ // This is the first stage, try to allocate up till MAX_SIZE.
+ boolean oom = i >= MAX_SIZE;
+ try {
+ l.add(new ArrayList<Object>(i));
+ } catch (OutOfMemoryError oome) {
+ oom = true;
+ }
+
+ if (!received || oom) {
+ // First stage, always push false.
+ checkTimeout(pushTo.offer(Boolean.FALSE, TIMEOUT_VALUE, TimeUnit.MINUTES));
+
+ // If we received true, wait for the false to come around.
+ if (received) {
+ checkTimeout(waitOn.poll(TIMEOUT_VALUE, TimeUnit.MINUTES));
+ }
+
+ // Break out of the loop.
+ break;
+ } else {
+ // Pass on true.
+ checkTimeout(pushTo.offer(Boolean.TRUE, TIMEOUT_VALUE, TimeUnit.MINUTES));
+ }
+ }
+
+ // We have reached the final point. Wait on the barrier, but at most a minute.
+ finalBarrier.await(TIMEOUT_VALUE, TimeUnit.MINUTES);
+
+ // Done.
+ }
+}
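The new test passes a Boolean token around a ring of SynchronousQueues using bounded offer/poll so that a wedged thread shows up as a timeout rather than a hang. A two-thread sketch of that bounded handoff, independent of the test and with hypothetical names:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

public class HandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<Boolean> queue = new SynchronousQueue<Boolean>();

        Thread consumer = new Thread(() -> {
            try {
                // poll() with a timeout returns null instead of blocking forever.
                Boolean token = queue.poll(1, TimeUnit.MINUTES);
                System.out.println(token == null ? "timed out" : "received " + token);
            } catch (InterruptedException ignored) {
            }
        });
        consumer.start();

        // offer() only succeeds if a consumer takes the value within the timeout;
        // a false return is the "something is stuck" signal the test checks for.
        boolean delivered = queue.offer(Boolean.TRUE, 1, TimeUnit.MINUTES);
        System.out.println("delivered = " + delivered);
        consumer.join();
    }
}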
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
new file mode 100644
index 0000000..5b41606
--- /dev/null
+++ b/test/115-native-bridge/expected.txt
@@ -0,0 +1,55 @@
+Ready for native bridge tests.
+Native bridge initialized.
+Checking for support.
+Getting trampoline for JNI_OnLoad with shorty (null).
+Test ART callbacks: all JNI function number is 9.
+ name:booleanMethod, signature:(ZZZZZZZZZZ)Z, shorty:ZZZZZZZZZZZ.
+ name:byteMethod, signature:(BBBBBBBBBB)B, shorty:BBBBBBBBBBB.
+ name:charMethod, signature:(CCCCCCCCCC)C, shorty:CCCCCCCCCCC.
+ name:shortMethod, signature:(SSSSSSSSSS)S, shorty:SSSSSSSSSSS.
+ name:testCallStaticVoidMethodOnSubClassNative, signature:()V, shorty:V.
+ name:testFindClassOnAttachedNativeThread, signature:()V, shorty:V.
+ name:testFindFieldOnAttachedNativeThreadNative, signature:()V, shorty:V.
+ name:testGetMirandaMethodNative, signature:()Ljava/lang/reflect/Method;, shorty:L.
+ name:testZeroLengthByteBuffers, signature:()V, shorty:V.
+trampoline_JNI_OnLoad called!
+Getting trampoline for Java_Main_testFindClassOnAttachedNativeThread with shorty V.
+trampoline_Java_Main_testFindClassOnAttachedNativeThread called!
+Getting trampoline for Java_Main_testFindFieldOnAttachedNativeThreadNative with shorty V.
+trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative called!
+Getting trampoline for Java_Main_testCallStaticVoidMethodOnSubClassNative with shorty V.
+trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative called!
+Getting trampoline for Java_Main_testGetMirandaMethodNative with shorty L.
+trampoline_Java_Main_testGetMirandaMethodNative called!
+Getting trampoline for Java_Main_testZeroLengthByteBuffers with shorty V.
+trampoline_Java_Main_testZeroLengthByteBuffers called!
+Getting trampoline for Java_Main_byteMethod with shorty BBBBBBBBBBB.
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+Getting trampoline for Java_Main_shortMethod with shorty SSSSSSSSSSS.
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+Getting trampoline for Java_Main_booleanMethod with shorty ZZZZZZZZZZZ.
+trampoline_Java_Main_booleanMethod called!
+trampoline_Java_Main_booleanMethod called!
+Getting trampoline for Java_Main_charMethod with shorty CCCCCCCCCCC.
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
diff --git a/test/115-native-bridge/info.txt b/test/115-native-bridge/info.txt
new file mode 100644
index 0000000..ccac7ae
--- /dev/null
+++ b/test/115-native-bridge/info.txt
@@ -0,0 +1 @@
+Test for the native bridge interface.
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
new file mode 100644
index 0000000..268f0be
--- /dev/null
+++ b/test/115-native-bridge/nativebridge.cc
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// A simple implementation of the native-bridge interface.
+
+#include <algorithm>
+#include <dlfcn.h>
+#include <vector>
+
+#include "jni.h"
+#include "stdio.h"
+#include "string.h"
+#include "unistd.h"
+
+#include "native_bridge.h"
+
+
+// Native bridge interfaces...
+
+struct NativeBridgeArtCallbacks {
+ const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
+ uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
+ uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count);
+};
+
+struct NativeBridgeCallbacks {
+ bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
+ void* (*loadLibrary)(const char* libpath, int flag);
+ void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
+ bool (*isSupported)(const char* libpath);
+};
+
+struct NativeBridgeMethod {
+ const char* name;
+ const char* signature;
+ bool static_method;
+ void* fnPtr;
+ void* trampoline;
+};
+
+static NativeBridgeMethod* find_native_bridge_method(const char *name);
+static NativeBridgeArtCallbacks* gNativeBridgeArtCallbacks;
+
+static jint trampoline_JNI_OnLoad(JavaVM* vm, void* reserved) {
+ JNIEnv* env = nullptr;
+ typedef jint (*FnPtr_t)(JavaVM*, void*);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("JNI_OnLoad")->fnPtr);
+
+ vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6);
+ if (env == nullptr) {
+ return 0;
+ }
+
+ jclass klass = env->FindClass("Main");
+ if (klass != nullptr) {
+ int i, count1, count2;
+ count1 = gNativeBridgeArtCallbacks->getNativeMethodCount(env, klass);
+ std::unique_ptr<JNINativeMethod[]> methods(new JNINativeMethod[count1]);
+ if (methods == nullptr) {
+ return 0;
+ }
+ count2 = gNativeBridgeArtCallbacks->getNativeMethods(env, klass, methods.get(), count1);
+ if (count1 == count2) {
+ printf("Test ART callbacks: all JNI function number is %d.\n", count1);
+ }
+
+ for (i = 0; i < count1; i++) {
+ NativeBridgeMethod* nb_method = find_native_bridge_method(methods[i].name);
+ if (nb_method != nullptr) {
+ jmethodID mid = nullptr;
+ if (nb_method->static_method) {
+ mid = env->GetStaticMethodID(klass, methods[i].name, nb_method->signature);
+ } else {
+ mid = env->GetMethodID(klass, methods[i].name, nb_method->signature);
+ }
+ if (mid != nullptr) {
+ const char* shorty = gNativeBridgeArtCallbacks->getMethodShorty(env, mid);
+ if (strcmp(shorty, methods[i].signature) == 0) {
+ printf(" name:%s, signature:%s, shorty:%s.\n",
+ methods[i].name, nb_method->signature, shorty);
+ }
+ }
+ }
+ }
+ methods.release();
+ }
+
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(vm, reserved);
+}
+
+static void trampoline_Java_Main_testFindClassOnAttachedNativeThread(JNIEnv* env,
+ jclass klass) {
+ typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+ (find_native_bridge_method("testFindClassOnAttachedNativeThread")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv* env,
+ jclass klass) {
+ typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+ (find_native_bridge_method("testFindFieldOnAttachedNativeThreadNative")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
+ jclass klass) {
+ typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+ (find_native_bridge_method("testCallStaticVoidMethodOnSubClassNative")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass);
+}
+
+static jobject trampoline_Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass klass) {
+ typedef jobject (*FnPtr_t)(JNIEnv*, jclass);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+ (find_native_bridge_method("testGetMirandaMethodNative")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass klass) {
+ typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+ (find_native_bridge_method("testZeroLengthByteBuffers")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass);
+}
+
+static jbyte trampoline_Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
+ jbyte b3, jbyte b4, jbyte b5, jbyte b6,
+ jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
+ typedef jbyte (*FnPtr_t)(JNIEnv*, jclass, jbyte, jbyte, jbyte, jbyte, jbyte,
+ jbyte, jbyte, jbyte, jbyte, jbyte);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("byteMethod")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
+}
+
+static jshort trampoline_Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
+ jshort s3, jshort s4, jshort s5, jshort s6,
+ jshort s7, jshort s8, jshort s9, jshort s10) {
+ typedef jshort (*FnPtr_t)(JNIEnv*, jclass, jshort, jshort, jshort, jshort, jshort,
+ jshort, jshort, jshort, jshort, jshort);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("shortMethod")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10);
+}
+
+static jboolean trampoline_Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
+ jboolean b2, jboolean b3, jboolean b4,
+ jboolean b5, jboolean b6, jboolean b7,
+ jboolean b8, jboolean b9, jboolean b10) {
+ typedef jboolean (*FnPtr_t)(JNIEnv*, jclass, jboolean, jboolean, jboolean, jboolean, jboolean,
+ jboolean, jboolean, jboolean, jboolean, jboolean);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("booleanMethod")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
+}
+
+static jchar trampoline_Java_Main_charMethod(JNIEnv* env, jclass klass, jchar c1, jchar c2,
+ jchar c3, jchar c4, jchar c5, jchar c6,
+ jchar c7, jchar c8, jchar c9, jchar c10) {
+ typedef jchar (*FnPtr_t)(JNIEnv*, jclass, jchar, jchar, jchar, jchar, jchar,
+ jchar, jchar, jchar, jchar, jchar);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("charMethod")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10);
+}
+
+NativeBridgeMethod gNativeBridgeMethods[] = {
+ { "JNI_OnLoad", "", true, nullptr,
+ reinterpret_cast<void*>(trampoline_JNI_OnLoad) },
+ { "booleanMethod", "(ZZZZZZZZZZ)Z", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_booleanMethod) },
+ { "byteMethod", "(BBBBBBBBBB)B", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_byteMethod) },
+ { "charMethod", "(CCCCCCCCCC)C", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_charMethod) },
+ { "shortMethod", "(SSSSSSSSSS)S", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_shortMethod) },
+ { "testCallStaticVoidMethodOnSubClassNative", "()V", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative) },
+ { "testFindClassOnAttachedNativeThread", "()V", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_testFindClassOnAttachedNativeThread) },
+ { "testFindFieldOnAttachedNativeThreadNative", "()V", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative) },
+ { "testGetMirandaMethodNative", "()Ljava/lang/reflect/Method;", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_testGetMirandaMethodNative) },
+ { "testZeroLengthByteBuffers", "()V", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_testZeroLengthByteBuffers) },
+};
+
+static NativeBridgeMethod* find_native_bridge_method(const char *name) {
+ const char* pname = name;
+ if (strncmp(name, "Java_Main_", 10) == 0) {
+ pname += 10;
+ }
+
+ for (size_t i = 0; i < sizeof(gNativeBridgeMethods) / sizeof(gNativeBridgeMethods[0]); i++) {
+ if (strcmp(pname, gNativeBridgeMethods[i].name) == 0) {
+ return &gNativeBridgeMethods[i];
+ }
+ }
+ return nullptr;
+}
+
+// NativeBridgeCallbacks implementations
+extern "C" bool native_bridge_initialize(NativeBridgeArtCallbacks* art_cbs) {
+ if (art_cbs != nullptr) {
+ gNativeBridgeArtCallbacks = art_cbs;
+ printf("Native bridge initialized.\n");
+ }
+ return true;
+}
+
+extern "C" void* native_bridge_loadLibrary(const char* libpath, int flag) {
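+ // Rewrite "lib<name>.so" into "lib<name>2.so": the run script copies the real
+ // libarttest.so to libarttest2.so, so the bridge loads that copy instead.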
+ size_t len = strlen(libpath);
+ char* tmp = new char[len + 10];
+ strncpy(tmp, libpath, len);
+ tmp[len - 3] = '2';
+ tmp[len - 2] = '.';
+ tmp[len - 1] = 's';
+ tmp[len] = 'o';
+ tmp[len + 1] = 0;
+ void* handle = dlopen(tmp, flag);
+ delete[] tmp;
+
+ if (handle == nullptr) {
+ printf("Handle = nullptr!\n");
+ printf("Was looking for %s.\n", libpath);
+ printf("Error = %s.\n", dlerror());
+ char cwd[1024];
+ if (getcwd(cwd, sizeof(cwd)) != nullptr) {
+ printf("Current working dir: %s\n", cwd);
+ }
+ }
+ return handle;
+}
+
+extern "C" void* native_bridge_getTrampoline(void* handle, const char* name, const char* shorty,
+ uint32_t len) {
+ printf("Getting trampoline for %s with shorty %s.\n", name, shorty);
+
+ // The name here is actually the JNI name, so we can directly do the lookup.
+ void* sym = dlsym(handle, name);
+ NativeBridgeMethod* method = find_native_bridge_method(name);
+ if (method == nullptr)
+ return nullptr;
+ method->fnPtr = sym;
+
+ return method->trampoline;
+}
+
+extern "C" bool native_bridge_isSupported(const char* libpath) {
+ printf("Checking for support.\n");
+
+ if (libpath == nullptr) {
+ return false;
+ }
+ // Don't hijack libjavacore; the library we want to intercept here is libarttest.
+ return strcmp(libpath, "libjavacore.so") != 0;
+}
+
+NativeBridgeCallbacks NativeBridgeItf {
+ .initialize = &native_bridge_initialize,
+ .loadLibrary = &native_bridge_loadLibrary,
+ .getTrampoline = &native_bridge_getTrampoline,
+ .isSupported = &native_bridge_isSupported
+};
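
Taken together, NativeBridgeItf is the whole contract between the runtime and this test bridge: ART first asks isSupported whether a library should be intercepted, lets the bridge open the real code via loadLibrary, and then resolves each native method through getTrampoline instead of a plain dlsym. Below is a minimal, hypothetical runtime-side sketch of that call order; RegisterBridgedNative is an illustrative name, initialize is folded in here even though a real runtime calls it only once at startup, and the struct definitions from nativebridge.cc above are assumed to be visible.

    #include <dlfcn.h>
    #include <cstdint>

    // Declared in nativebridge.cc above.
    extern NativeBridgeCallbacks NativeBridgeItf;

    // Hypothetical helper making the call order explicit:
    // isSupported -> initialize -> loadLibrary -> getTrampoline.
    void* RegisterBridgedNative(NativeBridgeArtCallbacks* art_cbs, const char* libpath,
                                const char* jni_name, const char* shorty, uint32_t shorty_len) {
      if (!NativeBridgeItf.isSupported(libpath)) {
        return nullptr;  // let the normal dlopen/dlsym path handle this library
      }
      if (!NativeBridgeItf.initialize(art_cbs)) {
        return nullptr;
      }
      void* handle = NativeBridgeItf.loadLibrary(libpath, RTLD_LAZY);
      if (handle == nullptr) {
        return nullptr;
      }
      // The bridge hands back its trampoline, which the runtime installs as the
      // method's native entry point; the trampoline forwards to the real symbol.
      return NativeBridgeItf.getTrampoline(handle, jni_name, shorty, shorty_len);
    }
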
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
new file mode 100644
index 0000000..e475cd6
--- /dev/null
+++ b/test/115-native-bridge/run
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARGS=${@}
+
+# Use libnativebridgetest as a native bridge and start NativeBridgeMain (Main is a copy of the JniTest main class).
+LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/')
+cp ${LIBPATH}/libnativebridgetest.so .
+touch libarttest.so
+cp ${LIBPATH}/libarttest.so libarttest2.so
+
+# pwd likely has /, so it's a pain to put that into a sed rule.
+LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
+RIGHT=$(echo ${ARGS} | sed -r 's/.*Djava.library.path[^ ]* //')
+MODARGS="${LEFT} -Djava.library.path=`pwd` ${RIGHT}"
+exec ${RUN} --runtime-option -XX:NativeBridge=libnativebridgetest.so ${MODARGS} NativeBridgeMain
diff --git a/test/JniTest/JniTest.java b/test/115-native-bridge/src/NativeBridgeMain.java
similarity index 87%
copy from test/JniTest/JniTest.java
copy to test/115-native-bridge/src/NativeBridgeMain.java
index 33418a9..a531f92 100644
--- a/test/JniTest/JniTest.java
+++ b/test/115-native-bridge/src/NativeBridgeMain.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,12 +16,12 @@
import java.lang.reflect.Method;
-class JniTest {
+// This is named Main as it is a copy of JniTest, so that we can re-use the native implementations
+// from libarttest.
+class Main {
public static void main(String[] args) {
- System.loadLibrary("arttest");
testFindClassOnAttachedNativeThread();
testFindFieldOnAttachedNativeThread();
- testReflectFieldGetFromAttachedNativeThreadNative();
testCallStaticVoidMethodOnSubClass();
testGetMirandaMethod();
testZeroLengthByteBuffers();
@@ -31,15 +31,11 @@
testCharMethod();
}
- private static native void testFindClassOnAttachedNativeThread();
+ public static native void testFindClassOnAttachedNativeThread();
- private static boolean testFindFieldOnAttachedNativeThreadField;
+ public static boolean testFindFieldOnAttachedNativeThreadField;
- private static native void testReflectFieldGetFromAttachedNativeThreadNative();
-
- public static boolean testReflectFieldGetFromAttachedNativeThreadField;
-
- private static void testFindFieldOnAttachedNativeThread() {
+ public static void testFindFieldOnAttachedNativeThread() {
testFindFieldOnAttachedNativeThreadNative();
if (!testFindFieldOnAttachedNativeThreadField) {
throw new AssertionError();
@@ -94,7 +90,7 @@
native static byte byteMethod(byte b1, byte b2, byte b3, byte b4, byte b5, byte b6, byte b7,
byte b8, byte b9, byte b10);
- private static void testByteMethod() {
+ public static void testByteMethod() {
byte returns[] = { 0, 1, 2, 127, -1, -2, -128 };
for (int i = 0; i < returns.length; i++) {
byte result = byteMethod((byte)i, (byte)2, (byte)(-3), (byte)4, (byte)(-5), (byte)6,
@@ -126,7 +122,7 @@
native static boolean booleanMethod(boolean b1, boolean b2, boolean b3, boolean b4, boolean b5, boolean b6, boolean b7,
boolean b8, boolean b9, boolean b10);
- private static void testBooleanMethod() {
+ public static void testBooleanMethod() {
if (booleanMethod(false, true, false, true, false, true, false, true, false, true)) {
throw new AssertionError();
}
@@ -152,3 +148,13 @@
}
}
}
+
+public class NativeBridgeMain {
+ static public void main(String[] args) throws Exception {
+ System.out.println("Ready for native bridge tests.");
+
+ System.loadLibrary("arttest");
+
+ Main.main(null);
+ }
+}
diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java
index a5192e1..2c6d1c2 100644
--- a/test/401-optimizing-compiler/src/Main.java
+++ b/test/401-optimizing-compiler/src/Main.java
@@ -75,6 +75,37 @@
if (m.$opt$TestReturnNewObject(m) == m) {
throw new Error("Unexpected value returned");
}
+
+ // Loop enough iterations to hope for a crash if no write barrier
+ // is emitted.
+ for (int j = 0; j < 3; j++) {
+ Main m1 = new Main();
+ $opt$SetFieldInOldObject(m1);
+ for (int i = 0; i < 1000; ++i) {
+ Object o = new byte[1024];
+ }
+ }
+
+ // Test that we do NPE checks on invokedirect.
+ Exception exception = null;
+ try {
+ invokePrivate();
+ } catch (NullPointerException e) {
+ exception = e;
+ }
+
+ if (exception == null) {
+ throw new Error("Missing NullPointerException");
+ }
+ }
+
+ public static void invokePrivate() {
+ Main m = null;
+ m.privateMethod();
+ }
+
+ private void privateMethod() {
+ Object o = new Object();
}
static int $opt$TestInvokeIntParameter(int param) {
@@ -169,4 +200,10 @@
public static void throwStaticMethod() {
throw new Error("Error");
}
+
+ public static void $opt$SetFieldInOldObject(Main m) {
+ m.o = new Main();
+ }
+
+ Object o;
}
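
The write-barrier loop added above relies on a card-table mechanism: storing a newly allocated object into a field of an older object must mark the older object's card so the next minor GC rescans it; if the compiler omits that barrier, the collections triggered by the repeated new byte[1024] allocations can reclaim or move the referent, which the loop tries to surface as a crash. A generic card-marking barrier looks roughly like the sketch below; the names and constants are illustrative assumptions, not ART's actual heap code.

    #include <cstddef>
    #include <cstdint>

    // Illustrative card-marking write barrier (assumed layout, not ART's implementation).
    // One card byte covers kCardSize bytes of heap; dirty cards are rescanned by the
    // next minor GC so old->young references are not missed.
    constexpr std::size_t kCardSize = 512;
    constexpr std::uint8_t kCardDirty = 0x70;

    struct Heap {
      std::uint8_t* card_table;  // assumption: indexed by (address / kCardSize)
    };

    inline void StoreReferenceWithBarrier(Heap* heap, void** field_addr, void* new_value) {
      *field_addr = new_value;  // the actual reference store
      if (new_value != nullptr) {
        std::size_t card = reinterpret_cast<std::uintptr_t>(field_addr) / kCardSize;
        heap->card_table[card] = kCardDirty;  // this is the step a missing barrier skips
      }
    }
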
diff --git a/test/407-arrays/expected.txt b/test/407-arrays/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/407-arrays/expected.txt
diff --git a/test/407-arrays/info.txt b/test/407-arrays/info.txt
new file mode 100644
index 0000000..f30b667
--- /dev/null
+++ b/test/407-arrays/info.txt
@@ -0,0 +1 @@
+Simple tests for array accesses.
diff --git a/test/407-arrays/src/Main.java b/test/407-arrays/src/Main.java
new file mode 100644
index 0000000..5d27e6d
--- /dev/null
+++ b/test/407-arrays/src/Main.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Simple test for array accesses.
+
+public class Main extends TestCase {
+ public static void main(String[] args) {
+ $opt$testReads(new boolean[1], new byte[1], new char[1], new short[1],
+ new int[1], new Object[1], new long[1], 0);
+ $opt$testWrites(new boolean[2], new byte[2], new char[2], new short[2],
+ new int[2], new Object[2], new long[2], 1);
+ ensureThrows(new boolean[2], 2);
+ ensureThrows(new boolean[2], 4);
+ ensureThrows(new boolean[2], -1);
+ ensureThrows(new boolean[2], Integer.MIN_VALUE);
+ ensureThrows(new boolean[2], Integer.MAX_VALUE);
+ }
+
+ static void $opt$testReads(boolean[] bools, byte[] bytes, char[] chars, short[] shorts,
+ int[] ints, Object[] objects, long[] longs, int index) {
+ assertEquals(false, bools[0]);
+ assertEquals(false, bools[index]);
+
+ assertEquals(0, bytes[0]);
+ assertEquals(0, bytes[index]);
+
+ assertEquals(0, chars[0]);
+ assertEquals(0, chars[index]);
+
+ assertEquals(0, shorts[0]);
+ assertEquals(0, shorts[index]);
+
+ assertEquals(0, ints[0]);
+ assertEquals(0, ints[index]);
+
+ assertNull(objects[0]);
+ assertNull(objects[index]);
+
+ assertEquals(0, longs[0]);
+ assertEquals(0, longs[index]);
+ }
+
+ static void $opt$testWrites(boolean[] bools, byte[] bytes, char[] chars, short[] shorts,
+ int[] ints, Object[] objects, long[] longs, int index) {
+ bools[0] = true;
+ assertEquals(true, bools[0]);
+ bools[1] = true;
+ assertEquals(true, bools[index]);
+
+ bytes[0] = -4;
+ assertEquals(-4, bytes[0]);
+ bytes[index] = -8;
+ assertEquals(-8, bytes[index]);
+
+ chars[0] = 'c';
+ assertEquals('c', chars[0]);
+ chars[index] = 'd';
+ assertEquals('d', chars[index]);
+
+ shorts[0] = -42;
+ assertEquals(-42, shorts[0]);
+ shorts[index] = -84;
+ assertEquals(-84, shorts[index]);
+
+ ints[0] = -32;
+ assertEquals(-32, ints[0]);
+ ints[index] = -64;
+ assertEquals(-64, ints[index]);
+
+ Object o1 = new Object();
+ objects[0] = o1;
+ assertEquals(o1, objects[0]);
+ Object o2 = new Object();
+ objects[index] = o2;
+ assertEquals(o2, objects[index]);
+
+ long l = -21876876876876876L;
+ longs[0] = l;
+ assertEquals(l, longs[0]);
+ l = -21876876876876877L;
+ longs[index] = l;
+ assertEquals(l, longs[index]);
+ }
+
+ public static void ensureThrows(boolean[] array, int index) {
+ ArrayIndexOutOfBoundsException exception = null;
+ try {
+ $opt$doArrayLoad(array, index);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ exception = e;
+ }
+
+ assertNotNull(exception);
+ assertTrue(exception.toString().contains(Integer.toString(index)));
+
+ exception = null;
+ try {
+ $opt$doArrayStore(array, index);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ exception = e;
+ }
+
+ assertNotNull(exception);
+ assertTrue(exception.toString().contains(Integer.toString(index)));
+ }
+
+ public static void $opt$doArrayLoad(boolean[] array, int index) {
+ boolean res = array[index];
+ }
+
+ public static void $opt$doArrayStore(boolean[] array, int index) {
+ array[index] = false;
+ }
+}
diff --git a/test/407-arrays/src/TestCase.java b/test/407-arrays/src/TestCase.java
new file mode 100644
index 0000000..ef77f71
--- /dev/null
+++ b/test/407-arrays/src/TestCase.java
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Common superclass for test cases.
+ */
+
+import java.util.Arrays;
+
+public abstract class TestCase {
+ public static void assertSame(Object expected, Object value) {
+ if (expected != value) {
+ throw new AssertionError("Objects are not the same: expected " +
+ String.valueOf(expected) + ", got " + String.valueOf(value));
+ }
+ }
+
+ public static void assertNotSame(Object expected, Object value) {
+ if (expected == value) {
+ throw new AssertionError(
+ "Objects are the same: " + String.valueOf(expected));
+ }
+ }
+
+ public static void assertEquals(String message, int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertTrue(String message, boolean condition) {
+ if (!condition) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertTrue(boolean condition) {
+ assertTrue("Expected true", condition);
+ }
+
+ public static void assertFalse(String message, boolean condition) {
+ if (condition) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertFalse(boolean condition) {
+ assertFalse("Expected false", condition);
+ }
+
+ public static void assertEquals(Object expected, Object actual) {
+ if (!expected.equals(actual)) {
+ String msg = "Expected \"" + expected + "\" but got \"" + actual + "\"";
+ throw new AssertionError(msg);
+ }
+ }
+
+ public static void assertNotEquals(int expected, int actual) {
+ if (expected == actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertNotEquals(Object expected, Object actual) {
+ if (expected.equals(actual)) {
+ String msg = "Objects are the same: " + String.valueOf(expected);
+ throw new AssertionError(msg);
+ }
+ }
+
+ public static <T> void assertArrayEquals(T[] actual, T... expected) {
+ assertTrue(Arrays.equals(expected, actual));
+ }
+
+ public static void assertEquals(
+ String message, Object expected, Object actual) {
+ if (!expected.equals(actual)) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(
+ String message, long expected, long actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(long expected, long actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(
+ String message, boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(
+ String message, float expected, float actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(float expected, float actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(float expected, float actual,
+ float tolerance) {
+ if ((actual < expected - tolerance) || (expected + tolerance < actual)) {
+ throw new AssertionError("Expected " + expected + " got " + actual +
+ " tolerance " + tolerance);
+ }
+ }
+
+ public static void assertEquals(
+ String message, double expected, double actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertEquals(double expected, double actual) {
+ if (expected != actual) {
+ throw new AssertionError("Expected " + expected + " got " + actual);
+ }
+ }
+
+ public static void assertEquals(double expected, double actual,
+ double tolerance) {
+ if ((actual < expected - tolerance) || (expected + tolerance < actual)) {
+ throw new AssertionError("Expected " + expected + " got " + actual +
+ " tolerance " + tolerance);
+ }
+ }
+
+ public static void assertSame(
+ String message, Object expected, Object actual) {
+ if (expected != actual) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNull(String message, Object object) {
+ if (object != null) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNull(Object object) {
+ assertNull("Expected null", object);
+ }
+
+ public static void assertNotNull(String message, Object object) {
+ if (object == null) {
+ throw new AssertionError(message);
+ }
+ }
+
+ public static void assertNotNull(Object object) {
+ assertNotNull("Expected non-null", object);
+ }
+
+ public static void fail(String msg) {
+ throw new AssertionError(msg);
+ }
+}
diff --git a/test/700-LoadArgRegs/expected.txt b/test/700-LoadArgRegs/expected.txt
index 4977df6..c0d5eee 100644
--- a/test/700-LoadArgRegs/expected.txt
+++ b/test/700-LoadArgRegs/expected.txt
@@ -74,3 +74,4 @@
-91, -92, -93, -94, -95, -96, -97, -98, -99
-1, -91, -92, -93, -94, -95, -96, -97, -98, -99
1, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 2, 3, 4, 5, 6
+1234605616436508552, -8613303245920329199, 1234605616436508552
diff --git a/test/700-LoadArgRegs/src/Main.java b/test/700-LoadArgRegs/src/Main.java
index 0e6de73..4649d05 100644
--- a/test/700-LoadArgRegs/src/Main.java
+++ b/test/700-LoadArgRegs/src/Main.java
@@ -274,6 +274,14 @@
System.out.println(i1+", "+d1+", "+d2+", "+d3+", "+d4+", "+d5+", "+d6+", "+d7+", "+d8+", "+d9+", "+i2+", "+i3+", "+i4+", "+i5+", "+i6);
}
+ static void testRefs1(Object o1, Object o2, Object o3, Object o4, Object o5, long l1, long l2, long l3) {
+ System.out.println(l1 + ", " + l2 + ", " + l3);
+ }
+
+ static void testRefs(Object o1, Object o2, Object o3, Object o4, Object o5, long l1, long l2, long l3) {
+ testRefs1(o1, o2, o3, o4, o5, l1, l2, l3);
+ }
+
static public void main(String[] args) throws Exception {
testI();
testB();
@@ -288,5 +296,8 @@
testLL();
testMore(1, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 2, 3, 4, 5, 6);
+
+ Object obj = new Object();
+ testRefs(obj, obj, obj, obj, obj, 0x1122334455667788L, 0x8877665544332211L, 0x1122334455667788L);
}
}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index bf3e2aa..f3563a4 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -19,11 +19,11 @@
include art/build/Android.common_build.mk
LIBARTTEST_COMMON_SRC_FILES := \
- JniTest/jni_test.cc \
- SignalTest/signaltest.cc \
- ReferenceMap/stack_walk_refmap_jni.cc \
- StackWalk/stack_walk_jni.cc \
- UnsafeTest/unsafe_test.cc
+ 004-JniTest/jni_test.cc \
+ 004-SignalTest/signaltest.cc \
+ 004-ReferenceMap/stack_walk_refmap_jni.cc \
+ 004-StackWalk/stack_walk_jni.cc \
+ 004-UnsafeTest/unsafe_test.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ifdef TARGET_2ND_ARCH
diff --git a/test/Android.libnativebridgetest.mk b/test/Android.libnativebridgetest.mk
new file mode 100644
index 0000000..dd7255a
--- /dev/null
+++ b/test/Android.libnativebridgetest.mk
@@ -0,0 +1,87 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include art/build/Android.common_build.mk
+
+LIBNATIVEBRIDGETEST_COMMON_SRC_FILES := \
+ 115-native-bridge/nativebridge.cc
+
+ART_TARGET_LIBNATIVEBRIDGETEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libnativebridgetest.so
+ifdef TARGET_2ND_ARCH
+ ART_TARGET_LIBNATIVEBRIDGETEST_$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libnativebridgetest.so
+endif
+
+# $(1): target or host
+define build-libnativebridgetest
+ ifneq ($(1),target)
+ ifneq ($(1),host)
+ $$(error expected target or host for argument 1, received $(1))
+ endif
+ endif
+
+ art_target_or_host := $(1)
+
+ include $(CLEAR_VARS)
+ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+ LOCAL_MODULE := libnativebridgetest
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_MODULE_TAGS := tests
+ endif
+ LOCAL_SRC_FILES := $(LIBNATIVEBRIDGETEST_COMMON_SRC_FILES)
+ LOCAL_SHARED_LIBRARIES += libartd
+ LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
+ LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
+ LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libnativebridgetest.mk
+ include external/libcxx/libcxx.mk
+ ifeq ($$(art_target_or_host),target)
+ $(call set-target-local-clang-vars)
+ $(call set-target-local-cflags-vars,debug)
+ LOCAL_SHARED_LIBRARIES += libdl libcutils
+ LOCAL_STATIC_LIBRARIES := libgtest
+ LOCAL_MULTILIB := both
+ LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32)
+ LOCAL_MODULE_PATH_64 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_64)
+ LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
+ include $(BUILD_SHARED_LIBRARY)
+ else # host
+ LOCAL_CLANG := $(ART_HOST_CLANG)
+ LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
+ LOCAL_STATIC_LIBRARIES := libcutils
+ LOCAL_LDLIBS += -ldl -lpthread
+ ifeq ($(HOST_OS),linux)
+ LOCAL_LDLIBS += -lrt
+ endif
+ LOCAL_IS_HOST_MODULE := true
+ LOCAL_MULTILIB := both
+ include $(BUILD_HOST_SHARED_LIBRARY)
+ endif
+
+ # Clear locally used variables.
+ art_target_or_host :=
+endef
+
+ifeq ($(ART_BUILD_TARGET),true)
+ $(eval $(call build-libnativebridgetest,target))
+endif
+ifeq ($(ART_BUILD_HOST),true)
+ $(eval $(call build-libnativebridgetest,host))
+endif
+
+# Clear locally used variables.
+LOCAL_PATH :=
+LIBNATIVEBRIDGETEST_COMMON_SRC_FILES :=
diff --git a/test/Android.oat.mk b/test/Android.oat.mk
deleted file mode 100644
index 16300bb..0000000
--- a/test/Android.oat.mk
+++ /dev/null
@@ -1,455 +0,0 @@
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-LOCAL_PID := $(shell echo $$PPID)
-
-include art/build/Android.common_test.mk
-
-########################################################################
-
-# Subdirectories in art/test which contain dex files used as inputs for oat tests. Declare the
-# simplest tests (Main, HelloWorld) first, the rest are alphabetical.
-TEST_OAT_DIRECTORIES := \
- Main \
- HelloWorld \
- InterfaceTest \
- JniTest \
- SignalTest \
- NativeAllocations \
- ParallelGC \
- ReferenceMap \
- StackWalk \
- ThreadStress \
- UnsafeTest
-
-# TODO: Enable when the StackWalk2 tests are passing
-# StackWalk2 \
-
-# Create build rules for each dex file recording the dependency.
-$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval $(call build-art-test-dex,art-oat-test,$(dir), \
- $(ART_TARGET_TEST_OUT),$(LOCAL_PATH)/Android.oat.mk,ART_TEST_TARGET_OAT_$(dir)_DEX, \
- ART_TEST_HOST_OAT_$(dir)_DEX)))
-
-########################################################################
-
-include $(LOCAL_PATH)/Android.libarttest.mk
-
-ART_TEST_TARGET_OAT_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_DEFAULT_RULES :=
-ART_TEST_TARGET_OAT_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_OPTIMIZING_RULES :=
-ART_TEST_TARGET_OAT_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_INTERPRETER_RULES :=
-ART_TEST_TARGET_OAT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_RULES :=
-
-# We need dex2oat and dalvikvm on the target as well as the core image.
-TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(2ND_TARGET_CORE_IMG_OUT) $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
-ifdef TARGET_2ND_ARCH
-TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttest.so
-endif
-
-# Define rule to run an individual oat test on the host. Output from the test is written to the
-# host in /tmp/android-data in a directory named after test's rule name (its target) and the parent
-# process' PID (ie the PID of make). On failure the output is dumped to the console. To test for
-# success on the target device a file is created following a successful test and this is pulled
-# onto the host. If the pull fails then the file wasn't created because the test failed.
-# $(1): directory - the name of the test we're building such as HelloWorld.
-# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
-# $(3): the target (rule name), e.g. test-art-target-oat-default-HelloWorld64
-# $(4): -Xint or undefined - do we want to run with the interpreter or default.
-define define-test-art-oat-rule-target
- # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
- # to ensure files are pushed to the device.
- TEST_ART_TARGET_SYNC_DEPS += $$(ART_TEST_TARGET_OAT_$(1)_DEX)
-
-.PHONY: $(3)
-$(3): test-art-target-sync
- $(hide) mkdir -p $(ART_HOST_TEST_DIR)/android-data-$$@
- $(hide) echo Running: $$@
- $(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$(LOCAL_PID)
- $(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$(LOCAL_PID)
- $(hide) $$(call ART_TEST_SKIP,$$@) && \
- adb shell "/system/bin/dalvikvm$($(2)ART_PHONY_TEST_TARGET_SUFFIX) \
- $(DALVIKVM_FLAGS) $(4) -XXlib:libartd.so -Ximage:$(ART_TARGET_TEST_DIR)/core.art \
- -classpath $(ART_TARGET_TEST_DIR)/art-oat-test-$(1).jar \
- -Djava.library.path=$(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH) $(1) \
- && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$(LOCAL_PID)" \
- > $(ART_HOST_TEST_DIR)/android-data-$$@/output.txt 2>&1 && \
- (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$(LOCAL_PID) $(ART_HOST_TEST_DIR)/android-data-$$@ \
- && $$(call ART_TEST_PASSED,$$@)) \
- || (([ ! -f $(ART_HOST_TEST_DIR)/android-data-$$@/output.txt ] || \
- cat $(ART_HOST_TEST_DIR)/android-data-$$@/output.txt) && $$(call ART_TEST_FAILED,$$@))
- $$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
- echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
- rm -r $(ART_HOST_TEST_DIR)) || true
-
-endef # define-test-art-oat-rule-target
-
-# Define rules to run oat tests on the target.
-# $(1): directory - the name of the test we're building such as HelloWorld.
-# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
-define define-test-art-oat-rules-target
- # Define a phony rule to run a target oat test using the default compiler.
- default_test_rule := test-art-target-oat-default-$(1)$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
- $(call define-test-art-oat-rule-target,$(1),$(2),$$(default_test_rule),)
-
- ART_TEST_TARGET_OAT_DEFAULT$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(default_test_rule)
- ART_TEST_TARGET_OAT_DEFAULT_RULES += $$(default_test_rule)
- ART_TEST_TARGET_OAT_DEFAULT_$(1)_RULES += $$(default_test_rule)
-
- optimizing_test_rule := test-art-target-oat-optimizing-$(1)$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
- $(call define-test-art-oat-rule-target,$(1),$(2),$$(optimizing_test_rule), \
- -Xcompiler-option --compiler-backend=Optimizing)
-
- ART_TEST_TARGET_OAT_OPTIMIZING$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(optimizing_test_rule)
- ART_TEST_TARGET_OAT_OPTIMIZING_RULES += $$(optimizing_test_rule)
- ART_TEST_TARGET_OAT_OPTIMIZING_$(1)_RULES += $$(optimizing_test_rule)
-
- # Define a phony rule to run a target oat test using the interpeter.
- interpreter_test_rule := test-art-target-oat-interpreter-$(1)$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
- $(call define-test-art-oat-rule-target,$(1),$(2),$$(interpreter_test_rule),-Xint)
-
- ART_TEST_TARGET_OAT_INTERPRETER$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(interpreter_test_rule)
- ART_TEST_TARGET_OAT_INTERPRETER_RULES += $$(interpreter_test_rule)
- ART_TEST_TARGET_OAT_INTERPRETER_$(1)_RULES += $$(interpreter_test_rule)
-
- # Define a phony rule to run both the default and interpreter variants.
- all_test_rule := test-art-target-oat-$(1)$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
-.PHONY: $$(all_test_rule)
-$$(all_test_rule): $$(default_test_rule) $$(optimizing_test_rule) $$(interpreter_test_rule)
- $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
- ART_TEST_TARGET_OAT$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(all_test_rule)
- ART_TEST_TARGET_OAT_RULES += $$(all_test_rule)
- ART_TEST_TARGET_OAT_$(1)_RULES += $$(all_test_rule)
-
- # Clear locally defined variables.
- interpreter_test_rule :=
- default_test_rule :=
- optimizing_test_rule :=
- all_test_rule :=
-endef # define-test-art-oat-rules-target
-
-ART_TEST_HOST_OAT_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_DEFAULT_RULES :=
-ART_TEST_HOST_OAT_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_OPTIMIZING_RULES :=
-ART_TEST_HOST_OAT_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_INTERPRETER_RULES :=
-ART_TEST_HOST_OAT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_RULES :=
-
-# All tests require the host executables, libarttest and the core images.
-ART_TEST_HOST_OAT_DEPENDENCIES := \
- $(ART_HOST_EXECUTABLES) \
- $(ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
- $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(HOST_CORE_IMG_OUT)
-
-ifneq ($(HOST_PREFER_32_BIT),true)
-ART_TEST_HOST_OAT_DEPENDENCIES += \
- $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
- $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(2ND_HOST_CORE_IMG_OUT)
-endif
-
-# Define rule to run an individual oat test on the host. Output from the test is written to the
-# host in /tmp/android-data in a directory named after test's rule name (its target) and the parent
-# process' PID (ie the PID of make). On failure the output is dumped to the console.
-# $(1): directory - the name of the test we're building such as HelloWorld.
-# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
-# $(3): the target (rule name), e.g. test-art-host-oat-default-HelloWorld64
-# $(4): argument to dex2oat
-# $(5): argument to runtime, e.g. -Xint or undefined
-define define-test-art-oat-rule-host
- # Remove the leading / from /tmp for the test directory.
- dex_file := $$(subst /tmp,tmp,$(ART_HOST_TEST_DIR))/android-data-$(3)/oat-test-dex-$(1).jar
- oat_file := $(ART_HOST_TEST_DIR)/android-data-$(3)/dalvik-cache/$$($(2)HOST_ARCH)/$$(subst /,@,$$(dex_file))@classes.dex
-$(3): PRIVATE_DEX_FILE := /$$(dex_file)
-$(3): PRIVATE_OAT_FILE := $$(oat_file)
-.PHONY: $(3)
-$(3): $$(ART_TEST_HOST_OAT_$(1)_DEX) $(ART_TEST_HOST_OAT_DEPENDENCIES)
- $(hide) mkdir -p $(ART_HOST_TEST_DIR)/android-data-$$@/dalvik-cache/$$($(2)HOST_ARCH)
- $(hide) cp $$(realpath $$<) $(ART_HOST_TEST_DIR)/android-data-$$@/oat-test-dex-$(1).jar
- $(hide) $(DEX2OATD) $(DEX2OAT_FLAGS) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) $(4) \
- --boot-image=$$(HOST_CORE_IMG_LOCATION) \
- --dex-file=$$(PRIVATE_DEX_FILE) --oat-file=$$(PRIVATE_OAT_FILE) \
- --instruction-set=$($(2)ART_HOST_ARCH) --host --android-root=$(HOST_OUT) \
- || $$(call ART_TEST_FAILED,$$@)
- $(hide) $$(call ART_TEST_SKIP,$$@) && \
- ANDROID_DATA=$(ART_HOST_TEST_DIR)/android-data-$$@/ \
- ANDROID_ROOT=$(HOST_OUT) \
- ANDROID_LOG_TAGS='*:d' \
- LD_LIBRARY_PATH=$$($(2)ART_HOST_OUT_SHARED_LIBRARIES) \
- $(HOST_OUT_EXECUTABLES)/dalvikvm$$($(2)ART_PHONY_TEST_HOST_SUFFIX) $(DALVIKVM_FLAGS) $(5) \
- -XXlib:libartd$(HOST_SHLIB_SUFFIX) -Ximage:$$(HOST_CORE_IMG_LOCATION) \
- -classpath $(ART_HOST_TEST_DIR)/android-data-$$@/oat-test-dex-$(1).jar \
- -Djava.library.path=$$($(2)ART_HOST_OUT_SHARED_LIBRARIES) $(1) \
- > $(ART_HOST_TEST_DIR)/android-data-$$@/output.txt 2>&1 \
- && $$(call ART_TEST_PASSED,$$@) \
- || (([ ! -f $(ART_HOST_TEST_DIR)/android-data-$$@/output.txt ] || \
- cat $(ART_HOST_TEST_DIR)/android-data-$$@/output.txt) && $$(call ART_TEST_FAILED,$$@))
- $$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
- echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
- rm -r $(ART_HOST_TEST_DIR)) || true
-endef # define-test-art-oat-rule-host
-
-# Define rules to run oat tests on the host.
-# $(1): directory - the name of the test we're building such as HelloWorld.
-# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
-define define-test-art-oat-rules-host
- # Create a rule to run the host oat test with the default compiler.
- default_test_rule := test-art-host-oat-default-$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX)
- $(call define-test-art-oat-rule-host,$(1),$(2),$$(default_test_rule),,)
-
- ART_TEST_HOST_OAT_DEFAULT$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(default_test_rule)
- ART_TEST_HOST_OAT_DEFAULT_RULES += $$(default_test_rule)
- ART_TEST_HOST_OAT_DEFAULT_$(1)_RULES += $$(default_test_rule)
-
- # Create a rule to run the host oat test with the optimizing compiler.
- optimizing_test_rule := test-art-host-oat-optimizing-$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX)
- $(call define-test-art-oat-rule-host,$(1),$(2),$$(optimizing_test_rule),--compiler-backend=Optimizing,)
-
- ART_TEST_HOST_OAT_OPTIMIZING$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(optimizing_test_rule)
- ART_TEST_HOST_OAT_OPTIMIZING_RULES += $$(optimizing_test_rule)
- ART_TEST_HOST_OAT_OPTIMIZING_$(1)_RULES += $$(optimizing_test_rule)
-
- # Create a rule to run the host oat test with the interpreter.
- interpreter_test_rule := test-art-host-oat-interpreter-$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX)
- $(call define-test-art-oat-rule-host,$(1),$(2),$$(interpreter_test_rule),--compiler-filter=interpret-only,-Xint)
-
- ART_TEST_HOST_OAT_INTERPRETER$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(interpreter_test_rule)
- ART_TEST_HOST_OAT_INTERPRETER_RULES += $$(interpreter_test_rule)
- ART_TEST_HOST_OAT_INTERPRETER_$(1)_RULES += $$(interpreter_test_rule)
-
- # Define a phony rule to run both the default and interpreter variants.
- all_test_rule := test-art-host-oat-$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX)
-.PHONY: $$(all_test_rule)
-$$(all_test_rule): $$(default_test_rule) $$(interpreter_test_rule) $$(optimizing_test_rule)
- $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
- ART_TEST_HOST_OAT$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(all_test_rule)
- ART_TEST_HOST_OAT_RULES += $$(all_test_rule)
- ART_TEST_HOST_OAT_$(1)_RULES += $$(all_test_rule)
-
- # Clear locally defined variables.
- default_test_rule :=
- optimizing_test_rule :=
- interpreter_test_rule :=
- all_test_rule :=
-endef # define-test-art-oat-rules-host
-
-# For a given test create all the combinations of host/target, compiler and suffix such as:
-# test-art-host-oat-HelloWord or test-art-target-oat-interpreter-HelloWorld64
-# $(1): test name, e.g. HelloWorld
-# $(2): host or target
-# $(3): HOST or TARGET
-# $(4): undefined, -default, -optimizing or -interpreter
-# $(5): undefined, _DEFAULT, _OPTIMIZING or _INTERPRETER
-define define-test-art-oat-combination-for-test
- ifeq ($(2),host)
- ifneq ($(3),HOST)
- $$(error argument mismatch $(2) and ($3))
- endif
- else
- ifneq ($(2),target)
- $$(error found $(2) expected host or target)
- endif
- ifneq ($(3),TARGET)
- $$(error argument mismatch $(2) and ($3))
- endif
- endif
-
- rule_name := test-art-$(2)-oat$(4)-$(1)
- dependencies := $$(ART_TEST_$(3)_OAT$(5)_$(1)_RULES)
-
- ifeq ($$(dependencies),)
- ifneq ($(4),-optimizing)
- $$(error $$(rule_name) has no dependencies)
- endif
- endif
-
-.PHONY: $$(rule_name)
-$$(rule_name): $$(dependencies)
- $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
- # Clear locally defined variables.
- rule_name :=
- dependencies :=
-endef # define-test-art-oat-combination
-
-# Define target and host oat test rules for the differing multilib flavors and default vs
-# interpreter runs. The format of the generated rules (for running an individual test) is:
-# test-art-(host|target)-oat-(default|interpreter)-${directory}(32|64)
-# The rules are appended to various lists to enable shorter phony build rules to be built.
-# $(1): directory
-define define-test-art-oat-rules
- # Define target tests.
- ART_TEST_TARGET_OAT_DEFAULT_$(1)_RULES :=
- ART_TEST_TARGET_OAT_OPTIMIZING_$(1)_RULES :=
- ART_TEST_TARGET_OAT_INTERPRETER_$(1)_RULES :=
- ART_TEST_TARGET_OAT_$(1)_RULES :=
- $(call define-test-art-oat-rules-target,$(1),)
- ifdef TARGET_2ND_ARCH
- $(call define-test-art-oat-rules-target,$(1),2ND_)
- endif
- $(call define-test-art-oat-combination-for-test,$(1),target,TARGET,,))
- $(call define-test-art-oat-combination-for-test,$(1),target,TARGET,-default,_DEFAULT))
- $(call define-test-art-oat-combination-for-test,$(1),target,TARGET,-optimizing,_OPTIMIZING))
- $(call define-test-art-oat-combination-for-test,$(1),target,TARGET,-interpreter,_INTERPRETER))
-
- # Define host tests.
- ART_TEST_HOST_OAT_DEFAULT_$(1)_RULES :=
- ART_TEST_HOST_OAT_OPTIMIZING_$(1)_RULES :=
- ART_TEST_HOST_OAT_INTERPRETER_$(1)_RULES :=
- ART_TEST_HOST_OAT_$(1)_RULES :=
- $(call define-test-art-oat-rules-host,$(1),)
- ifneq ($(HOST_PREFER_32_BIT),true)
- $(call define-test-art-oat-rules-host,$(1),2ND_)
- endif
- $(call define-test-art-oat-combination-for-test,$(1),host,HOST,,)
- $(call define-test-art-oat-combination-for-test,$(1),host,HOST,-default,_DEFAULT)
- $(call define-test-art-oat-combination-for-test,$(1),host,HOST,-optimizing,_OPTIMIZING)
- $(call define-test-art-oat-combination-for-test,$(1),host,HOST,-interpreter,_INTERPRETER)
-
- # Clear locally defined variables.
- ART_TEST_TARGET_OAT_DEFAULT_$(1)_RULES :=
- ART_TEST_TARGET_OAT_OPTIMIZING_$(1)_RULES :=
- ART_TEST_TARGET_OAT_INTERPRETER_$(1)_RULES :=
- ART_TEST_TARGET_OAT_$(1)_RULES :=
- ART_TEST_HOST_OAT_DEFAULT_$(1)_RULES :=
- ART_TEST_HOST_OAT_OPTIMIZING_$(1)_RULES :=
- ART_TEST_HOST_OAT_INTERPRETER_$(1)_RULES :=
- ART_TEST_HOST_OAT_$(1)_RULES :=
-endef # define-test-art-oat-rules
-$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval $(call define-test-art-oat-rules,$(dir))))
-
-# Define all the combinations of host/target, compiler and suffix such as:
-# test-art-host-oat or test-art-target-oat-interpreter64
-# $(1): host or target
-# $(2): HOST or TARGET
-# $(3): undefined, -default, -optimizing or -interpreter
-# $(4): undefined, _DEFAULT, _OPTIMIZING or _INTERPRETER
-# $(5): undefined, 32 or 64
-define define-test-art-oat-combination
- ifeq ($(1),host)
- ifneq ($(2),HOST)
- $$(error argument mismatch $(1) and ($2))
- endif
- else
- ifneq ($(1),target)
- $$(error found $(1) expected host or target)
- endif
- ifneq ($(2),TARGET)
- $$(error argument mismatch $(1) and ($2))
- endif
- endif
-
- rule_name := test-art-$(1)-oat$(3)$(5)
- dependencies := $$(ART_TEST_$(2)_OAT$(4)$(5)_RULES)
-
- ifeq ($$(dependencies),)
- ifneq ($(3),-optimizing)
- $$(error $$(rule_name) has no dependencies)
- endif
- endif
-
-.PHONY: $$(rule_name)
-$$(rule_name): $$(dependencies)
- $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
- # Clear locally defined variables.
- rule_name :=
- dependencies :=
-
-endef # define-test-art-oat-combination
-
-$(eval $(call define-test-art-oat-combination,target,TARGET,,,))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-default,_DEFAULT,))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-optimizing,_OPTIMIZING,))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-interpreter,_INTERPRETER,))
-$(eval $(call define-test-art-oat-combination,target,TARGET,,,$(ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-default,_DEFAULT,$(ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-optimizing,_OPTIMIZING,$(ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-interpreter,_INTERPRETER,$(ART_PHONY_TEST_TARGET_SUFFIX)))
-ifdef TARGET_2ND_ARCH
-$(eval $(call define-test-art-oat-combination,target,TARGET,,,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-default,_DEFAULT,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-optimizing,_OPTIMIZING,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,target,TARGET,-interpreter,_INTERPRETER,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
-endif
-
-$(eval $(call define-test-art-oat-combination,host,HOST,,,))
-$(eval $(call define-test-art-oat-combination,host,HOST,-default,_DEFAULT,))
-$(eval $(call define-test-art-oat-combination,host,HOST,-optimizing,_OPTIMIZING,))
-$(eval $(call define-test-art-oat-combination,host,HOST,-interpreter,_INTERPRETER,))
-$(eval $(call define-test-art-oat-combination,host,HOST,,,$(ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,host,HOST,-default,_DEFAULT,$(ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,host,HOST,-optimizing,_OPTIMIZING,$(ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,host,HOST,-interpreter,_INTERPRETER,$(ART_PHONY_TEST_HOST_SUFFIX)))
-ifneq ($(HOST_PREFER_32_BIT),true)
-$(eval $(call define-test-art-oat-combination,host,HOST,,,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,host,HOST,-default,_DEFAULT,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,host,HOST,-optimizing,_OPTIMIZING,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-oat-combination,host,HOST,-interpreter,_INTERPRETER,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
-endif
-
-# Clear locally defined variables.
-define-test-art-oat-rule-target :=
-define-test-art-oat-rules-target :=
-define-test-art-oat-rule-host :=
-define-test-art-oat-rules-host :=
-define-test-art-oat-combination-for-test :=
-define-test-art-oat-combination :=
-ART_TEST_TARGET_OAT_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_DEFAULT_RULES :=
-ART_TEST_TARGET_OAT_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_OPTIMIZING_RULES :=
-ART_TEST_TARGET_OAT_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_INTERPRETER_RULES :=
-ART_TEST_TARGET_OAT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_OAT_RULES :=
-ART_TEST_HOST_OAT_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_DEFAULT_RULES :=
-ART_TEST_HOST_OAT_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_OPTIMIZING_RULES :=
-ART_TEST_HOST_OAT_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_INTERPRETER_RULES :=
-ART_TEST_HOST_OAT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_OAT_RULES :=
-ART_TEST_HOST_OAT_DEPENDENCIES :=
-$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval ART_TEST_TARGET_OAT_$(dir)_DEX :=))
-$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval ART_TEST_HOST_OAT_$(dir)_DEX :=))
-TEST_OAT_DIRECTORIES :=
-LOCAL_PID :=
-LOCAL_PATH :=
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 1683074..d7ee383 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -21,24 +21,35 @@
TEST_ART_RUN_TESTS := $(wildcard $(LOCAL_PATH)/[0-9]*)
TEST_ART_RUN_TESTS := $(subst $(LOCAL_PATH)/,, $(TEST_ART_RUN_TESTS))
-# List all the test names for host and target excluding the -trace suffix
+# List all the test names for host and target and compiler variants.
# $(1): test name, e.g. 003-omnibus-opcodes
-# $(2): undefined or -trace
+# $(2): undefined, -trace, -gcverify or -gcstress
+# $(3): -relocate, -norelocate, -prebuild, -no-prebuild, or undefined.
define all-run-test-names
- test-art-host-run-test$(2)-default-$(1)32 \
- test-art-host-run-test$(2)-optimizing-$(1)32 \
- test-art-host-run-test$(2)-interpreter-$(1)32 \
- test-art-host-run-test$(2)-default-$(1)64 \
- test-art-host-run-test$(2)-optimizing-$(1)64 \
- test-art-host-run-test$(2)-interpreter-$(1)64 \
- test-art-target-run-test$(2)-default-$(1)32 \
- test-art-target-run-test$(2)-optimizing-$(1)32 \
- test-art-target-run-test$(2)-interpreter-$(1)32 \
- test-art-target-run-test$(2)-default-$(1)64 \
- test-art-target-run-test$(2)-optimizing-$(1)64 \
- test-art-target-run-test$(2)-interpreter-$(1)64
+ test-art-host-run-test$(2)-default$(3)-$(1)32 \
+ test-art-host-run-test$(2)-optimizing$(3)-$(1)32 \
+ test-art-host-run-test$(2)-interpreter$(3)-$(1)32 \
+ test-art-host-run-test$(2)-default$(3)-$(1)64 \
+ test-art-host-run-test$(2)-optimizing$(3)-$(1)64 \
+ test-art-host-run-test$(2)-interpreter$(3)-$(1)64 \
+ test-art-target-run-test$(2)-default$(3)-$(1)32 \
+ test-art-target-run-test$(2)-optimizing$(3)-$(1)32 \
+ test-art-target-run-test$(2)-interpreter$(3)-$(1)32 \
+ test-art-target-run-test$(2)-default$(3)-$(1)64 \
+ test-art-target-run-test$(2)-optimizing$(3)-$(1)64 \
+ test-art-target-run-test$(2)-interpreter$(3)-$(1)64
endef # all-run-test-names
+# Subset of the above for target only.
+define all-run-test-target-names
+ test-art-target-run-test$(2)-default$(3)-$(1)32 \
+ test-art-target-run-test$(2)-optimizing$(3)-$(1)32 \
+ test-art-target-run-test$(2)-interpreter$(3)-$(1)32 \
+ test-art-target-run-test$(2)-default$(3)-$(1)64 \
+ test-art-target-run-test$(2)-optimizing$(3)-$(1)64 \
+ test-art-target-run-test$(2)-interpreter$(3)-$(1)64
+endef # all-run-test-target-names
+
# Tests that are timing sensitive and flaky on heavily loaded systems.
TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
053-wait-some \
@@ -46,36 +57,72 @@
# disable timing sensitive tests on "dist" builds.
ifdef dist_goal
- ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),))
- ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-relocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-relocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-relocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-norelocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-norelocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-norelocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-norelocate))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-prebuild))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-prebuild))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-prebuild))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-prebuild))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-no-prebuild))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-no-prebuild))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-no-prebuild))
+ ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))
endif
# Tests that are broken in --trace mode.
TEST_ART_BROKEN_TRACE_RUN_TESTS := \
- 003-omnibus-opcodes \
- 004-annotations \
+ 004-SignalTest \
018-stack-overflow \
- 023-many-interfaces \
- 031-class-attributes \
- 037-inherit \
- 044-proxy \
- 046-reflect \
- 051-thread \
- 055-enum-performance \
- 064-field-access \
- 078-polymorphic-virtual \
- 080-oom-throw \
- 082-inline-execute \
- 083-compiler-regressions \
097-duplicate-method \
- 100-reflect2 \
- 102-concurrent-gc \
- 103-string-append \
- 107-int-math2 \
- 112-double-math \
- 701-easy-div-rem
+ 107-int-math2
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,))
+
+# Tests that need more than 2MB of RAM or that run into other corner cases in
+# GC stress related to OOMEs.
+TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,))
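The list is intentionally empty at this point; a test that could not cope with the 2MB heap used under gcstress would be added to it (above the foreach expansions) along these purely illustrative lines, where 080-oom-throw is only a hypothetical candidate:

# Hypothetical example: exclude a test that runs out of memory under the
# 2MB gcstress heap from all gcstress configurations.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
  080-oom-throw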
+
+# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-relocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-relocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-relocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-relocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-norelocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-norelocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-norelocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-norelocate)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-prebuild)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-prebuild)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-prebuild)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-prebuild)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-no-prebuild)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-no-prebuild)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-no-prebuild)
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-no-prebuild)
# The path where build only targets will be output, e.g.
# out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA
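Each ART_TEST_KNOWN_BROKEN line above expands one test name into the per-configuration rule names produced later by define-test-art-run-test. The all-run-test-names helper itself is defined earlier in this file and is not shown here, so the following is only a sketch of the kind of expansion it performs for a trace/GC variant ($(2)) and a relocation/prebuild variant ($(3)):

# Sketch only: expand a test into host and target rule names for one
# trace/GC variant ($(2), e.g. -trace) and one relocation/prebuild
# variant ($(3), e.g. -relocate), following the rule-name scheme used
# by define-test-art-run-test below.
all-run-test-names-sketch = \
  test-art-host-run-test$(2)-default$(3)-$(1)$(ART_PHONY_TEST_HOST_SUFFIX) \
  test-art-host-run-test$(2)-interpreter$(3)-$(1)$(ART_PHONY_TEST_HOST_SUFFIX) \
  test-art-host-run-test$(2)-optimizing$(3)-$(1)$(ART_PHONY_TEST_HOST_SUFFIX) \
  test-art-target-run-test$(2)-default$(3)-$(1)$(ART_PHONY_TEST_TARGET_SUFFIX) \
  test-art-target-run-test$(2)-interpreter$(3)-$(1)$(ART_PHONY_TEST_TARGET_SUFFIX) \
  test-art-target-run-test$(2)-optimizing$(3)-$(1)$(ART_PHONY_TEST_TARGET_SUFFIX)

# Mirrors one of the known-broken lines above:
#   $(call all-run-test-names-sketch,097-duplicate-method,-trace,-relocate)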
@@ -110,9 +157,13 @@
include $(BUILD_PHONY_PACKAGE)
# Clear temp vars.
-TEST_ART_RUN_TEST_BUILD_RULES :=
+all-run-test-names :=
art_run_tests_dir :=
define-build-art-run-test :=
+TEST_ART_RUN_TEST_BUILD_RULES :=
+TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
+TEST_ART_BROKEN_TRACE_RUN_TESTS :=
+TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
########################################################################
@@ -120,38 +171,146 @@
ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RULES :=
+ART_TEST_TARGET_RUN_TEST_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
ART_TEST_TARGET_RUN_TEST_ALL$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_ALL_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES :=
+ART_TEST_HOST_RUN_TEST_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
# We need dex2oat and dalvikvm on the target as well as the core image.
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(2ND_TARGET_CORE_IMG_OUT)
+# Also need libarttest.
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
+ifdef TARGET_2ND_ARCH
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttest.so
+endif
+
+# Also need libnativebridgetest.
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libnativebridgetest.so
+ifdef TARGET_2ND_ARCH
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libnativebridgetest.so
+endif
+
# All tests require the host executables and the core images.
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
$(HOST_CORE_IMG_OUT)
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_HOST_CORE_IMG_OUT)
endif
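The pattern used above for libarttest and libnativebridgetest generalizes to any native test library: the target copy is pushed per architecture from ART_TARGET_TEST_OUT, and the host run-tests depend on the shared library in the primary and, where built, secondary host output directories. A minimal sketch for a hypothetical additional library, libexampletest, under those same assumptions:

# Hypothetical: push libexampletest.so for each target architecture.
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libexampletest.so
ifdef TARGET_2ND_ARCH
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libexampletest.so
endif

# Hypothetical: make host run-tests depend on the host builds of the same library.
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
  $(ART_HOST_OUT_SHARED_LIBRARIES)/libexampletest$(ART_HOST_SHLIB_EXTENSION)
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
  $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libexampletest$(ART_HOST_SHLIB_EXTENSION)
endif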
@@ -162,12 +321,18 @@
# $(2): host or target
# $(3): default, optimizing or interpreter
# $(4): 32 or 64
-# $(5): run tests with tracing enabled or not: trace or undefined
+# $(5): run tests with tracing, GC verification or GC stress enabled or not: trace, gcverify, gcstress or undefined
+# $(6): relocate, norelocate, no-prebuild or prebuild.
define define-test-art-run-test
run_test_options := $(addprefix --runtime-option ,$(DALVIKVM_FLAGS))
- run_test_rule_name := test-art-$(2)-run-test-$(3)-$(1)$(4)
+ run_test_rule_name :=
uc_host_or_target :=
prereq_rule :=
+ skip_test := false
+ uc_reloc_type :=
+ ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true)
+ run_test_options += --always-clean
+ endif
ifeq ($(2),host)
uc_host_or_target := HOST
run_test_options += --host
@@ -180,10 +345,42 @@
$$(error found $(2) expected host or target)
endif
endif
+ ifeq ($(6),relocate)
+ uc_reloc_type := RELOCATE
+ run_test_options += --relocate --no-prebuild
+ ifneq ($(ART_TEST_RUN_TEST_RELOCATE),true)
+ skip_test := true
+ endif
+ else
+ ifeq ($(6),no-prebuild)
+ uc_reloc_type := NO_PREBUILD
+ run_test_options += --no-relocate --no-prebuild
+ ifneq ($(ART_TEST_RUN_TEST_NO_PREBUILD),true)
+ skip_test := true
+ endif
+ else
+ ifeq ($(6),norelocate)
+ uc_reloc_type := NORELOCATE
+ run_test_options += --no-relocate --prebuild
+ ifneq ($(ART_TEST_RUN_TEST_NO_RELOCATE),true)
+ skip_test := true
+ endif
+ else
+ uc_reloc_type := PREBUILD
+ run_test_options += --relocate --prebuild
+ ifneq ($(ART_TEST_RUN_TEST_PREBUILD),true)
+ skip_test := true
+ endif
+ endif
+ endif
+ endif
uc_compiler :=
ifeq ($(3),optimizing)
uc_compiler := OPTIMIZING
run_test_options += -Xcompiler-option --compiler-backend=Optimizing
+ ifneq ($$(ART_TEST_OPTIMIZING),true)
+ skip_test := true
+ endif
else
ifeq ($(3),interpreter)
uc_compiler := INTERPRETER
@@ -205,14 +402,37 @@
endif
ifeq ($(5),trace)
run_test_options += --trace
- run_test_rule_name := test-art-$(2)-run-test-trace-$(3)-$(1)$(4)
+ run_test_rule_name := test-art-$(2)-run-test-trace-$(3)-$(6)-$(1)$(4)
+ ifneq ($$(ART_TEST_TRACE),true)
+ skip_test := true
+ endif
else
- ifneq (,$(5))
- $$(error found $(5) expected undefined or -trace)
+ ifeq ($(5),gcverify)
+ run_test_options += --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify \
+ --runtime-option -Xgc:preverify_rosalloc --runtime-option -Xgc:postverify_rosalloc
+ run_test_rule_name := test-art-$(2)-run-test-gcverify-$(3)-$(6)-$(1)$(4)
+ ifneq ($$(ART_TEST_GC_VERIFY),true)
+ skip_test := true
+ endif
+ else
+ ifeq ($(5),gcstress)
+ run_test_options += --runtime-option -Xgc:SS --runtime-option -Xms2m \
+ --runtime-option -Xmx2m --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify
+ run_test_rule_name := test-art-$(2)-run-test-gcstress-$(3)-$(6)-$(1)$(4)
+ ifneq ($$(ART_TEST_GC_STRESS),true)
+ skip_test := true
+ endif
+ else
+ ifneq (,$(5))
+ $$(error found $(5) expected undefined, trace, gcverify or gcstress)
+ endif
+ run_test_rule_name := test-art-$(2)-run-test-$(3)-$(6)-$(1)$(4)
+ endif
endif
endif
- run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
- $$(run_test_options)
+ ifeq ($$(skip_test),false)
+ run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
+ $$(run_test_options)
$$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
.PHONY: $$(run_test_rule_name)
$$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $$(prereq_rule)
@@ -223,20 +443,28 @@
$$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
rm -r $(ART_HOST_TEST_DIR)) || true
+ else
+ .PHONY: $$(run_test_rule_name)
+$$(run_test_rule_name):
+ endif
ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)$(4)_RULES += $$(run_test_rule_name)
ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)_RULES += $$(run_test_rule_name)
ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)_$(1)_RULES += $$(run_test_rule_name)
- ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)_RULES += $$(run_test_rule_name)
+ ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)_$$(uc_reloc_type)_RULES += $$(run_test_rule_name)
ART_TEST_$$(uc_host_or_target)_RUN_TEST_$(1)_RULES += $$(run_test_rule_name)
+ ART_TEST_$$(uc_host_or_target)_RUN_TEST_$(1)$(4)_RULES += $$(run_test_rule_name)
ART_TEST_$$(uc_host_or_target)_RUN_TEST_ALL_RULES += $$(run_test_rule_name)
+ ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_reloc_type)_RULES += $$(run_test_rule_name)
ART_TEST_$$(uc_host_or_target)_RUN_TEST_ALL$(4)_RULES += $$(run_test_rule_name)
# Clear locally defined variables.
+ skip_test :=
run_test_options :=
run_test_rule_name :=
uc_host_or_target :=
prereq_rule :=
+ uc_reloc_type :=
uc_compiler :=
endef # define-test-art-run-test
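To summarize the conditionals above: $(6) selects the relocation/prebuild flags and the ART_TEST_RUN_TEST_* switch that gates the rule, while $(5) selects tracing or the GC verification/stress runtime options gated by ART_TEST_TRACE, ART_TEST_GC_VERIFY and ART_TEST_GC_STRESS. The macro is normally only invoked through the group definitions below, but a direct call would look like this sketch (the literal 64 stands in for ART_PHONY_TEST_HOST_SUFFIX):

# Mapping implemented by the $(6) conditionals (condensed):
#   relocate    -> --relocate    --no-prebuild  gated by ART_TEST_RUN_TEST_RELOCATE
#   no-prebuild -> --no-relocate --no-prebuild  gated by ART_TEST_RUN_TEST_NO_PREBUILD
#   norelocate  -> --no-relocate --prebuild     gated by ART_TEST_RUN_TEST_NO_RELOCATE
#   prebuild    -> --relocate    --prebuild     gated by ART_TEST_RUN_TEST_PREBUILD

# Hypothetical direct invocation: a 64-bit host rule for 097-duplicate-method,
# default compiler, tracing enabled, relocation forced on.
$(eval $(call define-test-art-run-test,097-duplicate-method,host,default,64,trace,relocate))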
@@ -253,7 +481,8 @@
# Create rules for a group of run tests.
# $(1): test name, e.g. 003-omnibus-opcodes
# $(2): host or target
-define define-test-art-run-test-group
+# $(3): relocate, norelocate, no-prebuild or prebuild.
+define define-test-art-run-test-group-type
group_uc_host_or_target :=
ifeq ($(2),host)
group_uc_host_or_target := HOST
@@ -265,17 +494,18 @@
endif
endif
- ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_DEFAULT_$(1)_RULES :=
- ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_INTERPRETER_$(1)_RULES :=
- ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES :=
- ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES :=
- $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),))
- $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),))
- $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),))
- ifeq ($(2),host)
- # For now just test tracing on the host with default.
- $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace))
- endif
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
do_second := false
ifeq ($(2),host)
ifneq ($$(HOST_PREFER_32_BIT),true)
@@ -287,15 +517,57 @@
endif
endif
ifeq (true,$$(do_second))
- $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),))
- $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),))
- $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),))
- ifeq ($(2),host)
- # For now just test tracing on the host with default.
- $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
+ $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
+ endif
+endef # define-test-art-run-test-group-type
+
+# Create rules for a group of run tests.
+# $(1): test name, e.g. 003-omnibus-opcodes
+# $(2): host or target
+define define-test-art-run-test-group
+ group_uc_host_or_target :=
+ ifeq ($(2),host)
+ group_uc_host_or_target := HOST
+ else
+ ifeq ($(2),target)
+ group_uc_host_or_target := TARGET
+ else
+ $$(error found $(2) expected host or target)
endif
endif
-
+ do_second := false
+ ifeq ($(2),host)
+ ifneq ($$(HOST_PREFER_32_BIT),true)
+ do_second := true
+ endif
+ else
+ ifdef TARGET_2ND_ARCH
+ do_second := true
+ endif
+ endif
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_DEFAULT_$(1)_RULES :=
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_INTERPRETER_$(1)_RULES :=
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES :=
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES :=
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
+ ifeq ($$(do_second),true)
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
+ endif
+ $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),prebuild))
+ $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),norelocate))
+ $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),relocate))
+ $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),no-prebuild))
$$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-default-$(1), \
$$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_DEFAULT_$(1)_RULES)))
$$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-interpreter-$(1), \
@@ -304,12 +576,22 @@
$$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES)))
$$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-$(1), \
$$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES)))
+ $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX), \
+ $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES)))
+ ifeq ($$(do_second),true)
+ $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX), \
+ $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES)))
+ endif
# Clear locally defined variables.
ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_DEFAULT_$(1)_RULES :=
ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_INTERPRETER_$(1)_RULES :=
ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES :=
ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES :=
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
+ ifeq ($$(do_second),true)
+ ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
+ endif
group_uc_host_or_target :=
do_second :=
endef # define-test-art-run-test-group
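Taken together, define-test-art-run-test-group calls define-test-art-run-test-group-type once per relocation/prebuild variant, and each of those calls define-test-art-run-test for every compiler and trace/GC variant, so one test on one instruction set fans out into 3 x 4 x 4 = 48 rules. A sketch of the rule names this produces for 097-duplicate-method on a 64-bit host (assuming ART_PHONY_TEST_HOST_SUFFIX is 64):

# Sketch only: the 48 phony rule names generated for one test and one ISA.
example_fan_out := \
  $(foreach c,default interpreter optimizing, \
    $(foreach b,prebuild norelocate relocate no-prebuild, \
      test-art-host-run-test-$(c)-$(b)-097-duplicate-method64 \
      test-art-host-run-test-trace-$(c)-$(b)-097-duplicate-method64 \
      test-art-host-run-test-gcverify-$(c)-$(b)-097-duplicate-method64 \
      test-art-host-run-test-gcstress-$(c)-$(b)-097-duplicate-method64))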
@@ -317,6 +599,14 @@
$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-test-art-run-test-group,$(test),target)))
$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-test-art-run-test-group,$(test),host)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-no-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-norelocate, \
+ $(ART_TEST_TARGET_RUN_TEST_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-relocate, \
+ $(ART_TEST_TARGET_RUN_TEST_RELOCATE_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test, \
$(ART_TEST_TARGET_RUN_TEST_ALL_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default, \
@@ -325,6 +615,30 @@
$(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing, \
$(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-no-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-no-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-no-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-prebuild, \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-norelocate, \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-norelocate, \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-norelocate, \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-relocate, \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-relocate, \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-relocate, \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test$(ART_PHONY_TEST_TARGET_SUFFIX), \
$(ART_TEST_TARGET_RUN_TEST_ALL$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default$(ART_PHONY_TEST_TARGET_SUFFIX), \
@@ -333,6 +647,38 @@
$(ART_TEST_TARGET_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX), \
$(ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
ifdef TARGET_2ND_ARCH
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
$(ART_TEST_TARGET_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
@@ -342,8 +688,48 @@
$(ART_TEST_TARGET_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
$(ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
+ $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
endif
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-no-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-norelocate, \
+ $(ART_TEST_HOST_RUN_TEST_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-relocate, \
+ $(ART_TEST_HOST_RUN_TEST_RELOCATE_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test, \
$(ART_TEST_HOST_RUN_TEST_ALL_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default, \
@@ -352,6 +738,30 @@
$(ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing, \
$(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-no-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-no-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-no-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-prebuild, \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-norelocate, \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-norelocate, \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-norelocate, \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-relocate, \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-relocate, \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-relocate, \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test$(ART_PHONY_TEST_HOST_SUFFIX), \
$(ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default$(ART_PHONY_TEST_HOST_SUFFIX), \
@@ -360,6 +770,38 @@
$(ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing$(ART_PHONY_TEST_HOST_SUFFIX), \
$(ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
ifneq ($(HOST_PREFER_32_BIT),true)
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
$(ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
@@ -369,33 +811,163 @@
$(ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
$(ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
+ $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
+ $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
endif
+# Include libarttest build rules.
+include $(LOCAL_PATH)/Android.libarttest.mk
+
+# Include libnativebridgetest build rules.
+include art/test/Android.libnativebridgetest.mk
+
define-test-art-run-test :=
define-test-art-run-test-group-rule :=
define-test-art-run-test-group :=
-all-run-test-names :=
+TEST_ART_RUN_TESTS :=
ART_TEST_TARGET_RUN_TEST_ALL_RULES :=
ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RULES :=
+ART_TEST_TARGET_RUN_TEST_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
+ART_TEST_TARGET_RUN_TEST_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
ART_TEST_TARGET_RUN_TEST_ALL$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_ALL_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES :=
+ART_TEST_HOST_RUN_TEST_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
+ART_TEST_HOST_RUN_TEST_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
diff --git a/test/ParallelGC/ParallelGC.java b/test/ParallelGC/ParallelGC.java
deleted file mode 100644
index eb9e04e..0000000
--- a/test/ParallelGC/ParallelGC.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.ArrayList;
-import java.util.List;
-
-class ParallelGC implements Runnable {
- public static void main(String[] args) throws Exception {
- Thread[] threads = new Thread[16];
- for (int i = 0; i < threads.length; i++) {
- threads[i] = new Thread(new ParallelGC(i));
- }
- for (Thread thread : threads) {
- thread.start();
- }
- for (Thread thread : threads) {
- thread.join();
- }
- }
-
- private final int id;
-
- private ParallelGC(int id) {
- this.id = id;
- }
-
- public void run() {
- List l = new ArrayList();
- for (int i = 0; i < 1000; i++) {
- l.add(new ArrayList(i));
- System.out.print(id);
- }
- }
-}
diff --git a/test/etc/default-check b/test/etc/default-check
new file mode 100755
index 0000000..46a095c
--- /dev/null
+++ b/test/etc/default-check
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+diff --strip-trailing-cr -q "$1" "$2" >/dev/null
\ No newline at end of file
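
The new default-check script factors out the output comparison that run-test previously performed inline. run-test copies it into the test directory as "check" whenever the test does not ship its own check script, so individual tests can now override how expected and actual output are compared (see the run-test hunks further below). A minimal sketch of that flow, assuming the conventional expected.txt/output.txt names and the per-test directory as the working directory:

    # Sketch only: how the harness uses the check script (paths are illustrative).
    cp etc/default-check check          # done only when the test has no custom "check"
    chmod 755 check
    if ./check expected.txt output.txt; then
        echo "output matches expected"  # exit status 0 means the outputs were identical
    fi
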
diff --git a/test/etc/host-run-test-jar b/test/etc/host-run-test-jar
index 4265f1c..2241f85 100755
--- a/test/etc/host-run-test-jar
+++ b/test/etc/host-run-test-jar
@@ -10,20 +10,29 @@
}
DEBUGGER="n"
+PREBUILD="n"
GDB="n"
+ISA="x86"
INTERPRETER="n"
VERIFY="y"
+RELOCATE="y"
OPTIMIZE="y"
INVOKE_WITH=""
DEV_MODE="n"
QUIET="n"
FLAGS=""
+COMPILER_FLAGS=""
+BUILD_BOOT_OPT=""
exe="${ANDROID_HOST_OUT}/bin/dalvikvm32"
+main="Main"
while true; do
if [ "x$1" = "x--quiet" ]; then
QUIET="y"
shift
+ elif [ "x$1" = "x--prebuild" ]; then
+ PREBUILD="y"
+ shift
elif [ "x$1" = "x--lib" ]; then
shift
if [ "x$1" = "x" ]; then
@@ -37,7 +46,9 @@
shift
elif [ "x$1" = "x--boot" ]; then
shift
- BOOT_OPT="$1"
+ option="$1"
+ BOOT_OPT="$option"
+ BUILD_BOOT_OPT="--boot-image=${option#-Ximage:}"
shift
elif [ "x$1" = "x--debug" ]; then
DEBUGGER="y"
@@ -65,6 +76,7 @@
INTERPRETER="y"
shift
elif [ "x$1" = "x--64" ]; then
+ ISA="x64"
exe="${ANDROID_HOST_OUT}/bin/dalvikvm64"
shift
elif [ "x$1" = "x--no-verify" ]; then
@@ -73,10 +85,17 @@
elif [ "x$1" = "x--no-optimize" ]; then
OPTIMIZE="n"
shift
+ elif [ "x$1" = "x--no-relocate" ]; then
+ RELOCATE="n"
+ shift
+ elif [ "x$1" = "x--relocate" ]; then
+ RELOCATE="y"
+ shift
elif [ "x$1" = "x-Xcompiler-option" ]; then
shift
option="$1"
FLAGS="${FLAGS} -Xcompiler-option $option"
+ COMPILER_FLAGS="${COMPILER_FLAGS} $option"
shift
elif [ "x$1" = "x--runtime-option" ]; then
shift
@@ -94,6 +113,12 @@
fi
done
+if [ "x$1" = "x" ] ; then
+ main="Main"
+else
+ main="$1"
+fi
+
msg "------------------------------"
export ANDROID_PRINTF_LOG=brief
@@ -129,13 +154,40 @@
if [ "$INTERPRETER" = "y" ]; then
INT_OPTS="-Xint"
+ COMPILER_FLAGS="${COMPILER_FLAGS} --compiler-filter=interpret-only"
+fi
+
+if [ "$RELOCATE" = "y" ]; then
+ FLAGS="${FLAGS} -Xrelocate"
+ COMPILER_FLAGS="${COMPILER_FLAGS} --runtime-arg -Xnorelocate --include-patch-information"
+ # run-test sets a fairly draconian ulimit that we are likely to blow right past
+ # since we are relocating. Get the total size of the /system/framework directory
+ # in 512-byte blocks and set that as the ulimit. This should be more than enough
+ # room.
+ ulimit -S $(du -c -B512 ${ANDROID_ROOT}/framework | tail -1 | cut -f1) || exit 1
+else
+ FLAGS="${FLAGS} -Xnorelocate"
+ COMPILER_FLAGS="${COMPILER_FLAGS} --runtime-arg -Xnorelocate --no-include-patch-information"
+fi
+
+mkdir_cmd="mkdir -p ${DEX_LOCATION}/dalvik-cache/$ISA"
+if [ "$PREBUILD" = "y" ]; then
+ prebuild_cmd="${ANDROID_HOST_OUT}/bin/dex2oatd $COMPILER_FLAGS --instruction-set=$ISA $BUILD_BOOT_OPT --dex-file=$DEX_LOCATION/$TEST_NAME.jar --oat-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g")"
+else
+ prebuild_cmd="true"
fi
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
-
+cmdline="$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB $JNI_OPTS $FLAGS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar $main"
if [ "$DEV_MODE" = "y" ]; then
- echo $cmdline "$@"
+ if [ "$PREBUILD" = "y" ]; then
+ echo "$mkdir_cmd && $prebuild_cmd && $cmdline"
+ elif [ "$RELOCATE" = "y" ]; then
+ echo "$mkdir_cmd && $cmdline"
+ else
+ echo $cmdline
+ fi
fi
cd $ANDROID_BUILD_TOP
-$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB $JNI_OPTS $FLAGS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main "$@"
+$mkdir_cmd && $prebuild_cmd && LD_PRELOAD=libsigchain.so $cmdline "$@"
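
Two details of the new host prebuild/relocate path deserve a note. When relocating, the soft file-size ulimit is raised to the size of ${ANDROID_ROOT}/framework in 512-byte blocks so the patched image fits under the harness's otherwise tight limit. When prebuilding, dex2oatd writes its output into $DEX_LOCATION/dalvik-cache/$ISA/ using ART's cache naming scheme: the absolute path of the classes.dex entry with the leading slash dropped and every remaining "/" replaced by "@". A small sketch of that name mangling, using hypothetical DEX_LOCATION, TEST_NAME and ISA values:

    # Sketch only; the DEX_LOCATION, TEST_NAME and ISA values are hypothetical.
    DEX_LOCATION=/tmp/user/test-1234
    TEST_NAME=001-HelloWorld
    ISA=x86
    oat_name=$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g")
    echo $oat_name
    # -> tmp@user@test-1234@001-HelloWorld.jar@classes.dex
    # so the prebuilt file ends up at:
    #   $DEX_LOCATION/dalvik-cache/$ISA/tmp@user@test-1234@001-HelloWorld.jar@classes.dex
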
diff --git a/test/etc/push-and-run-prebuilt-test-jar b/test/etc/push-and-run-prebuilt-test-jar
new file mode 100755
index 0000000..ad23edf
--- /dev/null
+++ b/test/etc/push-and-run-prebuilt-test-jar
@@ -0,0 +1,213 @@
+#!/bin/sh
+#
+# Run the code in test.jar on the device. The jar should contain a top-level
+# class named Main to run.
+
+msg() {
+ if [ "$QUIET" = "n" ]; then
+ echo "$@"
+ fi
+}
+
+ARCHITECTURES_32="(arm|x86|mips|none)"
+ARCHITECTURES_64="(arm64|x86_64|none)"
+ARCHITECTURES_PATTERN="${ARCHITECTURES_32}"
+RELOCATE="y"
+GDB="n"
+DEBUGGER="n"
+INTERPRETER="n"
+VERIFY="y"
+OPTIMIZE="y"
+ZYGOTE=""
+QUIET="n"
+DEV_MODE="n"
+INVOKE_WITH=""
+FLAGS=""
+TARGET_SUFFIX="32"
+GDB_TARGET_SUFFIX=""
+COMPILE_FLAGS=""
+
+while true; do
+ if [ "x$1" = "x--quiet" ]; then
+ QUIET="y"
+ shift
+ elif [ "x$1" = "x--lib" ]; then
+ shift
+ if [ "x$1" = "x" ]; then
+ echo "$0 missing argument to --lib" 1>&2
+ exit 1
+ fi
+ LIB="$1"
+ shift
+ elif [ "x$1" = "x-Xcompiler-option" ]; then
+ shift
+ option="$1"
+ FLAGS="${FLAGS} -Xcompiler-option $option"
+ COMPILE_FLAGS="${COMPILE_FLAGS} $option"
+ shift
+ elif [ "x$1" = "x--runtime-option" ]; then
+ shift
+ option="$1"
+ FLAGS="${FLAGS} $option"
+ shift
+ elif [ "x$1" = "x--boot" ]; then
+ shift
+ BOOT_OPT="$1"
+ BUILD_BOOT_OPT="--boot-image=${1#-Ximage:}"
+ shift
+ elif [ "x$1" = "x--relocate" ]; then
+ RELOCATE="y"
+ shift
+ elif [ "x$1" = "x--no-relocate" ]; then
+ RELOCATE="n"
+ shift
+ elif [ "x$1" = "x--debug" ]; then
+ DEBUGGER="y"
+ shift
+ elif [ "x$1" = "x--gdb" ]; then
+ GDB="y"
+ DEV_MODE="y"
+ shift
+ elif [ "x$1" = "x--zygote" ]; then
+ ZYGOTE="--zygote"
+ msg "Spawning from zygote"
+ shift
+ elif [ "x$1" = "x--dev" ]; then
+ DEV_MODE="y"
+ shift
+ elif [ "x$1" = "x--interpreter" ]; then
+ INTERPRETER="y"
+ shift
+ elif [ "x$1" = "x--invoke-with" ]; then
+ shift
+ if [ "x$1" = "x" ]; then
+ echo "$0 missing argument to --invoke-with" 1>&2
+ exit 1
+ fi
+ if [ "x$INVOKE_WITH" = "x" ]; then
+ INVOKE_WITH="$1"
+ else
+ INVOKE_WITH="$INVOKE_WITH $1"
+ fi
+ shift
+ elif [ "x$1" = "x--no-verify" ]; then
+ VERIFY="n"
+ shift
+ elif [ "x$1" = "x--no-optimize" ]; then
+ OPTIMIZE="n"
+ shift
+ elif [ "x$1" = "x--" ]; then
+ shift
+ break
+ elif [ "x$1" = "x--64" ]; then
+ TARGET_SUFFIX="64"
+ GDB_TARGET_SUFFIX="64"
+ ARCHITECTURES_PATTERN="${ARCHITECTURES_64}"
+ shift
+ elif expr "x$1" : "x--" >/dev/null 2>&1; then
+ echo "unknown $0 option: $1" 1>&2
+ exit 1
+ else
+ break
+ fi
+done
+
+if [ "$ZYGOTE" = "" ]; then
+ if [ "$OPTIMIZE" = "y" ]; then
+ if [ "$VERIFY" = "y" ]; then
+ DEX_OPTIMIZE="-Xdexopt:verified"
+ else
+ DEX_OPTIMIZE="-Xdexopt:all"
+ fi
+ msg "Performing optimizations"
+ else
+ DEX_OPTIMIZE="-Xdexopt:none"
+ msg "Skipping optimizations"
+ fi
+
+ if [ "$VERIFY" = "y" ]; then
+ DEX_VERIFY=""
+ msg "Performing verification"
+ else
+ DEX_VERIFY="-Xverify:none"
+ msg "Skipping verification"
+ fi
+fi
+
+msg "------------------------------"
+
+ARCH=$(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
+if [ x"$ARCH" = "x" ]; then
+ echo "Unable to determine architecture"
+ exit 1
+fi
+
+if [ "$QUIET" = "n" ]; then
+ adb shell rm -r $DEX_LOCATION
+ adb shell mkdir -p $DEX_LOCATION
+ adb push $TEST_NAME.jar $DEX_LOCATION
+ adb push $TEST_NAME-ex.jar $DEX_LOCATION
+else
+ adb shell rm -r $DEX_LOCATION >/dev/null 2>&1
+ adb shell mkdir -p $DEX_LOCATION >/dev/null 2>&1
+ adb push $TEST_NAME.jar $DEX_LOCATION >/dev/null 2>&1
+ adb push $TEST_NAME-ex.jar $DEX_LOCATION >/dev/null 2>&1
+fi
+
+if [ "$DEBUGGER" = "y" ]; then
+ # Use this instead for ddms and connect by running 'ddms':
+ # DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_android_adb,server=y,suspend=y"
+ # TODO: add a separate --ddms option?
+
+ PORT=12345
+ msg "Waiting for jdb to connect:"
+ msg " adb forward tcp:$PORT tcp:$PORT"
+ msg " jdb -attach localhost:$PORT"
+ DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_socket,address=$PORT,server=y,suspend=y"
+fi
+
+if [ "$GDB" = "y" ]; then
+ gdb="gdbserver$GDB_TARGET_SUFFIX :5039"
+ gdbargs="$exe"
+fi
+
+if [ "$INTERPRETER" = "y" ]; then
+ INT_OPTS="-Xint"
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
+fi
+
+JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
+
+if [ "$RELOCATE" = "y" ]; then
+ RELOCATE_OPT="-Xrelocate"
+ BUILD_RELOCATE_OPT="--runtime-arg -Xnorelocate"
+ COMPILE_FLAGS="${COMPILE_FLAGS} --include-patch-information"
+ FLAGS="${FLAGS} -Xcompiler-option --include-patch-information"
+else
+ RELOCATE_OPT="-Xnorelocate"
+ BUILD_RELOCATE_OPT="--runtime-arg -Xnorelocate"
+fi
+
+# The command line is written to a script file and run via "adb shell sh"
+# because it can get longer than the longest allowed adb command, and there is
+# no way to get the exit status from an adb shell command.
+cmdline="cd $DEX_LOCATION && export ANDROID_DATA=$DEX_LOCATION && export DEX_LOCATION=$DEX_LOCATION && \
+ mkdir -p $DEX_LOCATION/dalvik-cache/$ARCH/ && \
+ $INVOKE_WITH /system/bin/dex2oatd $COMPILE_FLAGS $BUILD_BOOT_OPT $BUILD_RELOCATE_OPT --runtime-arg -classpath --runtime-arg $DEX_LOCATION/$TEST_NAME.jar --dex-file=$DEX_LOCATION/$TEST_NAME.jar --oat-file=$DEX_LOCATION/dalvik-cache/$ARCH/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g") --instruction-set=$ARCH && \
+ $INVOKE_WITH $gdb /system/bin/dalvikvm$TARGET_SUFFIX $FLAGS $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $RELOCATE_OPT $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main $@"
+cmdfile=$(tempfile -p "cmd-" -s "-$TEST_NAME")
+echo "$cmdline" > $cmdfile
+
+if [ "$DEV_MODE" = "y" ]; then
+ echo $cmdline
+fi
+
+if [ "$QUIET" = "n" ]; then
+ adb push $cmdfile $DEX_LOCATION/cmdline.sh
+else
+ adb push $cmdfile $DEX_LOCATION/cmdline.sh > /dev/null 2>&1
+fi
+
+adb shell sh $DEX_LOCATION/cmdline.sh
+
+rm -f $cmdfile
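
The comment in the script explains the indirection above: the combined dex2oatd plus dalvikvm invocation can exceed the longest command line adb accepts, and "adb shell" does not report the remote command's exit status anyway, so the command is written to a small script, pushed, and run with "adb shell sh". Pass or fail is still decided by comparing the captured output against expected.txt. The pattern in isolation, as a sketch (mktemp replaces tempfile only to keep the sketch portable; the on-device path is hypothetical):

    # Sketch of the "push a script, run it with sh" pattern used above.
    DEX_LOCATION=/data/run-test/test-1234
    cmdfile=$(mktemp)
    echo "cd $DEX_LOCATION && echo hello from the device" > $cmdfile
    adb shell mkdir -p $DEX_LOCATION
    adb push $cmdfile $DEX_LOCATION/cmdline.sh
    adb shell sh $DEX_LOCATION/cmdline.sh
    rm -f $cmdfile
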
diff --git a/test/etc/push-and-run-test-jar b/test/etc/push-and-run-test-jar
index b090c33..06075c2 100755
--- a/test/etc/push-and-run-test-jar
+++ b/test/etc/push-and-run-test-jar
@@ -9,6 +9,7 @@
fi
}
+RELOCATE="y"
GDB="n"
DEBUGGER="n"
INTERPRETER="n"
@@ -19,7 +20,8 @@
DEV_MODE="n"
INVOKE_WITH=""
FLAGS=""
-TARGET_SUFFIX=""
+TARGET_SUFFIX="32"
+GDB_TARGET_SUFFIX=""
while true; do
if [ "x$1" = "x--quiet" ]; then
@@ -61,6 +63,12 @@
elif [ "x$1" = "x--dev" ]; then
DEV_MODE="y"
shift
+ elif [ "x$1" = "x--relocate" ]; then
+ RELOCATE="y"
+ shift
+ elif [ "x$1" = "x--no-relocate" ]; then
+ RELOCATE="n"
+ shift
elif [ "x$1" = "x--interpreter" ]; then
INTERPRETER="y"
shift
@@ -87,6 +95,7 @@
break
elif [ "x$1" = "x--64" ]; then
TARGET_SUFFIX="64"
+ GDB_TARGET_SUFFIX="64"
shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
@@ -145,7 +154,7 @@
fi
if [ "$GDB" = "y" ]; then
- gdb="gdbserver$TARGET_SUFFIX :5039"
+ gdb="gdbserver$GDB_TARGET_SUFFIX :5039"
gdbargs="$exe"
fi
@@ -155,8 +164,15 @@
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
+if [ "$RELOCATE" = "y" ]; then
+ RELOCATE_OPT="-Xrelocate"
+ FLAGS="${FLAGS} -Xcompiler-option --include-patch-information"
+else
+ RELOCATE_OPT="-Xnorelocate"
+fi
+
cmdline="cd $DEX_LOCATION && export ANDROID_DATA=$DEX_LOCATION && export DEX_LOCATION=$DEX_LOCATION && \
- $INVOKE_WITH $gdb /system/bin/dalvikvm$TARGET_SUFFIX $FLAGS $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
+ $INVOKE_WITH $gdb /system/bin/dalvikvm$TARGET_SUFFIX $FLAGS $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $RELOCATE_OPT $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
if [ "$DEV_MODE" = "y" ]; then
echo $cmdline "$@"
fi
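
With relocation enabled (the default), the non-prebuilt target script now passes -Xrelocate to the runtime and forwards --include-patch-information to the compiler so the image can be moved at load time; --no-relocate passes -Xnorelocate instead. For illustration only, the composed on-device command line ends up looking roughly like this (device paths and the test name are hypothetical):

    cd /data/run-test/test-1234 && export ANDROID_DATA=/data/run-test/test-1234 && \
      /system/bin/dalvikvm32 -Xcompiler-option --include-patch-information \
      -XXlib:libartd.so -Xjnigreflimit:512 -Xcheck:jni -Xrelocate \
      -Ximage:/data/art-test/core.art -cp /data/run-test/test-1234/001-HelloWorld.jar Main
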
diff --git a/test/run-all-tests b/test/run-all-tests
index 25d5c5f..284cca0 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -83,6 +83,20 @@
elif [ "x$1" = "x--trace" ]; then
run_args="${run_args} --trace"
shift
+ elif [ "x$1" = "x--relocate" ]; then
+ run_args="${run_args} --relocate"
+ shift
+ elif [ "x$1" = "x--no-relocate" ]; then
+ run_args="${run_args} --no-relocate"
+ shift
+ elif [ "x$1" = "x--no-prebuild" ]; then
+ run_args="${run_args} --no-prebuild"
+ shift;
+ elif [ "x$1" = "x--prebuild" ]; then
+ run_args="${run_args} --prebuild"
+ shift;
+ elif [ "x$1" = "x--always-clean" ]; then
+ run_args="${run_args} --always-clean"
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
usage="yes"
@@ -101,7 +115,8 @@
echo " Options are all passed to run-test; refer to that for " \
"further documentation:"
echo " --debug --dev --host --interpreter --jvm --no-optimize"
- echo " --no-verify -O --update --valgrind --zygote --64"
+ echo " --no-verify -O --update --valgrind --zygote --64 --relocate"
+ echo " --prebuild --always-clean"
echo " Specific Runtime Options:"
echo " --seq Run tests one-by-one, avoiding failures caused by busy CPU"
) 1>&2
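
run-all-tests itself only forwards the new flags; each one is appended to run_args and passed straight through to run-test. A few hypothetical invocations showing the added knobs:

    # Hypothetical invocations; every new flag is simply forwarded to run-test.
    ./test/run-all-tests --host --prebuild --relocate        # dex2oat up front, relocated image (the defaults)
    ./test/run-all-tests --host --no-prebuild --no-relocate
    ./test/run-all-tests --64 --trace
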
diff --git a/test/run-test b/test/run-test
index 2989f25..ca7e68c 100755
--- a/test/run-test
+++ b/test/run-test
@@ -33,7 +33,11 @@
progdir=`pwd`
prog="${progdir}"/`basename "${prog}"`
test_dir="test-$$"
-tmp_dir="/tmp/$USER/${test_dir}"
+if [ -z "$TMPDIR" ]; then
+ tmp_dir="/tmp/$USER/${test_dir}"
+else
+ tmp_dir="${TMPDIR}/$USER/${test_dir}"
+fi
export JAVA="java"
export JAVAC="javac -g"
@@ -56,29 +60,33 @@
build="build"
run="run"
expected="expected.txt"
+check_cmd="check"
output="output.txt"
build_output="build-output.txt"
lib="libartd.so"
run_args="--quiet"
+prebuild_mode="yes"
target_mode="yes"
dev_mode="no"
update_mode="no"
+debug_mode="no"
+relocate="yes"
runtime="art"
usage="no"
build_only="no"
suffix64=""
+trace="false"
+always_clean="no"
while true; do
if [ "x$1" = "x--host" ]; then
target_mode="no"
- RUN="${progdir}/etc/host-run-test-jar"
DEX_LOCATION=$tmp_dir
shift
elif [ "x$1" = "x--jvm" ]; then
target_mode="no"
runtime="jvm"
- RUN="${progdir}/etc/reference-run-test-classes"
NEED_DEX="false"
shift
elif [ "x$1" = "x-O" ]; then
@@ -88,6 +96,18 @@
lib="libdvm.so"
runtime="dalvik"
shift
+ elif [ "x$1" = "x--relocate" ]; then
+ relocate="yes"
+ shift
+ elif [ "x$1" = "x--no-relocate" ]; then
+ relocate="no"
+ shift
+ elif [ "x$1" = "x--prebuild" ]; then
+ prebuild_mode="yes"
+ shift;
+ elif [ "x$1" = "x--no-prebuild" ]; then
+ prebuild_mode="no"
+ shift;
elif [ "x$1" = "x--image" ]; then
shift
image="$1"
@@ -162,7 +182,10 @@
suffix64="64"
shift
elif [ "x$1" = "x--trace" ]; then
- run_args="${run_args} --runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file:${DEX_LOCATION}/trace.bin"
+ trace="true"
+ shift
+ elif [ "x$1" = "x--always-clean" ]; then
+ always_clean="yes"
shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
@@ -174,13 +197,48 @@
done
mkdir -p $tmp_dir
+if [ "$trace" = "true" ]; then
+ run_args="${run_args} --runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file:${DEX_LOCATION}/trace.bin --runtime-option -Xmethod-trace-file-size:2000000"
+fi
+
+# Most interesting target architecture variables are Makefile variables, not environment variables.
+# Try to map the suffix64 flag and what we find in ${ANDROID_PRODUCT_OUT}/data/art-test to an architecture name.
+function guess_arch_name() {
+ grep32bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm|x86|mips)$'`
+ grep64bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm64|x86_64)$'`
+ if [ "x${suffix64}" = "x64" ]; then
+ target_arch_name=${grep64bit}
+ else
+ target_arch_name=${grep32bit}
+ fi
+}
+
+if [ "$target_mode" = "no" ]; then
+ if [ "$runtime" = "jvm" ]; then
+ RUN="${progdir}/etc/reference-run-test-classes"
+ if [ "$prebuild_mode" = "yes" ]; then
+ echo "--prebuild with --jvm is unsupported";
+ exit 1;
+ fi
+ else
+ RUN="${progdir}/etc/host-run-test-jar"
+ if [ "$prebuild_mode" = "yes" ]; then
+ run_args="${run_args} --prebuild"
+ fi
+ fi
+else
+ if [ "$prebuild_mode" = "yes" ]; then
+ RUN="${progdir}/etc/push-and-run-prebuilt-test-jar"
+ fi
+fi
+
if [ ! "$runtime" = "jvm" ]; then
run_args="${run_args} --lib $lib"
fi
if [ "$runtime" = "dalvik" ]; then
if [ "$target_mode" = "no" ]; then
- framework="${OUT}/system/framework"
+ framework="${ANDROID_PRODUCT_OUT}/system/framework"
bpath="${framework}/core.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/core-junit.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
run_args="${run_args} --boot -Xbootclasspath:${bpath}"
else
@@ -196,9 +254,17 @@
export ANDROID_HOST_OUT=$ANDROID_BUILD_TOP/out/host/linux-x86
fi
run_args="${run_args} --boot -Ximage:${ANDROID_HOST_OUT}/framework/core.art"
+ run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}"
else
+ guess_arch_name
+ run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}"
run_args="${run_args} --boot -Ximage:/data/art-test/core.art"
fi
+ if [ "$relocate" = "yes" ]; then
+ run_args="${run_args} --relocate"
+ else
+ run_args="${run_args} --no-relocate"
+ fi
fi
if [ "$dev_mode" = "yes" -a "$update_mode" = "yes" ]; then
@@ -241,7 +307,7 @@
echo " Runtime Options:"
echo " -O Run non-debug rather than debug build (off by default)."
echo " -Xcompiler-option Pass an option to the compiler."
- echo " -runtime-option Pass an option to the runtime."
+ echo " --runtime-option Pass an option to the runtime."
echo " --debug Wait for a debugger to attach."
echo " --gdb Run under gdb; incompatible with some tests."
echo " --build-only Build test files only (off by default)."
@@ -252,6 +318,13 @@
echo " --zygote Spawn the process from the Zygote." \
"If used, then the"
echo " other runtime options are ignored."
+ echo " --prebuild Run dex2oat on the files before starting the test. (default)"
+ echo " --no-prebuild Do not run dex2oat on the files before starting"
+ echo " the test."
+ echo " --relocate Relocate the image and oat files to a random"
+ echo " address before running the test. (default)"
+ echo " --no-relocate Do not relocate the image and oat files before"
+ echo " running the test."
echo " --host Use the host-mode virtual machine."
echo " --invoke-with Pass --invoke-with option to runtime."
echo " --dalvik Use Dalvik (off by default)."
@@ -260,6 +333,7 @@
"files."
echo " --64 Run the test in 64-bit mode"
echo " --trace Run with method tracing"
+ echo " --always-clean Delete the test files even if the test fails."
) 1>&2
exit 1
fi
@@ -296,8 +370,13 @@
cp "${progdir}/etc/default-run" run
fi
+if [ '!' -r "$check_cmd" ]; then
+ cp "${progdir}/etc/default-check" check
+fi
+
chmod 755 "$build"
chmod 755 "$run"
+chmod 755 "$check_cmd"
export TEST_NAME=`basename ${test_dir}`
@@ -307,8 +386,11 @@
file_size_limit=5120
elif echo "$test_dir" | grep 083; then
file_size_limit=5120
+elif echo "$test_dir" | grep 115; then
+# The native bridge test copies libarttest.so into its directory, which already needs 2MB.
+ file_size_limit=5120
fi
-if ! ulimit "$file_size_limit"; then
+if ! ulimit -S "$file_size_limit"; then
echo "ulimit file size setting failed"
fi
@@ -364,7 +446,7 @@
cp "$build_output" "$output"
echo "build exit status: $build_exit" >>"$output"
fi
- diff --strip-trailing-cr -q "$expected" "$output" >/dev/null
+ ./$check_cmd "$expected" "$output"
if [ "$?" = "0" ]; then
# output == expected
good="yes"
@@ -372,19 +454,8 @@
fi
fi
-# Clean up test files.
-if [ "$good" == "yes" ]; then
- cd "$oldwd"
- rm -rf "$tmp_dir"
- if [ "$target_mode" = "yes" -a "$build_exit" = "0" ]; then
- adb shell rm -rf $DEX_LOCATION
- fi
- exit 0
-fi
-
-
(
- if [ "$update_mode" != "yes" ]; then
+ if [ "$good" != "yes" -a "$update_mode" != "yes" ]; then
echo "${test_dir}: FAILED!"
echo ' '
echo '#################### info'
@@ -394,9 +465,33 @@
echo '####################'
echo ' '
fi
- echo "${TEST_NAME} files left in ${tmp_dir} on host"
- if [ "$target_mode" == "yes" ]; then
- echo "and in ${DEX_LOCATION} on target"
+
+) 1>&2
+
+# Clean up test files.
+if [ "$always_clean" = "yes" -o "$good" = "yes" ]; then
+ cd "$oldwd"
+ rm -rf "$tmp_dir"
+ if [ "$target_mode" = "yes" -a "$build_exit" = "0" ]; then
+ adb shell rm -rf $DEX_LOCATION
+ fi
+ if [ "$good" = "yes" ]; then
+ exit 0
+ fi
+fi
+
+
+(
+ if [ "$always_clean" = "yes" ]; then
+ echo "${TEST_NAME} files deleted from host "
+ if [ "$target_mode" == "yes" ]; then
+ echo "and from target"
+ fi
+ else
+ echo "${TEST_NAME} files left in ${tmp_dir} on host"
+ if [ "$target_mode" == "yes" ]; then
+ echo "and in ${DEX_LOCATION} on target"
+ fi
fi
) 1>&2
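
Taken together, the run-test changes make prebuilding and relocation the defaults, honor $TMPDIR for the scratch directory, and add --always-clean to remove test files even when a test fails. A few hypothetical invocations (001-HelloWorld is only an example test name):

    ./test/run-test --host 001-HelloWorld                            # prebuild + relocate by default
    ./test/run-test --host --no-prebuild --no-relocate 001-HelloWorld
    ./test/run-test --64 --gdb 001-HelloWorld                        # target, 64-bit, under gdbserver64
    TMPDIR=/var/tmp ./test/run-test --host --always-clean 001-HelloWorld
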
diff --git a/tools/art b/tools/art
old mode 100755
new mode 100644