Merge "Add support for CallKind::kCallOnMainAndSlowPath"
diff --git a/benchmark/Android.mk b/benchmark/Android.mk
index 17ea4da..2360bcc 100644
--- a/benchmark/Android.mk
+++ b/benchmark/Android.mk
@@ -45,7 +45,7 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
$(call set-target-local-cflags-vars,debug)
LOCAL_SHARED_LIBRARIES += libdl
LOCAL_MULTILIB := both
@@ -57,7 +57,7 @@
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) $(ART_HOST_DEBUG_ASFLAGS)
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
+ LOCAL_LDLIBS := -ldl -lpthread
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index a679ac2..bac0ff3 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -76,132 +76,13 @@
endif
#
-# Used to enable JIT
-#
-ART_JIT := false
-ifneq ($(wildcard art/JIT_ART),)
-$(info Enabling ART_JIT because of existence of art/JIT_ART)
-ART_JIT := true
-endif
-ifeq ($(WITH_ART_JIT), true)
-ART_JIT := true
-endif
-
-#
# Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
#
ART_DEFAULT_GC_TYPE ?= CMS
art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
-ART_HOST_CFLAGS :=
-ART_TARGET_CFLAGS :=
-
-ART_HOST_ASFLAGS :=
-ART_TARGET_ASFLAGS :=
-
-# Clang build support.
-
-# Host.
-ART_HOST_CLANG := false
-ifneq ($(WITHOUT_HOST_CLANG),true)
- # By default, host builds use clang for better warnings.
- ART_HOST_CLANG := true
-endif
-
-# Clang on the target. Target builds use GCC by default.
-ifneq ($(USE_CLANG_PLATFORM_BUILD),)
-ART_TARGET_CLANG := $(USE_CLANG_PLATFORM_BUILD)
-else
-ART_TARGET_CLANG := false
-endif
-ART_TARGET_CLANG_arm :=
-ART_TARGET_CLANG_arm64 :=
-ART_TARGET_CLANG_mips :=
-ART_TARGET_CLANG_mips64 :=
-ART_TARGET_CLANG_x86 :=
-ART_TARGET_CLANG_x86_64 :=
-
-define set-target-local-clang-vars
- LOCAL_CLANG := $(ART_TARGET_CLANG)
- $(foreach arch,$(ART_TARGET_SUPPORTED_ARCH),
- ifneq ($$(ART_TARGET_CLANG_$(arch)),)
- LOCAL_CLANG_$(arch) := $$(ART_TARGET_CLANG_$(arch))
- endif)
-endef
-
-ART_TARGET_CLANG_CFLAGS :=
-ART_TARGET_CLANG_CFLAGS_arm :=
-ART_TARGET_CLANG_CFLAGS_arm64 :=
-ART_TARGET_CLANG_CFLAGS_mips :=
-ART_TARGET_CLANG_CFLAGS_mips64 :=
-ART_TARGET_CLANG_CFLAGS_x86 :=
-ART_TARGET_CLANG_CFLAGS_x86_64 :=
-
-# Warn about thread safety violations with clang.
-art_clang_cflags := -Wthread-safety -Wthread-safety-negative
-
-# Warn if switch fallthroughs aren't annotated.
-art_clang_cflags += -Wimplicit-fallthrough
-
-# Enable float equality warnings.
-art_clang_cflags += -Wfloat-equal
-
-# Enable warning of converting ints to void*.
-art_clang_cflags += -Wint-to-void-pointer-cast
-
-# Enable warning of wrong unused annotations.
-art_clang_cflags += -Wused-but-marked-unused
-
-# Enable warning for deprecated language features.
-art_clang_cflags += -Wdeprecated
-
-# Enable warning for unreachable break & return.
-art_clang_cflags += -Wunreachable-code-break -Wunreachable-code-return
-
-# Bug: http://b/29823425 Disable -Wconstant-conversion and
-# -Wundefined-var-template for Clang update to r271374
-art_clang_cflags += -Wno-constant-conversion -Wno-undefined-var-template
-
-# Enable missing-noreturn only on non-Mac. As lots of things are not implemented for Apple, it's
-# a pain.
-ifneq ($(HOST_OS),darwin)
- art_clang_cflags += -Wmissing-noreturn
-endif
-
-
-# GCC-only warnings.
-art_gcc_cflags := -Wunused-but-set-parameter
-# Suggest const: too many false positives, but good for a trial run.
-# -Wsuggest-attribute=const
-# Useless casts: too many, as we need to be 32/64 agnostic, but the compiler knows.
-# -Wuseless-cast
-# Zero-as-null: Have to convert all NULL and "diagnostic ignore" all includes like libnativehelper
-# that are still stuck pre-C++11.
-# -Wzero-as-null-pointer-constant \
-# Suggest final: Have to move to a more recent GCC.
-# -Wsuggest-final-types
-
-ART_TARGET_CLANG_CFLAGS := $(art_clang_cflags)
-ifeq ($(ART_HOST_CLANG),true)
- # Bug: 15446488. We don't omit the frame pointer to work around
- # clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
- ART_HOST_CFLAGS += $(art_clang_cflags) -fno-omit-frame-pointer
-else
- ART_HOST_CFLAGS += $(art_gcc_cflags)
-endif
-ifneq ($(ART_TARGET_CLANG),true)
- ART_TARGET_CFLAGS += $(art_gcc_cflags)
-else
- # TODO: if we ever want to support GCC/Clang mix for multi-target products, this needs to be
- # split up.
- ifeq ($(ART_TARGET_CLANG_$(TARGET_ARCH)),false)
- ART_TARGET_CFLAGS += $(art_gcc_cflags)
- endif
-endif
-
-# Clear local variables now their use has ended.
-art_clang_cflags :=
-art_gcc_cflags :=
+ART_HOST_CLANG := true
+ART_TARGET_CLANG := true
ART_CPP_EXTENSION := .cc
@@ -219,8 +100,41 @@
# Note: technically we only need this on device, but this avoids the duplication of the includes.
ART_C_INCLUDES += bionic/libc/private
+art_cflags :=
+
+# Warn about thread safety violations with clang.
+art_cflags += -Wthread-safety -Wthread-safety-negative
+
+# Warn if switch fallthroughs aren't annotated.
+art_cflags += -Wimplicit-fallthrough
+
+# Enable float equality warnings.
+art_cflags += -Wfloat-equal
+
+# Enable warning of converting ints to void*.
+art_cflags += -Wint-to-void-pointer-cast
+
+# Enable warning of wrong unused annotations.
+art_cflags += -Wused-but-marked-unused
+
+# Enable warning for deprecated language features.
+art_cflags += -Wdeprecated
+
+# Enable warning for unreachable break & return.
+art_cflags += -Wunreachable-code-break -Wunreachable-code-return
+
+# Bug: http://b/29823425 Disable -Wconstant-conversion and
+# -Wundefined-var-template for Clang update to r271374
+art_cflags += -Wno-constant-conversion -Wno-undefined-var-template
+
+# Enable missing-noreturn only on non-Mac. As lots of things are not implemented for Apple, it's
+# a pain.
+ifneq ($(HOST_OS),darwin)
+ art_cflags += -Wmissing-noreturn
+endif
+
# Base set of cflags used by all things ART.
-art_cflags := \
+art_cflags += \
-fno-rtti \
-std=gnu++11 \
-ggdb3 \
@@ -363,18 +277,27 @@
endif
endif
+ART_HOST_CFLAGS := $(art_cflags)
+ART_TARGET_CFLAGS := $(art_cflags)
+
+ART_HOST_ASFLAGS := $(art_asflags)
+ART_TARGET_ASFLAGS := $(art_asflags)
+
+# Bug: 15446488. We don't omit the frame pointer to work around
+# clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
+ART_HOST_CFLAGS += -fno-omit-frame-pointer
+
ifndef LIBART_IMG_HOST_BASE_ADDRESS
$(error LIBART_IMG_HOST_BASE_ADDRESS unset)
endif
-ART_HOST_CFLAGS += $(art_cflags) -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
+ART_HOST_CFLAGS += -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default $(art_host_cflags)
-ART_HOST_ASFLAGS += $(art_asflags)
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
-ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET \
+ART_TARGET_CFLAGS += -DART_TARGET \
-DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS) \
ifeq ($(ART_TARGET_LINUX),true)
@@ -389,7 +312,6 @@
endif
ART_TARGET_CFLAGS += $(art_target_cflags)
-ART_TARGET_ASFLAGS += $(art_asflags)
ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags)
@@ -435,18 +357,11 @@
art_target_non_debug_cflags :=
art_default_gc_type_cflags :=
-ART_HOST_LDLIBS :=
-ifneq ($(ART_HOST_CLANG),true)
- # GCC lacks libc++ assumed atomic operations, grab via libatomic.
- ART_HOST_LDLIBS += -latomic
-endif
-
ART_TARGET_LDFLAGS :=
# $(1): ndebug_or_debug
define set-target-local-cflags-vars
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
- LOCAL_CFLAGS_x86 += $(ART_TARGET_CFLAGS_x86)
LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
LOCAL_LDFLAGS += $(ART_TARGET_LDFLAGS)
art_target_cflags_ndebug_or_debug := $(1)
@@ -458,10 +373,6 @@
LOCAL_ASFLAGS += $(ART_TARGET_NON_DEBUG_ASFLAGS)
endif
- LOCAL_CLANG_CFLAGS := $(ART_TARGET_CLANG_CFLAGS)
- $(foreach arch,$(ART_TARGET_SUPPORTED_ARCH),
- LOCAL_CLANG_CFLAGS_$(arch) += $$(ART_TARGET_CLANG_CFLAGS_$(arch)))
-
# Clear locally used variables.
art_target_cflags_ndebug_or_debug :=
endef
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 157500b..2db16af 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -80,12 +80,11 @@
endif
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
$(call set-target-local-cflags-vars,$(6))
LOCAL_SHARED_LIBRARIES += libdl
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
ifeq ($$(art_ndebug_or_debug),debug)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index cc96cf0..1e2c4ef 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -71,7 +71,7 @@
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
ART_GTEST_dex_cache_test_DEX_DEPS := Main
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
@@ -414,9 +414,9 @@
LOCAL_STATIC_LIBRARIES += libgtest
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
-$(eval $(call set-target-local-clang-vars))
+$(eval LOCAL_CLANG := $(ART_TARGET_CLANG))
$(eval $(call set-target-local-cflags-vars,debug))
-LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
+LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
include $(BUILD_SHARED_LIBRARY)
include $(CLEAR_VARS)
@@ -432,7 +432,7 @@
LOCAL_LDLIBS += -ldl -lpthread
LOCAL_MULTILIB := both
LOCAL_CLANG := $(ART_HOST_CLANG)
-LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
+LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
include $(BUILD_HOST_SHARED_LIBRARY)
@@ -634,13 +634,13 @@
LOCAL_CFLAGS := $$(ART_TEST_CFLAGS)
ifeq ($$(art_target_or_host),target)
- $$(eval $$(call set-target-local-clang-vars))
+ $$(eval LOCAL_CLANG := $$(ART_TARGET_CLANG))
$$(eval $$(call set-target-local-cflags-vars,debug))
LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl-arm64
LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
- LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
+ LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
include $$(BUILD_EXECUTABLE)
library_path :=
2nd_library_path :=
@@ -681,12 +681,12 @@
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS) $$(ART_HOST_DEBUG_ASFLAGS)
LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl-arm64
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
+ LOCAL_LDLIBS := -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
LOCAL_MODULE_STEM_32 := $$(art_gtest_name)32
LOCAL_MODULE_STEM_64 := $$(art_gtest_name)64
- LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
+ LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
include $$(BUILD_HOST_EXECUTABLE)
ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES :=
diff --git a/compiler/Android.mk b/compiler/Android.mk
index e3f8a5c..8261a87 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -157,7 +157,6 @@
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
compiled_method.h \
- dex/compiler_enums.h \
dex/dex_to_dex_compiler.h \
driver/compiler_driver.h \
driver/compiler_options.h \
@@ -253,13 +252,12 @@
LOCAL_CFLAGS := $$(LIBART_COMPILER_CFLAGS)
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
$(call set-target-local-cflags-vars,$(2))
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
ifeq ($$(art_static_or_shared),static)
LOCAL_LDFLAGS += -static
endif
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index bf29e1c..06a39b2 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set_features.h"
#include "art_field-inl.h"
#include "art_method.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/quick_compiler_callbacks.h"
@@ -115,7 +116,7 @@
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
- size_t pointer_size = class_linker_->GetImagePointerSize();
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
MakeExecutable(&m);
}
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 2d139eb..c942375 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -115,13 +115,6 @@
std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
};
-// TODO: When read barrier works with all tests, get rid of this.
-#define TEST_DISABLED_FOR_READ_BARRIER() \
- if (kUseReadBarrier) { \
- printf("WARNING: TEST DISABLED FOR READ BARRIER\n"); \
- return; \
- }
-
// TODO: When read barrier works with all Optimizing back ends, get rid of this.
#define TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS() \
if (kUseReadBarrier && GetCompilerKind() == Compiler::kOptimizing) { \
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
deleted file mode 100644
index 8800e4b..0000000
--- a/compiler/dex/compiler_enums.h
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_COMPILER_ENUMS_H_
-#define ART_COMPILER_DEX_COMPILER_ENUMS_H_
-
-#include "dex_instruction.h"
-
-namespace art {
-
-enum RegisterClass {
- kInvalidRegClass,
- kCoreReg,
- kFPReg,
- kRefReg,
- kAnyReg,
-};
-std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs);
-
-enum BitsUsed {
- kSize32Bits,
- kSize64Bits,
- kSize128Bits,
- kSize256Bits,
- kSize512Bits,
- kSize1024Bits,
-};
-std::ostream& operator<<(std::ostream& os, const BitsUsed& rhs);
-
-enum SpecialTargetRegister {
- kSelf, // Thread pointer.
- kSuspend, // Used to reduce suspend checks for some targets.
- kLr,
- kPc,
- kSp,
- kArg0,
- kArg1,
- kArg2,
- kArg3,
- kArg4,
- kArg5,
- kArg6,
- kArg7,
- kFArg0,
- kFArg1,
- kFArg2,
- kFArg3,
- kFArg4,
- kFArg5,
- kFArg6,
- kFArg7,
- kFArg8,
- kFArg9,
- kFArg10,
- kFArg11,
- kFArg12,
- kFArg13,
- kFArg14,
- kFArg15,
- kRet0,
- kRet1,
- kInvokeTgt,
- kHiddenArg,
- kHiddenFpArg,
- kCount
-};
-std::ostream& operator<<(std::ostream& os, const SpecialTargetRegister& code);
-
-enum RegLocationType {
- kLocDalvikFrame = 0, // Normal Dalvik register
- kLocPhysReg,
- kLocCompilerTemp,
- kLocInvalid
-};
-std::ostream& operator<<(std::ostream& os, const RegLocationType& rhs);
-
-enum BBType {
- kNullBlock,
- kEntryBlock,
- kDalvikByteCode,
- kExitBlock,
- kExceptionHandling,
- kDead,
-};
-std::ostream& operator<<(std::ostream& os, const BBType& code);
-
-// Shared pseudo opcodes - must be < 0.
-enum LIRPseudoOpcode {
- kPseudoPrologueBegin = -18,
- kPseudoPrologueEnd = -17,
- kPseudoEpilogueBegin = -16,
- kPseudoEpilogueEnd = -15,
- kPseudoExportedPC = -14,
- kPseudoSafepointPC = -13,
- kPseudoIntrinsicRetry = -12,
- kPseudoSuspendTarget = -11,
- kPseudoThrowTarget = -10,
- kPseudoCaseLabel = -9,
- kPseudoBarrier = -8,
- kPseudoEntryBlock = -7,
- kPseudoExitBlock = -6,
- kPseudoTargetLabel = -5,
- kPseudoDalvikByteCodeBoundary = -4,
- kPseudoPseudoAlign4 = -3,
- kPseudoEHBlockLabel = -2,
- kPseudoNormalBlockLabel = -1,
-};
-std::ostream& operator<<(std::ostream& os, const LIRPseudoOpcode& rhs);
-
-enum ExtendedMIROpcode {
- kMirOpFirst = kNumPackedOpcodes,
- kMirOpPhi = kMirOpFirst,
-
- // @brief Copy from one VR to another.
- // @details
- // vA: destination VR
- // vB: source VR
- kMirOpCopy,
-
- // @brief Used to do float comparison with less-than bias.
- // @details Unlike cmpl-float, this does not store result of comparison in VR.
- // vA: left-hand side VR for comparison.
- // vB: right-hand side VR for comparison.
- kMirOpFusedCmplFloat,
-
- // @brief Used to do float comparison with greater-than bias.
- // @details Unlike cmpg-float, this does not store result of comparison in VR.
- // vA: left-hand side VR for comparison.
- // vB: right-hand side VR for comparison.
- kMirOpFusedCmpgFloat,
-
- // @brief Used to do double comparison with less-than bias.
- // @details Unlike cmpl-double, this does not store result of comparison in VR.
- // vA: left-hand side wide VR for comparison.
- // vB: right-hand side wide VR for comparison.
- kMirOpFusedCmplDouble,
-
- // @brief Used to do double comparison with greater-than bias.
- // @details Unlike cmpg-double, this does not store result of comparison in VR.
- // vA: left-hand side wide VR for comparison.
- // vB: right-hand side wide VR for comparison.
- kMirOpFusedCmpgDouble,
-
- // @brief Used to do comparison of 64-bit long integers.
- // @details Unlike cmp-long, this does not store result of comparison in VR.
- // vA: left-hand side wide VR for comparison.
- // vB: right-hand side wide VR for comparison.
- kMirOpFusedCmpLong,
-
- // @brief This represents no-op.
- kMirOpNop,
-
- // @brief Do a null check on the object register.
- // @details The backends may implement this implicitly or explicitly. This MIR is guaranteed
- // to have the correct offset as an exception thrower.
- // vA: object register
- kMirOpNullCheck,
-
- kMirOpRangeCheck,
- kMirOpDivZeroCheck,
- kMirOpCheck,
- kMirOpSelect,
-
- // Vector opcodes:
- // TypeSize is an encoded field giving the element type and the vector size.
- // It is encoded as OpSize << 16 | (number of bits in vector)
- //
- // Destination and source are integers that will be interpreted by the
- // backend that supports Vector operations. Backends are permitted to support only
- // certain vector register sizes.
- //
- // At this point, only two operand instructions are supported. Three operand instructions
- // could be supported by using a bit in TypeSize and arg[0] where needed.
-
- // @brief MIR to move constant data to a vector register
- // vA: destination
- // vB: number of bits in register
- // args[0]~args[3]: up to 128 bits of data for initialization
- kMirOpConstVector,
-
- // @brief MIR to move a vectorized register to another
- // vA: destination
- // vB: source
- // vC: TypeSize
- kMirOpMoveVector,
-
- // @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: source
- // vC: TypeSize
- kMirOpPackedMultiply,
-
- // @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: source
- // vC: TypeSize
- kMirOpPackedAddition,
-
- // @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: source
- // vC: TypeSize
- kMirOpPackedSubtract,
-
- // @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: amount to shift
- // vC: TypeSize
- kMirOpPackedShiftLeft,
-
- // @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: amount to shift
- // vC: TypeSize
- kMirOpPackedSignedShiftRight,
-
- // @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: amount to shift
- // vC: TypeSize
- kMirOpPackedUnsignedShiftRight,
-
- // @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: source
- // vC: TypeSize
- kMirOpPackedAnd,
-
- // @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: source
- // vC: TypeSize
- kMirOpPackedOr,
-
- // @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
- // vA: destination and source
- // vB: source
- // vC: TypeSize
- kMirOpPackedXor,
-
- // @brief Reduce a 128-bit packed element into a single VR by taking lower bits
- // @details Instruction does a horizontal addition of the packed elements and then adds it to VR
- // vA: destination and source VR (not vector register)
- // vB: source (vector register)
- // vC: TypeSize
- kMirOpPackedAddReduce,
-
- // @brief Extract a packed element into a single VR.
- // vA: destination VR (not vector register)
- // vB: source (vector register)
- // vC: TypeSize
- // arg[0]: The index to use for extraction from vector register (which packed element)
- kMirOpPackedReduce,
-
- // @brief Create a vector value, with all TypeSize values equal to vC
- // vA: destination vector register
- // vB: source VR (not vector register)
- // vC: TypeSize
- kMirOpPackedSet,
-
- // @brief Reserve a range of vector registers.
- // vA: Start vector register to reserve.
- // vB: Inclusive end vector register to reserve.
- // @note: The backend may choose to map vector numbers used in vector opcodes.
- // Reserved registers are removed from the list of backend temporary pool.
- kMirOpReserveVectorRegisters,
-
- // @brief Free a range of reserved vector registers
- // vA: Start vector register to unreserve.
- // vB: Inclusive end vector register to unreserve.
- // @note: All currently reserved vector registers are returned to the temporary pool.
- kMirOpReturnVectorRegisters,
-
- // @brief Create a memory barrier.
- // vA: a constant defined by enum MemBarrierKind.
- kMirOpMemBarrier,
-
- // @brief Used to fill a vector register with array values.
- // @details Just as with normal arrays, access on null object register must ensure NullPointerException
- // and invalid index must ensure ArrayIndexOutOfBoundsException. Exception behavior must be the same
- // as the aget it replaced and must happen at same index. Therefore, it is generally recommended that
- // before using this MIR, it is proven that exception is guaranteed to not be thrown and marked with
- // MIR_IGNORE_NULL_CHECK and MIR_IGNORE_RANGE_CHECK.
- // vA: destination vector register
- // vB: array register
- // vC: index register
- // arg[0]: TypeSize (most other vector opcodes have this in vC)
- kMirOpPackedArrayGet,
-
- // @brief Used to store a vector register into array.
- // @details Just as with normal arrays, access on null object register must ensure NullPointerException
- // and invalid index must ensure ArrayIndexOutOfBoundsException. Exception behavior must be the same
- // as the aput it replaced and must happen at same index. Therefore, it is generally recommended that
- // before using this MIR, it is proven that exception is guaranteed to not be thrown and marked with
- // MIR_IGNORE_NULL_CHECK and MIR_IGNORE_RANGE_CHECK.
- // vA: source vector register
- // vB: array register
- // vC: index register
- // arg[0]: TypeSize (most other vector opcodes have this in vC)
- kMirOpPackedArrayPut,
-
- // @brief Multiply-add integer.
- // vA: destination
- // vB: multiplicand
- // vC: multiplier
- // arg[0]: addend
- kMirOpMaddInt,
-
- // @brief Multiply-subtract integer.
- // vA: destination
- // vB: multiplicand
- // vC: multiplier
- // arg[0]: minuend
- kMirOpMsubInt,
-
- // @brief Multiply-add long.
- // vA: destination
- // vB: multiplicand
- // vC: multiplier
- // arg[0]: addend
- kMirOpMaddLong,
-
- // @brief Multiply-subtract long.
- // vA: destination
- // vB: multiplicand
- // vC: multiplier
- // arg[0]: minuend
- kMirOpMsubLong,
-
- kMirOpLast,
-};
-
-enum MIROptimizationFlagPositions {
- kMIRIgnoreNullCheck = 0,
- kMIRIgnoreRangeCheck,
- kMIRIgnoreCheckCast,
- kMIRStoreNonNullValue, // Storing non-null value, always mark GC card.
- kMIRClassIsInitialized,
- kMIRClassIsInDexCache,
- kMirIgnoreDivZeroCheck,
- kMIRInlined, // Invoke is inlined (i.e. dead).
- kMIRInlinedPred, // Invoke is inlined via prediction.
- kMIRCallee, // Instruction is inlined from callee.
- kMIRIgnoreSuspendCheck,
- kMIRDup,
- kMIRMark, // Temporary node mark can be used by
- // opt passes for their private needs.
- kMIRStoreNonTemporal,
- kMIRLastMIRFlag,
-};
-
-// For successor_block_list.
-enum BlockListType {
- kNotUsed = 0,
- kCatch,
- kPackedSwitch,
- kSparseSwitch,
-};
-std::ostream& operator<<(std::ostream& os, const BlockListType& rhs);
-
-enum AssemblerStatus {
- kSuccess,
- kRetryAll,
-};
-std::ostream& operator<<(std::ostream& os, const AssemblerStatus& rhs);
-
-enum OpSize {
- kWord, // Natural word size of target (32/64).
- k32,
- k64,
- kReference, // Object reference; compressed on 64-bit targets.
- kSingle,
- kDouble,
- kUnsignedHalf,
- kSignedHalf,
- kUnsignedByte,
- kSignedByte,
-};
-std::ostream& operator<<(std::ostream& os, const OpSize& kind);
-
-enum OpKind {
- kOpMov,
- kOpCmov,
- kOpMvn,
- kOpCmp,
- kOpLsl,
- kOpLsr,
- kOpAsr,
- kOpRor,
- kOpNot,
- kOpAnd,
- kOpOr,
- kOpXor,
- kOpNeg,
- kOpAdd,
- kOpAdc,
- kOpSub,
- kOpSbc,
- kOpRsub,
- kOpMul,
- kOpDiv,
- kOpRem,
- kOpBic,
- kOpCmn,
- kOpTst,
- kOpRev,
- kOpRevsh,
- kOpBkpt,
- kOpBlx,
- kOpPush,
- kOpPop,
- kOp2Char,
- kOp2Short,
- kOp2Byte,
- kOpCondBr,
- kOpUncondBr,
- kOpBx,
- kOpInvalid,
-};
-std::ostream& operator<<(std::ostream& os, const OpKind& rhs);
-
-enum MoveType {
- kMov8GP, // Move 8-bit general purpose register.
- kMov16GP, // Move 16-bit general purpose register.
- kMov32GP, // Move 32-bit general purpose register.
- kMov64GP, // Move 64-bit general purpose register.
- kMov32FP, // Move 32-bit FP register.
- kMov64FP, // Move 64-bit FP register.
- kMovLo64FP, // Move low 32-bits of 64-bit FP register.
- kMovHi64FP, // Move high 32-bits of 64-bit FP register.
- kMovU128FP, // Move 128-bit FP register to/from possibly unaligned region.
- kMov128FP = kMovU128FP,
- kMovA128FP, // Move 128-bit FP register to/from region surely aligned to 16-bytes.
- kMovLo128FP, // Move low 64-bits of 128-bit FP register.
- kMovHi128FP, // Move high 64-bits of 128-bit FP register.
-};
-std::ostream& operator<<(std::ostream& os, const MoveType& kind);
-
-enum ConditionCode {
- kCondEq, // equal
- kCondNe, // not equal
- kCondCs, // carry set
- kCondCc, // carry clear
- kCondUlt, // unsigned less than
- kCondUge, // unsigned greater than or same
- kCondMi, // minus
- kCondPl, // plus, positive or zero
- kCondVs, // overflow
- kCondVc, // no overflow
- kCondHi, // unsigned greater than
- kCondLs, // unsigned lower or same
- kCondGe, // signed greater than or equal
- kCondLt, // signed less than
- kCondGt, // signed greater than
- kCondLe, // signed less than or equal
- kCondAl, // always
- kCondNv, // never
-};
-std::ostream& operator<<(std::ostream& os, const ConditionCode& kind);
-
-// Target specific condition encodings
-enum ArmConditionCode {
- kArmCondEq = 0x0, // 0000
- kArmCondNe = 0x1, // 0001
- kArmCondCs = 0x2, // 0010
- kArmCondCc = 0x3, // 0011
- kArmCondMi = 0x4, // 0100
- kArmCondPl = 0x5, // 0101
- kArmCondVs = 0x6, // 0110
- kArmCondVc = 0x7, // 0111
- kArmCondHi = 0x8, // 1000
- kArmCondLs = 0x9, // 1001
- kArmCondGe = 0xa, // 1010
- kArmCondLt = 0xb, // 1011
- kArmCondGt = 0xc, // 1100
- kArmCondLe = 0xd, // 1101
- kArmCondAl = 0xe, // 1110
- kArmCondNv = 0xf, // 1111
-};
-std::ostream& operator<<(std::ostream& os, const ArmConditionCode& kind);
-
-enum X86ConditionCode {
- kX86CondO = 0x0, // overflow
- kX86CondNo = 0x1, // not overflow
-
- kX86CondB = 0x2, // below
- kX86CondNae = kX86CondB, // not-above-equal
- kX86CondC = kX86CondB, // carry
-
- kX86CondNb = 0x3, // not-below
- kX86CondAe = kX86CondNb, // above-equal
- kX86CondNc = kX86CondNb, // not-carry
-
- kX86CondZ = 0x4, // zero
- kX86CondEq = kX86CondZ, // equal
-
- kX86CondNz = 0x5, // not-zero
- kX86CondNe = kX86CondNz, // not-equal
-
- kX86CondBe = 0x6, // below-equal
- kX86CondNa = kX86CondBe, // not-above
-
- kX86CondNbe = 0x7, // not-below-equal
- kX86CondA = kX86CondNbe, // above
-
- kX86CondS = 0x8, // sign
- kX86CondNs = 0x9, // not-sign
-
- kX86CondP = 0xa, // 8-bit parity even
- kX86CondPE = kX86CondP,
-
- kX86CondNp = 0xb, // 8-bit parity odd
- kX86CondPo = kX86CondNp,
-
- kX86CondL = 0xc, // less-than
- kX86CondNge = kX86CondL, // not-greater-equal
-
- kX86CondNl = 0xd, // not-less-than
- kX86CondGe = kX86CondNl, // greater-equal
-
- kX86CondLe = 0xe, // less-than-equal
- kX86CondNg = kX86CondLe, // not-greater
-
- kX86CondNle = 0xf, // not-less-than-equal
- kX86CondG = kX86CondNle, // greater
-};
-std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
-
-enum DividePattern {
- DivideNone,
- Divide3,
- Divide5,
- Divide7,
-};
-std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
-
-/**
- * @brief Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
- * @details We define the combined barrier types that are actually required
- * by the Java Memory Model, rather than using exactly the terminology from
- * the JSR-133 cookbook. These should, in many cases, be replaced by acquire/release
- * primitives. Note that the JSR-133 cookbook generally does not deal with
- * store atomicity issues, and the recipes there are not always entirely sufficient.
- * The current recipe is as follows:
- * -# Use AnyStore ~= (LoadStore | StoreStore) ~= release barrier before volatile store.
- * -# Use AnyAny barrier after volatile store. (StoreLoad is as expensive.)
- * -# Use LoadAny barrier ~= (LoadLoad | LoadStore) ~= acquire barrier after each volatile load.
- * -# Use StoreStore barrier after all stores but before return from any constructor whose
- * class has final fields.
- * -# Use NTStoreStore to order non-temporal stores with respect to all later
- * store-to-memory instructions. Only generated together with non-temporal stores.
- */
-enum MemBarrierKind {
- kAnyStore,
- kLoadAny,
- kStoreStore,
- kAnyAny,
- kNTStoreStore,
- kLastBarrierKind = kNTStoreStore
-};
-std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
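[Editor's note] The MemBarrierKind recipe above corresponds closely to C++11 acquire/release atomics. A minimal sketch of the volatile-store / volatile-load pairing the comment describes, using std::atomic; the names `flag` and `payload` are illustrative, not from ART:

    #include <atomic>

    int payload = 0;
    std::atomic<bool> flag{false};

    void Writer() {
      payload = 42;
      // kAnyStore ~= release barrier issued before the volatile store.
      flag.store(true, std::memory_order_release);
    }

    void Reader() {
      // kLoadAny ~= acquire barrier issued after the volatile load.
      if (flag.load(std::memory_order_acquire)) {
        // The release/acquire pair makes the store to payload visible here.
        int observed = payload;
        (void)observed;
      }
    }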
-
-enum OpFeatureFlags {
- kIsBranch = 0,
- kNoOperand,
- kIsUnaryOp,
- kIsBinaryOp,
- kIsTertiaryOp,
- kIsQuadOp,
- kIsQuinOp,
- kIsSextupleOp,
- kIsIT,
- kIsMoveOp,
- kMemLoad,
- kMemStore,
- kMemVolatile,
- kMemScaledx0,
- kMemScaledx2,
- kMemScaledx4,
- kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
- kRegDef0,
- kRegDef1,
- kRegDef2,
- kRegDefA,
- kRegDefD,
- kRegDefFPCSList0,
- kRegDefFPCSList2,
- kRegDefList0,
- kRegDefList1,
- kRegDefList2,
- kRegDefLR,
- kRegDefSP,
- kRegUse0,
- kRegUse1,
- kRegUse2,
- kRegUse3,
- kRegUse4,
- kRegUseA,
- kRegUseC,
- kRegUseD,
- kRegUseB,
- kRegUseFPCSList0,
- kRegUseFPCSList2,
- kRegUseList0,
- kRegUseList1,
- kRegUseLR,
- kRegUsePC,
- kRegUseSP,
- kSetsCCodes,
- kUsesCCodes,
- kUseFpStack,
- kUseHi,
- kUseLo,
- kDefHi,
- kDefLo
-};
-std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& rhs);
-
-enum SelectInstructionKind {
- kSelectNone,
- kSelectConst,
- kSelectMove,
- kSelectGoto
-};
-std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
-
-// LIR fixup kinds for Arm and X86.
-enum FixupKind {
- kFixupNone,
- kFixupLabel, // For labels we just adjust the offset.
- kFixupLoad, // Mostly for immediates.
- kFixupVLoad, // FP load which *may* be pc-relative.
- kFixupCBxZ, // Cbz, Cbnz.
- kFixupTBxZ, // Tbz, Tbnz.
- kFixupCondBranch, // Conditional branch
- kFixupT1Branch, // Thumb1 Unconditional branch
- kFixupT2Branch, // Thumb2 Unconditional branch
- kFixupBlx1, // Blx1 (start of Blx1/Blx2 pair).
- kFixupBl1, // Bl1 (start of Bl1/Bl2 pair).
- kFixupAdr, // Adr.
- kFixupMovImmLST, // kThumb2MovImm16LST.
- kFixupMovImmHST, // kThumb2MovImm16HST.
- kFixupAlign4, // Align to 4-byte boundary.
- kFixupA53Erratum835769, // Cortex A53 Erratum 835769.
- kFixupSwitchTable, // X86_64 packed switch table.
-};
-std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
-
-enum VolatileKind {
- kNotVolatile, // Load/Store is not volatile
- kVolatile // Load/Store is volatile
-};
-std::ostream& operator<<(std::ostream& os, const VolatileKind& kind);
-
-enum WideKind {
- kNotWide, // Non-wide view
- kWide, // Wide view
- kRef // Ref width
-};
-std::ostream& operator<<(std::ostream& os, const WideKind& kind);
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_COMPILER_ENUMS_H_
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 951b075..8d53dbf 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -843,13 +843,14 @@
}
}
-uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) {
+uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index,
+ PointerSize pointer_size) {
ReaderMutexLock mu(Thread::Current(), lock_);
auto it = inline_methods_.find(method_index);
if (it != inline_methods_.end() && (it->second.opcode == kInlineStringInit)) {
uint32_t string_init_base_offset = Thread::QuickEntryPointOffsetWithSize(
OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString), pointer_size);
- return string_init_base_offset + it->second.d.data * pointer_size;
+ return string_init_base_offset + it->second.d.data * static_cast<size_t>(pointer_size);
}
return 0;
}
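[Editor's note] The new PointerSize parameter explains the static_cast added to the offset arithmetic above: a scoped enum does not convert implicitly to size_t. A sketch of the likely shape of the type and the cast, assuming the k32 = 4 / k64 = 8 values implied by the casts elsewhere in this diff:

    #include <cstddef>

    // Assumed shape of base/enums.h's PointerSize: a scoped enum whose
    // enumerators carry the pointer width in bytes.
    enum class PointerSize : size_t {
      k32 = 4,
      k64 = 8,
    };

    size_t StringInitOffset(size_t base, size_t slot, PointerSize pointer_size) {
      // Scoped enums have no implicit integer conversion, so the arithmetic
      // needs an explicit cast -- the exact change made in the hunk above.
      return base + slot * static_cast<size_t>(pointer_size);
    }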
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index fbe403f..dbdfa24 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -18,10 +18,11 @@
#define ART_COMPILER_DEX_QUICK_DEX_FILE_METHOD_INLINER_H_
#include <stdint.h>
+
+#include "base/enums.h"
#include "base/mutex.h"
#include "base/macros.h"
#include "safe_map.h"
-#include "dex/compiler_enums.h"
#include "dex_file.h"
#include "quick/inline_method_analyser.h"
@@ -31,6 +32,13 @@
class MethodVerifier;
} // namespace verifier
+enum OpSize {
+ k32,
+ k64,
+ kSignedHalf,
+ kSignedByte,
+};
+
/**
* Handles inlining of methods from a particular DexFile.
*
@@ -76,7 +84,7 @@
/**
* Gets the thread pointer entrypoint offset for a string init method index and pointer size.
*/
- uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
+ uint32_t GetOffsetForStringInit(uint32_t method_index, PointerSize pointer_size)
REQUIRES(!lock_);
/**
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index bace014..4bcd59a 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -21,6 +21,7 @@
#include <vector>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "dex_file.h"
@@ -169,7 +170,7 @@
continue;
}
auto* cl = Runtime::Current()->GetClassLinker();
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size);
if (abstract_method == nullptr) {
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 94f5acc..3a260f5 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -21,6 +21,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "dex_compilation_unit.h"
#include "mirror/class_loader.h"
@@ -336,7 +337,7 @@
methods_declaring_class->IsFinal());
// For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
// the super class.
- const size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ const PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
// TODO We should be able to sharpen if we are going into the boot image as well.
bool can_sharpen_super_based_on_type = same_dex_file &&
(*invoke_type == kSuper) &&
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 4c0095d..8286033 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -27,6 +27,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
@@ -434,10 +435,10 @@
#define CREATE_TRAMPOLINE(type, abi, offset) \
if (Is64BitInstructionSet(instruction_set_)) { \
return CreateTrampoline64(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(8, offset)); \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k64, offset)); \
} else { \
return CreateTrampoline32(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(4, offset)); \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k32, offset)); \
}
std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookup() const {
@@ -1015,7 +1016,7 @@
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle, size_t pointer_size)
+ void ResolveExceptionsForMethod(ArtMethod* method_handle, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
@@ -1147,7 +1148,7 @@
// Make a copy of the handle so that we don't clobber it doing Assign.
MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get()));
std::string temp;
- const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
std::pair<std::unordered_set<std::string>::iterator, bool> result =
@@ -2885,7 +2886,7 @@
bool CompilerDriver::IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset) {
DexFileMethodInliner* inliner = GetMethodInlinerMap()->GetMethodInliner(dex_file);
- size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
*offset = inliner->GetOffsetForStringInit(method_index, pointer_size);
return inliner->IsStringInitMethodIndex(method_index);
}
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 38ac052..e223534 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -17,6 +17,7 @@
#include <memory>
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -100,11 +101,11 @@
CHECK_ALIGNED(stack_maps_offset, 2);
}
- method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
+ method_f_ = my_klass_->FindVirtualMethod("f", "()I", kRuntimePointerSize);
ASSERT_TRUE(method_f_ != nullptr);
method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
- method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", sizeof(void*));
+ method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", kRuntimePointerSize);
ASSERT_TRUE(method_g_ != nullptr);
method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 063eb11..7c87a60 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -39,6 +39,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
@@ -1377,6 +1378,8 @@
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ image_methods_[ImageHeader::kSaveEverythingMethod] =
+ runtime->GetCalleeSaveMethod(Runtime::kSaveEverything);
// Visit image methods first to have the main runtime methods in the first image.
for (auto* m : image_methods_) {
CHECK(m != nullptr);
@@ -1416,7 +1419,7 @@
}
case kBinImTable:
case kBinIMTConflictTable: {
- bin_offset = RoundUp(bin_offset, target_ptr_size_);
+ bin_offset = RoundUp(bin_offset, static_cast<size_t>(target_ptr_size_));
break;
}
default: {
@@ -1573,7 +1576,7 @@
boot_image_end - boot_image_begin,
boot_oat_begin,
boot_oat_end - boot_oat_begin,
- target_ptr_size_,
+ static_cast<uint32_t>(target_ptr_size_),
compile_pic_,
/*is_pic*/compile_app_image_,
image_storage_mode_,
@@ -1823,6 +1826,11 @@
const auto it = saved_hashcode_map_.find(obj);
dst->SetLockWord(it != saved_hashcode_map_.end() ?
LockWord::FromHashCode(it->second, 0u) : LockWord::Default(), false);
+ if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
+ // Treat all of the objects in the image as marked to avoid unnecessary dirty pages. This is
+ // safe since we mark all of the objects that may reference non-immune objects as gray.
+ CHECK(dst->AtomicSetMarkBit(0, 1));
+ }
FixupObject(obj, dst);
}
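[Editor's note] Pre-marking image objects works because a mark bit can be set atomically without racing the collector. A hedged sketch of the bit-set pattern that AtomicSetMarkBit(0, 1) suggests; the header layout, bit position, and helper name here are illustrative, not ART's actual lock word code:

    #include <atomic>
    #include <cstdint>

    // Illustrative only: pretend the mark bit is bit 31 of a 32-bit header.
    constexpr uint32_t kMarkBit = 1u << 31;

    bool SetMarkBit(std::atomic<uint32_t>& header) {
      uint32_t old_word = header.load(std::memory_order_relaxed);
      do {
        if ((old_word & kMarkBit) != 0) {
          return false;  // Bit was not in the expected (clear) state.
        }
      } while (!header.compare_exchange_weak(old_word, old_word | kMarkBit,
                                             std::memory_order_relaxed));
      return true;
    }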
@@ -2029,7 +2037,7 @@
if (orig_strings != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
NativeLocationInImage(orig_strings),
- /*pointer size*/8u);
+ PointerSize::k64);
orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
ImageAddressVisitor(this));
}
@@ -2037,7 +2045,7 @@
if (orig_types != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
NativeLocationInImage(orig_types),
- /*pointer size*/8u);
+ PointerSize::k64);
orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
ImageAddressVisitor(this));
}
@@ -2045,7 +2053,7 @@
if (orig_methods != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodsOffset(),
NativeLocationInImage(orig_methods),
- /*pointer size*/8u);
+ PointerSize::k64);
ArtMethod** copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, i, target_ptr_size_);
@@ -2058,7 +2066,7 @@
if (orig_fields != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedFieldsOffset(),
NativeLocationInImage(orig_fields),
- /*pointer size*/8u);
+ PointerSize::k64);
ArtField** copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, i, target_ptr_size_);
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 1efdc22..7d13656 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -28,6 +28,7 @@
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
+#include "base/enums.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
@@ -216,8 +217,7 @@
// uint32 = typeof(lockword_)
// Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
// failures due to invalid read barrier bits during object field reads.
- static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
- LockWord::kReadBarrierStateSize;
+ static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kGCStateSize;
// 111000.....0
static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
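[Editor's note] A worked instance of the shift/mask arithmetic, assuming kBinBits == 3 (consistent with the three set bits in the "111000.....0" comment) and LockWord::kGCStateSize == 2; both constants are defined elsewhere in ART, so treat these values as illustrative:

    #include <cstddef>

    constexpr size_t kBinBits = 3;      // assumed, per the mask comment
    constexpr size_t kGCStateSize = 2;  // assumed LockWord::kGCStateSize

    // 32 - 3 - 2 = 27: the bin index sits just below the GC state bits.
    constexpr size_t kBinShift = 32 - kBinBits - kGCStateSize;

    // ((1 << 3) - 1) << 27 = 0x38000000, i.e. 00111000...0 in binary.
    constexpr size_t kBinMask =
        ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;

    static_assert(kBinShift == 27, "worked example");
    static_assert(kBinMask == 0x38000000u, "worked example");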
@@ -524,7 +524,7 @@
const bool compile_app_image_;
// Size of pointers on the target architecture.
- size_t target_ptr_size_;
+ PointerSize target_ptr_size_;
// Image data indexed by the oat file index.
dchecked_vector<ImageInfo> image_infos_;
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 16b4386..da72c75 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -1,8 +1,7 @@
static constexpr uint8_t expected_asm_kThumb2[] = {
0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90,
- 0xCD, 0xF8, 0x84, 0x10, 0x8D, 0xED, 0x22, 0x0A, 0xCD, 0xF8, 0x8C, 0x20,
- 0xCD, 0xF8, 0x90, 0x30, 0x88, 0xB0, 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC,
- 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x8D,
+ 0x21, 0x91, 0x8D, 0xED, 0x22, 0x0A, 0x23, 0x92, 0x24, 0x93, 0x88, 0xB0,
+ 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x8D,
};
static constexpr uint8_t expected_cfi_kThumb2[] = {
0x44, 0x0E, 0x1C, 0x85, 0x07, 0x86, 0x06, 0x87, 0x05, 0x88, 0x04, 0x8A,
@@ -11,7 +10,7 @@
0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05,
0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05,
0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01,
- 0x54, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
+ 0x4E, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06,
0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06,
0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44,
@@ -47,38 +46,38 @@
// 0x00000008: sub sp, sp, #36
// 0x0000000a: .cfi_def_cfa_offset: 128
// 0x0000000a: str r0, [sp, #0]
-// 0x0000000c: str.w r1, [sp, #132]
-// 0x00000010: vstr.f32 s0, [sp, #136]
-// 0x00000014: str.w r2, [sp, #140]
-// 0x00000018: str.w r3, [sp, #144]
-// 0x0000001c: sub sp, sp, #32
-// 0x0000001e: .cfi_def_cfa_offset: 160
-// 0x0000001e: add sp, sp, #32
-// 0x00000020: .cfi_def_cfa_offset: 128
-// 0x00000020: .cfi_remember_state
-// 0x00000020: add sp, sp, #36
-// 0x00000022: .cfi_def_cfa_offset: 92
-// 0x00000022: vpop.f32 {s16-s31}
-// 0x00000026: .cfi_def_cfa_offset: 28
-// 0x00000026: .cfi_restore_extended: r80
-// 0x00000026: .cfi_restore_extended: r81
-// 0x00000026: .cfi_restore_extended: r82
-// 0x00000026: .cfi_restore_extended: r83
-// 0x00000026: .cfi_restore_extended: r84
-// 0x00000026: .cfi_restore_extended: r85
-// 0x00000026: .cfi_restore_extended: r86
-// 0x00000026: .cfi_restore_extended: r87
-// 0x00000026: .cfi_restore_extended: r88
-// 0x00000026: .cfi_restore_extended: r89
-// 0x00000026: .cfi_restore_extended: r90
-// 0x00000026: .cfi_restore_extended: r91
-// 0x00000026: .cfi_restore_extended: r92
-// 0x00000026: .cfi_restore_extended: r93
-// 0x00000026: .cfi_restore_extended: r94
-// 0x00000026: .cfi_restore_extended: r95
-// 0x00000026: pop {r5, r6, r7, r8, r10, r11, pc}
-// 0x0000002a: .cfi_restore_state
-// 0x0000002a: .cfi_def_cfa_offset: 128
+// 0x0000000c: str r1, [sp, #132]
+// 0x0000000e: vstr.f32 s0, [sp, #136]
+// 0x00000012: str r2, [sp, #140]
+// 0x00000014: str r3, [sp, #144]
+// 0x00000016: sub sp, sp, #32
+// 0x00000018: .cfi_def_cfa_offset: 160
+// 0x00000018: add sp, sp, #32
+// 0x0000001a: .cfi_def_cfa_offset: 128
+// 0x0000001a: .cfi_remember_state
+// 0x0000001a: add sp, sp, #36
+// 0x0000001c: .cfi_def_cfa_offset: 92
+// 0x0000001c: vpop.f32 {s16-s31}
+// 0x00000020: .cfi_def_cfa_offset: 28
+// 0x00000020: .cfi_restore_extended: r80
+// 0x00000020: .cfi_restore_extended: r81
+// 0x00000020: .cfi_restore_extended: r82
+// 0x00000020: .cfi_restore_extended: r83
+// 0x00000020: .cfi_restore_extended: r84
+// 0x00000020: .cfi_restore_extended: r85
+// 0x00000020: .cfi_restore_extended: r86
+// 0x00000020: .cfi_restore_extended: r87
+// 0x00000020: .cfi_restore_extended: r88
+// 0x00000020: .cfi_restore_extended: r89
+// 0x00000020: .cfi_restore_extended: r90
+// 0x00000020: .cfi_restore_extended: r91
+// 0x00000020: .cfi_restore_extended: r92
+// 0x00000020: .cfi_restore_extended: r93
+// 0x00000020: .cfi_restore_extended: r94
+// 0x00000020: .cfi_restore_extended: r95
+// 0x00000020: pop {r5, r6, r7, r8, r10, r11, pc}
+// 0x00000024: .cfi_restore_state
+// 0x00000024: .cfi_def_cfa_offset: 128
static constexpr uint8_t expected_asm_kArm64[] = {
0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9,
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 29411f0..0d16260 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm {
+static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size");
+
// Used by hard float.
static const Register kHFCoreArgumentRegisters[] = {
R0, R1, R2, R3
@@ -255,7 +257,7 @@
ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArmPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register r2.
size_t padding = 0;
@@ -287,9 +289,10 @@
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = kArmPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = static_cast<size_t>(kArmPointerSize)
+ + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
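[Editor's note] A worked numeric pass through FrameSize() above, under assumed inputs: 11 callee-save registers, 2 handle scope references, a 4-byte return value, and 16-byte stack alignment (kStackAlignment's real value lives elsewhere in ART):

    #include <cstddef>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    constexpr size_t kArmPointerSize = 4;    // Method*
    constexpr size_t kFramePointerSize = 4;  // LR, segment state, callee saves
    constexpr size_t kStackAlignment = 16;   // assumed for illustration

    // Method* + (LR + segment state + 11 callee saves) * 4 bytes.
    constexpr size_t frame_data_size =
        kArmPointerSize + (2 + 11) * kFramePointerSize;   // 4 + 52 = 56
    // 2-word HandleScope header + 2 references, all 4 bytes on ARM.
    constexpr size_t handle_scope_size = 2 * 4 + 2 * 4;   // 16
    constexpr size_t frame_size =
        RoundUp(frame_data_size + handle_scope_size + /* return value */ 4,
                kStackAlignment);

    static_assert(frame_size == 80, "56 + 16 + 4 = 76, rounded up to 80");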
@@ -343,7 +346,8 @@
FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
CHECK_GE(itr_slots_, 4u);
- size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
+ size_t offset =
+ displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
CHECK_LT(offset, OutArgSize());
return FrameOffset(offset);
}
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 157880b..7c717cc 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
#define ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~ArmManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index ab56c1c..afa707d 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm64 {
+static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");
+
static const XRegister kXArgumentRegisters[] = {
X0, X1, X2, X3, X4, X5, X6, X7
};
@@ -211,7 +213,7 @@
// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArm64PointerSize) {
}
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
@@ -231,7 +233,7 @@
size_t frame_data_size = kFramePointerSize +
CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 337e881..90b12e5 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm64 {
-constexpr size_t kFramePointerSize = 8;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index e21f554..c7ed9c9 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -299,7 +299,7 @@
size_t JniCallingConvention::CurrentParamSize() {
if (itr_args_ <= kObjectOrClass) {
- return frame_pointer_size_; // JNIEnv or jobject/jclass
+ return static_cast<size_t>(frame_pointer_size_); // JNIEnv or jobject/jclass
} else {
int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
return ParamSize(arg_pos);
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index e8f738d..995fa51 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include "base/arena_object.h"
+#include "base/enums.h"
#include "handle_scope.h"
#include "primitive.h"
#include "thread.h"
@@ -70,8 +71,10 @@
virtual ~CallingConvention() {}
protected:
- CallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ CallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
@@ -198,7 +201,7 @@
// Space for frames below this on the stack.
FrameOffset displacement_;
// The size of a pointer.
- const size_t frame_pointer_size_;
+ const PointerSize frame_pointer_size_;
// The size of a reference entry within the handle scope.
const size_t handle_scope_pointer_size_;
@@ -255,7 +258,7 @@
ManagedRuntimeCallingConvention(bool is_static,
bool is_synchronized,
const char* shorty,
- size_t frame_pointer_size)
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
};
@@ -328,7 +331,7 @@
// Position of handle scope and interior fields
FrameOffset HandleScopeOffset() const {
- return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_);
+ return FrameOffset(this->displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
// above Method reference
}
@@ -356,8 +359,10 @@
kObjectOrClass = 1
};
- JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ JniCallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
// Number of stack slots for outgoing arguments, above which the handle scope is
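
HandleScopeOffset() above adds exactly one frame pointer to the displacement because the handle scope sits immediately above the ArtMethod* slot at the bottom of the frame. A hedged sketch of that layout math (PointerSize definition assumed as before):

    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };  // assumed, as above

    // Illustrative stand-in for HandleScopeOffset(): the handle scope begins
    // one pointer past the frame displacement, just above the Method* slot.
    constexpr uint32_t HandleScopeOffset(uint32_t displacement, PointerSize fp) {
      return displacement + static_cast<uint32_t>(fp);
    }

    static_assert(HandleScopeOffset(64, PointerSize::k64) == 72, "above the Method*");
    static_assert(HandleScopeOffset(64, PointerSize::k32) == 68, "above the Method*");
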
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 4311a34..277b794 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -23,6 +23,7 @@
#include "art_method.h"
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "calling_convention.h"
@@ -125,16 +126,16 @@
if (is_64_bit_target) {
__ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<8>(),
+ Thread::TopHandleScopeOffset<PointerSize::k64>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+ __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<PointerSize::k64>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
} else {
__ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<4>(),
+ Thread::TopHandleScopeOffset<PointerSize::k32>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+ __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<PointerSize::k32>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
}
@@ -188,9 +189,9 @@
// 4. Write out the end of the quick frames.
if (is_64_bit_target) {
- __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
+ __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<PointerSize::k64>());
} else {
- __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
+ __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<PointerSize::k32>());
}
// 5. Move frame down to allow space for out going args.
@@ -201,8 +202,10 @@
// Call the read barrier for the declaring class loaded from the method for a static call.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static) {
- ThreadOffset<4> read_barrier32 = QUICK_ENTRYPOINT_OFFSET(4, pReadBarrierJni);
- ThreadOffset<8> read_barrier64 = QUICK_ENTRYPOINT_OFFSET(8, pReadBarrierJni);
+ ThreadOffset32 read_barrier32 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pReadBarrierJni);
+ ThreadOffset64 read_barrier64 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv.
FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
@@ -245,10 +248,14 @@
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- ThreadOffset<4> jni_start32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
- ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
+ ThreadOffset32 jni_start32 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStart);
+ ThreadOffset64 jni_start64 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
@@ -346,17 +353,17 @@
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
if (is_64_bit_target) {
- __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
+ __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>());
} else {
- __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
+ __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>());
}
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
+ __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>(),
main_jni_conv->InterproceduralScratchRegister());
} else {
- __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
+ __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>(),
main_jni_conv->InterproceduralScratchRegister());
}
}
@@ -387,7 +394,8 @@
main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
- return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
+ return_save_location = FrameOffset(return_save_location.Uint32Value()
+ + static_cast<size_t>(kMipsPointerSize));
}
CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
@@ -406,21 +414,27 @@
}
// thread.
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
- ThreadOffset<4> jni_end32(-1);
- ThreadOffset<8> jni_end64(-1);
+ ThreadOffset32 jni_end32(-1);
+ ThreadOffset64 jni_end64(-1);
if (reference_return) {
// Pass result.
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReference);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndWithReference);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEnd);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEnd);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
@@ -458,9 +472,11 @@
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
if (is_64_bit_target) {
- __ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread64(ThreadOffset64(jni_end64),
+ end_jni_conv->InterproceduralScratchRegister());
} else {
- __ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread32(ThreadOffset32(jni_end32),
+ end_jni_conv->InterproceduralScratchRegister());
}
}
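
The jni_start/jni_end offsets above are now computed per pointer width via QUICK_ENTRYPOINT_OFFSET(PointerSize::k32/k64, ...). A sketch of the underlying arithmetic with invented numbers (the real macro resolves a field in Thread's entrypoint table; the base and slot index here are illustrative):

    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };  // assumed, as above

    // Hypothetical stand-in for a ThreadOffset computation: an entrypoint's
    // offset from Thread* is the table base plus slot index * pointer size.
    constexpr uint32_t EntrypointOffset(uint32_t table_base,
                                        uint32_t slot_index,
                                        PointerSize size) {
      return table_base + slot_index * static_cast<uint32_t>(size);
    }

    // The same logical entrypoint lands at different offsets on 32- and 64-bit
    // targets, which is why both jni_start32 and jni_start64 are materialized.
    static_assert(EntrypointOffset(160, 10, PointerSize::k32) == 200, "");
    static_assert(EntrypointOffset(160, 10, PointerSize::k64) == 240, "");
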
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 3d4d140..f5ab5f7 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -172,7 +172,7 @@
MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMipsPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register A2.
size_t padding = 0;
@@ -203,10 +203,10 @@
size_t MipsJniCallingConvention::FrameSize() {
// ArtMethod*, RA and callee save area size, local reference segment state
- size_t frame_data_size = kMipsPointerSize +
+ size_t frame_data_size = static_cast<size_t>(kMipsPointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 5c128b0..e95a738 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
#define ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips {
constexpr size_t kFramePointerSize = 4;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
+ "Invalid frame pointer size");
class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~MipsManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index f2e1da8..8341e8e 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -152,7 +152,7 @@
Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMips64PointerSize) {
}
uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
@@ -172,7 +172,7 @@
size_t frame_data_size = kFramePointerSize +
(CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index 99ea3cd..a5fd111 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips64 {
constexpr size_t kFramePointerSize = 8;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
+ "Invalid frame pointer size");
class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 22c7cd0..1d06f26 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -23,6 +23,8 @@
namespace art {
namespace x86 {
+static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86ManagedRegister::FromCpuRegister(EBP),
@@ -190,7 +192,7 @@
X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86PointerSize) {
}
uint32_t X86JniCallingConvention::CoreSpillMask() const {
@@ -203,10 +205,10 @@
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
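
For a concrete reading of FrameSize() above, a standalone sketch with plain numbers (not ART code; kStackAlignment is assumed to be 16 and the HandleScope header to be two 32-bit words on x86):

    #include <cstddef>

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    // Mirrors the shape of X86JniCallingConvention::FrameSize(); illustrative.
    constexpr size_t X86FrameSize(size_t callee_saves, size_t refs, size_t ret_size) {
      return RoundUp(4 /* Method* */ +
                     (2 + callee_saves) * 4 /* retaddr, local ref state, saves */ +
                     2 * 4 /* HandleScope header */ + refs * 4 /* references */ +
                     ret_size,
                     16 /* assumed kStackAlignment */);
    }

    static_assert(X86FrameSize(/*callee_saves=*/3, /*refs=*/2, /*ret_size=*/4) == 48, "");
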
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index 9d678b7..ff92fc9 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
#define ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86 {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize),
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32),
gpr_arg_count_(0) {}
~X86ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index cc4d232..cbf10bd 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -24,6 +24,10 @@
namespace art {
namespace x86_64 {
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
+
+static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86_64ManagedRegister::FromCpuRegister(RBX),
@@ -136,7 +140,7 @@
FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return FrameOffset(displacement_.Int32Value() + // displacement
- kX86_64PointerSize + // Method ref
+ static_cast<size_t>(kX86_64PointerSize) + // Method ref
itr_slots_ * sizeof(uint32_t)); // offset into in args
}
@@ -163,7 +167,7 @@
X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86_64PointerSize) {
}
uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
@@ -176,10 +180,10 @@
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86_64PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86_64PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index e2d3d48..b98f505 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -17,17 +17,19 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86_64 {
-constexpr size_t kFramePointerSize = 8;
-
class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index d4dd978..2471f79 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -31,10 +31,6 @@
}
uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
- // NOTE: The final thunk can be reserved from InitCodeMethodVisitor::EndClass() while it
- // may be written early by WriteCodeMethodVisitor::VisitMethod() for a deduplicated chunk
- // of code. To avoid any alignment discrepancies for the final chunk, we always align the
- // offset after reserving or writing any chunk.
uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset,
MethodReference(nullptr, 0u),
@@ -46,7 +42,7 @@
unprocessed_patches_.clear();
thunk_locations_.push_back(aligned_offset);
- offset = CompiledMethod::AlignCode(aligned_offset + thunk_code_.size(), instruction_set_);
+ offset = aligned_offset + thunk_code_.size();
}
return offset;
}
@@ -65,13 +61,7 @@
if (UNLIKELY(!WriteRelCallThunk(out, ArrayRef<const uint8_t>(thunk_code_)))) {
return 0u;
}
- uint32_t thunk_end_offset = aligned_offset + thunk_code_.size();
- // Align after writing chunk, see the ReserveSpace() above.
- offset = CompiledMethod::AlignCode(thunk_end_offset, instruction_set_);
- aligned_code_delta = offset - thunk_end_offset;
- if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
- return 0u;
- }
+ offset = aligned_offset + thunk_code_.size();
}
return offset;
}
@@ -92,7 +82,7 @@
MethodReference method_ref,
uint32_t max_extra_space) {
uint32_t quick_code_size = compiled_method->GetQuickCode().size();
- uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
+ uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
// Adjust for extra space required by the subclass.
next_aligned_offset = compiled_method->AlignCode(next_aligned_offset + max_extra_space);
@@ -106,9 +96,9 @@
if (needs_thunk) {
// A single thunk will cover all pending patches.
unprocessed_patches_.clear();
- uint32_t thunk_location = compiled_method->AlignCode(offset);
+ uint32_t thunk_location = CompiledMethod::AlignCode(offset, instruction_set_);
thunk_locations_.push_back(thunk_location);
- offset = CompiledMethod::AlignCode(thunk_location + thunk_code_.size(), instruction_set_);
+ offset = thunk_location + thunk_code_.size();
}
}
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
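
The recurring pattern in this patcher change: `AlignCode(offset) + sizeof(OatQuickMethodHeader)` aligned the method header and left the code misaligned, while `AlignCode(offset + sizeof(OatQuickMethodHeader))` aligns the code itself. A small sketch with assumed values (header size 20 per the oat_test.cc expectation, alignment 16):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kHeader = 20;  // sizeof(OatQuickMethodHeader)

    constexpr uint32_t AlignUp(uint32_t x, uint32_t n) { return (x + n - 1) & ~(n - 1); }

    int main() {
      uint32_t offset = 100;    // arbitrary unaligned stream position
      uint32_t alignment = 16;  // assumed instruction-set alignment

      uint32_t old_code = AlignUp(offset, alignment) + kHeader;  // 112 + 20 = 132
      uint32_t new_code = AlignUp(offset + kHeader, alignment);  // 120 -> 128

      assert(old_code % alignment != 0);  // old scheme: header aligned, code not
      assert(new_code % alignment == 0);  // new scheme: code aligned
      return 0;
    }
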
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index a8078e3..eace3d4 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -48,18 +48,18 @@
const ArrayRef<const LinkerPatch>& method3_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArmAlignment, 0u);
- const uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kThumb2) + sizeof(OatQuickMethodHeader);
+ uint32_t method1_offset =
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
// We want to put the method3 at a very precise offset.
const uint32_t method3_offset = method1_offset + distance_without_thunks;
- CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment);
+ CHECK_ALIGNED(method3_offset, kArmAlignment);
// Calculate size of method2 so that we put method3 at the correct place.
+ const uint32_t method1_end = method1_offset + method1_code.size();
const uint32_t method2_offset =
- CompiledCode::AlignCode(method1_offset + method1_code.size(), kThumb2) +
- sizeof(OatQuickMethodHeader);
+ method1_end + CodeAlignmentSize(method1_end) + sizeof(OatQuickMethodHeader);
const uint32_t method2_size = (method3_offset - sizeof(OatQuickMethodHeader) - method2_offset);
std::vector<uint8_t> method2_raw_code(method2_size);
ArrayRef<const uint8_t> method2_code(method2_raw_code);
@@ -78,8 +78,11 @@
if (result3.second == method3_offset + 1 /* thumb mode */) {
return false; // No thunk.
} else {
- uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kThumb2);
- CHECK_EQ(result3.second, method3_offset + aligned_thunk_size + 1 /* thumb mode */);
+ uint32_t thunk_end =
+ CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) +
+ ThunkSize();
+ uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
+ CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */);
return true; // Thunk present.
}
}
@@ -352,9 +355,12 @@
uint32_t method1_offset = GetMethodOffset(1u);
uint32_t method3_offset = GetMethodOffset(3u);
+ ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset));
uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
- ASSERT_TRUE(IsAligned<kArmAlignment>(method3_header_offset));
- uint32_t thunk_offset = method3_header_offset - CompiledCode::AlignCode(ThunkSize(), kThumb2);
+ uint32_t thunk_offset =
+ RoundDown(method3_header_offset - ThunkSize(), GetInstructionSetAlignment(kThumb2));
+ DCHECK_EQ(thunk_offset + ThunkSize() + CodeAlignmentSize(thunk_offset + ThunkSize()),
+ method3_header_offset);
ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1 + 4u /* PC adjustment */);
ASSERT_EQ(diff & 1u, 0u);
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index fdd14be..4c8788e 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -83,7 +83,7 @@
// Now that we have the actual offset where the code will be placed, locate the ADRP insns
// that actually require the thunk.
- uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
+ uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
DCHECK(compiled_method != nullptr);
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 09729fd..573de73 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -67,36 +67,39 @@
const ArrayRef<const LinkerPatch>& last_method_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
- const uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ uint32_t method1_offset =
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
- const uint32_t gap_start =
- CompiledCode::AlignCode(method1_offset + method1_code.size(), kArm64);
+ const uint32_t gap_start = method1_offset + method1_code.size();
// We want to put the method3 at a very precise offset.
const uint32_t last_method_offset = method1_offset + distance_without_thunks;
+ CHECK_ALIGNED(last_method_offset, kArm64Alignment);
const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
- CHECK_ALIGNED(gap_end, kArm64Alignment);
- // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB).
+ // Fill the gap with intermediate methods in chunks of 2MiB and the first in [2MiB, 4MiB).
// (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB
- // offsets by this test.)
+ // offsets by this test. Making the first chunk bigger makes it easy to give all intermediate
+ // methods the same alignment of the end, so the thunk insertion adds a predictable size as
+ // long as it's after the first chunk.)
uint32_t method_idx = 2u;
constexpr uint32_t kSmallChunkSize = 2 * MB;
std::vector<uint8_t> gap_code;
- size_t gap_size = gap_end - gap_start;
- for (; gap_size >= 2u * kSmallChunkSize; gap_size -= kSmallChunkSize) {
- uint32_t chunk_code_size = kSmallChunkSize - sizeof(OatQuickMethodHeader);
+ uint32_t gap_size = gap_end - gap_start;
+ uint32_t num_small_chunks = std::max(gap_size / kSmallChunkSize, 1u) - 1u;
+ uint32_t chunk_start = gap_start;
+ uint32_t chunk_size = gap_size - num_small_chunks * kSmallChunkSize;
+ for (uint32_t i = 0; i <= num_small_chunks; ++i) { // num_small_chunks+1 iterations.
+ uint32_t chunk_code_size =
+ chunk_size - CodeAlignmentSize(chunk_start) - sizeof(OatQuickMethodHeader);
gap_code.resize(chunk_code_size, 0u);
AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
ArrayRef<const LinkerPatch>());
method_idx += 1u;
+ chunk_start += chunk_size;
+ chunk_size = kSmallChunkSize; // For all but the first chunk.
+ DCHECK_EQ(CodeAlignmentSize(gap_end), CodeAlignmentSize(chunk_start));
}
- uint32_t chunk_code_size = gap_size - sizeof(OatQuickMethodHeader);
- gap_code.resize(chunk_code_size, 0u);
- AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
- ArrayRef<const LinkerPatch>());
- method_idx += 1u;
// Add the last method and link
AddCompiledMethod(MethodRef(method_idx), last_method_code, last_method_patches);
@@ -109,8 +112,9 @@
// There may be a thunk before method2.
if (last_result.second != last_method_offset) {
// Thunk present. Check that there's only one.
- uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kArm64);
- CHECK_EQ(last_result.second, last_method_offset + aligned_thunk_size);
+ uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + ThunkSize();
+ uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
+ CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
}
return method_idx;
}
@@ -341,7 +345,7 @@
uint32_t dex_cache_arrays_begin,
uint32_t element_offset) {
uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
ASSERT_LT(method1_offset, adrp_offset);
CHECK_ALIGNED(adrp_offset, 4u);
uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
@@ -391,7 +395,7 @@
bool has_thunk,
uint32_t string_offset) {
uint32_t method1_offset =
- CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
+ kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
ASSERT_LT(method1_offset, adrp_offset);
CHECK_ALIGNED(adrp_offset, 4u);
uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
@@ -614,10 +618,12 @@
uint32_t method1_offset = GetMethodOffset(1u);
uint32_t last_method_offset = GetMethodOffset(last_method_idx);
+ ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
- ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_header_offset));
- uint32_t thunk_offset = last_method_header_offset - CompiledCode::AlignCode(ThunkSize(), kArm64);
- ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset));
+ uint32_t thunk_offset =
+ RoundDown(last_method_header_offset - ThunkSize(), GetInstructionSetAlignment(kArm64));
+ DCHECK_EQ(thunk_offset + ThunkSize() + CodeAlignmentSize(thunk_offset + ThunkSize()),
+ last_method_header_offset);
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
CHECK_ALIGNED(diff, 4u);
ASSERT_LT(diff, 128 * MB);
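
The updated expectation above packs the thunk as close below the last method's header as instruction alignment allows, instead of assuming an alignment-rounded thunk size. A sketch of that placement with illustrative sizes:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t RoundDown(uint32_t x, uint32_t n) { return x / n * n; }

    int main() {
      uint32_t header_offset = 0xFEC;  // last method's header (illustrative)
      uint32_t thunk_size = 8;         // illustrative ThunkSize()
      uint32_t insn_align = 4;         // assumed GetInstructionSetAlignment()

      uint32_t thunk_offset = RoundDown(header_offset - thunk_size, insn_align);

      assert(thunk_offset % insn_align == 0);              // thunk itself is aligned
      assert(thunk_offset + thunk_size <= header_offset);  // fits below the header
      // Any bytes between the thunk end and the header are the code-alignment
      // gap that the DCHECK_EQ in the test accounts for explicitly.
      return 0;
    }
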
diff --git a/compiler/linker/mips/relative_patcher_mips.cc b/compiler/linker/mips/relative_patcher_mips.cc
index 7c0423b..c09950c 100644
--- a/compiler/linker/mips/relative_patcher_mips.cc
+++ b/compiler/linker/mips/relative_patcher_mips.cc
@@ -49,6 +49,7 @@
uint32_t target_offset) {
uint32_t anchor_literal_offset = patch.PcInsnOffset();
uint32_t literal_offset = patch.LiteralOffset();
+ bool dex_cache_array = (patch.GetType() == LinkerPatch::Type::kDexCacheArray);
// Basic sanity checks.
if (is_r6) {
@@ -68,12 +69,16 @@
DCHECK_GE(code->size(), 16u);
DCHECK_LE(literal_offset, code->size() - 12u);
DCHECK_GE(literal_offset, 4u);
- DCHECK_EQ(literal_offset + 4u, anchor_literal_offset);
- // NAL
- DCHECK_EQ((*code)[literal_offset - 4], 0x00);
- DCHECK_EQ((*code)[literal_offset - 3], 0x00);
- DCHECK_EQ((*code)[literal_offset - 2], 0x10);
- DCHECK_EQ((*code)[literal_offset - 1], 0x04);
+ // The NAL instruction may not precede immediately as the PC+0 value may
+ // come from HMipsComputeBaseMethodAddress.
+ if (dex_cache_array) {
+ DCHECK_EQ(literal_offset + 4u, anchor_literal_offset);
+ // NAL
+ DCHECK_EQ((*code)[literal_offset - 4], 0x00);
+ DCHECK_EQ((*code)[literal_offset - 3], 0x00);
+ DCHECK_EQ((*code)[literal_offset - 2], 0x10);
+ DCHECK_EQ((*code)[literal_offset - 1], 0x04);
+ }
// LUI reg, offset_high
DCHECK_EQ((*code)[literal_offset + 0], 0x34);
DCHECK_EQ((*code)[literal_offset + 1], 0x12);
@@ -83,16 +88,22 @@
DCHECK_EQ((*code)[literal_offset + 4], 0x78);
DCHECK_EQ((*code)[literal_offset + 5], 0x56);
DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x34);
- // ADDU reg, reg, RA
+ // ADDU reg, reg, reg2
DCHECK_EQ((*code)[literal_offset + 8], 0x21);
DCHECK_EQ(((*code)[literal_offset + 9] & 0x07), 0x00);
- DCHECK_EQ(((*code)[literal_offset + 10] & 0x1F), 0x1F);
+ if (dex_cache_array) {
+ // reg2 is either RA or from HMipsComputeBaseMethodAddress.
+ DCHECK_EQ(((*code)[literal_offset + 10] & 0x1F), 0x1F);
+ }
DCHECK_EQ(((*code)[literal_offset + 11] & 0xFC), 0x00);
}
// Apply patch.
uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
- uint32_t diff = target_offset - anchor_offset + kDexCacheArrayLwOffset;
+ uint32_t diff = target_offset - anchor_offset;
+ if (dex_cache_array) {
+ diff += kDexCacheArrayLwOffset;
+ }
if (is_r6) {
diff += (diff & 0x8000) << 1; // Account for sign extension in ADDIU.
}
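
The `diff += (diff & 0x8000) << 1` adjustment above compensates for ADDIU sign-extending its 16-bit immediate on R6. A standalone sketch of the auipc/addiu split (not ART code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t diff = 0x12348765;  // low half has bit 15 set
      // Bump the high half when the low half will be sign-extended negative.
      uint32_t adjusted = diff + ((diff & 0x8000) << 1);
      uint16_t hi = static_cast<uint16_t>(adjusted >> 16);  // 0x1235 -> auipc
      int16_t lo = static_cast<int16_t>(diff);              // -0x789B -> addiu

      // auipc reg, hi; addiu reg, reg, lo reconstructs the original diff.
      uint32_t rebuilt = (static_cast<uint32_t>(hi) << 16) +
                         static_cast<uint32_t>(static_cast<int32_t>(lo));
      assert(rebuilt == diff);
      return 0;
    }
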
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index 0f1dcbc..a16aaca 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -29,40 +29,78 @@
Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
protected:
+ static const uint8_t UnpatchedPcRelativeRawCode[];
+ static const uint32_t LiteralOffset;
+ static const uint32_t AnchorOffset;
+ static const ArrayRef<const uint8_t> UnpatchedPcRelativeCode;
+
uint32_t GetMethodOffset(uint32_t method_idx) {
auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
CHECK(result.first);
return result.second;
}
+
+ void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
+ void TestDexCacheReference(uint32_t dex_cache_arrays_begin, uint32_t element_offset);
+ void TestStringReference(uint32_t string_offset);
};
-TEST_F(Mips32r6RelativePatcherTest, DexCacheReference) {
- dex_cache_arrays_begin_ = 0x12345678;
- constexpr size_t kElementOffset = 0x1234;
- static const uint8_t raw_code[] = {
- 0x34, 0x12, 0x5E, 0xEE, // auipc s2, high(diff); placeholder = 0x1234
- 0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
- };
- constexpr uint32_t literal_offset = 0; // At auipc (where patching starts).
- constexpr uint32_t anchor_offset = literal_offset; // At auipc (where PC+0 points).
- ArrayRef<const uint8_t> code(raw_code);
- LinkerPatch patches[] = {
- LinkerPatch::DexCacheArrayPatch(literal_offset, nullptr, anchor_offset, kElementOffset),
- };
- AddCompiledMethod(MethodRef(1u), code, ArrayRef<const LinkerPatch>(patches));
+const uint8_t Mips32r6RelativePatcherTest::UnpatchedPcRelativeRawCode[] = {
+ 0x34, 0x12, 0x5E, 0xEE, // auipc s2, high(diff); placeholder = 0x1234
+ 0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
+};
+const uint32_t Mips32r6RelativePatcherTest::LiteralOffset = 0; // At auipc (where patching starts).
+const uint32_t Mips32r6RelativePatcherTest::AnchorOffset = 0; // At auipc (where PC+0 points).
+const ArrayRef<const uint8_t> Mips32r6RelativePatcherTest::UnpatchedPcRelativeCode(
+ UnpatchedPcRelativeRawCode);
+
+void Mips32r6RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
+ uint32_t target_offset) {
+ AddCompiledMethod(MethodRef(1u), UnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
ASSERT_TRUE(result.first);
- uint32_t diff = dex_cache_arrays_begin_ + kElementOffset - (result.second + anchor_offset) +
- kDexCacheArrayLwOffset;
+
+ uint32_t diff = target_offset - (result.second + AnchorOffset);
+ if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
+ diff += kDexCacheArrayLwOffset;
+ }
diff += (diff & 0x8000) << 1; // Account for sign extension in addiu.
- static const uint8_t expected_code[] = {
+
+ const uint8_t expected_code[] = {
static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x5E, 0xEE,
static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x26,
};
EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}
+void Mips32r6RelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_arrays_begin,
+ uint32_t element_offset) {
+ dex_cache_arrays_begin_ = dex_cache_arrays_begin;
+ LinkerPatch patches[] = {
+ LinkerPatch::DexCacheArrayPatch(LiteralOffset, nullptr, AnchorOffset, element_offset)
+ };
+ CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
+ dex_cache_arrays_begin_ + element_offset);
+}
+
+void Mips32r6RelativePatcherTest::TestStringReference(uint32_t string_offset) {
+ constexpr uint32_t kStringIndex = 1u;
+ string_index_to_offset_map_.Put(kStringIndex, string_offset);
+ LinkerPatch patches[] = {
+ LinkerPatch::RelativeStringPatch(LiteralOffset, nullptr, AnchorOffset, kStringIndex)
+ };
+ CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
+}
+
+TEST_F(Mips32r6RelativePatcherTest, DexCacheReference) {
+ TestDexCacheReference(/* dex_cache_arrays_begin */ 0x12345678, /* element_offset */ 0x1234);
+}
+
+TEST_F(Mips32r6RelativePatcherTest, StringReference) {
+  TestStringReference(/* string_offset */ 0x87651234);
+}
+
} // namespace linker
} // namespace art
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index 8391b53..335ce2e 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -29,36 +29,47 @@
MipsRelativePatcherTest() : RelativePatcherTest(kMips, "mips32r2") {}
protected:
+ static const uint8_t UnpatchedPcRelativeRawCode[];
+ static const uint32_t LiteralOffset;
+ static const uint32_t AnchorOffset;
+ static const ArrayRef<const uint8_t> UnpatchedPcRelativeCode;
+
uint32_t GetMethodOffset(uint32_t method_idx) {
auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
CHECK(result.first);
return result.second;
}
+
+ void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
+ void TestDexCacheReference(uint32_t dex_cache_arrays_begin, uint32_t element_offset);
+ void TestStringReference(uint32_t string_offset);
};
-TEST_F(MipsRelativePatcherTest, DexCacheReference) {
- dex_cache_arrays_begin_ = 0x12345678;
- constexpr size_t kElementOffset = 0x1234;
- static const uint8_t raw_code[] = {
- 0x00, 0x00, 0x10, 0x04, // nal
- 0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
- 0x78, 0x56, 0x52, 0x36, // ori s2, s2, low(diff); placeholder = 0x5678
- 0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
- };
- constexpr uint32_t literal_offset = 4; // At lui (where patching starts).
- constexpr uint32_t anchor_offset = 8; // At ori (where PC+0 points).
- ArrayRef<const uint8_t> code(raw_code);
- LinkerPatch patches[] = {
- LinkerPatch::DexCacheArrayPatch(literal_offset, nullptr, anchor_offset, kElementOffset),
- };
- AddCompiledMethod(MethodRef(1u), code, ArrayRef<const LinkerPatch>(patches));
+const uint8_t MipsRelativePatcherTest::UnpatchedPcRelativeRawCode[] = {
+ 0x00, 0x00, 0x10, 0x04, // nal
+ 0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
+ 0x78, 0x56, 0x52, 0x36, // ori s2, s2, low(diff); placeholder = 0x5678
+ 0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
+};
+const uint32_t MipsRelativePatcherTest::LiteralOffset = 4; // At lui (where patching starts).
+const uint32_t MipsRelativePatcherTest::AnchorOffset = 8; // At ori (where PC+0 points).
+const ArrayRef<const uint8_t> MipsRelativePatcherTest::UnpatchedPcRelativeCode(
+ UnpatchedPcRelativeRawCode);
+
+void MipsRelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
+ uint32_t target_offset) {
+ AddCompiledMethod(MethodRef(1u), UnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
ASSERT_TRUE(result.first);
- uint32_t diff = dex_cache_arrays_begin_ + kElementOffset - (result.second + anchor_offset) +
- kDexCacheArrayLwOffset;
- static const uint8_t expected_code[] = {
+
+ uint32_t diff = target_offset - (result.second + AnchorOffset);
+ if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
+ diff += kDexCacheArrayLwOffset;
+ }
+
+ const uint8_t expected_code[] = {
0x00, 0x00, 0x10, 0x04,
static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x12, 0x3C,
static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x36,
@@ -67,5 +78,32 @@
EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}
+void MipsRelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_arrays_begin,
+ uint32_t element_offset) {
+ dex_cache_arrays_begin_ = dex_cache_arrays_begin;
+ LinkerPatch patches[] = {
+ LinkerPatch::DexCacheArrayPatch(LiteralOffset, nullptr, AnchorOffset, element_offset)
+ };
+ CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
+ dex_cache_arrays_begin_ + element_offset);
+}
+
+void MipsRelativePatcherTest::TestStringReference(uint32_t string_offset) {
+ constexpr uint32_t kStringIndex = 1u;
+ string_index_to_offset_map_.Put(kStringIndex, string_offset);
+ LinkerPatch patches[] = {
+ LinkerPatch::RelativeStringPatch(LiteralOffset, nullptr, AnchorOffset, kStringIndex)
+ };
+ CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
+}
+
+TEST_F(MipsRelativePatcherTest, DexCacheReference) {
+ TestDexCacheReference(/* dex_cache_arrays_begin */ 0x12345678, /* element_offset */ 0x1234);
+}
+
+TEST_F(MipsRelativePatcherTest, StringReference) {
+  TestStringReference(/* string_offset */ 0x87651234);
+}
+
} // namespace linker
} // namespace art
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index ec69107..d21f33e 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -98,6 +98,14 @@
patches));
}
+ uint32_t CodeAlignmentSize(uint32_t header_offset_to_align) {
+ // We want to align the code rather than the preheader.
+ uint32_t unaligned_code_offset = header_offset_to_align + sizeof(OatQuickMethodHeader);
+ uint32_t aligned_code_offset =
+ CompiledMethod::AlignCode(unaligned_code_offset, instruction_set_);
+ return aligned_code_offset - unaligned_code_offset;
+ }
+
void Link() {
// Reserve space.
static_assert(kTrampolineOffset == 0u, "Unexpected trampoline offset.");
@@ -106,9 +114,8 @@
for (auto& compiled_method : compiled_methods_) {
offset = patcher_->ReserveSpace(offset, compiled_method.get(), compiled_method_refs_[idx]);
- uint32_t aligned_offset = compiled_method->AlignCode(offset);
- uint32_t aligned_code_delta = aligned_offset - offset;
- offset += aligned_code_delta;
+ uint32_t alignment_size = CodeAlignmentSize(offset);
+ offset += alignment_size;
offset += sizeof(OatQuickMethodHeader);
uint32_t quick_code_offset = offset + compiled_method->CodeDelta();
@@ -136,11 +143,10 @@
for (auto& compiled_method : compiled_methods_) {
offset = patcher_->WriteThunks(&out_, offset);
- uint32_t aligned_offset = compiled_method->AlignCode(offset);
- uint32_t aligned_code_delta = aligned_offset - offset;
- CHECK_LE(aligned_code_delta, sizeof(kPadding));
- out_.WriteFully(kPadding, aligned_code_delta);
- offset += aligned_code_delta;
+ uint32_t alignment_size = CodeAlignmentSize(offset);
+ CHECK_LE(alignment_size, sizeof(kPadding));
+ out_.WriteFully(kPadding, alignment_size);
+ offset += alignment_size;
out_.WriteFully(dummy_header, sizeof(OatQuickMethodHeader));
offset += sizeof(OatQuickMethodHeader);
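
A worked usage of the CodeAlignmentSize() helper introduced above: it returns the padding that must precede the header so that the code, one header past it, lands aligned (header size 20 and alignment 16 assumed):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kHeader = 20;  // sizeof(OatQuickMethodHeader)

    constexpr uint32_t AlignUp(uint32_t x, uint32_t n) { return (x + n - 1) & ~(n - 1); }

    // Mirrors the CodeAlignmentSize() helper above (alignment value assumed).
    constexpr uint32_t CodeAlignmentSize(uint32_t header_offset, uint32_t alignment) {
      return AlignUp(header_offset + kHeader, alignment) - (header_offset + kHeader);
    }

    int main() {
      uint32_t offset = 100;
      uint32_t pad = CodeAlignmentSize(offset, /*alignment=*/16);  // 120 -> 128
      assert(pad == 8);
      assert((offset + pad + kHeader) % 16 == 0);  // code start is aligned
      return 0;
    }
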
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 18ebfeb..ce044e8 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -16,6 +16,7 @@
#include "arch/instruction_set_features.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_compiler_test.h"
@@ -444,7 +445,8 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(164 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(162 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index b32199f..8273b15 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -23,6 +23,7 @@
#include "art_method-inl.h"
#include "base/allocator.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/file_magic.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
@@ -86,6 +87,13 @@
OatHeader* const oat_header_;
};
+inline uint32_t CodeAlignmentSize(uint32_t header_offset, const CompiledMethod& compiled_method) {
+ // We want to align the code rather than the preheader.
+ uint32_t unaligned_code_offset = header_offset + sizeof(OatQuickMethodHeader);
+ uint32_t aligned_code_offset = compiled_method.AlignCode(unaligned_code_offset);
+ return aligned_code_offset - unaligned_code_offset;
+}
+
} // anonymous namespace
// Defines the location of the raw dex file to write.
@@ -506,7 +514,7 @@
if (!HasBootImage()) {
// Allocate space for app dex cache arrays in the .bss section.
size_t bss_start = RoundUp(size_, kPageSize);
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
bss_size_ = 0u;
for (const DexFile* dex_file : *dex_files_) {
dex_cache_arrays_offsets_.Put(dex_file, bss_start + bss_size_);
@@ -816,8 +824,8 @@
uint32_t thumb_offset) {
offset_ = writer_->relative_patcher_->ReserveSpace(
offset_, compiled_method, MethodReference(dex_file_, it.GetMemberIndex()));
- offset_ = compiled_method->AlignCode(offset_);
- DCHECK_ALIGNED_PARAM(offset_,
+ offset_ += CodeAlignmentSize(offset_, *compiled_method);
+ DCHECK_ALIGNED_PARAM(offset_ + sizeof(OatQuickMethodHeader),
GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
return offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
}
@@ -941,7 +949,7 @@
}
protected:
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
};
class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
@@ -1010,17 +1018,16 @@
ReportWriteFailure("relative call thunk", it);
return false;
}
- uint32_t aligned_offset = compiled_method->AlignCode(offset_);
- uint32_t aligned_code_delta = aligned_offset - offset_;
- if (aligned_code_delta != 0) {
- if (!writer_->WriteCodeAlignment(out, aligned_code_delta)) {
+ uint32_t alignment_size = CodeAlignmentSize(offset_, *compiled_method);
+ if (alignment_size != 0) {
+ if (!writer_->WriteCodeAlignment(out, alignment_size)) {
ReportWriteFailure("code alignment padding", it);
return false;
}
- offset_ += aligned_code_delta;
+ offset_ += alignment_size;
DCHECK_OFFSET_();
}
- DCHECK_ALIGNED_PARAM(offset_,
+ DCHECK_ALIGNED_PARAM(offset_ + sizeof(OatQuickMethodHeader),
GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
DCHECK_EQ(method_offsets.code_offset_,
offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
@@ -1149,7 +1156,8 @@
if (UNLIKELY(target_offset == 0)) {
ArtMethod* target = GetTargetMethod(patch);
DCHECK(target != nullptr);
- size_t size = GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
+ PointerSize size =
+ GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet());
const void* oat_code_offset = target->GetEntryPointFromQuickCompiledCodePtrSize(size);
if (oat_code_offset != 0) {
DCHECK(!writer_->HasBootImage());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9878d2b..5152075 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -137,7 +137,7 @@
size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
- return pointer_size * index;
+ return static_cast<size_t>(pointer_size) * index;
}
uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
@@ -765,16 +765,24 @@
LocationSummary* locations = instruction->GetLocations();
uint32_t register_mask = locations->GetRegisterMask();
- if (locations->OnlyCallsOnSlowPath()) {
- // In case of slow path, we currently set the location of caller-save registers
- // to register (instead of their stack location when pushed before the slow-path
- // call). Therefore register_mask contains both callee-save and caller-save
- // registers that hold objects. We must remove the caller-save from the mask, since
- // they will be overwritten by the callee.
- register_mask &= core_callee_save_mask_;
+ if (instruction->IsSuspendCheck()) {
+ // Suspend check has special ABI that saves the caller-save registers in callee,
+ // so we want to emit stack maps containing the registers.
+ // TODO: Register allocator still reserves space for the caller-save registers.
+ // We should add slow-path-specific caller-save information into LocationSummary
+ // and refactor the code here as well as in the register allocator to use it.
+ } else {
+ if (locations->OnlyCallsOnSlowPath()) {
+ // In case of slow path, we currently set the location of caller-save registers
+ // to register (instead of their stack location when pushed before the slow-path
+ // call). Therefore register_mask contains both callee-save and caller-save
+ // registers that hold objects. We must remove the caller-save from the mask, since
+ // they will be overwritten by the callee.
+ register_mask &= core_callee_save_mask_;
+ }
+ // The register mask must be a subset of callee-save registers.
+ DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
}
- // The register mask must be a subset of callee-save registers.
- DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
native_pc,
register_mask,
@@ -1178,19 +1186,19 @@
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
- // When read barriers are enabled, some instructions use a
- // slow path to emit a read barrier, which does not trigger
- // GC, is not fatal, nor is emitted by HDeoptimize
- // instructions.
+ // When (non-Baker) read barriers are enabled, some instructions
+ // use a slow path to emit a read barrier, which does not trigger
+ // GC.
(kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier &&
(instruction->IsInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
- instruction->IsArraySet() ||
instruction->IsArrayGet() ||
instruction->IsLoadClass() ||
instruction->IsLoadString() ||
instruction->IsInstanceOf() ||
- instruction->IsCheckCast())))
+ instruction->IsCheckCast() ||
+ (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
<< "instruction->DebugName()=" << instruction->DebugName()
<< " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
@@ -1204,6 +1212,27 @@
<< instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}
+void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " slow_path->GetDescription()=" << slow_path->GetDescription();
+  // Only the Baker read barrier marking slow path used by certain
+ // instructions is expected to invoke the runtime without recording
+ // PC-related information.
+ DCHECK(kUseBakerReadBarrier);
+ DCHECK(instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArrayGet() ||
+ instruction->IsLoadClass() ||
+ instruction->IsLoadString() ||
+ instruction->IsInstanceOf() ||
+ instruction->IsCheckCast() ||
+ (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " slow_path->GetDescription()=" << slow_path->GetDescription();
+}
+
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
RegisterSet* live_registers = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
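
A hedged restatement (not ART code) of the new register-mask rule above: suspend checks use an entrypoint that preserves caller-save registers, so their stack maps keep the full mask, while every other slow-path call must trim the mask down to callee saves:

    #include <cassert>
    #include <cstdint>

    uint32_t StackMapRegisterMask(bool is_suspend_check,
                                  bool only_calls_on_slow_path,
                                  uint32_t register_mask,
                                  uint32_t core_callee_save_mask) {
      if (is_suspend_check) {
        // Special ABI: the entrypoint saves caller-save registers itself.
        return register_mask;
      }
      if (only_calls_on_slow_path) {
        // Caller-saves will be clobbered by the callee; drop them from the mask.
        register_mask &= core_callee_save_mask;
      }
      // At this point the mask must be a subset of the callee saves.
      assert((register_mask & core_callee_save_mask) == register_mask);
      return register_mask;
    }

    int main() {
      assert(StackMapRegisterMask(false, true, 0xF0F0, 0x0FF0) == 0x00F0);
      assert(StackMapRegisterMask(true, false, 0xF0F0, 0x0FF0) == 0xF0F0);
      return 0;
    }
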
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5e6e175..ad02ecf 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -22,6 +22,7 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
+#include "base/enums.h"
#include "compiled_method.h"
#include "driver/compiler_options.h"
#include "globals.h"
@@ -191,7 +192,7 @@
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
// Note that this follows the current calling convention.
return GetFrameSize()
- + InstructionSetPointerSize(GetInstructionSet()) // Art method
+ + static_cast<size_t>(InstructionSetPointerSize(GetInstructionSet())) // Art method
+ parameter->GetIndex() * kVRegSize;
}
@@ -357,13 +358,14 @@
static uint32_t GetArrayDataOffset(HArrayGet* array_get);
// Return the entry point offset for ReadBarrierMarkRegX, where X is `reg`.
- template <size_t pointer_size>
+ template <PointerSize pointer_size>
static int32_t GetReadBarrierMarkEntryPointsOffset(size_t reg) {
- DCHECK_LT(reg, 32u);
+ // The entry point list defines 30 ReadBarrierMarkRegX entry points.
+ DCHECK_LT(reg, 30u);
// The ReadBarrierMarkRegX entry points are ordered by increasing
// register number in Thread::tls_Ptr_.quick_entrypoints.
return QUICK_ENTRYPOINT_OFFSET(pointer_size, pReadBarrierMarkReg00).Int32Value()
- + pointer_size * reg;
+ + static_cast<size_t>(pointer_size) * reg;
}
void EmitParallelMoves(Location from1,
@@ -379,8 +381,14 @@
return type == Primitive::kPrimNot && !value->IsNullConstant();
}
+
+  // Performs checks pertaining to an InvokeRuntime call.
void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);
+  // Performs checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
+ static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
+ SlowPathCode* slow_path);
+
void AddAllocatedRegister(Location location) {
allocated_registers_.Add(location);
}
@@ -693,7 +701,7 @@
size_t number_of_registers,
const F* fpu_registers,
size_t number_of_fpu_registers,
- size_t pointer_size)
+ PointerSize pointer_size)
: registers_(registers),
number_of_registers_(number_of_registers),
fpu_registers_(fpu_registers),
@@ -716,7 +724,7 @@
size_t GetStackOffsetOf(size_t index) const {
// We still reserve the space for parameters passed by registers.
// Add space for the method pointer.
- return pointer_size_ + index * kVRegSize;
+ return static_cast<size_t>(pointer_size_) + index * kVRegSize;
}
private:
@@ -724,7 +732,7 @@
const size_t number_of_registers_;
const F* fpu_registers_;
const size_t number_of_fpu_registers_;
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
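
The GetReadBarrierMarkEntryPointsOffset() change above tightens the register bound to the 30 entry points that actually exist and makes the pointer-size multiplication explicit. A sketch with an invented base offset:

    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };  // assumed, as above

    // ReadBarrierMarkRegX entry points are consecutive Thread fields ordered by
    // register number, so RegN lives N pointer-sized slots past Reg00. The
    // 0x200 base below is invented; only registers 0..29 have entry points.
    constexpr int32_t ReadBarrierMarkOffset(int32_t reg00_offset,
                                            PointerSize pointer_size,
                                            size_t reg) {
      return reg00_offset +
             static_cast<int32_t>(static_cast<size_t>(pointer_size) * reg);
    }

    static_assert(ReadBarrierMarkOffset(0x200, PointerSize::k64, 5) == 0x228, "");
    static_assert(ReadBarrierMarkOffset(0x200, PointerSize::k32, 5) == 0x214, "");
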
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 236ed20..c105940 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -61,7 +61,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value()
class NullCheckSlowPathARM : public SlowPathCode {
public:
@@ -119,11 +119,9 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ b(GetReturnLabel());
} else {
@@ -431,8 +429,7 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -440,7 +437,6 @@
// No need to save live registers; it's taken care of by the
// entrypoint. Also, there is no need to update the stack mask,
// as this runtime call will not trigger a garbage collection.
- InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
DCHECK_NE(reg, SP);
DCHECK_NE(reg, LR);
@@ -461,12 +457,9 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmWordSize>(reg);
- // TODO: Do not emit a stack map for this runtime call.
- arm_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(reg);
+ // This runtime call does not require a stack map.
+ arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ b(GetExitLabel());
}
@@ -516,8 +509,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
@@ -972,7 +964,7 @@
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpops(start_register, POPCOUNT(fpu_spill_mask_));
- __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
+ __ cfi().AdjustCFAOffset(-static_cast<int>(kArmPointerSize) * POPCOUNT(fpu_spill_mask_));
__ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
// Pop LR into PC to return.
@@ -1224,7 +1216,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kArmWordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1240,6 +1232,14 @@
RecordPcInfo(instruction, dex_pc, slow_path);
}
+void CodeGeneratorARM::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
+ __ blx(LR);
+}
+
void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
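The new helper is InvokeRuntime minus the RecordPcInfo step. A stack map is only required
when the runtime may walk the compiled frame during the call (GC, exception delivery,
deoptimization); the ReadBarrierMarkRegX entry points do none of these. A hedged,
illustrative-only sketch of that decision (not the real CodeGenerator API):

    enum class StackWalkBehavior { kMayWalkStack, kNeverWalksStack };

    bool NeedsStackMap(StackWalkBehavior behavior) {
      // Only calls that can trigger a stack walk need a PC -> dex-pc mapping.
      return behavior == StackWalkBehavior::kMayWalkStack;
    }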
@@ -1287,6 +1287,44 @@
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
+void InstructionCodeGeneratorARM::GenerateVcmp(HInstruction* instruction) {
+ Primitive::Type type = instruction->InputAt(0)->GetType();
+ Location lhs_loc = instruction->GetLocations()->InAt(0);
+ Location rhs_loc = instruction->GetLocations()->InAt(1);
+ if (rhs_loc.IsConstant()) {
+ // 0.0 is the only immediate that can be encoded directly in
+ // a VCMP instruction.
+ //
+ // Both the JLS (section 15.20.1) and the JVMS (section 6.5)
+ // specify that in a floating-point comparison, positive zero
+ // and negative zero are considered equal, so we can use the
+ // literal 0.0 for both cases here.
+ //
+ // Note however that some methods (Float.equals, Float.compare,
+ // Float.compareTo, Double.equals, Double.compare,
+ // Double.compareTo, Math.max, Math.min, StrictMath.max,
+ // StrictMath.min) consider 0.0 to be (strictly) greater than
+ // -0.0. So if we ever translate calls to these methods into a
+ // HCompare instruction, we must handle the -0.0 case with
+ // care here.
+ DCHECK(rhs_loc.GetConstant()->IsArithmeticZero());
+ if (type == Primitive::kPrimFloat) {
+ __ vcmpsz(lhs_loc.AsFpuRegister<SRegister>());
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ vcmpdz(FromLowSToD(lhs_loc.AsFpuRegisterPairLow<SRegister>()));
+ }
+ } else {
+ if (type == Primitive::kPrimFloat) {
+ __ vcmps(lhs_loc.AsFpuRegister<SRegister>(), rhs_loc.AsFpuRegister<SRegister>());
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ vcmpd(FromLowSToD(lhs_loc.AsFpuRegisterPairLow<SRegister>()),
+ FromLowSToD(rhs_loc.AsFpuRegisterPairLow<SRegister>()));
+ }
+ }
+}
+
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label ATTRIBUTE_UNUSED) {
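The signed-zero reasoning in GenerateVcmp is standard IEEE-754 behavior and can be checked
in plain C++; the distinction only exists at the bit level, which is exactly what
Float.compare and friends inspect:

    #include <cmath>

    static_assert(0.0f == -0.0f, "IEEE-754: signed zeros compare equal");

    // Only a bit-level test tells the zeros apart (runtime check, as
    // std::signbit is not constexpr in older standards):
    bool DistinguishZeros() {
      return std::signbit(-0.0f) && !std::signbit(0.0f);  // true
    }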
@@ -1387,22 +1425,14 @@
Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
- LocationSummary* locations = condition->GetLocations();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
-
Primitive::Type type = condition->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong:
GenerateLongComparesAndJumps(condition, true_target, false_target);
break;
case Primitive::kPrimFloat:
- __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
- GenerateFPJumps(condition, true_target, false_target);
- break;
case Primitive::kPrimDouble:
- __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
- FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateVcmp(condition);
GenerateFPJumps(condition, true_target, false_target);
break;
default:
@@ -1583,7 +1613,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -1630,12 +1660,8 @@
GenerateLongComparesAndJumps(cond, &true_label, &false_label);
break;
case Primitive::kPrimFloat:
- __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
- GenerateFPJumps(cond, &true_label, &false_label);
- break;
case Primitive::kPrimDouble:
- __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
- FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+ GenerateVcmp(cond);
GenerateFPJumps(cond, &true_label, &false_label);
break;
}
@@ -1937,7 +1963,7 @@
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
uint32_t entry_point =
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value();
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value();
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
@@ -3528,7 +3554,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize);
__ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadWord, LR, temp, code_offset.Int32Value());
__ blx(LR);
@@ -3652,7 +3678,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(compare->InputAt(1)));
locations->SetOut(Location::RequiresRegister());
break;
}
@@ -3697,12 +3723,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
__ LoadImmediate(out, 0);
- if (type == Primitive::kPrimFloat) {
- __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
- } else {
- __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
- FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
- }
+ GenerateVcmp(compare);
__ vmstat(); // transfer FP status register to ARM APSR.
less_cond = ARMFPCondition(kCondLT, compare->IsGtBias());
break;
@@ -3996,6 +4017,17 @@
}
}
+Location LocationsBuilderARM::ArithmeticZeroOrFpuRegister(HInstruction* input) {
+ DCHECK(input->GetType() == Primitive::kPrimDouble || input->GetType() == Primitive::kPrimFloat)
+ << input->GetType();
+ if ((input->IsFloatConstant() && (input->AsFloatConstant()->IsArithmeticZero())) ||
+ (input->IsDoubleConstant() && (input->AsDoubleConstant()->IsArithmeticZero()))) {
+ return Location::ConstantLocation(input->AsConstant());
+ } else {
+ return Location::RequiresFpuRegister();
+ }
+}
+
Location LocationsBuilderARM::ArmEncodableConstantOrRegister(HInstruction* constant,
Opcode opcode) {
DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
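A hedged sketch of the zero predicates assumed by ArithmeticZeroOrFpuRegister: an
"arithmetic zero" plausibly means a value that compares equal to 0.0 (true for both signed
zeros), as opposed to a zero bit pattern, which only +0.0f has:

    #include <cstdint>
    #include <cstring>

    bool IsArithmeticZeroSketch(float v) {
      return v == 0.0f;  // matches both +0.0f and -0.0f
    }

    bool IsZeroBitPatternSketch(float v) {
      uint32_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return bits == 0u;  // matches +0.0f only
    }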
@@ -4943,7 +4975,7 @@
if (can_be_null) {
__ CompareAndBranchIfZero(value, &is_null);
}
- __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
+ __ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ strb(card, Address(card, temp));
if (can_be_null) {
@@ -4994,7 +5026,7 @@
}
__ LoadFromOffset(
- kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
+ kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmPointerSize>().Int32Value());
if (successor == nullptr) {
__ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -5575,7 +5607,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+ return Thread::ExceptionOffset<kArmPointerSize>().Int32Value();
}
void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
@@ -6330,7 +6362,7 @@
// IP = Thread::Current()->GetIsGcMarking()
__ LoadFromOffset(
- kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmWordSize>().Int32Value());
+ kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value());
__ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
@@ -6689,7 +6721,7 @@
// LR = callee_method->entry_point_from_quick_compiled_code_
__ LoadFromOffset(
kLoadWord, LR, callee_method.AsRegister<Register>(),
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmWordSize).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
// LR()
__ blx(LR);
break;
@@ -6723,7 +6755,7 @@
__ MaybeUnpoisonHeapReference(temp);
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value();
+ kArmPointerSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
__ LoadFromOffset(kLoadWord, LR, temp, entry_point);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index ef7913b..fa7709b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
+#include "base/enums.h"
#include "code_generator.h"
-#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "string_reference.h"
@@ -32,7 +32,7 @@
class CodeGeneratorARM;
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kArmWordSize = kArmPointerSize;
+static constexpr size_t kArmWordSize = static_cast<size_t>(kArmPointerSize);
static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte;
static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
@@ -180,6 +180,7 @@
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ Location ArithmeticZeroOrFpuRegister(HInstruction* input);
Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode);
@@ -281,6 +282,7 @@
void GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target,
Label* false_target);
+ void GenerateVcmp(HInstruction* instruction);
void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
@@ -394,6 +396,12 @@
uint32_t dex_pc,
SlowPathCode* slow_path);
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value, bool can_be_null);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 76b0797..54c9efc 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -133,7 +133,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, x).Int32Value()
// Calculate memory accessing operand for save/restore live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
@@ -398,11 +398,9 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
@@ -597,8 +595,7 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -606,11 +603,12 @@
// No need to save live registers; it's taken care of by the
// entrypoint. Also, there is no need to update the stack mask,
// as this runtime call will not trigger a garbage collection.
- InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
DCHECK_NE(obj_.reg(), LR);
DCHECK_NE(obj_.reg(), WSP);
DCHECK_NE(obj_.reg(), WZR);
+ // WIP0 is used by the slow path as a temporary; it cannot be the object register.
+ DCHECK_NE(obj_.reg(), IP0);
DCHECK(0 <= obj_.reg() && obj_.reg() < kNumberOfWRegisters) << obj_.reg();
// "Compact" slow path, saving two moves.
//
@@ -627,12 +625,9 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64WordSize>(obj_.reg());
- // TODO: Do not emit a stack map for this runtime call.
- arm64_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(obj_.reg());
+ // This runtime call does not require a stack map.
+ arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ B(GetExitLabel());
}
@@ -682,8 +677,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+ (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
// The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
@@ -757,10 +751,7 @@
(instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
<< instruction_->AsInvoke()->GetIntrinsic();
DCHECK_EQ(offset_, 0U);
- DCHECK(index_.IsRegisterPair());
- // UnsafeGet's offset location is a register pair, the low
- // part contains the correct offset.
- index = index_.ToLow();
+ DCHECK(index_.IsRegister());
}
}
@@ -1111,7 +1102,7 @@
if (value_can_be_null) {
__ Cbz(value, &done);
}
- __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
+ __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64PointerSize>().Int32Value()));
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ Strb(card, MemOperand(card, temp.X()));
if (value_can_be_null) {
@@ -1290,17 +1281,21 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
- if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
- temp = temps.AcquireW();
- } else if (src_cst->IsLongConstant()) {
- temp = temps.AcquireX();
- } else if (src_cst->IsFloatConstant()) {
- temp = temps.AcquireS();
+ if (src_cst->IsZeroBitPattern()) {
+ temp = (src_cst->IsLongConstant() || src_cst->IsDoubleConstant()) ? xzr : wzr;
} else {
- DCHECK(src_cst->IsDoubleConstant());
- temp = temps.AcquireD();
+ if (src_cst->IsIntConstant()) {
+ temp = temps.AcquireW();
+ } else if (src_cst->IsLongConstant()) {
+ temp = temps.AcquireX();
+ } else if (src_cst->IsFloatConstant()) {
+ temp = temps.AcquireS();
+ } else {
+ DCHECK(src_cst->IsDoubleConstant());
+ temp = temps.AcquireD();
+ }
+ MoveConstant(temp, src_cst);
}
- MoveConstant(temp, src_cst);
__ Str(temp, StackOperandFrom(destination));
} else {
DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
@@ -1485,7 +1480,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kArm64WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1502,6 +1497,15 @@
RecordPcInfo(instruction, dex_pc, slow_path);
}
+void CodeGeneratorARM64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ BlockPoolsScope block_pools(GetVIXLAssembler());
+ __ Ldr(lr, MemOperand(tr, entry_point_offset));
+ __ Blr(lr);
+}
+
void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -1559,7 +1563,7 @@
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireW();
- __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
+ __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64PointerSize>().SizeValue()));
if (successor == nullptr) {
__ Cbnz(temp, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -3523,7 +3527,7 @@
Register temp = XRegisterFrom(locations->GetTemp(0));
Location receiver = locations->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
// The register ip1 is required to be used for the hidden argument in
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -3675,7 +3679,7 @@
// /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
__ Ldr(reg.X(),
MemOperand(method_reg.X(),
- ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
+ ArtMethod::DexCacheResolvedMethodsOffset(kArm64PointerSize).Int32Value()));
// temp = temp[index_in_cache];
// Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
uint32_t index_in_cache = invoke->GetDexMethodIndex();
@@ -3707,7 +3711,7 @@
// LR = callee_method->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(
XRegisterFrom(callee_method),
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize).Int32Value()));
// lr()
__ Blr(lr);
break;
@@ -3727,7 +3731,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
BlockPoolsScope block_pools(GetVIXLAssembler());
@@ -4124,7 +4128,7 @@
}
static MemOperand GetExceptionTlsAddress() {
- return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
+ return MemOperand(tr, Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
}
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
@@ -4437,7 +4441,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Location temp = instruction->GetLocations()->GetTemp(0);
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
__ Ldr(XRegisterFrom(temp), MemOperand(tr, QUICK_ENTRY_POINT(pNewEmptyString)));
__ Ldr(lr, MemOperand(XRegisterFrom(temp), code_offset.Int32Value()));
__ Blr(lr);
@@ -5093,7 +5097,7 @@
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
// temp = Thread::Current()->GetIsGcMarking()
- __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64WordSize>().Int32Value()));
+ __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
__ Cbnz(temp, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
} else {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7201e59..1b5fa85 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -20,7 +20,6 @@
#include "arch/arm64/quick_method_frame_info_arm64.h"
#include "code_generator.h"
#include "common_arm64.h"
-#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
@@ -41,7 +40,7 @@
class CodeGeneratorARM64;
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kArm64WordSize = kArm64PointerSize;
+static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);
static const vixl::aarch64::Register kParameterCoreRegisters[] = {
vixl::aarch64::x1,
@@ -244,7 +243,7 @@
}
Arm64Assembler* GetAssembler() const { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
private:
void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
@@ -365,7 +364,7 @@
private:
Arm64Assembler* GetAssembler() const;
vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
- return GetAssembler()->vixl_masm_;
+ return GetAssembler()->GetVIXLAssembler();
}
CodeGeneratorARM64* const codegen_;
@@ -414,7 +413,7 @@
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
// Emit a write barrier.
void MarkGCCard(vixl::aarch64::Register object,
@@ -499,6 +498,12 @@
uint32_t dex_pc,
SlowPathCode* slow_path);
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 39248aa..59e103a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -147,7 +147,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
public:
@@ -351,14 +351,12 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this,
IsDirectEntrypoint(kQuickTestSuspend));
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ B(GetReturnLabel());
} else {
@@ -482,11 +480,22 @@
move_resolver_(graph->GetArena(), this),
assembler_(graph->GetArena(), &isa_features),
isa_features_(isa_features),
+ uint32_literals_(std::less<uint32_t>(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
method_patches_(MethodReferenceComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
call_patches_(MethodReferenceComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_string_patches_(StringReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(TypeReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_address_patches_(std::less<uint32_t>(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ clobbered_ra_(false) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
@@ -494,7 +503,7 @@
#undef __
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
// Ensure that we fix up branches.
@@ -688,6 +697,16 @@
if ((fpu_spill_mask_ != 0) && (POPCOUNT(core_spill_mask_) % 2 != 0)) {
core_spill_mask_ |= (1 << ZERO);
}
+ // If RA is clobbered by PC-relative operations on R2 and it's the only spilled register
+ // (this can happen in leaf methods), artificially spill the ZERO register in order to
+ // force explicit saving and restoring of RA. RA isn't saved/restored when it's the only
+ // spilled register.
+ // TODO: Can this be improved? It forces the creation of a stack frame (even though RA
+ // might have been saved in an unused temporary register instead) and stores RA and the
+ // current method pointer in that frame.
+ if (clobbered_ra_ && core_spill_mask_ == (1u << RA) && fpu_spill_mask_ == 0) {
+ core_spill_mask_ |= (1 << ZERO);
+ }
}
static dwarf::Reg DWARFReg(Register reg) {
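The artificial ZERO spill above is a bitmask trick: setting a second, harmless bit in the
core spill mask pushes the prologue/epilogue down the path that really saves and restores
RA. A small sketch using the standard MIPS numbering (ZERO = 0, RA = 31):

    #include <cstdint>

    constexpr uint32_t kZeroBit = 1u << 0;   // $zero
    constexpr uint32_t kRaBit = 1u << 31;    // $ra

    uint32_t FixupCoreSpillMask(uint32_t core_mask, uint32_t fpu_mask, bool clobbered_ra) {
      if (clobbered_ra && core_mask == kRaBit && fpu_mask == 0u) {
        core_mask |= kZeroBit;  // force explicit save/restore of RA
      }
      return core_mask;
    }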
@@ -962,7 +981,12 @@
size_t size =
method_patches_.size() +
call_patches_.size() +
- pc_relative_dex_cache_patches_.size();
+ pc_relative_dex_cache_patches_.size() +
+ pc_relative_string_patches_.size() +
+ pc_relative_type_patches_.size() +
+ boot_image_string_patches_.size() +
+ boot_image_type_patches_.size() +
+ boot_image_address_patches_.size();
linker_patches->reserve(size);
for (const auto& entry : method_patches_) {
const MethodReference& target_method = entry.first;
@@ -994,6 +1018,71 @@
pc_rel_offset,
base_element_offset));
}
+ for (const PcRelativePatchInfo& info : pc_relative_string_patches_) {
+ const DexFile& dex_file = info.target_dex_file;
+ size_t string_index = info.offset_or_index;
+ DCHECK(info.high_label.IsBound());
+ uint32_t high_offset = __ GetLabelLocation(&info.high_label);
+ // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
+ // the assembler's base label used for PC-relative literals.
+ uint32_t pc_rel_offset = info.pc_rel_label.IsBound()
+ ? __ GetLabelLocation(&info.pc_rel_label)
+ : __ GetPcRelBaseLabelLocation();
+ linker_patches->push_back(LinkerPatch::RelativeStringPatch(high_offset,
+ &dex_file,
+ pc_rel_offset,
+ string_index));
+ }
+ for (const PcRelativePatchInfo& info : pc_relative_type_patches_) {
+ const DexFile& dex_file = info.target_dex_file;
+ size_t type_index = info.offset_or_index;
+ DCHECK(info.high_label.IsBound());
+ uint32_t high_offset = __ GetLabelLocation(&info.high_label);
+ // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
+ // the assembler's base label used for PC-relative literals.
+ uint32_t pc_rel_offset = info.pc_rel_label.IsBound()
+ ? __ GetLabelLocation(&info.pc_rel_label)
+ : __ GetPcRelBaseLabelLocation();
+ linker_patches->push_back(LinkerPatch::RelativeTypePatch(high_offset,
+ &dex_file,
+ pc_rel_offset,
+ type_index));
+ }
+ for (const auto& entry : boot_image_string_patches_) {
+ const StringReference& target_string = entry.first;
+ Literal* literal = entry.second;
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
+ linker_patches->push_back(LinkerPatch::StringPatch(literal_offset,
+ target_string.dex_file,
+ target_string.string_index));
+ }
+ for (const auto& entry : boot_image_type_patches_) {
+ const TypeReference& target_type = entry.first;
+ Literal* literal = entry.second;
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
+ linker_patches->push_back(LinkerPatch::TypePatch(literal_offset,
+ target_type.dex_file,
+ target_type.type_index));
+ }
+ for (const auto& entry : boot_image_address_patches_) {
+ DCHECK(GetCompilerOptions().GetIncludePatchInformation());
+ Literal* literal = entry.second;
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
+ linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
+ }
+}
+
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
+ const DexFile& dex_file, uint32_t string_index) {
+ return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+}
+
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
+ const DexFile& dex_file, uint32_t type_index) {
+ return NewPcRelativePatch(dex_file, type_index, &pc_relative_type_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeDexCacheArrayPatch(
@@ -1007,6 +1096,12 @@
return &patches->back();
}
+Literal* CodeGeneratorMIPS::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
+ return map->GetOrCreate(
+ value,
+ [this, value]() { return __ NewLiteral<uint32_t>(value); });
+}
+
Literal* CodeGeneratorMIPS::DeduplicateMethodLiteral(MethodReference target_method,
MethodToLiteralMap* map) {
return map->GetOrCreate(
@@ -1022,6 +1117,26 @@
return DeduplicateMethodLiteral(target_method, &call_patches_);
}
+Literal* CodeGeneratorMIPS::DeduplicateBootImageStringLiteral(const DexFile& dex_file,
+ uint32_t string_index) {
+ return boot_image_string_patches_.GetOrCreate(
+ StringReference(&dex_file, string_index),
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+Literal* CodeGeneratorMIPS::DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
+ uint32_t type_index) {
+ return boot_image_type_patches_.GetOrCreate(
+ TypeReference(&dex_file, type_index),
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address) {
+ bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
+ Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
+ return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
+}
+
void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
MipsLabel done;
Register card = AT;
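The Deduplicate* helpers above all lean on the map's GetOrCreate: look the key up and run
the factory lambda only on a miss, so each distinct key yields exactly one Literal. A
sketch of that idiom over a plain std::map (the arena-backed map is assumed to behave the
same way):

    #include <map>

    template <typename Map, typename Factory>
    typename Map::mapped_type GetOrCreate(Map* map, const typename Map::key_type& key,
                                          Factory factory) {
      auto it = map->lower_bound(key);
      if (it == map->end() || map->key_comp()(key, it->first)) {
        it = map->emplace_hint(it, key, factory());  // miss: create exactly once
      }
      return it->second;                             // hit or freshly created
    }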
@@ -1030,7 +1145,7 @@
__ LoadFromOffset(kLoadWord,
card,
TR,
- Thread::CardTableOffset<kMipsWordSize>().Int32Value());
+ Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
__ Srl(temp, object, gc::accounting::CardTable::kCardShift);
__ Addu(temp, card, temp);
__ Sb(card, temp, 0);
@@ -1067,6 +1182,15 @@
blocked_fpu_registers_[i] = true;
}
+ if (GetGraph()->IsDebuggable()) {
+ // Stubs do not save callee-save floating point registers. If the graph
+ // is debuggable, we need to deal with these registers differently. For
+ // now, just block them.
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
+ }
+ }
+
UpdateBlockedPairRegisters();
}
@@ -1113,7 +1237,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMipsWordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path,
@@ -1164,7 +1288,7 @@
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
TR,
- Thread::ThreadFlagsOffset<kMipsWordSize>().Int32Value());
+ Thread::ThreadFlagsOffset<kMipsPointerSize>().Int32Value());
if (successor == nullptr) {
__ Bnez(TMP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -3440,7 +3564,8 @@
if (field_type == Primitive::kPrimLong) {
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong));
} else {
- locations->SetOut(Location::RequiresFpuRegister());
+ // Use Location::Any() to avoid situations where we could run out of available FP registers.
+ locations->SetOut(Location::Any());
// Need some temp core regs since FP results are returned in core registers
Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong);
locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
@@ -3505,11 +3630,23 @@
IsDirectEntrypoint(kQuickA64Load));
CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
if (type == Primitive::kPrimDouble) {
- // Need to move to FP regs since FP results are returned in core registers.
- __ Mtc1(locations->GetTemp(1).AsRegister<Register>(),
- locations->Out().AsFpuRegister<FRegister>());
- __ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
- locations->Out().AsFpuRegister<FRegister>());
+ // FP results are returned in core registers. Need to move them.
+ Location out = locations->Out();
+ if (out.IsFpuRegister()) {
+ __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), out.AsFpuRegister<FRegister>());
+ __ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
+ out.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(out.IsDoubleStackSlot());
+ __ StoreToOffset(kStoreWord,
+ locations->GetTemp(1).AsRegister<Register>(),
+ SP,
+ out.GetStackIndex());
+ __ StoreToOffset(kStoreWord,
+ locations->GetTemp(2).AsRegister<Register>(),
+ SP,
+ out.GetStackIndex() + 4);
+ }
}
} else {
if (!Primitive::IsFloatingPointType(type)) {
@@ -3568,7 +3705,8 @@
locations->SetInAt(1, Location::RegisterPairLocation(
calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
} else {
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ // Use Location::Any() to avoid situations where we could run out of available FP registers.
+ locations->SetInAt(1, Location::Any());
// Pass FP parameters in core registers.
locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
@@ -3627,10 +3765,28 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
if (type == Primitive::kPrimDouble) {
// Pass FP parameters in core registers.
- __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
- locations->InAt(1).AsFpuRegister<FRegister>());
- __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
- locations->InAt(1).AsFpuRegister<FRegister>());
+ Location in = locations->InAt(1);
+ if (in.IsFpuRegister()) {
+ __ Mfc1(locations->GetTemp(1).AsRegister<Register>(), in.AsFpuRegister<FRegister>());
+ __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
+ in.AsFpuRegister<FRegister>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ LoadFromOffset(kLoadWord,
+ locations->GetTemp(1).AsRegister<Register>(),
+ SP,
+ in.GetStackIndex());
+ __ LoadFromOffset(kLoadWord,
+ locations->GetTemp(2).AsRegister<Register>(),
+ SP,
+ in.GetStackIndex() + 4);
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsDoubleConstant());
+ int64_t value = bit_cast<int64_t, double>(in.GetConstant()->AsDoubleConstant()->GetValue());
+ __ LoadConst64(locations->GetTemp(2).AsRegister<Register>(),
+ locations->GetTemp(1).AsRegister<Register>(),
+ value);
+ }
}
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Store),
instruction,
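The constant path above splits a double into the two 32-bit halves that the A64Store entry
point expects in a core register pair. A sketch of the bit_cast-and-split step (which half
lands in which register is dictated by the calling convention, not by this helper):

    #include <cstdint>
    #include <cstring>

    void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // bit_cast<int64_t, double>
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }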
@@ -3696,6 +3852,23 @@
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
}
+void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(
+ HInstruction* instruction ATTRIBUTE_UNUSED,
+ Location root,
+ Register obj,
+ uint32_t offset) {
+ Register root_reg = root.AsRegister<Register>();
+ if (kEmitCompilerReadBarrier) {
+ UNIMPLEMENTED(FATAL) << "for read barrier";
+ } else {
+ // Plain GC root load with no read barrier.
+ // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+ __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
+ // Note that GC roots are not affected by heap poisoning, thus we
+ // do not have to unpoison `root_reg` here.
+ }
+}
+
void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind =
instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
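GenerateGcRootFieldLoad can use a single kLoadWord because a GC root is one 4-byte
compressed reference, as the static_assert further down in this file checks. A hedged
model of the layout being read:

    #include <cstdint>

    // Assumed layout: one 32-bit compressed reference per root, so a plain
    // word load from (obj + offset) retrieves it.
    template <typename MirrorType>
    struct GcRootSketch {
      uint32_t reference_;
    };
    static_assert(sizeof(GcRootSketch<int>) == 4u, "Expected GC root to be 4 bytes.");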
@@ -3774,7 +3947,7 @@
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
// Set the hidden argument.
__ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
@@ -3861,16 +4034,80 @@
}
HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
- HLoadString::LoadKind desired_string_load_kind ATTRIBUTE_UNUSED) {
- // TODO: Implement other kinds.
- return HLoadString::LoadKind::kDexCacheViaMethod;
+ HLoadString::LoadKind desired_string_load_kind) {
+ if (kEmitCompilerReadBarrier) {
+ UNIMPLEMENTED(FATAL) << "for read barrier";
+ }
+ // We disable PC-relative load when there is an irreducible loop, as the optimization
+ // is incompatible with it.
+ bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
+ bool fallback_load = has_irreducible_loops;
+ switch (desired_string_load_kind) {
+ case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(!GetCompilerOptions().GetCompilePic());
+ break;
+ case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(GetCompilerOptions().GetCompilePic());
+ break;
+ case HLoadString::LoadKind::kBootImageAddress:
+ break;
+ case HLoadString::LoadKind::kDexCacheAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
+ fallback_load = false;
+ break;
+ case HLoadString::LoadKind::kDexCachePcRelative:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
+ // with irreducible loops.
+ break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ fallback_load = false;
+ break;
+ }
+ if (fallback_load) {
+ desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
+ }
+ return desired_string_load_kind;
}
HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) {
- DCHECK_NE(desired_class_load_kind, HLoadClass::LoadKind::kReferrersClass);
- // TODO: Implement other kinds.
- return HLoadClass::LoadKind::kDexCacheViaMethod;
+ if (kEmitCompilerReadBarrier) {
+ UNIMPLEMENTED(FATAL) << "for read barrier";
+ }
+ // We disable PC-relative load when there is an irreducible loop, as the optimization
+ // is incompatible with it.
+ bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
+ bool fallback_load = has_irreducible_loops;
+ switch (desired_class_load_kind) {
+ case HLoadClass::LoadKind::kReferrersClass:
+ fallback_load = false;
+ break;
+ case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(!GetCompilerOptions().GetCompilePic());
+ break;
+ case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(GetCompilerOptions().GetCompilePic());
+ break;
+ case HLoadClass::LoadKind::kBootImageAddress:
+ break;
+ case HLoadClass::LoadKind::kDexCacheAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
+ fallback_load = false;
+ break;
+ case HLoadClass::LoadKind::kDexCachePcRelative:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
+ // with irreducible loops.
+ break;
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ fallback_load = false;
+ break;
+ }
+ if (fallback_load) {
+ desired_class_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+ }
+ return desired_class_load_kind;
}
Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
@@ -4048,7 +4285,7 @@
T9,
callee_method.AsRegister<Register>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kMipsWordSize).Int32Value());
+ kMipsPointerSize).Int32Value());
// T9()
__ Jalr(T9);
__ Nop();
@@ -4081,7 +4318,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
// temp = object->GetClass();
DCHECK(receiver.IsRegister());
@@ -4107,11 +4344,40 @@
}
void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
- InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
- cls,
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(V0));
+ if (cls->NeedsAccessCheck()) {
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(V0),
+ /* code_generator_supports_read_barrier */ false); // TODO: revisit this bool.
+ return;
+ }
+
+ LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ switch (load_kind) {
+ // We need an extra register for PC-relative literals on R2.
+ case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ if (codegen_->GetInstructionSetFeatures().IsR6()) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ // We need an extra register for PC-relative dex cache accesses.
+ case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kReferrersClass:
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ locations->SetInAt(0, Location::RequiresRegister());
+ break;
+ default:
+ break;
+ }
+ locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
@@ -4127,40 +4393,132 @@
return;
}
- Register out = locations->Out().AsRegister<Register>();
- Register current_method = locations->InAt(0).AsRegister<Register>();
- if (cls->IsReferrersClass()) {
- DCHECK(!cls->CanCallRuntime());
- DCHECK(!cls->MustGenerateClinitCheck());
- __ LoadFromOffset(kLoadWord, out, current_method,
- ArtMethod::DeclaringClassOffset().Int32Value());
- } else {
- __ LoadFromOffset(kLoadWord, out, current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kMipsPointerSize).Int32Value());
- __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ Location out_loc = locations->Out();
+ Register out = out_loc.AsRegister<Register>();
+ Register base_or_current_method_reg;
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+ switch (load_kind) {
+ // We need an extra register for PC-relative literals on R2.
+ case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
+ break;
+ // We need an extra register for PC-relative dex cache accesses.
+ case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kReferrersClass:
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
+ break;
+ default:
+ base_or_current_method_reg = ZERO;
+ break;
+ }
- if (!cls->IsInDexCache() || cls->MustGenerateClinitCheck()) {
- DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
- cls,
- cls,
- cls->GetDexPc(),
- cls->MustGenerateClinitCheck());
- codegen_->AddSlowPath(slow_path);
- if (!cls->IsInDexCache()) {
- __ Beqz(out, slow_path->GetEntryLabel());
- }
- if (cls->MustGenerateClinitCheck()) {
- GenerateClassInitializationCheck(slow_path, out);
+ bool generate_null_check = false;
+ switch (load_kind) {
+ case HLoadClass::LoadKind::kReferrersClass: {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(cls,
+ out_loc,
+ base_or_current_method_reg,
+ ArtMethod::DeclaringClassOffset().Int32Value());
+ break;
+ }
+ case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(!kEmitCompilerReadBarrier);
+ __ LoadLiteral(out,
+ base_or_current_method_reg,
+ codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
+ cls->GetTypeIndex()));
+ break;
+ case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(!kEmitCompilerReadBarrier);
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
+ if (isR6) {
+ __ Bind(&info->high_label);
+ __ Bind(&info->pc_rel_label);
+ // Add a 32-bit offset to PC.
+ __ Auipc(out, /* placeholder */ 0x1234);
+ __ Addiu(out, out, /* placeholder */ 0x5678);
} else {
- __ Bind(slow_path->GetExitLabel());
+ __ Bind(&info->high_label);
+ __ Lui(out, /* placeholder */ 0x1234);
+ // We do not bind info->pc_rel_label here, we'll use the assembler's label
+ // for PC-relative literals and the base from HMipsComputeBaseMethodAddress.
+ __ Ori(out, out, /* placeholder */ 0x5678);
+ // Add a 32-bit offset to PC.
+ __ Addu(out, out, base_or_current_method_reg);
}
+ break;
+ }
+ case HLoadClass::LoadKind::kBootImageAddress: {
+ DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK_NE(cls->GetAddress(), 0u);
+ uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ __ LoadLiteral(out,
+ base_or_current_method_reg,
+ codegen_->DeduplicateBootImageAddressLiteral(address));
+ break;
+ }
+ case HLoadClass::LoadKind::kDexCacheAddress: {
+ DCHECK_NE(cls->GetAddress(), 0u);
+ uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ static_assert(sizeof(GcRoot<mirror::Class>) == 4u, "Expected GC root to be 4 bytes.");
+ DCHECK_ALIGNED(cls->GetAddress(), 4u);
+ int16_t offset = Low16Bits(address);
+ uint32_t base_address = address - offset; // This accounts for offset sign extension.
+ __ Lui(out, High16Bits(base_address));
+ // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset);
+ generate_null_check = !cls->IsInDexCache();
+ break;
+ }
+ case HLoadClass::LoadKind::kDexCachePcRelative: {
+ HMipsDexCacheArraysBase* base = cls->InputAt(0)->AsMipsDexCacheArraysBase();
+ int32_t offset =
+ cls->GetDexCacheElementOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
+ // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
+ GenerateGcRootFieldLoad(cls, out_loc, base_or_current_method_reg, offset);
+ generate_null_check = !cls->IsInDexCache();
+ break;
+ }
+ case HLoadClass::LoadKind::kDexCacheViaMethod: {
+ // /* GcRoot<mirror::Class>[] */ out =
+ // current_method.ptr_sized_fields_->dex_cache_resolved_types_
+ __ LoadFromOffset(kLoadWord,
+ out,
+ base_or_current_method_reg,
+ ArtMethod::DexCacheResolvedTypesOffset(kMipsPointerSize).Int32Value());
+ // /* GcRoot<mirror::Class> */ out = out[type_index]
+ size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex());
+ GenerateGcRootFieldLoad(cls, out_loc, out, offset);
+ generate_null_check = !cls->IsInDexCache();
+ }
+ }
+
+ if (generate_null_check || cls->MustGenerateClinitCheck()) {
+ DCHECK(cls->CanCallRuntime());
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ if (generate_null_check) {
+ __ Beqz(out, slow_path->GetEntryLabel());
+ }
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
}
}
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kMipsWordSize>().Int32Value();
+ return Thread::ExceptionOffset<kMipsPointerSize>().Int32Value();
}
void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
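The "offset sign extension" comment in the kDexCacheAddress case above merits a worked
example: the MIPS load uses a signed 16-bit displacement, so when the low half of the
address is >= 0x8000 the lui base must sit one 64 KiB step higher to compensate (two's
complement narrowing assumed):

    #include <cstdint>

    // address = 0x12349000: low half 0x9000 sign-extends to -0x7000.
    constexpr uint32_t kAddress = 0x12349000u;
    constexpr int16_t kOffset = static_cast<int16_t>(kAddress & 0xFFFFu);  // -0x7000
    constexpr uint32_t kBase = kAddress - static_cast<uint32_t>(kOffset);  // 0x12350000

    // lui materializes kBase; the signed displacement then lands back on kAddress.
    static_assert(kBase + static_cast<uint32_t>(kOffset) == kAddress, "base + offset");
    static_assert((kBase & 0xFFFFu) == 0u, "lui only sets the high half");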
@@ -4183,21 +4541,132 @@
}
void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = load->NeedsEnvironment()
+ LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
+ HLoadString::LoadKind load_kind = load->GetLoadKind();
+ switch (load_kind) {
+ // We need an extra register for PC-relative literals on R2.
+ case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ case HLoadString::LoadKind::kBootImageAddress:
+ case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ if (codegen_->GetInstructionSetFeatures().IsR6()) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ // We need an extra register for PC-relative dex cache accesses.
+ case HLoadString::LoadKind::kDexCachePcRelative:
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ locations->SetInAt(0, Location::RequiresRegister());
+ break;
+ default:
+ break;
+ }
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+ HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary* locations = load->GetLocations();
- Register out = locations->Out().AsRegister<Register>();
- Register current_method = locations->InAt(0).AsRegister<Register>();
- __ LoadFromOffset(kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
- __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
- __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ Location out_loc = locations->Out();
+ Register out = out_loc.AsRegister<Register>();
+ Register base_or_current_method_reg;
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+ switch (load_kind) {
+ // We need an extra register for PC-relative literals on R2.
+ case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ case HLoadString::LoadKind::kBootImageAddress:
+ case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
+ break;
+ // We need an extra register for PC-relative dex cache accesses.
+ case HLoadString::LoadKind::kDexCachePcRelative:
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
+ break;
+ default:
+ base_or_current_method_reg = ZERO;
+ break;
+ }
+
+ switch (load_kind) {
+ case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(!kEmitCompilerReadBarrier);
+ __ LoadLiteral(out,
+ base_or_current_method_reg,
+ codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
+ load->GetStringIndex()));
+ return; // No dex cache slow path.
+ case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(!kEmitCompilerReadBarrier);
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
+ if (isR6) {
+ __ Bind(&info->high_label);
+ __ Bind(&info->pc_rel_label);
+ // Add a 32-bit offset to PC.
+ __ Auipc(out, /* placeholder */ 0x1234);
+ __ Addiu(out, out, /* placeholder */ 0x5678);
+ } else {
+ __ Bind(&info->high_label);
+ __ Lui(out, /* placeholder */ 0x1234);
+ // We do not bind info->pc_rel_label here, we'll use the assembler's label
+ // for PC-relative literals and the base from HMipsComputeBaseMethodAddress.
+ __ Ori(out, out, /* placeholder */ 0x5678);
+ // Add a 32-bit offset to PC.
+ __ Addu(out, out, base_or_current_method_reg);
+ }
+ return; // No dex cache slow path.
+ }
+ case HLoadString::LoadKind::kBootImageAddress: {
+ DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK_NE(load->GetAddress(), 0u);
+ uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ __ LoadLiteral(out,
+ base_or_current_method_reg,
+ codegen_->DeduplicateBootImageAddressLiteral(address));
+ return; // No dex cache slow path.
+ }
+ case HLoadString::LoadKind::kDexCacheAddress: {
+ DCHECK_NE(load->GetAddress(), 0u);
+ uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes.");
+ DCHECK_ALIGNED(load->GetAddress(), 4u);
+ int16_t offset = Low16Bits(address);
+ uint32_t base_address = address - offset; // This accounts for offset sign extension.
+ __ Lui(out, High16Bits(base_address));
+ // /* GcRoot<mirror::String> */ out = *(base_address + offset)
+ GenerateGcRootFieldLoad(load, out_loc, out, offset);
+ break;
+ }
+ case HLoadString::LoadKind::kDexCachePcRelative: {
+ HMipsDexCacheArraysBase* base = load->InputAt(0)->AsMipsDexCacheArraysBase();
+ int32_t offset =
+ load->GetDexCacheElementOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
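+      // The dex cache arrays base deliberately points kDexCacheArrayLwOffset
+      // bytes past the array start so that the signed 16-bit immediate of LW
+      // can reach a full 64KB of the arrays; hence the subtraction above.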
+ // /* GcRoot<mirror::String> */ out = *(dex_cache_arrays_base + offset)
+ GenerateGcRootFieldLoad(load, out_loc, base_or_current_method_reg, offset);
+ break;
+ }
+ case HLoadString::LoadKind::kDexCacheViaMethod: {
+ // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
+ GenerateGcRootFieldLoad(load,
+ out_loc,
+ base_or_current_method_reg,
+ ArtMethod::DeclaringClassOffset().Int32Value());
+ // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
+ __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
+ // /* GcRoot<mirror::String> */ out = out[string_index]
+ GenerateGcRootFieldLoad(load,
+ out_loc,
+ out,
+ CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind();
+ UNREACHABLE();
+ }
if (!load->IsInDexCache()) {
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
@@ -4412,7 +4881,7 @@
  // Move a uint16_t value to a register.
__ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
codegen_->InvokeRuntime(
- GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr,
@@ -4438,7 +4907,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
__ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadWord, T9, temp, code_offset.Int32Value());
__ Jalr(T9);
@@ -4446,7 +4915,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(
- GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ GetThreadOffset<kMipsPointerSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr,
@@ -5327,6 +5796,7 @@
__ Nal();
// Grab the return address off RA.
__ Move(reg, RA);
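+  // (NAL links RA = PC + 8 without branching, which is how we obtain the
+  // current PC on R2; R6 has AUIPC for this.)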
+ // TODO: Can we share this code with that of VisitMipsDexCacheArraysBase()?
// Remember this offset (the obtained PC value) for later use with constant area.
__ BindPcRelBaseLabel();
@@ -5357,6 +5827,7 @@
__ Ori(reg, reg, /* placeholder */ 0x5678);
// Add a 32-bit offset to PC.
__ Addu(reg, reg, RA);
+ // TODO: Can we share this code with that of VisitMipsComputeBaseMethodAddress()?
}
}
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 08f74c0..63a0345 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -18,11 +18,12 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
#include "code_generator.h"
-#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
+#include "string_reference.h"
#include "utils/mips/assembler_mips.h"
+#include "utils/type_reference.h"
namespace art {
namespace mips {
@@ -226,6 +227,15 @@
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ // Generate a GC root reference load:
+ //
+ // root <- *(obj + offset)
+ //
+ // while honoring read barriers (if any).
+ void GenerateGcRootFieldLoad(HInstruction* instruction,
+ Location root,
+ Register obj,
+ uint32_t offset);
void GenerateIntCompare(IfCondition cond, LocationSummary* locations);
void GenerateIntCompareAndBranch(IfCondition cond,
LocationSummary* locations,
@@ -298,6 +308,9 @@
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ void ClobberRA() {
+ clobbered_ra_ = true;
+ }
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
@@ -383,7 +396,7 @@
PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;
const DexFile& target_dex_file;
- // Either the dex cache array element offset or the string index.
+ // Either the dex cache array element offset or the string/type index.
uint32_t offset_or_index;
// Label for the instruction loading the most significant half of the offset that's added to PC
// to form the base address (the least significant half is loaded with the instruction that
@@ -393,14 +406,27 @@
MipsLabel pc_rel_label;
};
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, uint32_t type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
+ Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file, uint32_t string_index);
+ Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, uint32_t type_index);
+ Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
+ using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
+ using BootStringToLiteralMap = ArenaSafeMap<StringReference,
+ Literal*,
+ StringReferenceValueComparator>;
+ using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
+ Literal*,
+ TypeReferenceValueComparator>;
+ Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
@@ -417,11 +443,27 @@
MipsAssembler assembler_;
const MipsInstructionSetFeatures& isa_features_;
+ // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
+ Uint32ToLiteralMap uint32_literals_;
// Method patch info, map MethodReference to a literal for method address and method code.
MethodToLiteralMap method_patches_;
MethodToLiteralMap call_patches_;
// PC-relative patch info for each HMipsDexCacheArraysBase.
ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
+ // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
+ BootStringToLiteralMap boot_image_string_patches_;
+ // PC-relative String patch info.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
+ // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
+ BootTypeToLiteralMap boot_image_type_patches_;
+ // PC-relative type patch info.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // Deduplication map for patchable boot image addresses.
+ Uint32ToLiteralMap boot_image_address_patches_;
+
+ // PC-relative loads on R2 clobber RA, which may need to be preserved explicitly in leaf methods.
+ // This is a flag set by pc_relative_fixups_mips and dex_cache_array_fixups_mips optimizations.
+ bool clobbered_ra_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS);
};
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 29b8c20..fe1fddc 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -104,7 +104,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
@@ -300,13 +300,11 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ Bc(GetReturnLabel());
} else {
@@ -431,7 +429,7 @@
#undef __
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<Mips64Assembler*>(GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
// Ensure that we fix up branches.
@@ -888,7 +886,7 @@
__ LoadFromOffset(kLoadDoubleword,
card,
TR,
- Thread::CardTableOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::CardTableOffset<kMips64PointerSize>().Int32Value());
__ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
__ Daddu(temp, card, temp);
__ Sb(card, temp, 0);
@@ -964,7 +962,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kMips64DoublewordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1004,7 +1002,7 @@
__ LoadFromOffset(kLoadUnsignedHalfword,
TMP,
TR,
- Thread::ThreadFlagsOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::ThreadFlagsOffset<kMips64PointerSize>().Int32Value());
if (successor == nullptr) {
__ Bnezc(TMP, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
@@ -2934,7 +2932,7 @@
GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
Location receiver = invoke->GetLocations()->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
// Set the hidden argument.
__ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
@@ -3115,7 +3113,7 @@
T9,
callee_method.AsRegister<GpuRegister>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kMips64DoublewordSize).Int32Value());
+ kMips64PointerSize).Int32Value());
// T9()
__ Jalr(T9);
__ Nop();
@@ -3153,7 +3151,7 @@
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
// temp = object->GetClass();
__ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
@@ -3231,7 +3229,7 @@
}
static int32_t GetExceptionTlsOffset() {
- return Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value();
+ return Thread::ExceptionOffset<kMips64PointerSize>().Int32Value();
}
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
@@ -3456,7 +3454,7 @@
// String is allocated through StringFactory. Call NewEmptyString entry point.
GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
MemberOffset code_offset =
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
__ LoadFromOffset(kLoadDoubleword, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
__ LoadFromOffset(kLoadDoubleword, T9, temp, code_offset.Int32Value());
__ Jalr(T9);
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 4b462cc..197f86b 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -18,7 +18,6 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
#include "code_generator.h"
-#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 82baaa0..ade2117 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -49,7 +49,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value()
class NullCheckSlowPathX86 : public SlowPathCode {
public:
@@ -192,13 +192,11 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
@@ -466,8 +464,7 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+          (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -475,7 +472,6 @@
// No need to save live registers; it's taken care of by the
// entrypoint. Also, there is no need to update the stack mask,
// as this runtime call will not trigger a garbage collection.
- InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
DCHECK_NE(reg, ESP);
DCHECK(0 <= reg && reg < kNumberOfCpuRegisters) << reg;
@@ -494,12 +490,9 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86WordSize>(reg);
- // TODO: Do not emit a stack map for this runtime call.
- x86_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86PointerSize>(reg);
+ // This runtime call does not require a stack map.
+ x86_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ jmp(GetExitLabel());
}
@@ -549,8 +542,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+          (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
@@ -809,7 +801,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -824,6 +816,13 @@
RecordPcInfo(instruction, dex_pc, slow_path);
}
+void CodeGeneratorX86::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ __ fs()->call(Address::Absolute(entry_point_offset));
+}
+
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
const X86InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
@@ -2093,7 +2092,7 @@
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -4033,7 +4032,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize);
__ fs()->movl(temp, Address::Absolute(QUICK_ENTRY_POINT(pNewEmptyString)));
__ call(Address(temp, code_offset.Int32Value()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -4450,7 +4449,7 @@
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<Register>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86WordSize).Int32Value()));
+ kX86PointerSize).Int32Value()));
break;
}
@@ -4484,7 +4483,7 @@
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(
- temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86PointerSize).Int32Value()));
}
void CodeGeneratorX86::RecordSimplePatch() {
@@ -4588,7 +4587,7 @@
__ testl(value, value);
__ j(kEqual, &is_null);
}
- __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86WordSize>().Int32Value()));
+ __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86PointerSize>().Int32Value()));
__ movl(temp, object);
__ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift));
__ movb(Address(temp, card, TIMES_1, 0),
@@ -5680,7 +5679,7 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ fs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()),
+ __ fs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>().Int32Value()),
Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -6276,7 +6275,7 @@
}
static Address GetExceptionTlsAddress() {
- return Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+ return Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>().Int32Value());
}
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
@@ -6993,7 +6992,7 @@
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(instruction, root);
codegen_->AddSlowPath(slow_path);
- __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86WordSize>().Int32Value()),
+ __ fs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86PointerSize>().Int32Value()),
Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2a9fb80..f306b33 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -18,8 +18,8 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
#include "arch/x86/instruction_set_features_x86.h"
+#include "base/enums.h"
#include "code_generator.h"
-#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
@@ -29,7 +29,7 @@
namespace x86 {
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kX86WordSize = kX86PointerSize;
+static constexpr size_t kX86WordSize = static_cast<size_t>(kX86PointerSize);
class CodeGeneratorX86;
@@ -336,6 +336,12 @@
uint32_t dex_pc,
SlowPathCode* slow_path);
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
size_t GetWordSize() const OVERRIDE {
return kX86WordSize;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b6ba30e..eadb431 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -53,7 +53,7 @@
// NOLINT on __ macro to suppress wrong warning/fix from clang-tidy.
#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x).Int32Value()
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, x).Int32Value()
class NullCheckSlowPathX86_64 : public SlowPathCode {
public:
@@ -149,13 +149,11 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- SaveLiveRegisters(codegen, instruction_->GetLocations());
x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
instruction_,
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
- RestoreLiveRegisters(codegen, instruction_->GetLocations());
if (successor_ == nullptr) {
__ jmp(GetReturnLabel());
} else {
@@ -487,8 +485,7 @@
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+          (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
@@ -496,7 +493,6 @@
// No need to save live registers; it's taken care of by the
// entrypoint. Also, there is no need to update the stack mask,
// as this runtime call will not trigger a garbage collection.
- InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
DCHECK_NE(reg, RSP);
DCHECK(0 <= reg && reg < kNumberOfCpuRegisters) << reg;
@@ -515,12 +511,9 @@
// rX <- ReadBarrierMarkRegX(rX)
//
int32_t entry_point_offset =
- CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64WordSize>(reg);
- // TODO: Do not emit a stack map for this runtime call.
- x86_64_codegen->InvokeRuntime(entry_point_offset,
- instruction_,
- instruction_->GetDexPc(),
- this);
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(reg);
+ // This runtime call does not require a stack map.
+ x86_64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
__ jmp(GetExitLabel());
}
@@ -570,8 +563,7 @@
instruction_->IsArrayGet() ||
instruction_->IsInstanceOf() ||
instruction_->IsCheckCast() ||
- ((instruction_->IsInvokeStaticOrDirect() || instruction_->IsInvokeVirtual()) &&
- instruction_->GetLocations()->Intrinsified()))
+          (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
@@ -889,7 +881,7 @@
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<CpuRegister>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ kX86_64PointerSize).SizeValue()));
break;
}
@@ -924,7 +916,7 @@
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ kX86_64PointerSize).SizeValue()));
}
void CodeGeneratorX86_64::RecordSimplePatch() {
@@ -1037,7 +1029,7 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kX86_64WordSize>(entrypoint).Int32Value(),
+ InvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value(),
instruction,
dex_pc,
slow_path);
@@ -1052,6 +1044,13 @@
RecordPcInfo(instruction, dex_pc, slow_path);
}
+void CodeGeneratorX86_64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
+}
+
static constexpr int kNumberOfCpuRegisterPairs = 0;
// Use a fake return address register to mimic Quick.
static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1);
@@ -2322,8 +2321,8 @@
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize).SizeValue()));
+ __ call(Address(
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64PointerSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -3961,7 +3960,7 @@
if (instruction->IsStringAlloc()) {
// String is allocated through StringFactory. Call NewEmptyString entry point.
CpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64WordSize);
+ MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86_64PointerSize);
__ gs()->movq(temp, Address::Absolute(QUICK_ENTRY_POINT(pNewEmptyString), /* no_rip */ true));
__ call(Address(temp, code_offset.SizeValue()));
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -5117,7 +5116,7 @@
__ testl(value, value);
__ j(kEqual, &is_null);
}
- __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true));
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
@@ -5169,7 +5168,7 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true),
Immediate(0));
if (successor == nullptr) {
@@ -5686,7 +5685,7 @@
}
static Address GetExceptionTlsAddress() {
- return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(),
+ return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true);
}
@@ -6444,7 +6443,7 @@
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(instruction, root);
codegen_->AddSlowPath(slow_path);
- __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64WordSize>().Int32Value(),
+ __ gs()->cmpl(Address::Absolute(Thread::IsGcMarkingOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true),
Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d7cfd37..4e0e34c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -19,7 +19,6 @@
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator.h"
-#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
@@ -29,7 +28,7 @@
namespace x86_64 {
// Use a local definition to prevent copying mistakes.
-static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
+static constexpr size_t kX86_64WordSize = static_cast<size_t>(kX86_64PointerSize);
// Some x86_64 instructions require a register to be available as temp.
static constexpr Register TMP = R11;
@@ -318,6 +317,12 @@
uint32_t dex_pc,
SlowPathCode* slow_path);
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
size_t GetWordSize() const OVERRIDE {
return kX86_64WordSize;
}
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 0f42d9c..19bab08 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "code_generator_mips.h"
#include "dex_cache_array_fixups_mips.h"
#include "base/arena_containers.h"
@@ -27,8 +28,9 @@
*/
class DexCacheArrayFixupsVisitor : public HGraphVisitor {
public:
- explicit DexCacheArrayFixupsVisitor(HGraph* graph)
+ explicit DexCacheArrayFixupsVisitor(HGraph* graph, CodeGenerator* codegen)
: HGraphVisitor(graph),
+ codegen_(down_cast<CodeGeneratorMIPS*>(codegen)),
dex_cache_array_bases_(std::less<const DexFile*>(),
// Attribute memory use to code generator.
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {}
@@ -41,9 +43,45 @@
HMipsDexCacheArraysBase* base = entry.second;
base->MoveBeforeFirstUserAndOutOfLoops();
}
+ // Computing the dex cache base for PC-relative accesses will clobber RA with
+    // the NAL instruction on R2. Take note of this before generating the method
+ // entry.
+ if (!dex_cache_array_bases_.empty() && !codegen_->GetInstructionSetFeatures().IsR6()) {
+ codegen_->ClobberRA();
+ }
}
private:
+ void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ // If this is a load with PC-relative access to the dex cache types array,
+ // we need to add the dex cache arrays base as the special input.
+ if (load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCachePcRelative) {
+ // Initialize base for target dex file if needed.
+ const DexFile& dex_file = load_class->GetDexFile();
+ HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
+ // Update the element offset in base.
+ DexCacheArraysLayout layout(kMipsPointerSize, &dex_file);
+ base->UpdateElementOffset(layout.TypeOffset(load_class->GetTypeIndex()));
+ // Add the special argument base to the load.
+ load_class->AddSpecialInput(base);
+ }
+ }
+
+ void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ // If this is a load with PC-relative access to the dex cache strings array,
+ // we need to add the dex cache arrays base as the special input.
+ if (load_string->GetLoadKind() == HLoadString::LoadKind::kDexCachePcRelative) {
+ // Initialize base for target dex file if needed.
+ const DexFile& dex_file = load_string->GetDexFile();
+ HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
+ // Update the element offset in base.
+ DexCacheArraysLayout layout(kMipsPointerSize, &dex_file);
+ base->UpdateElementOffset(layout.StringOffset(load_string->GetStringIndex()));
+ // Add the special argument base to the load.
+ load_string->AddSpecialInput(base);
+ }
+ }
+
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
@@ -74,6 +112,8 @@
});
}
+ CodeGeneratorMIPS* codegen_;
+
using DexCacheArraysBaseMap =
ArenaSafeMap<const DexFile*, HMipsDexCacheArraysBase*, std::less<const DexFile*>>;
DexCacheArraysBaseMap dex_cache_array_bases_;
@@ -85,7 +125,7 @@
// that can be live-in at the irreducible loop header.
return;
}
- DexCacheArrayFixupsVisitor visitor(graph_);
+ DexCacheArrayFixupsVisitor visitor(graph_, codegen_);
visitor.VisitInsertionOrder();
visitor.MoveBasesIfNeeded();
}
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.h b/compiler/optimizing/dex_cache_array_fixups_mips.h
index c8def28..21056e1 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.h
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.h
@@ -21,14 +21,21 @@
#include "optimization.h"
namespace art {
+
+class CodeGenerator;
+
namespace mips {
class DexCacheArrayFixups : public HOptimization {
public:
- DexCacheArrayFixups(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "dex_cache_array_fixups_mips", stats) {}
+ DexCacheArrayFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, "dex_cache_array_fixups_mips", stats),
+ codegen_(codegen) {}
void Run() OVERRIDE;
+
+ private:
+ CodeGenerator* codegen_;
};
} // namespace mips
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 0b4c569..89d80cc 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -298,6 +298,12 @@
stream << constant->AsIntConstant()->GetValue();
} else if (constant->IsLongConstant()) {
stream << constant->AsLongConstant()->GetValue();
+ } else if (constant->IsFloatConstant()) {
+ stream << constant->AsFloatConstant()->GetValue();
+ } else if (constant->IsDoubleConstant()) {
+ stream << constant->AsDoubleConstant()->GetValue();
+ } else if (constant->IsNullConstant()) {
+ stream << "null";
}
} else if (location.IsInvalid()) {
stream << "invalid";
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a592162..451aa38 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -17,6 +17,7 @@
#include "inliner.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "builder.h"
#include "class_linker.h"
#include "constant_folding.h"
@@ -151,7 +152,7 @@
}
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
if (invoke->IsInvokeInterface()) {
resolved_method = info.GetTypeHandle()->FindVirtualMethodForInterface(
resolved_method, pointer_size);
@@ -243,7 +244,7 @@
~ScopedProfilingInfoInlineUse() {
if (profiling_info_ != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
DCHECK_EQ(profiling_info_, method_->GetProfilingInfo(pointer_size));
Runtime::Current()->GetJit()->GetCodeCache()->DoneCompilerUse(method_, self_);
}
@@ -390,7 +391,7 @@
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
if (invoke_instruction->IsInvokeInterface()) {
resolved_method = ic.GetMonomorphicType()->FindVirtualMethodForInterface(
resolved_method, pointer_size);
@@ -482,7 +483,7 @@
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
bool all_targets_inlined = true;
@@ -644,7 +645,7 @@
return false;
}
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
DCHECK(resolved_method != nullptr);
ArtMethod* actual_method = nullptr;
@@ -1004,7 +1005,7 @@
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
// Check whether the field is final. If it is, we need to add a barrier.
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
if (resolved_field->IsFinal()) {
@@ -1030,7 +1031,7 @@
uint32_t field_index,
HInstruction* obj)
SHARED_REQUIRES(Locks::mutator_lock_) {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
@@ -1058,7 +1059,7 @@
HInstruction* obj,
HInstruction* value)
SHARED_REQUIRES(Locks::mutator_lock_) {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
@@ -1087,8 +1088,11 @@
uint32_t method_index = resolved_method->GetDexMethodIndex();
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(handles_->NewHandle(
+ resolved_method->GetDeclaringClass()->GetClassLoader()));
+
DexCompilationUnit dex_compilation_unit(
- caller_compilation_unit_.GetClassLoader(),
+ class_loader.ToJObject(),
class_linker,
callee_dex_file,
code_item,
@@ -1397,7 +1401,7 @@
}
}
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
@@ -1454,7 +1458,7 @@
// TODO: we could be more precise by merging the phi inputs but that requires
// some functionality from the reference type propagation.
DCHECK(return_replacement->IsPhi());
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* cls = resolved_method->GetReturnType(false /* resolve */, pointer_size);
return_replacement->SetReferenceTypeInfo(GetClassRTI(cls));
}
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index afac5f9..e5dab56 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "bytecode_utils.h"
#include "class_linker.h"
+#include "dex_instruction-inl.h"
#include "driver/compiler_options.h"
#include "scoped_thread_state_change.h"
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 9cfc065..517cf76 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -30,6 +30,8 @@
namespace art {
+class Instruction;
+
class HInstructionBuilder : public ValueObject {
public:
HInstructionBuilder(HGraph* graph,
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 789f389..27d9d48 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1201,7 +1201,7 @@
}
__ LoadFromOffset(kLoadWord, LR, TR,
- QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pIndexOf).Int32Value());
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ blx(LR);
@@ -1270,8 +1270,10 @@
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), EQ);
- __ LoadFromOffset(
- kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ LR,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromBytes).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1298,8 +1300,10 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ LoadFromOffset(
- kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ LR,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromChars).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1325,7 +1329,7 @@
__ b(slow_path->GetEntryLabel(), EQ);
__ LoadFromOffset(kLoadWord,
- LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value());
+ LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromString).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ blx(LR);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1718,7 +1722,7 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
@@ -1744,7 +1748,7 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(2)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(3)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c9309fd..e7c40e6 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -26,7 +26,6 @@
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
-#include "utils/arm64/constants_arm64.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
@@ -62,14 +61,14 @@
} // namespace
MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() {
- return codegen_->GetAssembler()->vixl_masm_;
+ return codegen_->GetVIXLAssembler();
}
ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
-#define __ codegen->GetAssembler()->vixl_masm_->
+#define __ codegen->GetVIXLAssembler()->
static void MoveFromReturnRegister(Location trg,
Primitive::Type type,
@@ -771,7 +770,7 @@
void IntrinsicCodeGeneratorARM64::VisitThreadCurrentThread(HInvoke* invoke) {
codegen_->Load(Primitive::kPrimNot, WRegisterFrom(invoke->GetLocations()->Out()),
- MemOperand(tr, Thread::PeerOffset<8>().Int32Value()));
+ MemOperand(tr, Thread::PeerOffset<kArm64PointerSize>().Int32Value()));
}
static void GenUnsafeGet(HInvoke* invoke,
@@ -782,7 +781,7 @@
DCHECK((type == Primitive::kPrimInt) ||
(type == Primitive::kPrimLong) ||
(type == Primitive::kPrimNot));
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Location base_loc = locations->InAt(1);
Register base = WRegisterFrom(base_loc); // Object pointer.
Location offset_loc = locations->InAt(2);
@@ -916,7 +915,7 @@
bool is_volatile,
bool is_ordered,
CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
@@ -1035,7 +1034,7 @@
}
static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register out = WRegisterFrom(locations->Out()); // Boolean result.
@@ -1398,7 +1397,7 @@
__ Mov(tmp_reg, 0);
}
- __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pIndexOf).Int32Value()));
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pIndexOf).Int32Value()));
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Blr(lr);
@@ -1468,7 +1467,8 @@
__ B(eq, slow_path->GetEntryLabel());
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromBytes).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1496,7 +1496,8 @@
//
// all include a null check on `data` before calling that method.
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromChars).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1522,7 +1523,8 @@
__ B(eq, slow_path->GetEntryLabel());
__ Ldr(lr,
- MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value()));
+ MemOperand(tr,
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pAllocStringFromString).Int32Value()));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Blr(lr);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1563,7 +1565,8 @@
MacroAssembler* masm,
CodeGeneratorARM64* codegen,
QuickEntrypointEnum entry) {
- __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArm64WordSize>(entry).Int32Value()));
+ __ Ldr(lr, MemOperand(tr,
+ GetThreadOffset<kArm64PointerSize>(entry).Int32Value()));
__ Blr(lr);
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
}
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index fbcfdc4..55e1ab2 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1899,8 +1899,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize,
- pStringCompareTo).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pStringCompareTo).Int32Value());
__ Jalr(T9);
__ Nop();
__ Bind(slow_path->GetExitLabel());
@@ -2059,7 +2058,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pIndexOf).Int32Value());
__ Jalr(T9);
__ Nop();
@@ -2145,7 +2144,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromBytes).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromBytes).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2178,7 +2177,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromChars).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromChars).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -2207,7 +2206,7 @@
__ LoadFromOffset(kLoadWord,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromString).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pAllocStringFromString).Int32Value());
__ Jalr(T9);
__ Nop();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index ba10d2d..1e18540 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1543,7 +1543,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pStringCompareTo).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pStringCompareTo).Int32Value());
__ Jalr(T9);
__ Nop();
__ Bind(slow_path->GetExitLabel());
@@ -1694,7 +1694,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pIndexOf).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pIndexOf).Int32Value());
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
__ Jalr(T9);
__ Nop();
@@ -1771,7 +1771,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromBytes).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
__ Jalr(T9);
@@ -1805,7 +1805,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromChars).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
__ Jalr(T9);
@@ -1836,7 +1836,7 @@
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize,
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize,
pAllocStringFromString).Int32Value());
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
__ Jalr(T9);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 9e5bf58..ea6e458 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -752,20 +752,20 @@
GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);
}
-// Note that 32 bit x86 doesn't have the capability to inline MathRoundDouble,
-// as it needs 64 bit instructions.
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
- // See intrinsics.h.
- if (!kRoundIsPlusPointFive) {
- return;
- }
-
// Do we have instruction support?
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
+ HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
+ DCHECK(static_or_direct != nullptr);
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
+ if (static_or_direct->HasSpecialInput() &&
+ invoke->InputAt(
+ static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
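+      // The base method address input gives access to the constant area that
+      // holds the 0.5f and 1.0f literals used below.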
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -774,7 +774,7 @@
// We have to fall back to a call to the intrinsic.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -784,47 +784,42 @@
void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
- if (locations->WillCall()) {
+ if (locations->WillCall()) { // TODO: can we reach this?
InvokeOutOfLineIntrinsic(codegen_, invoke);
return;
}
- // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
Register out = locations->Out().AsRegister<Register>();
- XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ NearLabel skip_incr, done;
X86Assembler* assembler = GetAssembler();
- // Generate 0.5 into inPlusPointFive.
- __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f)));
- __ movd(inPlusPointFive, out);
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5f)
+ // result = result + 1.0f;
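+  // For example, in = -0.5f: floor gives -1.0f, in - result = 0.5f >= 0.5f,
+  // so the result is bumped to 0.0f, matching Java's round-half-up semantics.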
+ __ movss(t2, in);
+ __ roundss(t1, in, Immediate(1));
+ __ subss(t2, t1);
+ __ comiss(t2, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(0.5f), constant_area));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, codegen_->LiteralInt32Address(bit_cast<int32_t, float>(1.0f), constant_area));
+ __ Bind(&skip_incr);
- // Add in the input.
- __ addss(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
-
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
__ movl(out, Immediate(kPrimIntMax));
- // maxInt = int-to-float(out)
- __ cvtsi2ss(maxInt, out);
-
- // if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, maxInt);
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = float-to-int-truncate(input)
- __ cvttss2si(out, inPlusPointFive);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ __ cvtsi2ss(t2, out);
+ __ comiss(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttss2si(out, t1);
__ Bind(&done);
}
@@ -857,7 +852,7 @@
}
// Now do the actual call.
- __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(entry)));
+ __ fs()->call(Address::Absolute(GetThreadOffset<kX86PointerSize>(entry)));
// Extract the return value from the FP stack.
__ fstpl(Address(ESP, 0));
@@ -1237,7 +1232,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pStringCompareTo)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pStringCompareTo)));
__ Bind(slow_path->GetExitLabel());
}
@@ -1510,7 +1505,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromBytes)));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
@@ -1536,7 +1531,7 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromChars)));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1560,7 +1555,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
+ __ fs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pAllocStringFromString)));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
@@ -1801,7 +1797,7 @@
void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) {
Register out = invoke->GetLocations()->Out().AsRegister<Register>();
- GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86WordSize>()));
+ GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86PointerSize>()));
}
static void GenUnsafeGet(HInvoke* invoke,
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ee954f1..ab8b05c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -583,6 +583,7 @@
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
return;
}
@@ -597,10 +598,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
- // See intrinsics.h.
- if (kRoundIsPlusPointFive) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
- }
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -610,47 +608,41 @@
return;
}
- // Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ NearLabel skip_incr, done;
X86_64Assembler* assembler = GetAssembler();
- // Load 0.5 into inPlusPointFive.
- __ movss(inPlusPointFive, codegen_->LiteralFloatAddress(0.5f));
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5f)
+ // result = result + 1.0f;
+ __ movss(t2, in);
+ __ roundss(t1, in, Immediate(1));
+ __ subss(t2, t1);
+ __ comiss(t2, codegen_->LiteralFloatAddress(0.5f));
+ __ j(kBelow, &skip_incr);
+ __ addss(t1, codegen_->LiteralFloatAddress(1.0f));
+ __ Bind(&skip_incr);
- // Add in the input.
- __ addss(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
-
- // Load maxInt into out.
- codegen_->Load64BitValue(out, kPrimIntMax);
-
- // if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = float-to-int-truncate(input)
- __ cvttss2si(out, inPlusPointFive);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
+ codegen_->Load32BitValue(out, kPrimIntMax);
+ __ cvtsi2ss(t2, out);
+ __ comiss(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttss2si(out, t1);
__ Bind(&done);
}
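This rewrite is also why the kRoundIsPlusPointFive guard above can be dropped: the old `floor(in + 0.5f)` lowering is wrong for inputs just below 0.5f, because the addition itself rounds under round-to-nearest-even before floor ever runs. A small self-contained demonstration, assuming IEEE single-precision evaluation (SSE, the default on x86-64):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  const float in = 0.49999997f;  // largest float strictly below 0.5f
  // Old lowering: in + 0.5f lands exactly halfway between two floats and
  // rounds up to 1.0f, so floor() yields 1 where Math.round must give 0.
  float old_way = std::floor(in + 0.5f);        // 1.0f (wrong)
  // New lowering: floor first; in - floor(in) is exact (Sterbenz), so the
  // comparison against 0.5f sees the true remainder.
  float t1 = std::floor(in);                    // 0.0f
  if (in - t1 >= 0.5f) t1 += 1.0f;              // not taken
  std::printf("old=%g new=%g\n", old_way, t1);  // prints: old=1 new=0
  return 0;
}
```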
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
- // See intrinsics.h.
- if (kRoundIsPlusPointFive) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
- }
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -660,39 +652,36 @@
return;
}
- // Implement RoundDouble as t1 = floor(input + 0.5); convert to long.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- NearLabel done, nan;
+ XmmRegister t1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+ XmmRegister t2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ NearLabel skip_incr, done;
X86_64Assembler* assembler = GetAssembler();
- // Load 0.5 into inPlusPointFive.
- __ movsd(inPlusPointFive, codegen_->LiteralDoubleAddress(0.5));
+ // Since no direct x86 rounding instruction matches the required semantics,
+ // this intrinsic is implemented as follows:
+ // result = floor(in);
+ // if (in - result >= 0.5)
+  //      result = result + 1.0;
+ __ movsd(t2, in);
+ __ roundsd(t1, in, Immediate(1));
+ __ subsd(t2, t1);
+ __ comisd(t2, codegen_->LiteralDoubleAddress(0.5));
+ __ j(kBelow, &skip_incr);
+  __ addsd(t1, codegen_->LiteralDoubleAddress(1.0));
+ __ Bind(&skip_incr);
- // Add in the input.
- __ addsd(inPlusPointFive, in);
-
- // And truncate to an integer.
- __ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1));
-
- // Load maxLong into out.
+ // Final conversion to an integer. Unfortunately this also does not have a
+ // direct x86 instruction, since NaN should map to 0 and large positive
+ // values need to be clipped to the extreme value.
codegen_->Load64BitValue(out, kPrimLongMax);
-
- // if inPlusPointFive >= maxLong goto done
- __ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
- __ j(kAboveEqual, &done);
-
- // if input == NaN goto nan
- __ j(kUnordered, &nan);
-
- // output = double-to-long-truncate(input)
- __ cvttsd2si(out, inPlusPointFive, /* is64bit */ true);
- __ jmp(&done);
- __ Bind(&nan);
-
- // output = 0
- __ xorl(out, out);
+ __ cvtsi2sd(t2, out, /* is64bit */ true);
+ __ comisd(t1, t2);
+ __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
+ __ movl(out, Immediate(0)); // does not change flags, implicit zero extension to 64-bit
+ __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
+ __ cvttsd2si(out, t1, /* is64bit */ true);
__ Bind(&done);
}
@@ -720,7 +709,7 @@
DCHECK(invoke->IsInvokeStaticOrDirect());
X86_64Assembler* assembler = codegen->GetAssembler();
- __ gs()->call(Address::Absolute(GetThreadOffset<kX86_64WordSize>(entry), true));
+ __ gs()->call(Address::Absolute(GetThreadOffset<kX86_64PointerSize>(entry), true));
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1324,7 +1313,7 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pStringCompareTo),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pStringCompareTo),
/* no_rip */ true));
__ Bind(slow_path->GetExitLabel());
}
@@ -1597,7 +1586,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromBytes),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1624,7 +1614,8 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromChars),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1649,7 +1640,8 @@
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString),
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize,
+ pAllocStringFromString),
/* no_rip */ true));
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1875,7 +1867,7 @@
void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
- GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64WordSize>(),
+ GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
/* no_rip */ true));
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d557f42..2808e1b 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2632,4 +2632,23 @@
}
}
+std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind) {
+ switch (kind) {
+ case MemBarrierKind::kAnyStore:
+ return os << "AnyStore";
+ case MemBarrierKind::kLoadAny:
+ return os << "LoadAny";
+ case MemBarrierKind::kStoreStore:
+ return os << "StoreStore";
+ case MemBarrierKind::kAnyAny:
+ return os << "AnyAny";
+ case MemBarrierKind::kNTStoreStore:
+ return os << "NTStoreStore";
+ default:
+ LOG(FATAL) << "Unknown MemBarrierKind: " << static_cast<int>(kind);
+ UNREACHABLE();
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 23ac457..dfa8276 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -25,7 +25,6 @@
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/stl_util.h"
-#include "dex/compiler_enums.h"
#include "dex_file.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
@@ -5626,9 +5625,12 @@
// Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
- // The special input is used for PC-relative loads on some architectures.
+ // The special input is used for PC-relative loads on some architectures,
+ // including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kDexCachePcRelative) << GetLoadKind();
+ GetLoadKind() == LoadKind::kDexCachePcRelative ||
+ GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
+ GetLoadKind() == LoadKind::kBootImageAddress) << GetLoadKind();
DCHECK(special_input_.GetInstruction() == nullptr);
special_input_ = HUserRecord<HInstruction*>(special_input);
special_input->AddUseAt(this, 0);
@@ -5836,9 +5838,12 @@
// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
- // The special input is used for PC-relative loads on some architectures.
+ // The special input is used for PC-relative loads on some architectures,
+ // including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kDexCachePcRelative) << GetLoadKind();
+ GetLoadKind() == LoadKind::kDexCachePcRelative ||
+ GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
+ GetLoadKind() == LoadKind::kBootImageAddress) << GetLoadKind();
// HLoadString::GetInputRecords() returns an empty array at this point,
// so use the GetInputRecords() from the base class to set the input record.
DCHECK(special_input_.GetInstruction() == nullptr);
@@ -6305,6 +6310,32 @@
DISALLOW_COPY_AND_ASSIGN(HCheckCast);
};
+/**
+ * @brief Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
+ * @details We define the combined barrier types that are actually required
+ * by the Java Memory Model, rather than using exactly the terminology from
+ * the JSR-133 cookbook. These should, in many cases, be replaced by acquire/release
+ * primitives. Note that the JSR-133 cookbook generally does not deal with
+ * store atomicity issues, and the recipes there are not always entirely sufficient.
+ * The current recipe is as follows:
+ *    -# Use AnyStore ~= (LoadStore | StoreStore) ~= release barrier before a volatile store.
+ *    -# Use AnyAny barrier after a volatile store. (A StoreLoad barrier would suffice, but
+ *       it is just as expensive as AnyAny.)
+ * -# Use LoadAny barrier ~= (LoadLoad | LoadStore) ~= acquire barrier after each volatile load.
+ * -# Use StoreStore barrier after all stores but before return from any constructor whose
+ * class has final fields.
+ * -# Use NTStoreStore to order non-temporal stores with respect to all later
+ * store-to-memory instructions. Only generated together with non-temporal stores.
+ */
+enum MemBarrierKind {
+ kAnyStore,
+ kLoadAny,
+ kStoreStore,
+ kAnyAny,
+ kNTStoreStore,
+ kLastBarrierKind = kNTStoreStore
+};
+std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
+
class HMemoryBarrier FINAL : public HTemplateInstruction<0> {
public:
explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc)
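As a loose C++11 analogue of recipe items 1 and 2 above — illustrative only; the codegen actually lowers MemBarrierKind values to mfence/dmb and friends, and `field`/`VolatileStore` are made-up names:

```cpp
#include <atomic>

std::atomic<int> field{0};

void VolatileStore(int value) {
  // kAnyStore ~= release: earlier loads/stores may not sink below the store.
  std::atomic_thread_fence(std::memory_order_release);
  field.store(value, std::memory_order_relaxed);
  // kAnyAny after the store: also orders it before later loads (the
  // StoreLoad component), which a release fence alone does not provide.
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
```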
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0bca186..d5b0d77 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -305,6 +305,18 @@
OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
+ protected:
+ virtual void RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) const;
+
+ virtual void RunOptimizations(HOptimization* optimizations[],
+ size_t length,
+ PassObserver* pass_observer) const;
+
private:
// Create a 'CompiledMethod' for an optimized graph.
CompiledMethod* Emit(ArenaAllocator* arena,
@@ -333,6 +345,18 @@
ArtMethod* method,
bool osr) const;
+ void MaybeRunInliner(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) const;
+
+ void RunArchOptimizations(InstructionSet instruction_set,
+ HGraph* graph,
+ CodeGenerator* codegen,
+ PassObserver* pass_observer) const;
+
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
std::unique_ptr<std::ostream> visualizer_output_;
@@ -396,22 +420,22 @@
|| instruction_set == kX86_64;
}
-static void RunOptimizations(HOptimization* optimizations[],
- size_t length,
- PassObserver* pass_observer) {
+void OptimizingCompiler::RunOptimizations(HOptimization* optimizations[],
+ size_t length,
+ PassObserver* pass_observer) const {
for (size_t i = 0; i < length; ++i) {
PassScope scope(optimizations[i]->GetPassName(), pass_observer);
optimizations[i]->Run();
}
}
-static void MaybeRunInliner(HGraph* graph,
- CodeGenerator* codegen,
- CompilerDriver* driver,
- OptimizingCompilerStats* stats,
- const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer,
- StackHandleScopeCollection* handles) {
+void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) const {
+ OptimizingCompilerStats* stats = compilation_stats_.get();
const CompilerOptions& compiler_options = driver->GetCompilerOptions();
bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
&& (compiler_options.GetInlineMaxCodeUnits() > 0);
@@ -435,11 +459,11 @@
RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}
-static void RunArchOptimizations(InstructionSet instruction_set,
- HGraph* graph,
- CodeGenerator* codegen,
- OptimizingCompilerStats* stats,
- PassObserver* pass_observer) {
+void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
+ HGraph* graph,
+ CodeGenerator* codegen,
+ PassObserver* pass_observer) const {
+ OptimizingCompilerStats* stats = compilation_stats_.get();
ArenaAllocator* arena = graph->GetArena();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
@@ -480,7 +504,7 @@
mips::PcRelativeFixups* pc_relative_fixups =
new (arena) mips::PcRelativeFixups(graph, codegen, stats);
mips::DexCacheArrayFixups* dex_cache_array_fixups =
- new (arena) mips::DexCacheArrayFixups(graph, stats);
+ new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
HOptimization* mips_optimizations[] = {
pc_relative_fixups,
dex_cache_array_fixups
@@ -539,13 +563,13 @@
}
}
-static void RunOptimizations(HGraph* graph,
- CodeGenerator* codegen,
- CompilerDriver* driver,
- OptimizingCompilerStats* stats,
- const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer,
- StackHandleScopeCollection* handles) {
+void OptimizingCompiler::RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) const {
+ OptimizingCompilerStats* stats = compilation_stats_.get();
ArenaAllocator* arena = graph->GetArena();
HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
@@ -578,7 +602,7 @@
};
RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
- MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
+ MaybeRunInliner(graph, codegen, driver, dex_compilation_unit, pass_observer, handles);
HOptimization* optimizations2[] = {
// SelectGenerator depends on the InstructionSimplifier removing
@@ -601,7 +625,7 @@
};
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
- RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, stats, pass_observer);
+ RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, pass_observer);
AllocateRegisters(graph, codegen, pass_observer);
}
@@ -813,7 +837,6 @@
RunOptimizations(graph,
codegen.get(),
compiler_driver,
- compilation_stats_.get(),
dex_compilation_unit,
&pass_observer,
&handles);
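The point of promoting these helpers from file-static functions to virtual members is that a subclass can now interpose on the pass pipeline. A hypothetical sketch (VendorCompiler is not an ART class, and constructor plumbing is omitted):

```cpp
class VendorCompiler : public OptimizingCompiler {
 protected:
  void RunOptimizations(HOptimization* optimizations[],
                        size_t length,
                        PassObserver* pass_observer) const OVERRIDE {
    // For example, trace every pass before delegating to the default loop.
    for (size_t i = 0; i < length; ++i) {
      LOG(INFO) << "about to run " << optimizations[i]->GetPassName();
    }
    OptimizingCompiler::RunOptimizations(optimizations, length, pass_observer);
  }
};
```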
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index ba405cd..c6acc45 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -37,6 +37,10 @@
// entry block) and relieve some pressure on the register allocator
// while avoiding recalculation of the base in a loop.
base_->MoveBeforeFirstUserAndOutOfLoops();
+ // Computing the base for PC-relative literals will clobber RA with
+    // the NAL instruction on R2. Record this before generating
+    // the method entry.
+ codegen_->ClobberRA();
}
}
@@ -58,6 +62,36 @@
DCHECK(base_ != nullptr);
}
+ void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
+ switch (load_kind) {
+ case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ // Add a base register for PC-relative literals on R2.
+ InitializePCRelativeBasePointer();
+ load_class->AddSpecialInput(base_);
+ break;
+ default:
+ break;
+ }
+ }
+
+ void VisitLoadString(HLoadString* load_string) OVERRIDE {
+ HLoadString::LoadKind load_kind = load_string->GetLoadKind();
+ switch (load_kind) {
+ case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ case HLoadString::LoadKind::kBootImageAddress:
+ case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
+ // Add a base register for PC-relative literals on R2.
+ InitializePCRelativeBasePointer();
+ load_string->AddSpecialInput(base_);
+ break;
+ default:
+ break;
+ }
+ }
+
void HandleInvoke(HInvoke* invoke) {
// If this is an invoke-static/-direct with PC-relative dex cache array
// addressing, we need the PC-relative address base.
@@ -77,7 +111,7 @@
// method pointer from the invoke.
if (invoke_static_or_direct->HasCurrentMethodInput()) {
DCHECK(!invoke_static_or_direct->HasPcRelativeDexCache());
- CHECK(!has_extra_input); // TODO: review this.
+ CHECK(!has_extra_input);
return;
}
@@ -116,7 +150,6 @@
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen_);
if (mips_codegen->GetInstructionSetFeatures().IsR6()) {
// Do nothing for R6 because it has PC-relative addressing.
- // TODO: review. Move this check into RunArchOptimizations()?
return;
}
if (graph_->HasIrreducibleLoops()) {
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 921f3df..ad0921d 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -227,6 +227,7 @@
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMinDoubleDouble:
case Intrinsics::kMathMinFloatFloat:
+ case Intrinsics::kMathRoundFloat:
if (!base_added) {
DCHECK(invoke_static_or_direct != nullptr);
DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 965d5ee..e96ab19 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -16,6 +16,7 @@
#include "reference_type_propagation.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -775,7 +776,7 @@
ClassLinker* cl = Runtime::Current()->GetClassLinker();
mirror::DexCache* dex_cache =
FindDexCacheWithHint(soa.Self(), instr->GetDexFile(), hint_dex_cache_);
- size_t pointer_size = cl->GetImagePointerSize();
+ PointerSize pointer_size = cl->GetImagePointerSize();
ArtMethod* method = dex_cache->GetResolvedMethod(instr->GetDexMethodIndex(), pointer_size);
mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false, pointer_size);
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index a9151ba..768ed2d 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -20,6 +20,7 @@
#include <sstream>
#include "base/bit_vector-inl.h"
+#include "base/enums.h"
#include "code_generator.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
@@ -77,8 +78,8 @@
// Always reserve for the current method and the graph's max out registers.
// TODO: compute it instead.
// ArtMethod* takes 2 vregs for 64 bits.
- reserved_out_slots_ = InstructionSetPointerSize(codegen->GetInstructionSet()) / kVRegSize +
- codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
+ size_t ptr_size = static_cast<size_t>(InstructionSetPointerSize(codegen->GetInstructionSet()));
+ reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
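A worked example of the computation above, assuming kVRegSize == 4 (vregs are 32-bit); it reproduces the "ArtMethod* takes 2 vregs for 64 bits" note. `ReservedOutSlots` and the out-vreg counts are illustrative:

```cpp
#include <cstddef>

constexpr size_t kVRegSizeAssumed = 4;  // assumption: 32-bit vreg slots

constexpr size_t ReservedOutSlots(size_t pointer_size, size_t max_out_vregs) {
  return pointer_size / kVRegSizeAssumed + max_out_vregs;
}

static_assert(ReservedOutSlots(8, 3) == 5, "arm64: 2 slots for ArtMethod*");
static_assert(ReservedOutSlots(4, 3) == 4, "arm: 1 slot for ArtMethod*");
```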
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 97f34e6..b73f738 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -17,6 +17,7 @@
#include "sharpening.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "code_generator.h"
#include "driver/dex_compilation_unit.h"
@@ -259,7 +260,7 @@
load_class->SetLoadKindWithAddress(load_kind, address);
break;
case HLoadClass::LoadKind::kDexCachePcRelative: {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
DexCacheArraysLayout layout(pointer_size, &dex_file);
size_t element_index = layout.TypeOffset(type_index);
load_class->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
@@ -358,7 +359,7 @@
load_string->SetLoadKindWithAddress(load_kind, address);
break;
case HLoadString::LoadKind::kDexCachePcRelative: {
- size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
DexCacheArraysLayout layout(pointer_size, &dex_file);
size_t element_index = layout.StringOffset(string_index);
load_string->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 1ee1c4d..304e56b 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -50,7 +50,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
Thumb2Assembler assembler(arena);
switch (abi) {
@@ -80,7 +80,7 @@
#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
Arm64Assembler assembler(arena);
switch (abi) {
@@ -119,7 +119,7 @@
#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
MipsAssembler assembler(arena);
switch (abi) {
@@ -151,7 +151,7 @@
#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
+ ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
Mips64Assembler assembler(arena);
switch (abi) {
@@ -183,7 +183,7 @@
#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
- ThreadOffset<4> offset) {
+ ThreadOffset32 offset) {
X86Assembler assembler(arena);
// All x86 trampolines call via the Thread* held in fs.
@@ -204,7 +204,7 @@
#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
- ThreadOffset<8> offset) {
+ ThreadOffset64 offset) {
x86_64::X86_64Assembler assembler(arena);
// All x86-64 trampolines call via the Thread* held in gs.
@@ -224,7 +224,7 @@
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<8> offset) {
+ ThreadOffset64 offset) {
ArenaPool pool;
ArenaAllocator arena(&pool);
switch (isa) {
@@ -250,7 +250,7 @@
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<4> offset) {
+ ThreadOffset32 offset) {
ArenaPool pool;
ArenaAllocator arena(&pool);
switch (isa) {
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
index 8f823f1..1a10e4c 100644
--- a/compiler/trampolines/trampoline_compiler.h
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -27,10 +27,10 @@
// Create code that will invoke the function held in thread local storage.
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<4> entry_point_offset);
+ ThreadOffset32 entry_point_offset);
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
EntryPointCallingConvention abi,
- ThreadOffset<8> entry_point_offset);
+ ThreadOffset64 entry_point_offset);
} // namespace art
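For orientation, the renamed offset types are presumably thin aliases over the existing template, keyed on a PointerSize enum class instead of a bare integer template argument — a sketch of the assumed definitions (see base/enums.h and offsets.h):

```cpp
#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

template <PointerSize kPointerSize>
class ThreadOffset;  // as before: a typed byte offset into the Thread object

using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
using ThreadOffset64 = ThreadOffset<PointerSize::k64>;
```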
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index a7f4547..1796b39 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -384,7 +384,7 @@
return dwarf::Reg::ArmFp(static_cast<int>(reg));
}
-constexpr size_t kFramePointerSize = kArmPointerSize;
+constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
void ArmAssembler::BuildFrame(size_t frame_size,
ManagedRegister method_reg,
@@ -568,8 +568,9 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister mscratch) {
+void ArmAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadImmediate(scratch.AsCoreRegister(), imm);
@@ -600,19 +601,19 @@
return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
}
-void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
+void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
}
-void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
+void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset32 offs) {
ArmManagedRegister dst = m_dst.AsArm();
CHECK(dst.IsCoreRegister()) << dst;
LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
}
void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -621,9 +622,9 @@
SP, fr_offs.Int32Value());
}
-void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -632,9 +633,9 @@
TR, thr_offs.Int32Value());
}
-void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
@@ -642,7 +643,7 @@
TR, thr_offs.Int32Value());
}
-void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void ArmAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}
@@ -831,7 +832,8 @@
// TODO: place reference map on call
}
-void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
+void ArmAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
@@ -848,8 +850,10 @@
ArmManagedRegister scratch = mscratch.AsArm();
ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
buffer_.EnqueueSlowPath(slow);
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- TR, Thread::ExceptionOffset<4>().Int32Value());
+ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ TR,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
cmp(scratch.AsCoreRegister(), ShifterOperand(0));
b(slow->Entry(), NE);
}
@@ -865,7 +869,10 @@
// Don't care about preserving R0 as this call won't return.
__ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
// Set up call to Thread::Current()->pDeliverException.
- __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ R12,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
__ blx(R12);
#undef __
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 9cf72a2..2b7414d 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -904,13 +904,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -918,7 +918,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -927,15 +927,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -990,7 +990,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 353c729..4be7aae 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2325,7 +2325,7 @@
}
Register rn = ad.GetRegister();
- if (IsHighRegister(rn) && rn != SP && rn != PC) {
+ if (IsHighRegister(rn) && (byte || half || (rn != SP && rn != PC))) {
must_be_32bit = true;
}
@@ -2337,24 +2337,24 @@
// Immediate offset
int32_t offset = ad.GetOffset();
- // The 16 bit SP relative instruction can only have a 10 bit offset.
- if (rn == SP && offset >= (1 << 10)) {
- must_be_32bit = true;
- }
-
if (byte) {
// 5 bit offset, no shift.
- if (offset >= (1 << 5)) {
+ if ((offset & ~0x1f) != 0) {
must_be_32bit = true;
}
} else if (half) {
- // 6 bit offset, shifted by 1.
- if (offset >= (1 << 6)) {
+ // 5 bit offset, shifted by 1.
+ if ((offset & ~(0x1f << 1)) != 0) {
+ must_be_32bit = true;
+ }
+ } else if (rn == SP || rn == PC) {
+ // The 16 bit SP/PC relative instruction can only have an (imm8 << 2) offset.
+ if ((offset & ~(0xff << 2)) != 0) {
must_be_32bit = true;
}
} else {
- // 7 bit offset, shifted by 2.
- if (offset >= (1 << 7)) {
+ // 5 bit offset, shifted by 2.
+ if ((offset & ~(0x1f << 2)) != 0) {
must_be_32bit = true;
}
}
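The corrected rule reduces to one mask test per access size: the immediate must fit a field pre-shifted by log2 of the access size, with SP/PC-relative word accesses getting a wider 8-bit field. A standalone sketch covering just this offset check (register constraints are handled earlier in the function; `Fits16BitLoadStoreOffset` is an illustrative name):

```cpp
#include <cstdint>

bool Fits16BitLoadStoreOffset(int32_t offset, bool byte, bool half, bool sp_or_pc) {
  if (offset < 0) return false;                       // 16-bit forms take unsigned immediates
  if (byte) return (offset & ~0x1f) == 0;             // imm5, unshifted: 0..31
  if (half) return (offset & ~(0x1f << 1)) == 0;      // imm5 << 1: 0..62, even only
  if (sp_or_pc) return (offset & ~(0xff << 2)) == 0;  // imm8 << 2: 0..1020, word-aligned
  return (offset & ~(0x1f << 2)) == 0;                // imm5 << 2: 0..124, word-aligned
}
```

Note that the mask tests also reject misaligned offsets (e.g. #1 for a halfword), which is why the "Unaligned" cases in the new tests below expect .w encodings; the boundaries match the tests exactly — #31/#32 for bytes, #62/#64 for halfwords, #124/#128 for words, #1020/#1024 for SP and PC.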
@@ -2370,7 +2370,7 @@
} else {
// 16 bit thumb1.
uint8_t opA = 0;
- bool sp_relative = false;
+ bool sp_or_pc_relative = false;
if (byte) {
opA = 7U /* 0b0111 */;
@@ -2379,7 +2379,10 @@
} else {
if (rn == SP) {
opA = 9U /* 0b1001 */;
- sp_relative = true;
+ sp_or_pc_relative = true;
+ } else if (rn == PC) {
+        opA = 4U /* 0b0100 */;
+ sp_or_pc_relative = true;
} else {
opA = 6U /* 0b0110 */;
}
@@ -2388,7 +2391,7 @@
(load ? B11 : 0);
CHECK_GE(offset, 0);
- if (sp_relative) {
+ if (sp_or_pc_relative) {
// SP/PC relative, 10 bit offset.
CHECK_LT(offset, (1 << 10));
CHECK_ALIGNED(offset, 4);
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index abb09f7..3ca3714 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -279,6 +279,148 @@
DriverStr(expected, "smull");
}
+TEST_F(AssemblerThumb2Test, LoadByteFromThumbOffset) {
+ arm::LoadOperandType type = arm::kLoadUnsignedByte;
+
+ __ LoadFromOffset(type, arm::R0, arm::R7, 0);
+ __ LoadFromOffset(type, arm::R1, arm::R7, 31);
+ __ LoadFromOffset(type, arm::R2, arm::R7, 32);
+ __ LoadFromOffset(type, arm::R3, arm::R7, 4095);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 0);
+
+ const char* expected =
+ "ldrb r0, [r7, #0]\n"
+ "ldrb r1, [r7, #31]\n"
+ "ldrb.w r2, [r7, #32]\n"
+ "ldrb.w r3, [r7, #4095]\n"
+ "ldrb.w r4, [sp, #0]\n";
+ DriverStr(expected, "LoadByteFromThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreByteToThumbOffset) {
+ arm::StoreOperandType type = arm::kStoreByte;
+
+ __ StoreToOffset(type, arm::R0, arm::R7, 0);
+ __ StoreToOffset(type, arm::R1, arm::R7, 31);
+ __ StoreToOffset(type, arm::R2, arm::R7, 32);
+ __ StoreToOffset(type, arm::R3, arm::R7, 4095);
+ __ StoreToOffset(type, arm::R4, arm::SP, 0);
+
+ const char* expected =
+ "strb r0, [r7, #0]\n"
+ "strb r1, [r7, #31]\n"
+ "strb.w r2, [r7, #32]\n"
+ "strb.w r3, [r7, #4095]\n"
+ "strb.w r4, [sp, #0]\n";
+ DriverStr(expected, "StoreByteToThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadHalfFromThumbOffset) {
+ arm::LoadOperandType type = arm::kLoadUnsignedHalfword;
+
+ __ LoadFromOffset(type, arm::R0, arm::R7, 0);
+ __ LoadFromOffset(type, arm::R1, arm::R7, 62);
+ __ LoadFromOffset(type, arm::R2, arm::R7, 64);
+ __ LoadFromOffset(type, arm::R3, arm::R7, 4094);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 0);
+ __ LoadFromOffset(type, arm::R5, arm::R7, 1); // Unaligned
+
+ const char* expected =
+ "ldrh r0, [r7, #0]\n"
+ "ldrh r1, [r7, #62]\n"
+ "ldrh.w r2, [r7, #64]\n"
+ "ldrh.w r3, [r7, #4094]\n"
+ "ldrh.w r4, [sp, #0]\n"
+ "ldrh.w r5, [r7, #1]\n";
+ DriverStr(expected, "LoadHalfFromThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreHalfToThumbOffset) {
+ arm::StoreOperandType type = arm::kStoreHalfword;
+
+ __ StoreToOffset(type, arm::R0, arm::R7, 0);
+ __ StoreToOffset(type, arm::R1, arm::R7, 62);
+ __ StoreToOffset(type, arm::R2, arm::R7, 64);
+ __ StoreToOffset(type, arm::R3, arm::R7, 4094);
+ __ StoreToOffset(type, arm::R4, arm::SP, 0);
+ __ StoreToOffset(type, arm::R5, arm::R7, 1); // Unaligned
+
+ const char* expected =
+ "strh r0, [r7, #0]\n"
+ "strh r1, [r7, #62]\n"
+ "strh.w r2, [r7, #64]\n"
+ "strh.w r3, [r7, #4094]\n"
+ "strh.w r4, [sp, #0]\n"
+ "strh.w r5, [r7, #1]\n";
+ DriverStr(expected, "StoreHalfToThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadWordFromSpPlusOffset) {
+ arm::LoadOperandType type = arm::kLoadWord;
+
+ __ LoadFromOffset(type, arm::R0, arm::SP, 0);
+ __ LoadFromOffset(type, arm::R1, arm::SP, 124);
+ __ LoadFromOffset(type, arm::R2, arm::SP, 128);
+ __ LoadFromOffset(type, arm::R3, arm::SP, 1020);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 1024);
+ __ LoadFromOffset(type, arm::R5, arm::SP, 4092);
+ __ LoadFromOffset(type, arm::R6, arm::SP, 1); // Unaligned
+
+ const char* expected =
+ "ldr r0, [sp, #0]\n"
+ "ldr r1, [sp, #124]\n"
+ "ldr r2, [sp, #128]\n"
+ "ldr r3, [sp, #1020]\n"
+ "ldr.w r4, [sp, #1024]\n"
+ "ldr.w r5, [sp, #4092]\n"
+ "ldr.w r6, [sp, #1]\n";
+ DriverStr(expected, "LoadWordFromSpPlusOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreWordToSpPlusOffset) {
+ arm::StoreOperandType type = arm::kStoreWord;
+
+ __ StoreToOffset(type, arm::R0, arm::SP, 0);
+ __ StoreToOffset(type, arm::R1, arm::SP, 124);
+ __ StoreToOffset(type, arm::R2, arm::SP, 128);
+ __ StoreToOffset(type, arm::R3, arm::SP, 1020);
+ __ StoreToOffset(type, arm::R4, arm::SP, 1024);
+ __ StoreToOffset(type, arm::R5, arm::SP, 4092);
+ __ StoreToOffset(type, arm::R6, arm::SP, 1); // Unaligned
+
+ const char* expected =
+ "str r0, [sp, #0]\n"
+ "str r1, [sp, #124]\n"
+ "str r2, [sp, #128]\n"
+ "str r3, [sp, #1020]\n"
+ "str.w r4, [sp, #1024]\n"
+ "str.w r5, [sp, #4092]\n"
+ "str.w r6, [sp, #1]\n";
+ DriverStr(expected, "StoreWordToSpPlusOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadWordFromPcPlusOffset) {
+ arm::LoadOperandType type = arm::kLoadWord;
+
+ __ LoadFromOffset(type, arm::R0, arm::PC, 0);
+ __ LoadFromOffset(type, arm::R1, arm::PC, 124);
+ __ LoadFromOffset(type, arm::R2, arm::PC, 128);
+ __ LoadFromOffset(type, arm::R3, arm::PC, 1020);
+ __ LoadFromOffset(type, arm::R4, arm::PC, 1024);
+ __ LoadFromOffset(type, arm::R5, arm::PC, 4092);
+ __ LoadFromOffset(type, arm::R6, arm::PC, 1); // Unaligned
+
+ const char* expected =
+ "ldr r0, [pc, #0]\n"
+ "ldr r1, [pc, #124]\n"
+ "ldr r2, [pc, #128]\n"
+ "ldr r3, [pc, #1020]\n"
+ "ldr.w r4, [pc, #1024]\n"
+ "ldr.w r5, [pc, #4092]\n"
+ "ldr.w r6, [pc, #1]\n";
+ DriverStr(expected, "LoadWordFromPcPlusOffset");
+}
+
TEST_F(AssemblerThumb2Test, StoreWordToThumbOffset) {
arm::StoreOperandType type = arm::kStoreWord;
int32_t offset = 4092;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 9f2027f..dc1f24a 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -28,7 +28,7 @@
#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
-#define ___ vixl_masm_->
+#define ___ vixl_masm_.
#endif
void Arm64Assembler::FinalizeCode() {
@@ -39,16 +39,16 @@
}
size_t Arm64Assembler::CodeSize() const {
- return vixl_masm_->GetBufferCapacity() - vixl_masm_->GetRemainingBufferSpace();
+ return vixl_masm_.GetBufferCapacity() - vixl_masm_.GetRemainingBufferSpace();
}
const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
- return vixl_masm_->GetStartAddress<uint8_t*>();
+ return vixl_masm_.GetStartAddress<uint8_t*>();
}
void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
// Copy the instructions from the buffer.
- MemoryRegion from(vixl_masm_->GetStartAddress<void*>(), CodeSize());
+ MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
region.CopyFrom(0, from);
}
@@ -86,7 +86,7 @@
} else {
// temp = rd + value
// rd = cond ? temp : rn
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(rd), reg_x(rn));
Register temp = temps.AcquireX();
___ Add(temp, reg_x(rn), value);
@@ -164,25 +164,26 @@
offs.Int32Value());
}
-void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
- ManagedRegister m_scratch) {
+void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs,
+ uint32_t imm,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadImmediate(scratch.AsXRegister(), imm);
StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
}
-void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}
-void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
- UseScratchRegisterScope temps(vixl_masm_);
+void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
+ UseScratchRegisterScope temps(&vixl_masm_);
Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
@@ -206,7 +207,7 @@
// temp = value
// rd = cond ? temp : rd
if (value != 0) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dest));
Register temp = temps.AcquireX();
___ Mov(temp, value);
@@ -285,7 +286,7 @@
return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}
-void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
+void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}
@@ -313,12 +314,12 @@
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
// Remove dst and base from the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
-void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
+void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsXRegister()) << dst;
LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
@@ -355,17 +356,17 @@
}
void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<8> tr_offs,
- ManagedRegister m_scratch) {
+ ThreadOffset64 tr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}
-void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
@@ -527,7 +528,7 @@
CHECK(base.IsXRegister()) << base;
CHECK(scratch.IsXRegister()) << scratch;
// Remove base and scratch from the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
___ Br(reg_x(scratch.AsXRegister()));
@@ -542,7 +543,8 @@
___ Blr(reg_x(scratch.AsXRegister()));
}
-void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
+void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
@@ -612,12 +614,14 @@
CHECK_ALIGNED(stack_adjust, kStackAlignment);
Arm64ManagedRegister scratch = m_scratch.AsArm64();
exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
- LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
+ LoadFromOffset(scratch.AsXRegister(),
+ TR,
+ Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}
void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
Register temp = temps.AcquireX();
@@ -629,7 +633,9 @@
// Pass exception object as argument.
// Don't care about preserving X0 as this won't return.
___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
- ___ Ldr(temp, MEM_OP(reg_x(TR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
+ ___ Ldr(temp,
+ MEM_OP(reg_x(TR),
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
___ Blr(temp);
// Call should never return.
@@ -647,7 +653,7 @@
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Since we are operating on register pairs, we would like to align on
// double the standard size; on the other hand, we don't want to insert
// an extra store, which will happen if the number of registers is even.
@@ -675,7 +681,7 @@
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Be consistent with the logic for spilling registers.
if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -720,7 +726,7 @@
// Increase frame to required size.
DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
IncreaseFrameSize(frame_size);
// Save callee-saves.
@@ -734,7 +740,7 @@
StoreToOffset(X0, SP, 0);
// Write out entry spills
- int32_t offset = frame_size + kArm64PointerSize;
+ int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
for (size_t i = 0; i < entry_spills.size(); ++i) {
Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
if (reg.IsNoRegister()) {
@@ -776,7 +782,7 @@
// For now we only check that the size of the frame is large enough to hold spills and method
// reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
DCHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index a481544..b8434b9 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -23,7 +23,6 @@
#include "base/arena_containers.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"
@@ -84,16 +83,13 @@
class Arm64Assembler FINAL : public Assembler {
public:
- // We indicate the size of the initial code generation buffer to the VIXL
- // assembler. From there we it will automatically manage the buffer.
explicit Arm64Assembler(ArenaAllocator* arena)
: Assembler(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
- vixl_masm_(new vixl::aarch64::MacroAssembler(kArm64BaseBufferSize)) {}
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
- virtual ~Arm64Assembler() {
- delete vixl_masm_;
- }
+ virtual ~Arm64Assembler() {}
+
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
// Finalize the code.
void FinalizeCode() OVERRIDE;
@@ -126,28 +122,28 @@
void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
// Load routines.
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
bool unpoison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
@@ -200,7 +196,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
@@ -287,9 +283,8 @@
// List of exception blocks to generate at the end of the code cache.
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
- public:
- // Vixl assembler.
- vixl::aarch64::MacroAssembler* const vixl_masm_;
+ // VIXL assembler.
+ vixl::aarch64::MacroAssembler vixl_masm_;
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
deleted file mode 100644
index 01e8be9..0000000
--- a/compiler/utils/arm64/constants_arm64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-#define ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-
-#include <stdint.h>
-#include <iosfwd>
-#include "arch/arm64/registers_arm64.h"
-#include "base/casts.h"
-#include "base/logging.h"
-#include "globals.h"
-
-// TODO: Extend this file by adding missing functionality.
-
-namespace art {
-namespace arm64 {
-
-constexpr size_t kArm64BaseBufferSize = 4096;
-
-} // namespace arm64
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index f7d74d2..7378a0a 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
+#include "arch/arm64/registers_arm64.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index e6c3a18..0a1b733 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -162,90 +162,94 @@
}
}
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread32(ThreadOffset32 dest ATTRIBUTE_UNUSED,
uint32_t imm ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread64(ThreadOffset64 dest ATTRIBUTE_UNUSED,
uint32_t imm ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread32(
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread64(
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread32(
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread64(
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<4> src ATTRIBUTE_UNUSED,
+ ThreadOffset32 src ATTRIBUTE_UNUSED,
size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<8> src ATTRIBUTE_UNUSED,
+ ThreadOffset64 src ATTRIBUTE_UNUSED,
size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
+ ThreadOffset32 offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
+ ThreadOffset64 offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
FrameOffset fr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
FrameOffset fr_offs ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 80aa630..89f7947 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -24,6 +24,7 @@
#include "arm/constants_arm.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
@@ -382,8 +383,7 @@
const ManagedRegisterEntrySpills& entry_spills) = 0;
// Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+ virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
virtual void IncreaseFrameSize(size_t adjust) = 0;
virtual void DecreaseFrameSize(size_t adjust) = 0;
@@ -393,23 +393,24 @@
virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
- virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister scratch) = 0;
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
- virtual void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
+ virtual void StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
ManagedRegister scratch);
- virtual void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
+ virtual void StoreImmediateToThread64(ThreadOffset64 dest,
+ uint32_t imm,
ManagedRegister scratch);
- virtual void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
+ virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+ virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void StoreStackPointerToThread32(ThreadOffset<4> thr_offs);
- virtual void StoreStackPointerToThread64(ThreadOffset<8> thr_offs);
+ virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs);
+ virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs);
virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
FrameOffset in_off, ManagedRegister scratch) = 0;
@@ -417,8 +418,8 @@
// Load routines
virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
- virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
- virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);
+ virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size);
+ virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size);
virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
// If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
@@ -427,24 +428,27 @@
virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
- virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs);
- virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs);
+ virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs);
+ virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs);
// Copying routines
virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
- virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ virtual void CopyRawPtrFromThread32(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ virtual void CopyRawPtrFromThread64(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister scratch);
- virtual void CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister scratch) = 0;
+ virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
@@ -474,24 +478,26 @@
// Exploit fast access in managed code to Thread::Current()
virtual void GetCurrentThread(ManagedRegister tr) = 0;
- virtual void GetCurrentThread(FrameOffset dest_offset,
- ManagedRegister scratch) = 0;
+ virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;
// Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
// null.
- virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) = 0;
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) = 0;
// Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) = 0;
+ virtual void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) = 0;
// src holds a handle scope entry (Object**); load it into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src) = 0;
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
@@ -499,12 +505,10 @@
virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
// Call to address held at [base+offset]
- virtual void Call(ManagedRegister base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void Call(FrameOffset base, Offset offset,
- ManagedRegister scratch) = 0;
- virtual void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch);
- virtual void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch);
+ virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch);
+ virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch);
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
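
Throughout this series, ThreadOffset<4>/ThreadOffset<8> become ThreadOffset32/ThreadOffset64. A minimal sketch of the assumed definitions (the PointerSize enum from the newly included base/enums.h and aliases presumably in thread.h; these exact shapes are assumptions, not shown in this diff):

    // Illustrative only: the assumed shape of the new strongly typed offsets.
    enum class PointerSize : size_t {
      k32 = 4,
      k64 = 8,
    };

    template <PointerSize kPointerSize>
    class ThreadOffset : public Offset {
     public:
      explicit ThreadOffset(size_t value) : Offset(value) {}
    };

    using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
    using ThreadOffset64 = ThreadOffset<PointerSize::k64>;

Under this scheme an expression like ThreadOffset32(src.Int32Value() + 4), as used in the x86 hunks below, can only reach the 32-bit overloads, so mixing word sizes becomes a compile-time error rather than a silent mismatch.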
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index ebaf1c0..e6b32de 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -26,6 +26,11 @@
namespace art {
namespace mips {
+static_assert(static_cast<size_t>(kMipsPointerSize) == kMipsWordSize,
+ "Unexpected Mips pointer size.");
+static_assert(kMipsPointerSize == PointerSize::k32, "Unexpected Mips pointer size.");
+
+
std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
if (rhs >= D0 && rhs < kNumberOfDRegisters) {
os << "d" << static_cast<int>(rhs);
@@ -2024,6 +2029,10 @@
Bind(&pc_rel_base_label_);
}
+uint32_t MipsAssembler::GetPcRelBaseLabelLocation() const {
+ return GetLabelLocation(&pc_rel_base_label_);
+}
+
void MipsAssembler::FinalizeLabeledBranch(MipsLabel* label) {
uint32_t length = branches_.back().GetLength();
if (!label->IsBound()) {
@@ -2790,7 +2799,8 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest, uint32_t imm,
+void MipsAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+ uint32_t imm,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2799,7 +2809,7 @@
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
}
-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -2809,7 +2819,7 @@
S1, thr_offs.Int32Value());
}
-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) {
+void MipsAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
}
@@ -2826,8 +2836,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void MipsAssembler::LoadFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> src, size_t size) {
+void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2855,8 +2864,7 @@
base.AsMips().AsCoreRegister(), offs.Int32Value());
}
-void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> offs) {
+void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -2911,7 +2919,7 @@
}
void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<kMipsWordSize> thr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2921,7 +2929,7 @@
SP, fr_offs.Int32Value());
}
-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -3095,7 +3103,7 @@
// TODO: place reference map on call.
}
-void MipsAssembler::CallFromThread32(ThreadOffset<kMipsWordSize> offset ATTRIBUTE_UNUSED,
+void MipsAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
@@ -3113,7 +3121,7 @@
MipsManagedRegister scratch = mscratch.AsMips();
exception_blocks_.emplace_back(scratch, stack_adjust);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- S1, Thread::ExceptionOffset<kMipsWordSize>().Int32Value());
+ S1, Thread::ExceptionOffset<kMipsPointerSize>().Int32Value());
// TODO: on MIPS32R6 prefer Bnezc(scratch.AsCoreRegister(), slow.Entry());
// as the NAL instruction (occurring in long R2 branches) may become deprecated.
// For now use instructions common to R2 and R6, as this code must execute on both.
@@ -3131,7 +3139,7 @@
Move(A0, exception->scratch_.AsCoreRegister());
// Set up call to Thread::Current()->pDeliverException.
LoadFromOffset(kLoadWord, T9, S1,
- QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pDeliverException).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pDeliverException).Int32Value());
Jr(T9);
Nop();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 1f7781f..852ced6 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -500,15 +500,15 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest,
+ void StoreImmediateToThread32(ThreadOffset32 dest,
uint32_t imm,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
@@ -518,9 +518,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister mdest,
- ThreadOffset<kMipsWordSize> src,
- size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -531,16 +529,16 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset<kMipsWordSize> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
void CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<kMipsWordSize> thr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
@@ -619,7 +617,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<kMipsWordSize> offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
@@ -646,6 +644,9 @@
// The assembler then computes literal offsets relative to this label.
void BindPcRelBaseLabel();
+ // Returns the location of the label bound with BindPcRelBaseLabel().
+ uint32_t GetPcRelBaseLabelLocation() const;
+
// Note that PC-relative literal loads are handled as pseudo branches because they need very
// similar relocation and may similarly expand in size to accommodate larger offsets relative
// to PC.
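
The new GetPcRelBaseLabelLocation() accessor complements BindPcRelBaseLabel(); a hedged usage sketch (some_label and the surrounding code are hypothetical, and GetLabelLocation() is the helper the implementation above delegates to):

    // Illustrative only: distance from the PC-relative base to a bound label,
    // with both locations expressed as byte offsets into the code buffer.
    uint32_t base = assembler.GetPcRelBaseLabelLocation();
    uint32_t target = assembler.GetLabelLocation(&some_label);
    int32_t pc_rel_offset = static_cast<int32_t>(target - base);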
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 447ede5..3fd77a0 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -26,6 +26,11 @@
namespace art {
namespace mips64 {
+static_assert(static_cast<size_t>(kMips64PointerSize) == kMips64DoublewordSize,
+ "Unexpected Mips64 pointer size.");
+static_assert(kMips64PointerSize == PointerSize::k64, "Unexpected Mips64 pointer size.");
+
+
void Mips64Assembler::FinalizeCode() {
for (auto& exception_block : exception_blocks_) {
EmitExceptionPoll(&exception_block);
@@ -2110,7 +2115,7 @@
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2119,7 +2124,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
-void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) {
+void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
@@ -2136,9 +2141,7 @@
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void Mips64Assembler::LoadFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> src,
- size_t size) {
+void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2171,8 +2174,7 @@
base.AsMips64().AsGpuRegister(), offs.Int32Value());
}
-void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> offs) {
+void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
@@ -2217,7 +2219,7 @@
}
void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<kMips64DoublewordSize> thr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
@@ -2225,7 +2227,7 @@
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}
-void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2429,7 +2431,7 @@
// TODO: place reference map on call
}
-void Mips64Assembler::CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset ATTRIBUTE_UNUSED,
+void Mips64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
@@ -2449,7 +2451,7 @@
LoadFromOffset(kLoadDoubleword,
scratch.AsGpuRegister(),
S1,
- Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value());
+ Thread::ExceptionOffset<kMips64PointerSize>().Int32Value());
Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
}
@@ -2466,7 +2468,7 @@
LoadFromOffset(kLoadDoubleword,
T9,
S1,
- QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pDeliverException).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pDeliverException).Int32Value());
Jr(T9);
Nop();
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 0cd0708..1ad05b0 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -383,10 +383,11 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
ManagedRegister mscratch) OVERRIDE;
@@ -394,9 +395,7 @@
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> src,
- size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -405,16 +404,17 @@
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<kMips64DoublewordSize> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<kMips64DoublewordSize> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
@@ -471,8 +471,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset,
- ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f931d75..bd5fc40 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -727,6 +727,14 @@
}
+void X86Assembler::comiss(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a, b);
+}
+
+
void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -736,6 +744,15 @@
}
+void X86Assembler::comisd(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a, b);
+}
+
+
void X86Assembler::ucomiss(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
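
The new memory-operand overloads reuse the 0x0F 0x2F (COMISS/COMISD) opcodes of the register forms; only the ModRM operand changes, exactly as the existing ucomiss/ucomisd Address overloads do for 0x0F 0x2E. Illustrative encodings, assuming Address(EAX, 0) encodes as mod=00, r/m=EAX with no displacement:

    comiss 0(%EAX), %xmm0    ->  0F 2F 00
    comisd 0(%EAX), %xmm0    ->  66 0F 2F 00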
@@ -2051,21 +2068,20 @@
movl(Address(ESP, dest), Immediate(imm));
}
-void X86Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister) {
+void X86Assembler::StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister) {
fs()->movl(Address::Absolute(dest), Immediate(imm));
}
-void X86Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void X86Assembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}
-void X86Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void X86Assembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
fs()->movl(Address::Absolute(thr_offs), ESP);
}
@@ -2101,7 +2117,7 @@
}
}
-void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
+void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
X86ManagedRegister dest = mdest.AsX86();
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
@@ -2111,7 +2127,7 @@
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size);
fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
- fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset<4>(src.Int32Value()+4)));
+ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
} else if (dest.IsX87Register()) {
if (size == 4) {
fs()->flds(Address::Absolute(src));
@@ -2152,7 +2168,7 @@
}
void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<4> offs) {
+ ThreadOffset32 offs) {
X86ManagedRegister dest = mdest.AsX86();
CHECK(dest.IsCpuRegister());
fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
@@ -2215,17 +2231,17 @@
}
void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
Store(fr_offs, scratch, 4);
}
-void X86Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void X86Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
Load(scratch, fr_offs, 4);
@@ -2371,26 +2387,26 @@
call(Address(scratch, offset));
}
-void X86Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister /*mscratch*/) {
+void X86Assembler::CallFromThread32(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
fs()->call(Address::Absolute(offset));
}
void X86Assembler::GetCurrentThread(ManagedRegister tr) {
fs()->movl(tr.AsX86().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<4>()));
+ Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
}
void X86Assembler::GetCurrentThread(FrameOffset offset,
ManagedRegister mscratch) {
X86ManagedRegister scratch = mscratch.AsX86();
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<4>()));
+ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
movl(Address(ESP, offset), scratch.AsCpuRegister());
}
void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
- fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0));
+ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
j(kNotEqual, slow->Entry());
}
@@ -2403,8 +2419,8 @@
__ DecreaseFrameSize(stack_adjust_);
}
// Pass exception as argument in EAX
- __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<4>()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException)));
+ __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
// this call should never return
__ int3();
#undef __
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index fa61662..6d519e4 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -195,7 +195,7 @@
return result;
}
- static Address Absolute(ThreadOffset<4> addr) {
+ static Address Absolute(ThreadOffset32 addr) {
return Absolute(addr.Int32Value());
}
@@ -423,7 +423,9 @@
void cvtdq2pd(XmmRegister dst, XmmRegister src);
void comiss(XmmRegister a, XmmRegister b);
+ void comiss(XmmRegister a, const Address& b);
void comisd(XmmRegister a, XmmRegister b);
+ void comisd(XmmRegister a, const Address& b);
void ucomiss(XmmRegister a, XmmRegister b);
void ucomiss(XmmRegister a, const Address& b);
void ucomisd(XmmRegister a, XmmRegister b);
@@ -652,13 +654,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -666,7 +668,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -675,15 +677,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -740,7 +742,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 28043c9..307e034 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -322,18 +322,43 @@
DriverStr(RepeatRI(&x86::X86Assembler::roll, 1U, "roll ${imm}, %{reg}"), "rolli");
}
+TEST_F(AssemblerX86Test, ComissAddr) {
+ GetAssembler()->comiss(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
+ const char* expected = "comiss 0(%EAX), %xmm0\n";
+ DriverStr(expected, "comiss");
+}
+
TEST_F(AssemblerX86Test, UComissAddr) {
GetAssembler()->ucomiss(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
const char* expected = "ucomiss 0(%EAX), %xmm0\n";
DriverStr(expected, "ucomiss");
}
+TEST_F(AssemblerX86Test, ComisdAddr) {
+ GetAssembler()->comisd(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
+ const char* expected = "comisd 0(%EAX), %xmm0\n";
+ DriverStr(expected, "comisd");
+}
+
TEST_F(AssemblerX86Test, UComisdAddr) {
GetAssembler()->ucomisd(x86::XmmRegister(x86::XMM0), x86::Address(x86::EAX, 0));
const char* expected = "ucomisd 0(%EAX), %xmm0\n";
DriverStr(expected, "ucomisd");
}
+TEST_F(AssemblerX86Test, RoundSS) {
+ GetAssembler()->roundss(
+ x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1), x86::Immediate(1));
+ const char* expected = "roundss $1, %xmm1, %xmm0\n";
+ DriverStr(expected, "roundss");
+}
+
+TEST_F(AssemblerX86Test, RoundSD) {
+ GetAssembler()->roundsd(
+ x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1), x86::Immediate(1));
+ const char* expected = "roundsd $1, %xmm1, %xmm0\n";
+ DriverStr(expected, "roundsd");
+}
TEST_F(AssemblerX86Test, CmovlAddress) {
GetAssembler()->cmovl(x86::kEqual, x86::Register(x86::EAX), x86::Address(
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 3046710..977ce9d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2683,7 +2683,8 @@
}
}
- DCHECK_EQ(kX86_64PointerSize, kFramePointerSize);
+ static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+ "Unexpected frame pointer size.");
movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
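
With kX86_64PointerSize now a PointerSize enum value, both operands of the old DCHECK_EQ are compile-time constants, and comparing the enum class against a size_t would no longer compile without a cast, so the check moves to compile time. The same pattern appears in the MIPS and MIPS64 hunks above; its general shape:

    // Illustrative only: compile-time pointer-size assertion.
    static_assert(static_cast<size_t>(PointerSize::k64) == 8u, "Unexpected pointer size.");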
@@ -2803,12 +2804,11 @@
movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
}
-void X86_64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister) {
+void X86_64Assembler::StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister) {
gs()->movl(Address::Absolute(dest, true), Immediate(imm)); // TODO(64) movq?
}
-void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -2817,7 +2817,7 @@
gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}
-void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
}
@@ -2858,7 +2858,7 @@
}
}
-void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
+void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
X86_64ManagedRegister dest = mdest.AsX86_64();
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
@@ -2907,7 +2907,7 @@
movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
}
-void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset<8> offs) {
+void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
X86_64ManagedRegister dest = mdest.AsX86_64();
CHECK(dest.IsCpuRegister());
gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
@@ -2969,7 +2969,7 @@
}
void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset<8> thr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
@@ -2977,7 +2977,7 @@
Store(fr_offs, scratch, 8);
}
-void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -3130,17 +3130,19 @@
call(Address(scratch, offset));
}
-void X86_64Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister /*mscratch*/) {
+void X86_64Assembler::CallFromThread64(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
gs()->call(Address::Absolute(offset, true));
}
void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
- gs()->movq(tr.AsX86_64().AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}
void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
- gs()->movq(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+ gs()->movq(scratch.AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
}
@@ -3156,7 +3158,7 @@
void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
buffer_.EnqueueSlowPath(slow);
- gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<8>(), true), Immediate(0));
+ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
j(kNotEqual, slow->Entry());
}
@@ -3169,8 +3171,10 @@
__ DecreaseFrameSize(stack_adjust_);
}
// Pass exception as argument in RDI
- __ gs()->movq(CpuRegister(RDI), Address::Absolute(Thread::ExceptionOffset<8>(), true));
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), true));
+ __ gs()->movq(CpuRegister(RDI),
+ Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
// this call should never return
__ int3();
#undef __
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 361f73c..52e39cf 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -258,7 +258,7 @@
}
// If no_rip is true then the Absolute address isn't RIP relative.
- static Address Absolute(ThreadOffset<8> addr, bool no_rip = false) {
+ static Address Absolute(ThreadOffset64 addr, bool no_rip = false) {
return Absolute(addr.Int32Value(), no_rip);
}
@@ -723,13 +723,13 @@
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+ void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+ void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
@@ -737,7 +737,7 @@
// Load routines
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+ void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -746,15 +746,15 @@
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+ void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
// Copying routines
void Move(ManagedRegister dest, ManagedRegister src, size_t size);
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -812,7 +812,7 @@
// Call to address held at [base+offset]
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to an ExceptionSlowPath if it is.
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 93351e9..58dd047 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -14,9 +14,10 @@
* limitations under the License.
*/
+#include <regex>
+#include <sstream>
#include <string>
#include <vector>
-#include <sstream>
#include "common_runtime_test.h"
@@ -207,7 +208,7 @@
std::string dex_location = GetScratchDir() + "/Dex2OatSwapTest.jar";
std::string odex_location = GetOdexDir() + "/Dex2OatSwapTest.odex";
- Copy(GetDexSrc1(), dex_location);
+ Copy(GetTestDexFileName(), dex_location);
std::vector<std::string> copy(extra_args);
@@ -226,7 +227,11 @@
CheckResult(expect_use);
}
- void CheckResult(bool expect_use) {
+ virtual std::string GetTestDexFileName() {
+ return GetDexSrc1();
+ }
+
+ virtual void CheckResult(bool expect_use) {
if (kIsTargetBuild) {
CheckTargetResult(expect_use);
} else {
@@ -234,13 +239,13 @@
}
}
- void CheckTargetResult(bool expect_use ATTRIBUTE_UNUSED) {
+ virtual void CheckTargetResult(bool expect_use ATTRIBUTE_UNUSED) {
// TODO: Ignore for now, as we won't capture any output (it goes to the logcat). We may do
// something for variants with a file descriptor, where we can control the lifetime of
// the swap file and thus take a look at it.
}
- void CheckHostResult(bool expect_use) {
+ virtual void CheckHostResult(bool expect_use) {
if (!kIsTargetBuild) {
if (expect_use) {
EXPECT_NE(output_.find("Large app, accepted running with swap."), std::string::npos)
@@ -253,7 +258,7 @@
}
// Check whether the dex2oat run was really successful.
- void CheckValidity() {
+ virtual void CheckValidity() {
if (kIsTargetBuild) {
CheckTargetValidity();
} else {
@@ -261,14 +266,14 @@
}
}
- void CheckTargetValidity() {
+ virtual void CheckTargetValidity() {
// TODO: Ignore for now, as we won't capture any output (it goes to the logcat). We may do
// something for variants with a file descriptor, where we can control the lifetime of
// the swap file and thus take a look at it.
}
// On the host, we can get the dex2oat output. Here, look for "dex2oat took."
- void CheckHostValidity() {
+ virtual void CheckHostValidity() {
EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_;
}
};
@@ -297,6 +302,127 @@
{ "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
}
+class Dex2oatSwapUseTest : public Dex2oatSwapTest {
+ protected:
+ void CheckHostResult(bool expect_use) OVERRIDE {
+ if (!kIsTargetBuild) {
+ if (expect_use) {
+ EXPECT_NE(output_.find("Large app, accepted running with swap."), std::string::npos)
+ << output_;
+ } else {
+ EXPECT_EQ(output_.find("Large app, accepted running with swap."), std::string::npos)
+ << output_;
+ }
+ }
+ }
+
+ std::string GetTestDexFileName() OVERRIDE {
+ // Use Statics as it has a handful of functions.
+ return CommonRuntimeTest::GetTestDexFileName("Statics");
+ }
+
+ void GrabResult1() {
+ if (!kIsTargetBuild) {
+ native_alloc_1_ = ParseNativeAlloc();
+ swap_1_ = ParseSwap(false /* expected */);
+ } else {
+ native_alloc_1_ = std::numeric_limits<size_t>::max();
+ swap_1_ = 0;
+ }
+ }
+
+ void GrabResult2() {
+ if (!kIsTargetBuild) {
+ native_alloc_2_ = ParseNativeAlloc();
+ swap_2_ = ParseSwap(true /* expected */);
+ } else {
+ native_alloc_2_ = 0;
+ swap_2_ = std::numeric_limits<size_t>::max();
+ }
+ }
+
+ private:
+ size_t ParseNativeAlloc() {
+ std::regex native_alloc_regex("dex2oat took.*native alloc=[^ ]+ \\(([0-9]+)B\\)");
+ std::smatch native_alloc_match;
+ bool found = std::regex_search(output_, native_alloc_match, native_alloc_regex);
+ if (!found) {
+ EXPECT_TRUE(found);
+ return 0;
+ }
+ if (native_alloc_match.size() != 2U) {
+ EXPECT_EQ(native_alloc_match.size(), 2U);
+ return 0;
+ }
+
+ std::istringstream stream(native_alloc_match[1].str());
+ size_t value;
+ stream >> value;
+
+ return value;
+ }
+
+ size_t ParseSwap(bool expected) {
+ std::regex swap_regex("dex2oat took[^\\n]+swap=[^ ]+ \\(([0-9]+)B\\)");
+ std::smatch swap_match;
+ bool found = std::regex_search(output_, swap_match, swap_regex);
+ if (found != expected) {
+ EXPECT_EQ(expected, found);
+ return 0;
+ }
+
+ if (!found) {
+ return 0;
+ }
+
+ if (swap_match.size() != 2U) {
+ EXPECT_EQ(swap_match.size(), 2U);
+ return 0;
+ }
+
+ std::istringstream stream(swap_match[1].str());
+ size_t value;
+ stream >> value;
+
+ return value;
+ }
+
+ protected:
+ size_t native_alloc_1_;
+ size_t native_alloc_2_;
+
+ size_t swap_1_;
+ size_t swap_2_;
+};
+
+TEST_F(Dex2oatSwapUseTest, CheckSwapUsage) {
+ // The `native_alloc_2_ >= native_alloc_1_` assertion below may not
+ // hold true on some x86 systems when read barriers are enabled;
+ // disable this test while we investigate (b/29259363).
+ TEST_DISABLED_FOR_READ_BARRIER_ON_X86();
+
+ RunTest(false /* use_fd */,
+ false /* expect_use */);
+ GrabResult1();
+ std::string output_1 = output_;
+
+ output_ = "";
+
+ RunTest(false /* use_fd */,
+ true /* expect_use */,
+ { "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
+ GrabResult2();
+ std::string output_2 = output_;
+
+ if (native_alloc_2_ >= native_alloc_1_ || swap_1_ >= swap_2_) {
+ EXPECT_LT(native_alloc_2_, native_alloc_1_);
+ EXPECT_LT(swap_1_, swap_2_);
+
+ LOG(ERROR) << output_1;
+ LOG(ERROR) << output_2;
+ }
+}
+
class Dex2oatVeryLargeTest : public Dex2oatTest {
protected:
void CheckFilter(CompilerFilter::Filter input ATTRIBUTE_UNUSED,
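
For reference, the ParseNativeAlloc/ParseSwap regexes above expect the dex2oat timing line to report each size as "<human-readable> (<bytes>B)"; a line of roughly this shape (illustrative, not verbatim dex2oat output):

    dex2oat took 1.204s (threads: 4) ... native alloc=3MB (3145728B) ... swap=4MB (4194304B)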
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index 6905f88..778fe8e 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -59,11 +59,10 @@
LOCAL_SRC_FILES := $$(LIBART_DISASSEMBLER_SRC_FILES)
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
$(call set-target-local-cflags-vars,$(2))
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
ifeq ($$(art_ndebug_or_debug),debug)
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index c410cd9..4f0e144 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -329,7 +329,7 @@
}
if (rn.r == 9) {
args << " ; ";
- Thread::DumpThreadOffset<4>(args, offset);
+ Thread::DumpThreadOffset<kArmPointerSize>(args, offset);
}
}
}
@@ -941,17 +941,11 @@
opcode << (op != 0 ? "vsqrt" : "vneg") << (S != 0 ? ".f64" : ".f32");
args << d << ", " << m;
} else if (op5 == 4) {
- opcode << "vcmp" << (S != 0 ? ".f64" : ".f32");
+ opcode << "vcmp" << ((op != 0) ? "e" : "") << (S != 0 ? ".f64" : ".f32");
args << d << ", " << m;
- if (op != 0) {
- args << " (quiet nan)";
- }
} else if (op5 == 5) {
- opcode << "vcmpe" << (S != 0 ? ".f64" : ".f32");
+ opcode << "vcmp" << ((op != 0) ? "e" : "") << (S != 0 ? ".f64" : ".f32");
args << d << ", #0.0";
- if (op != 0) {
- args << " (quiet nan)";
- }
if ((instr & 0x2f) != 0) {
args << " (UNPREDICTABLE)";
}
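
Both branches now derive the trailing "e" from the op bit instead of appending a "(quiet nan)" note: in VFP terms, vcmpe raises Invalid Operation on any NaN input, while plain vcmp does so only for signaling NaNs. Illustrative output for the two settings of the op bit:

    vcmp.f32  s0, s1     @ op == 0: quiet NaNs compare silently
    vcmpe.f32 s0, s1     @ op != 0: raises Invalid Operation on any NaN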
@@ -1407,7 +1401,7 @@
args << Rt << ", [" << Rn << ", #" << (U != 0u ? "" : "-") << imm12 << "]";
if (Rn.r == TR && is_load) {
args << " ; ";
- Thread::DumpThreadOffset<4>(args, imm12);
+ Thread::DumpThreadOffset<kArmPointerSize>(args, imm12);
} else if (Rn.r == PC) {
T2LitType lit_type[] = {
kT2LitUByte, kT2LitUHalf, kT2LitHexWord, kT2LitInvalid,
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index a93f7d5..0ef9025 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -102,7 +102,7 @@
if (instr->GetRn() == TR) {
int64_t offset = instr->GetImmLSUnsigned() << instr->GetSizeLS();
std::ostringstream tmp_stream;
- Thread::DumpThreadOffset<8>(tmp_stream, static_cast<uint32_t>(offset));
+ Thread::DumpThreadOffset<kArm64PointerSize>(tmp_stream, static_cast<uint32_t>(offset));
AppendToOutput(" ; %s", tmp_stream.str().c_str());
}
}
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 769263e..3448878 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -504,9 +504,9 @@
if (rs == 17) {
args << " ; ";
if (is64bit_) {
- Thread::DumpThreadOffset<8>(args, offset);
+ Thread::DumpThreadOffset<kMips64PointerSize>(args, offset);
} else {
- Thread::DumpThreadOffset<4>(args, offset);
+ Thread::DumpThreadOffset<kMipsPointerSize>(args, offset);
}
}
}
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 7f6a7ba..147e0b1 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -597,7 +597,7 @@
opcode1 = "roundss";
prefix[2] = 0;
has_modrm = true;
- store = true;
+ load = true;
src_reg_file = SSE;
dst_reg_file = SSE;
immediate_bytes = 1;
@@ -606,7 +606,7 @@
opcode1 = "roundsd";
prefix[2] = 0;
has_modrm = true;
- store = true;
+ load = true;
src_reg_file = SSE;
dst_reg_file = SSE;
immediate_bytes = 1;
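
Flipping store to load matches the data flow of ROUNDSS/ROUNDSD: the ModRM reg field is the destination and r/m is the source, so AT&T output must print reg last, as the new assembler tests above expect. Illustrative encodings per the Intel SDM (ModRM 0xC1 selects reg=xmm0, rm=xmm1):

    66 0F 3A 0A C1 01    roundss $1, %xmm1, %xmm0
    66 0F 3A 0B C1 01    roundsd $1, %xmm1, %xmm0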
@@ -1409,11 +1409,11 @@
}
if (prefix[1] == kFs && !supports_rex_) {
args << " ; ";
- Thread::DumpThreadOffset<4>(args, address_bits);
+ Thread::DumpThreadOffset<kX86PointerSize>(args, address_bits);
}
if (prefix[1] == kGs && supports_rex_) {
args << " ; ";
- Thread::DumpThreadOffset<8>(args, address_bits);
+ Thread::DumpThreadOffset<kX86_64PointerSize>(args, address_bits);
}
const char* prefix_str;
switch (prefix[0]) {
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f5669d7..21a0ca0 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -285,7 +285,7 @@
const backtrace_map_t& boot_map)
SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& os = *os_;
- const size_t pointer_size = InstructionSetPointerSize(
+ const PointerSize pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
std::string file_name =
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 64349b5..a0def61 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -76,6 +76,7 @@
"kCalleeSaveMethod",
"kRefsOnlySaveMethod",
"kRefsAndArgsSaveMethod",
+ "kSaveEverythingMethod",
};
const char* image_roots_descriptions_[] = {
@@ -1823,7 +1824,7 @@
}
ScopedIndentation indent1(&state->vios_);
DumpFields(os, obj, obj_class);
- const size_t image_pointer_size = state->image_header_.GetPointerSize();
+ const PointerSize image_pointer_size = state->image_header_.GetPointerSize();
if (obj->IsObjectArray()) {
auto* obj_array = obj->AsObjectArray<mirror::Object>();
for (int32_t i = 0, length = obj_array->GetLength(); i < length; i++) {
@@ -1968,7 +1969,7 @@
DCHECK(method != nullptr);
const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
- const size_t pointer_size = image_header_.GetPointerSize();
+ const PointerSize pointer_size = image_header_.GetPointerSize();
OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 63dc476..db97055 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -104,11 +104,13 @@
// We must set --android-root.
int link[2];
if (pipe(link) == -1) {
+ *error_msg = strerror(errno);
return false;
}
const pid_t pid = fork();
if (pid == -1) {
+ *error_msg = strerror(errno);
return false;
}
@@ -116,10 +118,19 @@
dup2(link[1], STDOUT_FILENO);
close(link[0]);
close(link[1]);
- bool res = ::art::Exec(exec_argv, error_msg);
- // Delete the runtime to prevent memory leaks and please valgrind.
- delete Runtime::Current();
- exit(res ? 0 : 1);
+ // Change process groups, so we don't get reaped by ProcessManager.
+ setpgid(0, 0);
+ // Use execv here rather than art::Exec, to avoid blocking on waitpid.
+ std::vector<char*> argv;
+ for (size_t i = 0; i < exec_argv.size(); ++i) {
+ argv.push_back(const_cast<char*>(exec_argv[i].c_str()));
+ }
+ argv.push_back(nullptr);
+ UNUSED(execv(argv[0], &argv[0]));
+ const std::string command_line(Join(exec_argv, ' '));
+ PLOG(ERROR) << "Failed to execv(" << command_line << ")";
+ // _exit to avoid atexit handlers in child.
+ _exit(1);
} else {
close(link[1]);
static const size_t kLineMax = 256;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 569c5e9..9432384 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -489,13 +489,13 @@
};
void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
PatchOatArtMethodVisitor visitor(this);
image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
}
void PatchOat::PatchImTables(const ImageHeader* image_header) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
// We can safely walk target image since the conflict tables are independent.
image_header->VisitPackedImTables(
[this](ArtMethod* method) {
@@ -506,7 +506,7 @@
}
void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
// We can safely walk target image since the conflict tables are independent.
image_header->VisitPackedImtConflictTables(
[this](ArtMethod* method) {
@@ -584,7 +584,7 @@
void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
img_roots->Get(ImageHeader::kDexCaches));
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
for (size_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
auto* orig_dex_cache = dex_caches->GetWithoutChecks(i);
auto* copy_dex_cache = RelocatedCopyOf(orig_dex_cache);
@@ -705,7 +705,7 @@
PatchOat::PatchVisitor visitor(this, copy);
object->VisitReferences<kVerifyNone>(visitor, visitor);
if (object->IsClass<kVerifyNone>()) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
mirror::Class* klass = object->AsClass();
mirror::Class* copy_klass = down_cast<mirror::Class*>(copy);
RelocatedPointerVisitor native_visitor(this);
@@ -736,7 +736,7 @@
}
void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) {
- const size_t pointer_size = InstructionSetPointerSize(isa_);
+ const PointerSize pointer_size = InstructionSetPointerSize(isa_);
copy->CopyFrom(object, pointer_size);
// Just update the entry points if it looks like we should.
// TODO: sanity check all the pointers' values
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 61ec695..64efea9d 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -18,6 +18,7 @@
#define ART_PATCHOAT_PATCHOAT_H_
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "elf_file.h"
@@ -168,7 +169,7 @@
}
auto ret = reinterpret_cast<uintptr_t>(obj) + delta_;
// Trim off high bits in case negative relocation with 64 bit patchoat.
- if (InstructionSetPointerSize(isa_) == sizeof(uint32_t)) {
+ if (Is32BitISA()) {
ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret));
}
return reinterpret_cast<T*>(ret);
@@ -181,12 +182,16 @@
}
T ret = obj + delta_;
// Trim off high bits in case negative relocation with 64 bit patchoat.
- if (InstructionSetPointerSize(isa_) == 4) {
+ if (Is32BitISA()) {
ret = static_cast<T>(static_cast<uint32_t>(ret));
}
return ret;
}
+ bool Is32BitISA() const {
+ return InstructionSetPointerSize(isa_) == PointerSize::k32;
+ }
+
// Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
// change the heap.
class PatchVisitor {
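The new Is32BitISA() helper names the truncation both relocation helpers above need: when a 64-bit patchoat applies a negative delta to a 32-bit image, the addition wraps in 64 bits and the high bits must be discarded to recover the 32-bit address. A small self-contained illustration with made-up addresses:

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t obj = 0x10000000u;
      intptr_t delta = -0x20000000;  // negative relocation
      uintptr_t ret = obj + delta;   // 0xFFFFFFFFF0000000 on a 64-bit host
      // For a 32-bit ISA, keep only the low 32 bits.
      ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret));
      assert(ret == 0xF0000000u);
      return 0;
    }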
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 99c4a82..9c813e2 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -342,6 +342,7 @@
LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
arch/instruction_set.h \
base/allocator.h \
+ base/enums.h \
base/mutex.h \
debugger.h \
base/unix_file/fd_file.h \
@@ -497,14 +498,13 @@
# Clang usage
ifeq ($$(art_target_or_host),target)
- $$(eval $$(call set-target-local-clang-vars))
+ $$(eval LOCAL_CLANG := $$(ART_TARGET_CLANG))
$$(eval $$(call set-target-local-cflags-vars,$(2)))
- LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
+ LOCAL_ASFLAGS_arm += -no-integrated-as
LOCAL_CFLAGS_$(DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
LOCAL_CFLAGS_$(2ND_DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)"
else # host
LOCAL_CLANG := $$(ART_HOST_CLANG)
- LOCAL_LDLIBS := $$(ART_HOST_LDLIBS)
LOCAL_LDLIBS += -ldl -lpthread
ifeq ($$(HOST_OS),linux)
LOCAL_LDLIBS += -lrt
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index ee31c58..6d80eb6 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -69,7 +69,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace arm
namespace arm64 {
#include "arch/arm64/asm_support_arm64.h"
@@ -79,7 +81,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace arm64
namespace mips {
#include "arch/mips/asm_support_mips.h"
@@ -89,7 +93,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace mips
namespace mips64 {
#include "arch/mips64/asm_support_mips64.h"
@@ -99,7 +105,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace mips64
namespace x86 {
#include "arch/x86/asm_support_x86.h"
@@ -109,7 +117,9 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace x86
namespace x86_64 {
#include "arch/x86_64/asm_support_x86_64.h"
@@ -119,13 +129,18 @@
#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
static constexpr size_t kFrameSizeRefsAndArgsCalleeSave = FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE;
#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
-}
+static constexpr size_t kFrameSizeSaveEverythingCalleeSave = FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE;
+#undef FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE
+} // namespace x86_64
// Check architecture specific constants are sound.
TEST_F(ArchTest, ARM) {
CheckFrameSize(InstructionSet::kArm, Runtime::kSaveAll, arm::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kArm, Runtime::kRefsOnly, arm::kFrameSizeRefsOnlyCalleeSave);
CheckFrameSize(InstructionSet::kArm, Runtime::kRefsAndArgs, arm::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kArm,
+ Runtime::kSaveEverything,
+ arm::kFrameSizeSaveEverythingCalleeSave);
}
@@ -134,33 +149,51 @@
CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsOnly, arm64::kFrameSizeRefsOnlyCalleeSave);
CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsAndArgs,
arm64::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kArm64,
+ Runtime::kSaveEverything,
+ arm64::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, MIPS) {
CheckFrameSize(InstructionSet::kMips, Runtime::kSaveAll, mips::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kMips, Runtime::kRefsOnly, mips::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kMips, Runtime::kRefsAndArgs,
+ CheckFrameSize(InstructionSet::kMips,
+ Runtime::kRefsAndArgs,
mips::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kMips,
+ Runtime::kSaveEverything,
+ mips::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, MIPS64) {
CheckFrameSize(InstructionSet::kMips64, Runtime::kSaveAll, mips64::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kMips64, Runtime::kRefsOnly, mips64::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kMips64, Runtime::kRefsAndArgs,
+ CheckFrameSize(InstructionSet::kMips64,
+ Runtime::kRefsAndArgs,
mips64::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kMips64,
+ Runtime::kSaveEverything,
+ mips64::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, X86) {
CheckFrameSize(InstructionSet::kX86, Runtime::kSaveAll, x86::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kX86, Runtime::kRefsOnly, x86::kFrameSizeRefsOnlyCalleeSave);
CheckFrameSize(InstructionSet::kX86, Runtime::kRefsAndArgs, x86::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kX86,
+ Runtime::kSaveEverything,
+ x86::kFrameSizeSaveEverythingCalleeSave);
}
TEST_F(ArchTest, X86_64) {
CheckFrameSize(InstructionSet::kX86_64, Runtime::kSaveAll, x86_64::kFrameSizeSaveAllCalleeSave);
CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsOnly, x86_64::kFrameSizeRefsOnlyCalleeSave);
- CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsAndArgs,
+ CheckFrameSize(InstructionSet::kX86_64,
+ Runtime::kRefsAndArgs,
x86_64::kFrameSizeRefsAndArgsCalleeSave);
+ CheckFrameSize(InstructionSet::kX86_64,
+ Runtime::kSaveEverything,
+ x86_64::kFrameSizeSaveEverythingCalleeSave);
}
} // namespace art
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 1fa566b..67f6f7a 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -22,6 +22,7 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 112
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 112
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 192
// Flag for enabling R4 optimization in arm runtime
// #define ARM_R4_SUSPEND_FLAG
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 966587d..0e2a672 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -152,8 +152,6 @@
qpoints->pReadBarrierMarkReg27 = nullptr;
qpoints->pReadBarrierMarkReg28 = nullptr;
qpoints->pReadBarrierMarkReg29 = nullptr;
- qpoints->pReadBarrierMarkReg30 = nullptr;
- qpoints->pReadBarrierMarkReg31 = nullptr;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
}
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index d105c67..befdd48 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -20,6 +20,7 @@
#include <sys/ucontext.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/hex_dump.h"
#include "globals.h"
@@ -144,7 +145,8 @@
void* context) {
// These are the instructions to check for. The first one is the ldr r0,[r9,#xxx]
// where xxx is the offset of the suspend trigger.
- uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
+ uint32_t checkinst1 = 0xf8d90000
+ + Thread::ThreadSuspendTriggerOffset<PointerSize::k32>().Int32Value();
uint16_t checkinst2 = 0x6800;
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 34d3158..42418ad 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -168,6 +168,66 @@
.cfi_adjust_cfa_offset -40
.endm
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME rTemp
+ push {r0-r12, lr} @ 14 words of callee saves and args.
+ .cfi_adjust_cfa_offset 56
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ .cfi_rel_offset r4, 16
+ .cfi_rel_offset r5, 20
+ .cfi_rel_offset r6, 24
+ .cfi_rel_offset r7, 28
+ .cfi_rel_offset r8, 32
+ .cfi_rel_offset r9, 36
+ .cfi_rel_offset r10, 40
+ .cfi_rel_offset r11, 44
+ .cfi_rel_offset ip, 48
+ .cfi_rel_offset lr, 52
+ vpush {s0-s31} @ 32 words of float args.
+ .cfi_adjust_cfa_offset 128
+ sub sp, #8 @ 2 words of space, alignment padding and Method*
+ .cfi_adjust_cfa_offset 8
+ RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp.
+ @ Load kSaveEverything Method* to rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET]
+ str \rTemp, [sp, #0] @ Store kSaveEverything Method* to the bottom of the stack.
+ str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 56 + 128 + 8)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#endif
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ add sp, #8 @ rewind sp
+ .cfi_adjust_cfa_offset -8
+ vpop {s0-s31}
+ .cfi_adjust_cfa_offset -128
+ pop {r0-r12, lr} @ 14 words of callee saves and args.
+ .cfi_restore r0
+ .cfi_restore r1
+ .cfi_restore r2
+ .cfi_restore r3
+ .cfi_restore r4
+ .cfi_restore r5
+ .cfi_restore r6
+ .cfi_restore r7
+ .cfi_restore r8
+ .cfi_restore r9
+ .cfi_restore r10
+ .cfi_restore r11
+ .cfi_restore r12
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -56
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
cbnz r0, 1f @ result non-zero branch over
bx lr @ return
@@ -520,7 +579,7 @@
ldr r2, [r9, #THREAD_ID_OFFSET]
ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
mov r3, r1
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
cbnz r3, .Lnot_unlocked @ already thin locked
@ unlocked case - r1: original lock word that's zero except for the read barrier bits.
orr r2, r1, r2 @ r2 holds thread id with count of 0 with preserved read barrier bits
@@ -536,9 +595,9 @@
cbnz r2, .Lslow_lock @ lock word and self thread id's match -> recursive lock
@ else contention, go to slow path
mov r3, r1 @ copy the lock word to check count overflow.
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits.
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits.
add r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count in lock word placing in r2 to check overflow
- lsr r3, r2, LOCK_WORD_READ_BARRIER_STATE_SHIFT @ if either of the upper two bits (28-29) are set, we overflowed.
+ lsr r3, r2, #LOCK_WORD_GC_STATE_SHIFT @ if the first gc state bit is set, we overflowed.
cbnz r3, .Lslow_lock @ if we overflow the count go slow path
add r2, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count for real
strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
@@ -581,17 +640,17 @@
cbnz r2, .Lslow_unlock @ if either of the top two bits is set, go slow path
ldr r2, [r9, #THREAD_ID_OFFSET]
mov r3, r1 @ copy lock word to check thread id equality
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
eor r3, r3, r2 @ lock_word.ThreadId() ^ self->ThreadId()
uxth r3, r3 @ zero top 16 bits
cbnz r3, .Lslow_unlock @ do lock word and self thread id's match?
mov r3, r1 @ copy lock word to detect transition to unlocked
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
cmp r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
bpl .Lrecursive_thin_unlock
@ transition to unlocked
mov r3, r1
- and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK @ r3: zero except for the preserved read barrier bits
+ and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED @ r3: zero except for the preserved gc bits
dmb ish @ full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
@@ -1212,17 +1271,18 @@
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
#ifdef ARM_R4_SUSPEND_FLAG
- ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET]
- mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
- cbnz r0, 1f @ check Thread::Current()->suspend_count_ == 0
- bx lr @ return if suspend_count_ == 0
+ ldrh rSUSPEND, [rSELF, #THREAD_FLAGS_OFFSET]
+ cbnz rSUSPEND, 1f @ check Thread::Current()->suspend_count_ == 0
+ mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+ bx lr @ return if suspend_count_ == 0
1:
+ mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
#endif
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME r0 @ save everything for GC stack crawl
mov r0, rSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves for GC stack crawl
- @ TODO: save FPRs to enable access in the debugger?
- bl artTestSuspendFromCode @ (Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ bl artTestSuspendFromCode @ (Thread*)
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ bx lr
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
@@ -1772,6 +1832,20 @@
*/
.macro READ_BARRIER_MARK_REG name, reg
ENTRY \name
+ // Null check so that we can load the lock word.
+ cmp \reg, #0
+ beq .Lret_rb_\name
+ // Check the lock word for the mark bit; if marked, return.
+ push {r0}
+ ldr r0, [\reg, MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ and r0, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
+ cbz r0, .Lslow_rb_\name
+ // Restore r0 and return.
+ pop {r0}
+ bx lr
+
+.Lslow_rb_\name:
+ pop {r0}
push {r0-r4, r9, r12, lr} @ save return address and core caller-save registers
.cfi_adjust_cfa_offset 32
.cfi_rel_offset r0, 0
@@ -1831,6 +1905,8 @@
.endif
.endif
pop {r0-r4, r9, r12, pc} @ restore caller-save registers and return
+.Lret_rb_\name:
+ bx lr
END \name
.endm
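Several hunks in this file test bits of the object's lock word inline: the locking fast paths mask off the gc-state bits before counting, and the new READ_BARRIER_MARK_REG prologue returns early for null or already-marked references. A C++ sketch of those two tests, with an assumed bit layout (the authoritative constants live in runtime/lock_word.h; the shifts below are illustrative):

    #include <cstdint>

    constexpr uint32_t kGcStateShift = 28;            // assumed
    constexpr uint32_t kGcStateMaskShifted = 3u << kGcStateShift;
    constexpr uint32_t kGcStateMaskShiftedToggled = ~kGcStateMaskShifted;
    constexpr uint32_t kMarkBitShift = 29;            // assumed
    constexpr uint32_t kThinLockCountOne = 1u << 16;  // assumed count position

    // Recursive thin lock: increment the count; if the increment carries
    // into the gc-state bits, the count overflowed and we go slow path.
    bool ThinLockCountOverflows(uint32_t lock_word) {
      uint32_t counted =
          (lock_word & kGcStateMaskShiftedToggled) + kThinLockCountOne;
      return (counted >> kGcStateShift) != 0;
    }

    // READ_BARRIER_MARK_REG fast path: a set mark bit means no work to do.
    bool IsMarked(uint32_t lock_word) {
      return (lock_word & (1u << kMarkBitShift)) != 0;
    }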
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 5580ee4..c474d2e 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -34,6 +34,9 @@
(1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
static constexpr uint32_t kArmCalleeSaveAllSpills =
(1 << art::arm::R4) | (1 << art::arm::R9);
+static constexpr uint32_t kArmCalleeSaveEverythingSpills =
+ (1 << art::arm::R0) | (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3) |
+ (1 << art::arm::R4) | (1 << art::arm::R9) | (1 << art::arm::R12);
static constexpr uint32_t kArmCalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kArmCalleeSaveFpRefSpills = 0;
@@ -47,23 +50,27 @@
(1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
(1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
(1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
+static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
+ kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
constexpr uint32_t ArmCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
}
constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kArmCalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t ArmCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kArmPointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
@@ -75,17 +82,17 @@
constexpr size_t ArmCalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
(POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
- POPCOUNT(ArmCalleeSaveFpSpills(type))) * kArmPointerSize;
+ POPCOUNT(ArmCalleeSaveFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
}
constexpr size_t ArmCalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
- POPCOUNT(ArmCalleeSaveCoreSpills(type)) * kArmPointerSize;
+ POPCOUNT(ArmCalleeSaveCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
}
constexpr size_t ArmCalleeSaveLrOffset(Runtime::CalleeSaveType type) {
return ArmCalleeSaveFrameSize(type) -
- POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * kArmPointerSize;
+ POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * static_cast<size_t>(kArmPointerSize);
}
} // namespace arm
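As a sanity check, the frame-size formula above reproduces the FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE value of 192 declared in asm_support_arm.h: 14 core registers (LR, six reference spills, which this hunk does not show but would be r5-r8, r10 and r11, plus r0-r4, r9 and r12 from the new everything mask), 32 single-precision FP registers, and one Method* slot, each 4 bytes, rounded up to the 16-byte stack alignment. In code form:

    #include <cstddef>

    constexpr size_t RoundUp(size_t x, size_t n) {
      return ((x + n - 1) / n) * n;
    }

    constexpr size_t kArmSaveEverythingFrameSize =
        RoundUp((14 /* gprs */ + 32 /* fprs */ + 1 /* Method* */) * 4u, 16u);

    static_assert(kArmSaveEverythingFrameSize == 192,
                  "matches FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (ARM)");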
diff --git a/runtime/arch/arm/thread_arm.cc b/runtime/arch/arm/thread_arm.cc
index 2a551a8..ff4f81b 100644
--- a/runtime/arch/arm/thread_arm.cc
+++ b/runtime/arch/arm/thread_arm.cc
@@ -17,15 +17,16 @@
#include "thread.h"
#include "asm_support_arm.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<4>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<4>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<4>().Int32Value());
- CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 989ecc6..68d12e9 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -22,5 +22,6 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 96
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 224
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 512
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 2e5f5ad..cc5bf29 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -124,6 +124,15 @@
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
+ // ARM64 is the architecture with the largest number of core
+ // registers (32) that supports the read barrier configuration.
+ // Because registers 30 (LR) and 31 (SP/XZR) cannot be used to pass
+ // arguments, only define ReadBarrierMarkRegX entrypoints for the
+ // first 30 registers. This limitation is not a problem on other
+ // supported architectures (ARM, x86 and x86-64) either, as they
+ // have less core registers (resp. 16, 8 and 16). (We may have to
+ // revise that design choice if read barrier support is added for
+ // MIPS and/or MIPS64.)
qpoints->pReadBarrierMarkReg00 = art_quick_read_barrier_mark_reg00;
qpoints->pReadBarrierMarkReg01 = art_quick_read_barrier_mark_reg01;
qpoints->pReadBarrierMarkReg02 = art_quick_read_barrier_mark_reg02;
@@ -154,8 +163,6 @@
qpoints->pReadBarrierMarkReg27 = art_quick_read_barrier_mark_reg27;
qpoints->pReadBarrierMarkReg28 = art_quick_read_barrier_mark_reg28;
qpoints->pReadBarrierMarkReg29 = art_quick_read_barrier_mark_reg29;
- qpoints->pReadBarrierMarkReg30 = nullptr; // Cannot use register 30 (LR) to pass arguments.
- qpoints->pReadBarrierMarkReg31 = nullptr; // Cannot use register 31 (SP/XZR) to pass arguments.
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index f591fcc..6724d6d 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -20,6 +20,7 @@
#include <sys/ucontext.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
@@ -117,7 +118,8 @@
void* context) {
// These are the instructions to check for. The first one is the ldr x0,[r18,#xxx]
// where xxx is the offset of the suspend trigger.
- uint32_t checkinst1 = 0xf9400240 | (Thread::ThreadSuspendTriggerOffset<8>().Int32Value() << 7);
+ uint32_t checkinst1 = 0xf9400240 |
+ (Thread::ThreadSuspendTriggerOffset<PointerSize::k64>().Int32Value() << 7);
uint32_t checkinst2 = 0xf9400000;
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6173ae7..415bb71 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -316,6 +316,204 @@
.cfi_adjust_cfa_offset -224
.endm
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ sub sp, sp, #512
+ .cfi_adjust_cfa_offset 512
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 512)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#endif
+
+ // Save FP registers.
+ stp d0, d1, [sp, #8]
+ stp d2, d3, [sp, #24]
+ stp d4, d5, [sp, #40]
+ stp d6, d7, [sp, #56]
+ stp d8, d9, [sp, #72]
+ stp d10, d11, [sp, #88]
+ stp d12, d13, [sp, #104]
+ stp d14, d15, [sp, #120]
+ stp d16, d17, [sp, #136]
+ stp d18, d19, [sp, #152]
+ stp d20, d21, [sp, #168]
+ stp d22, d23, [sp, #184]
+ stp d24, d25, [sp, #200]
+ stp d26, d27, [sp, #216]
+ stp d28, d29, [sp, #232]
+ stp d30, d31, [sp, #248]
+
+ // Save core registers.
+ str x0, [sp, #264]
+ .cfi_rel_offset x0, 264
+
+ stp x1, x2, [sp, #272]
+ .cfi_rel_offset x1, 272
+ .cfi_rel_offset x2, 280
+
+ stp x3, x4, [sp, #288]
+ .cfi_rel_offset x3, 288
+ .cfi_rel_offset x4, 296
+
+ stp x5, x6, [sp, #304]
+ .cfi_rel_offset x5, 304
+ .cfi_rel_offset x6, 312
+
+ stp x7, x8, [sp, #320]
+ .cfi_rel_offset x7, 320
+ .cfi_rel_offset x8, 328
+
+ stp x9, x10, [sp, #336]
+ .cfi_rel_offset x9, 336
+ .cfi_rel_offset x10, 344
+
+ stp x11, x12, [sp, #352]
+ .cfi_rel_offset x11, 352
+ .cfi_rel_offset x12, 360
+
+ stp x13, x14, [sp, #368]
+ .cfi_rel_offset x13, 368
+ .cfi_rel_offset x14, 376
+
+ stp x15, x16, [sp, #384]
+ .cfi_rel_offset x15, 384
+ .cfi_rel_offset x16, 392
+
+ stp x17, x18, [sp, #400]
+ .cfi_rel_offset x17, 400
+ .cfi_rel_offset x18, 408
+
+ stp x19, x20, [sp, #416]
+ .cfi_rel_offset x19, 416
+ .cfi_rel_offset x20, 424
+
+ stp x21, x22, [sp, #432]
+ .cfi_rel_offset x21, 432
+ .cfi_rel_offset x22, 440
+
+ stp x23, x24, [sp, #448]
+ .cfi_rel_offset x23, 448
+ .cfi_rel_offset x24, 456
+
+ stp x25, x26, [sp, #464]
+ .cfi_rel_offset x25, 464
+ .cfi_rel_offset x26, 472
+
+ stp x27, x28, [sp, #480]
+ .cfi_rel_offset x27, 480
+ .cfi_rel_offset x28, 488
+
+ stp x29, xLR, [sp, #496]
+ .cfi_rel_offset x29, 496
+ .cfi_rel_offset x30, 504
+
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
+
+ ldr xIP0, [xIP0] // xIP0 = art::Runtime::instance_ (an art::Runtime*).
+
+ // Load the kSaveEverything callee-save method:
+ // xIP0 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kSaveEverything].
+ ldr xIP0, [xIP0, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET]
+
+ // Store ArtMethod* Runtime::callee_save_methods_[kSaveEverything].
+ str xIP0, [sp]
+ // Place sp in Thread::Current()->top_quick_frame.
+ mov xIP0, sp
+ str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ // Restore FP registers.
+ ldp d0, d1, [sp, #8]
+ ldp d2, d3, [sp, #24]
+ ldp d4, d5, [sp, #40]
+ ldp d6, d7, [sp, #56]
+ ldp d8, d9, [sp, #72]
+ ldp d10, d11, [sp, #88]
+ ldp d12, d13, [sp, #104]
+ ldp d14, d15, [sp, #120]
+ ldp d16, d17, [sp, #136]
+ ldp d18, d19, [sp, #152]
+ ldp d20, d21, [sp, #168]
+ ldp d22, d23, [sp, #184]
+ ldp d24, d25, [sp, #200]
+ ldp d26, d27, [sp, #216]
+ ldp d28, d29, [sp, #232]
+ ldp d30, d31, [sp, #248]
+
+ // Restore core registers.
+ ldr x0, [sp, #264]
+ .cfi_restore x0
+
+ ldp x1, x2, [sp, #272]
+ .cfi_restore x1
+ .cfi_restore x2
+
+ ldp x3, x4, [sp, #288]
+ .cfi_restore x3
+ .cfi_restore x4
+
+ ldp x5, x6, [sp, #304]
+ .cfi_restore x5
+ .cfi_restore x6
+
+ ldp x7, x8, [sp, #320]
+ .cfi_restore x7
+ .cfi_restore x8
+
+ ldp x9, x10, [sp, #336]
+ .cfi_restore x9
+ .cfi_restore x10
+
+ ldp x11, x12, [sp, #352]
+ .cfi_restore x11
+ .cfi_restore x12
+
+ ldp x13, x14, [sp, #368]
+ .cfi_restore x13
+ .cfi_restore x14
+
+ ldp x15, x16, [sp, #384]
+ .cfi_restore x15
+ .cfi_restore x16
+
+ ldp x17, x18, [sp, #400]
+ .cfi_restore x17
+ .cfi_restore x18
+
+ ldp x19, x20, [sp, #416]
+ .cfi_restore x19
+ .cfi_restore x20
+
+ ldp x21, x22, [sp, #432]
+ .cfi_restore x21
+ .cfi_restore x22
+
+ ldp x23, x24, [sp, #448]
+ .cfi_restore x23
+ .cfi_restore x24
+
+ ldp x25, x26, [sp, #464]
+ .cfi_restore x25
+ .cfi_restore x26
+
+ ldp x27, x28, [sp, #480]
+ .cfi_restore x27
+ .cfi_restore x28
+
+ ldp x29, xLR, [sp, #496]
+ .cfi_restore x29
+ .cfi_restore x30
+
+ add sp, sp, #512
+ .cfi_adjust_cfa_offset -512
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
cbnz x0, 1f // result non-zero branch over
ret // return
@@ -1090,7 +1288,7 @@
ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
ldxr w1, [x4]
mov x3, x1
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
cbnz w3, .Lnot_unlocked // already thin locked
// unlocked case - x1: original lock word that's zero except for the read barrier bits.
orr x2, x1, x2 // x2 holds thread id with count of 0 with preserved read barrier bits
@@ -1106,9 +1304,9 @@
cbnz w2, .Lslow_lock // lock word and self thread id's match -> recursive lock
// else contention, go to slow path
mov x3, x1 // copy the lock word to check count overflow.
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits.
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits.
add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count in lock word placing in w2 to check overflow
- lsr w3, w2, LOCK_WORD_READ_BARRIER_STATE_SHIFT // if either of the upper two bits (28-29) are set, we overflowed.
+ lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT // if the first gc state bit is set, we overflowed.
cbnz w3, .Lslow_lock // if we overflow the count go slow path
add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count for real
stxr w3, w2, [x4]
@@ -1152,17 +1350,17 @@
cbnz w2, .Lslow_unlock // if either of the top two bits is set, go slow path
ldr w2, [xSELF, #THREAD_ID_OFFSET]
mov x3, x1 // copy lock word to check thread id equality
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
eor w3, w3, w2 // lock_word.ThreadId() ^ self->ThreadId()
uxth w3, w3 // zero top 16 bits
cbnz w3, .Lslow_unlock // do lock word and self thread id's match?
mov x3, x1 // copy lock word to detect transition to unlocked
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
bpl .Lrecursive_thin_unlock
// transition to unlocked
mov x3, x1
- and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK // w3: zero except for the preserved read barrier bits
+ and w3, w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED // w3: zero except for the preserved gc bits
dmb ish // full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
str w3, [x4]
@@ -1276,8 +1474,18 @@
* name mismatch between instructions. This macro uses the lower 32b of the register when possible.
* TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
*/
-.macro READ_BARRIER xDest, wDest, xObj, offset
+.macro READ_BARRIER xDest, wDest, xObj, xTemp, wTemp, offset, number
#ifdef USE_READ_BARRIER
+#ifdef USE_BAKER_READ_BARRIER
+ ldr \wTemp, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tbnz \wTemp, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, .Lrb_slowpath\number
+ // False dependency to avoid needing load/load fence.
+ add \xObj, \xObj, \xTemp, lsr #32
+ ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
+ UNPOISON_HEAP_REF \wDest
+ b .Lrb_exit\number
+#endif
+.Lrb_slowpath\number:
// Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
stp x0, x1, [sp, #-48]!
.cfi_adjust_cfa_offset 48
@@ -1311,6 +1519,7 @@
.cfi_restore x30
add sp, sp, #48
.cfi_adjust_cfa_offset -48
+.Lrb_exit\number:
#else
ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
UNPOISON_HEAP_REF \wDest
@@ -1349,12 +1558,12 @@
#endif
ENTRY art_quick_aput_obj
cbz x2, .Ldo_aput_null
- READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
- // This also zero-extends to x3
- READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
- // This also zero-extends to x4
- READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b
- // This also zero-extends to x3
+ READ_BARRIER x3, w3, x0, x3, w3, MIRROR_OBJECT_CLASS_OFFSET, 0 // Heap reference = 32b
+ // This also zero-extends to x3
+ READ_BARRIER x3, w3, x3, x4, w4, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 1 // Heap reference = 32b
+ // This also zero-extends to x3
+ READ_BARRIER x4, w4, x2, x4, w4, MIRROR_OBJECT_CLASS_OFFSET, 2 // Heap reference = 32b
+ // This also zero-extends to x4
cmp w3, w4 // value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
@@ -1780,12 +1989,20 @@
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // Read barrier for class load.
+
+ // Most common case: GC is not marking.
ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz x3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
+ cbnz x3, .Lart_quick_alloc_object_region_tlab_marking
+.Lart_quick_alloc_object_region_tlab_do_allocation:
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
+.Lart_quick_alloc_object_region_tlab_marking:
+ // GC is marking, check the lock word of the class for the mark bit.
+ // If the class is null, go slow path. The check is required to read the lock word.
+ cbz w2, .Lart_quick_alloc_object_region_tlab_slow_path
+ // Class is not null, check mark bit in lock word.
+ ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ // If the bit is not zero, do the allocation.
+ tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_alloc_object_region_tlab_do_allocation
// The read barrier slow path. Mark
// the class.
stp x0, x1, [sp, #-32]! // Save registers (x0, x1, lr).
@@ -1796,7 +2013,7 @@
ldp x0, x1, [sp, #0] // Restore registers.
ldr xLR, [sp, #16]
add sp, sp, #32
- b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ b .Lart_quick_alloc_object_region_tlab_do_allocation
.Lart_quick_alloc_object_region_tlab_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
@@ -1810,14 +2027,11 @@
*/
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
- ldrh w0, [xSELF, #THREAD_FLAGS_OFFSET] // get xSELF->state_and_flags.as_struct.flags
- cbnz w0, .Lneed_suspend // check flags == 0
- ret // return if flags == 0
-.Lneed_suspend:
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // save callee saves for stack crawl
mov x0, xSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
bl artTestSuspendFromCode // (Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ ret
END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
@@ -2254,6 +2468,8 @@
*/
.macro READ_BARRIER_MARK_REG name, wreg, xreg
ENTRY \name
+ // Reference is null, no work to do at all.
+ cbz \wreg, .Lret_rb_\name
/*
* Allocate 46 stack slots * 8 = 368 bytes:
* - 20 slots for core registers X0-X19
@@ -2261,6 +2477,11 @@
* - 1 slot for return address register XLR
* - 1 padding slot for 16-byte stack alignment
*/
+ // Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
+ ldr wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_path_rb_\name
+ ret
+.Lslow_path_rb_\name:
// Save all potentially live caller-save core registers.
stp x0, x1, [sp, #-368]!
.cfi_adjust_cfa_offset 368
@@ -2349,6 +2570,7 @@
.cfi_restore x30
add sp, sp, #368
.cfi_adjust_cfa_offset -368
+.Lret_rb_\name:
ret
END \name
.endm
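The USE_BAKER_READ_BARRIER branch of the READ_BARRIER macro deserves a note: it loads the lock word, bails to the slow path when the read-barrier (gray) bit is set, and otherwise loads the field through an address that artificially depends on the lock word (add \xObj, \xObj, \xTemp, lsr #32 adds zero, since \xTemp holds a zero-extended 32-bit load). That false dependency orders the two loads without a load/load fence. A C++ rendering of the control flow, with assumed offsets and bit positions:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr int kReadBarrierStateShift = 28;  // assumed gray-bit position
    constexpr size_t kLockWordOffset = 4;       // assumed lock word offset

    // Stand-in for the runtime marking path; elided in this sketch.
    uint32_t SlowPathLoad(const uint8_t* obj, size_t offset);

    uint32_t BakerReadBarrierLoad(const uint8_t* obj, size_t offset) {
      uint32_t lock_word;
      std::memcpy(&lock_word, obj + kLockWordOffset, sizeof(lock_word));
      if ((lock_word >> kReadBarrierStateShift) & 1u) {
        return SlowPathLoad(obj, offset);  // gray: mark before reading
      }
      // The assembly adds (lock_word >> 32), which is always zero, to the
      // base register so the field load address-depends on the lock word.
      uint32_t ref;
      std::memcpy(&ref, obj + offset, sizeof(ref));
      return ref;
    }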
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index b525309..188e46e 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -29,7 +29,7 @@
static constexpr uint32_t kArm64CalleeSaveAlwaysSpills =
// Note: ArtMethod::GetReturnPcOffsetInBytes() relies on the assumption that
// LR is always saved on the top of the frame for all targets.
- // That is, lr = *(sp + framesize - pointsize).
+ // That is, lr = *(sp + framesize - pointer_size).
(1 << art::arm64::LR);
// Callee saved registers
static constexpr uint32_t kArm64CalleeSaveRefSpills =
@@ -44,6 +44,14 @@
(1 << art::arm64::X7);
static constexpr uint32_t kArm64CalleeSaveAllSpills =
(1 << art::arm64::X19);
+static constexpr uint32_t kArm64CalleeSaveEverythingSpills =
+ (1 << art::arm64::X0) | (1 << art::arm64::X1) | (1 << art::arm64::X2) |
+ (1 << art::arm64::X3) | (1 << art::arm64::X4) | (1 << art::arm64::X5) |
+ (1 << art::arm64::X6) | (1 << art::arm64::X7) | (1 << art::arm64::X8) |
+ (1 << art::arm64::X9) | (1 << art::arm64::X10) | (1 << art::arm64::X11) |
+ (1 << art::arm64::X12) | (1 << art::arm64::X13) | (1 << art::arm64::X14) |
+ (1 << art::arm64::X15) | (1 << art::arm64::X16) | (1 << art::arm64::X17) |
+ (1 << art::arm64::X18) | (1 << art::arm64::X19);
static constexpr uint32_t kArm64CalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kArm64CalleeSaveFpRefSpills = 0;
@@ -55,23 +63,37 @@
(1 << art::arm64::D8) | (1 << art::arm64::D9) | (1 << art::arm64::D10) |
(1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
(1 << art::arm64::D14) | (1 << art::arm64::D15);
+static constexpr uint32_t kArm64CalleeSaveFpEverythingSpills =
+ (1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
+ (1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
+ (1 << art::arm64::D6) | (1 << art::arm64::D7) | (1 << art::arm64::D8) |
+ (1 << art::arm64::D9) | (1 << art::arm64::D10) | (1 << art::arm64::D11) |
+ (1 << art::arm64::D12) | (1 << art::arm64::D13) | (1 << art::arm64::D14) |
+ (1 << art::arm64::D15) | (1 << art::arm64::D16) | (1 << art::arm64::D17) |
+ (1 << art::arm64::D18) | (1 << art::arm64::D19) | (1 << art::arm64::D20) |
+ (1 << art::arm64::D21) | (1 << art::arm64::D22) | (1 << art::arm64::D23) |
+ (1 << art::arm64::D24) | (1 << art::arm64::D25) | (1 << art::arm64::D26) |
+ (1 << art::arm64::D27) | (1 << art::arm64::D28) | (1 << art::arm64::D29) |
+ (1 << art::arm64::D30) | (1 << art::arm64::D31);
constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveFpAllSpills : 0);
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kArm64PointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
@@ -83,17 +105,18 @@
constexpr size_t Arm64CalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
(POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
- POPCOUNT(Arm64CalleeSaveFpSpills(type))) * kArm64PointerSize;
+ POPCOUNT(Arm64CalleeSaveFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
}
constexpr size_t Arm64CalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
- POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * kArm64PointerSize;
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
}
constexpr size_t Arm64CalleeSaveLrOffset(Runtime::CalleeSaveType type) {
return Arm64CalleeSaveFrameSize(type) -
- POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) * kArm64PointerSize;
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) *
+ static_cast<size_t>(kArm64PointerSize);
}
} // namespace arm64
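The same arithmetic checks out for ARM64's 512-byte frame: LR, the ten reference spills (X20-X29 under the assumed mask, not shown in this hunk) and X0-X19 from the everything mask give 31 core registers, plus all 32 D registers and the Method* slot, each 8 bytes. 512 is already 16-byte aligned, so the round-up is a no-op here:

    static_assert((31 /* gprs */ + 32 /* fprs */ + 1 /* Method* */) * 8 == 512,
                  "matches FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (ARM64)");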
diff --git a/runtime/arch/arm64/thread_arm64.cc b/runtime/arch/arm64/thread_arm64.cc
index 564dced..3483b70 100644
--- a/runtime/arch/arm64/thread_arm64.cc
+++ b/runtime/arch/arm64/thread_arm64.cc
@@ -17,15 +17,16 @@
#include "thread.h"
#include "asm_support_arm64.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<8>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<8>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<8>().Int32Value());
- CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<8>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k64>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index ff9c0b3..917acc9 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -20,6 +20,7 @@
#include <iosfwd>
#include <string>
+#include "base/enums.h"
#include "base/logging.h" // Logging is required for FATAL in the helper functions.
namespace art {
@@ -53,12 +54,12 @@
#endif
// Architecture-specific pointer sizes
-static constexpr size_t kArmPointerSize = 4;
-static constexpr size_t kArm64PointerSize = 8;
-static constexpr size_t kMipsPointerSize = 4;
-static constexpr size_t kMips64PointerSize = 8;
-static constexpr size_t kX86PointerSize = 4;
-static constexpr size_t kX86_64PointerSize = 8;
+static constexpr PointerSize kArmPointerSize = PointerSize::k32;
+static constexpr PointerSize kArm64PointerSize = PointerSize::k64;
+static constexpr PointerSize kMipsPointerSize = PointerSize::k32;
+static constexpr PointerSize kMips64PointerSize = PointerSize::k64;
+static constexpr PointerSize kX86PointerSize = PointerSize::k32;
+static constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
// but ARM ELF requires 8..
@@ -82,7 +83,7 @@
InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags);
-static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
+static inline PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
switch (isa) {
case kArm:
// Fall-through.
@@ -147,8 +148,8 @@
}
}
-static inline size_t InstructionSetPointerSize(InstructionSet isa) {
- return Is64BitInstructionSet(isa) ? 8U : 4U;
+static inline PointerSize InstructionSetPointerSize(InstructionSet isa) {
+ return Is64BitInstructionSet(isa) ? PointerSize::k64 : PointerSize::k32;
}
static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
index 2f3cf18..5dfc4b4 100644
--- a/runtime/arch/instruction_set_test.cc
+++ b/runtime/arch/instruction_set_test.cc
@@ -18,6 +18,7 @@
#include <gtest/gtest.h>
+#include "base/enums.h"
#include "base/stringprintf.h"
namespace art {
@@ -49,7 +50,7 @@
}
TEST(InstructionSetTest, PointerSize) {
- EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
+ EXPECT_EQ(kRuntimePointerSize, GetInstructionSetPointerSize(kRuntimeISA));
}
} // namespace art
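The updated expectation works because GetInstructionSetPointerSize() now returns a PointerSize, which no longer compares implicitly against a raw sizeof(void*). A sketch of how kRuntimePointerSize is presumably derived in base/enums.h (the definition is not part of this patch):

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };  // assumed

    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;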
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 453056d..2ef45f5 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -22,5 +22,6 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 96
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 48
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 80
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 256
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 22efd19..09f8849 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -376,12 +376,6 @@
qpoints->pReadBarrierMarkReg29 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg29),
"Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg30 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg30),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg31 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg31),
- "Non-direct C stub marked direct.");
qpoints->pReadBarrierSlow = artReadBarrierSlow;
static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index c1b8044..b926bdf 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -277,6 +277,197 @@
.endm
/*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything).
+ * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp, $ra, $f0-$f31;
+ * 28 (GPR) + 32 (FPR) + 3 words for padding and 1 word for Method*.
+ * Clobbers $t0 and $t1.
+ * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
+ * Reserves FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack.
+ * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, -256
+ .cfi_adjust_cfa_offset 256
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 256)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#endif
+
+ sw $ra, 252($sp)
+ .cfi_rel_offset 31, 252
+ sw $fp, 248($sp)
+ .cfi_rel_offset 30, 248
+ sw $gp, 244($sp)
+ .cfi_rel_offset 28, 244
+ sw $t9, 240($sp)
+ .cfi_rel_offset 25, 240
+ sw $t8, 236($sp)
+ .cfi_rel_offset 24, 236
+ sw $s7, 232($sp)
+ .cfi_rel_offset 23, 232
+ sw $s6, 228($sp)
+ .cfi_rel_offset 22, 228
+ sw $s5, 224($sp)
+ .cfi_rel_offset 21, 224
+ sw $s4, 220($sp)
+ .cfi_rel_offset 20, 220
+ sw $s3, 216($sp)
+ .cfi_rel_offset 19, 216
+ sw $s2, 212($sp)
+ .cfi_rel_offset 18, 212
+ sw $s1, 208($sp)
+ .cfi_rel_offset 17, 208
+ sw $s0, 204($sp)
+ .cfi_rel_offset 16, 204
+ sw $t7, 200($sp)
+ .cfi_rel_offset 15, 200
+ sw $t6, 196($sp)
+ .cfi_rel_offset 14, 196
+ sw $t5, 192($sp)
+ .cfi_rel_offset 13, 192
+ sw $t4, 188($sp)
+ .cfi_rel_offset 12, 188
+ sw $t3, 184($sp)
+ .cfi_rel_offset 11, 184
+ sw $t2, 180($sp)
+ .cfi_rel_offset 10, 180
+ sw $t1, 176($sp)
+ .cfi_rel_offset 9, 176
+ sw $t0, 172($sp)
+ .cfi_rel_offset 8, 172
+ sw $a3, 168($sp)
+ .cfi_rel_offset 7, 168
+ sw $a2, 164($sp)
+ .cfi_rel_offset 6, 164
+ sw $a1, 160($sp)
+ .cfi_rel_offset 5, 160
+ sw $a0, 156($sp)
+ .cfi_rel_offset 4, 156
+ sw $v1, 152($sp)
+ .cfi_rel_offset 3, 152
+ sw $v0, 148($sp)
+ .cfi_rel_offset 2, 148
+
+ // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
+ bal 1f
+ sw $at, 144($sp)
+ .cfi_rel_offset 1, 144
+1:
+ .cpload $ra
+
+ SDu $f30, $f31, 136, $sp, $t1
+ SDu $f28, $f29, 128, $sp, $t1
+ SDu $f26, $f27, 120, $sp, $t1
+ SDu $f24, $f25, 112, $sp, $t1
+ SDu $f22, $f23, 104, $sp, $t1
+ SDu $f20, $f21, 96, $sp, $t1
+ SDu $f18, $f19, 88, $sp, $t1
+ SDu $f16, $f17, 80, $sp, $t1
+ SDu $f14, $f15, 72, $sp, $t1
+ SDu $f12, $f13, 64, $sp, $t1
+ SDu $f10, $f11, 56, $sp, $t1
+ SDu $f8, $f9, 48, $sp, $t1
+ SDu $f6, $f7, 40, $sp, $t1
+ SDu $f4, $f5, 32, $sp, $t1
+ SDu $f2, $f3, 24, $sp, $t1
+ SDu $f0, $f1, 16, $sp, $t1
+
+ # 3 words padding and 1 word for holding Method*
+
+ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
+ lw $t0, 0($t0)
+ lw $t0, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET($t0)
+ sw $t0, 0($sp) # Place Method* at bottom of stack.
+ sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+ addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
+ .cfi_adjust_cfa_offset ARG_SLOT_SIZE
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
+ .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
+
+ LDu $f30, $f31, 136, $sp, $t1
+ LDu $f28, $f29, 128, $sp, $t1
+ LDu $f26, $f27, 120, $sp, $t1
+ LDu $f24, $f25, 112, $sp, $t1
+ LDu $f22, $f23, 104, $sp, $t1
+ LDu $f20, $f21, 96, $sp, $t1
+ LDu $f18, $f19, 88, $sp, $t1
+ LDu $f16, $f17, 80, $sp, $t1
+ LDu $f14, $f15, 72, $sp, $t1
+ LDu $f12, $f13, 64, $sp, $t1
+ LDu $f10, $f11, 56, $sp, $t1
+ LDu $f8, $f9, 48, $sp, $t1
+ LDu $f6, $f7, 40, $sp, $t1
+ LDu $f4, $f5, 32, $sp, $t1
+ LDu $f2, $f3, 24, $sp, $t1
+ LDu $f0, $f1, 16, $sp, $t1
+
+ lw $ra, 252($sp)
+ .cfi_restore 31
+ lw $fp, 248($sp)
+ .cfi_restore 30
+ lw $gp, 244($sp)
+ .cfi_restore 28
+ lw $t9, 240($sp)
+ .cfi_restore 25
+ lw $t8, 236($sp)
+ .cfi_restore 24
+ lw $s7, 232($sp)
+ .cfi_restore 23
+ lw $s6, 228($sp)
+ .cfi_restore 22
+ lw $s5, 224($sp)
+ .cfi_restore 21
+ lw $s4, 220($sp)
+ .cfi_restore 20
+ lw $s3, 216($sp)
+ .cfi_restore 19
+ lw $s2, 212($sp)
+ .cfi_restore 18
+ lw $s1, 208($sp)
+ .cfi_restore 17
+ lw $s0, 204($sp)
+ .cfi_restore 16
+ lw $t7, 200($sp)
+ .cfi_restore 15
+ lw $t6, 196($sp)
+ .cfi_restore 14
+ lw $t5, 192($sp)
+ .cfi_restore 13
+ lw $t4, 188($sp)
+ .cfi_restore 12
+ lw $t3, 184($sp)
+ .cfi_restore 11
+ lw $t2, 180($sp)
+ .cfi_restore 10
+ lw $t1, 176($sp)
+ .cfi_restore 9
+ lw $t0, 172($sp)
+ .cfi_restore 8
+ lw $a3, 168($sp)
+ .cfi_restore 7
+ lw $a2, 164($sp)
+ .cfi_restore 6
+ lw $a1, 160($sp)
+ .cfi_restore 5
+ lw $a0, 156($sp)
+ .cfi_restore 4
+ lw $v1, 152($sp)
+ .cfi_restore 3
+ lw $v0, 148($sp)
+ .cfi_restore 2
+ lw $at, 144($sp)
+ .cfi_restore 1
+
+ addiu $sp, $sp, 256 # pop frame
+ .cfi_adjust_cfa_offset -256
+.endm
+
+ /*
* Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
* exception is Thread::Current()->exception_
*/
@@ -1652,18 +1843,20 @@
* Called by managed code when the value in rSUSPEND has been decremented to 0.
*/
.extern artTestSuspendFromCode
-ENTRY art_quick_test_suspend
- lh $a0, THREAD_FLAGS_OFFSET(rSELF)
- bnez $a0, 1f
+ENTRY_NO_GP art_quick_test_suspend
+ lh rSUSPEND, THREAD_FLAGS_OFFSET(rSELF)
+ bnez rSUSPEND, 1f
addiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
jalr $zero, $ra
nop
1:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME # save everything for stack crawl
la $t9, artTestSuspendFromCode
- jalr $t9 # (Thread*)
+ jalr $t9 # (Thread*)
move $a0, rSELF
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ jalr $zero, $ra
+ nop
END art_quick_test_suspend
/*
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
index f5d13c2..170513d 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -34,6 +34,12 @@
(1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3);
static constexpr uint32_t kMipsCalleeSaveAllSpills =
(1 << art::mips::S0) | (1 << art::mips::S1);
+static constexpr uint32_t kMipsCalleeSaveEverythingSpills =
+ (1 << art::mips::AT) | (1 << art::mips::V0) | (1 << art::mips::V1) |
+ (1 << art::mips::A0) | (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) |
+ (1 << art::mips::T0) | (1 << art::mips::T1) | (1 << art::mips::T2) | (1 << art::mips::T3) |
+ (1 << art::mips::T4) | (1 << art::mips::T5) | (1 << art::mips::T6) | (1 << art::mips::T7) |
+ (1 << art::mips::S0) | (1 << art::mips::S1) | (1 << art::mips::T8) | (1 << art::mips::T9);
static constexpr uint32_t kMipsCalleeSaveFpAlwaysSpills = 0;
static constexpr uint32_t kMipsCalleeSaveFpRefSpills = 0;
@@ -43,23 +49,34 @@
(1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
(1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
(1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1 << art::mips::F31);
+static constexpr uint32_t kMipsCalleeSaveFpEverythingSpills =
+ (1 << art::mips::F0) | (1 << art::mips::F1) | (1 << art::mips::F2) | (1 << art::mips::F3) |
+ (1 << art::mips::F4) | (1 << art::mips::F5) | (1 << art::mips::F6) | (1 << art::mips::F7) |
+ (1 << art::mips::F8) | (1 << art::mips::F9) | (1 << art::mips::F10) | (1 << art::mips::F11) |
+ (1 << art::mips::F12) | (1 << art::mips::F13) | (1 << art::mips::F14) | (1 << art::mips::F15) |
+ (1 << art::mips::F16) | (1 << art::mips::F17) | (1 << art::mips::F18) | (1 << art::mips::F19) |
+ (1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
+ (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
+ (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1 << art::mips::F31);
constexpr uint32_t MipsCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0);
+ (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
}
constexpr uint32_t MipsCalleeSaveFPSpills(Runtime::CalleeSaveType type) {
return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMipsCalleeSaveAllFPSpills : 0);
+ (type == Runtime::kSaveAll ? kMipsCalleeSaveAllFPSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(MipsCalleeSaveFPSpills(type)) /* fprs */ +
- 1 /* Method* */) * kMipsPointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc
index a451496..0a9ab7a 100644
--- a/runtime/arch/mips/thread_mips.cc
+++ b/runtime/arch/mips/thread_mips.cc
@@ -17,14 +17,15 @@
#include "thread.h"
#include "asm_support_mips.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<4>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<4>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/mips64/asm_support_mips64.h b/runtime/arch/mips64/asm_support_mips64.h
index 995fcf3..2c16c25 100644
--- a/runtime/arch/mips64/asm_support_mips64.h
+++ b/runtime/arch/mips64/asm_support_mips64.h
@@ -25,5 +25,7 @@
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 80
// $f12-$f19, $a1-$a7, $s2-$s7 + $gp + $s8 + $ra, 16 total + 1x8 bytes padding + method*
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 208
+// $f0-$f31, $at, $v0-$v1, $a0-$a7, $t0-$t3, $s0-$s7, $t8-$t9, $gp, $s8, $ra + padding + method*
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE 496
#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index b02edb6..34b0638 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -129,8 +129,6 @@
qpoints->pReadBarrierMarkReg27 = nullptr;
qpoints->pReadBarrierMarkReg28 = nullptr;
qpoints->pReadBarrierMarkReg29 = nullptr;
- qpoints->pReadBarrierMarkReg30 = nullptr;
- qpoints->pReadBarrierMarkReg31 = nullptr;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index ae69620..0a37909 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -314,6 +314,227 @@
.endm
/*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything).
+ * callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra,
+ *              $f0-$f31; 28(GPR) + 32(FPR) + 1x8 bytes padding + method*
+ * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
+ */
+.macro SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ daddiu $sp, $sp, -496
+ .cfi_adjust_cfa_offset 496
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 496)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
+#endif
+
+ // Save core registers.
+ sd $ra, 488($sp)
+ .cfi_rel_offset 31, 488
+ sd $s8, 480($sp)
+ .cfi_rel_offset 30, 480
+ sd $gp, 472($sp)
+ .cfi_rel_offset 28, 472
+ sd $t9, 464($sp)
+ .cfi_rel_offset 25, 464
+ sd $t8, 456($sp)
+ .cfi_rel_offset 24, 456
+ sd $s7, 448($sp)
+ .cfi_rel_offset 23, 448
+ sd $s6, 440($sp)
+ .cfi_rel_offset 22, 440
+ sd $s5, 432($sp)
+ .cfi_rel_offset 21, 432
+ sd $s4, 424($sp)
+ .cfi_rel_offset 20, 424
+ sd $s3, 416($sp)
+ .cfi_rel_offset 19, 416
+ sd $s2, 408($sp)
+ .cfi_rel_offset 18, 408
+ sd $s1, 400($sp)
+ .cfi_rel_offset 17, 400
+ sd $s0, 392($sp)
+ .cfi_rel_offset 16, 392
+ sd $t3, 384($sp)
+ .cfi_rel_offset 15, 384
+ sd $t2, 376($sp)
+ .cfi_rel_offset 14, 376
+ sd $t1, 368($sp)
+ .cfi_rel_offset 13, 368
+ sd $t0, 360($sp)
+ .cfi_rel_offset 12, 360
+ sd $a7, 352($sp)
+ .cfi_rel_offset 11, 352
+ sd $a6, 344($sp)
+ .cfi_rel_offset 10, 344
+ sd $a5, 336($sp)
+ .cfi_rel_offset 9, 336
+ sd $a4, 328($sp)
+ .cfi_rel_offset 8, 328
+ sd $a3, 320($sp)
+ .cfi_rel_offset 7, 320
+ sd $a2, 312($sp)
+ .cfi_rel_offset 6, 312
+ sd $a1, 304($sp)
+ .cfi_rel_offset 5, 304
+ sd $a0, 296($sp)
+ .cfi_rel_offset 4, 296
+ sd $v1, 288($sp)
+ .cfi_rel_offset 3, 288
+ sd $v0, 280($sp)
+ .cfi_rel_offset 2, 280
+
+ // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
+ bal 1f
+ sd $at, 272($sp)
+ .cfi_rel_offset 1, 272
+1:
+ // TODO: Can we avoid the unnecessary move $t8<-$gp?
+ .cpsetup $ra, $t8, 1b
+
+ // Save FP registers.
+ s.d $f31, 264($sp)
+ s.d $f30, 256($sp)
+ s.d $f29, 248($sp)
+ s.d $f28, 240($sp)
+ s.d $f27, 232($sp)
+ s.d $f26, 224($sp)
+ s.d $f25, 216($sp)
+ s.d $f24, 208($sp)
+ s.d $f23, 200($sp)
+ s.d $f22, 192($sp)
+ s.d $f21, 184($sp)
+ s.d $f20, 176($sp)
+ s.d $f19, 168($sp)
+ s.d $f18, 160($sp)
+ s.d $f17, 152($sp)
+ s.d $f16, 144($sp)
+ s.d $f15, 136($sp)
+ s.d $f14, 128($sp)
+ s.d $f13, 120($sp)
+ s.d $f12, 112($sp)
+ s.d $f11, 104($sp)
+ s.d $f10, 96($sp)
+ s.d $f9, 88($sp)
+ s.d $f8, 80($sp)
+ s.d $f7, 72($sp)
+ s.d $f6, 64($sp)
+ s.d $f5, 56($sp)
+ s.d $f4, 48($sp)
+ s.d $f3, 40($sp)
+ s.d $f2, 32($sp)
+ s.d $f1, 24($sp)
+ s.d $f0, 16($sp)
+
+ # Load the appropriate callee-save method.
+ ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
+ ld $t1, 0($t1)
+ ld $t1, RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET($t1)
+ sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
+ # Place sp in Thread::Current()->top_quick_frame.
+ sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+.endm
+
+.macro RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ // Restore FP registers.
+ l.d $f31, 264($sp)
+ l.d $f30, 256($sp)
+ l.d $f29, 248($sp)
+ l.d $f28, 240($sp)
+ l.d $f27, 232($sp)
+ l.d $f26, 224($sp)
+ l.d $f25, 216($sp)
+ l.d $f24, 208($sp)
+ l.d $f23, 200($sp)
+ l.d $f22, 192($sp)
+ l.d $f21, 184($sp)
+ l.d $f20, 176($sp)
+ l.d $f19, 168($sp)
+ l.d $f18, 160($sp)
+ l.d $f17, 152($sp)
+ l.d $f16, 144($sp)
+ l.d $f15, 136($sp)
+ l.d $f14, 128($sp)
+ l.d $f13, 120($sp)
+ l.d $f12, 112($sp)
+ l.d $f11, 104($sp)
+ l.d $f10, 96($sp)
+ l.d $f9, 88($sp)
+ l.d $f8, 80($sp)
+ l.d $f7, 72($sp)
+ l.d $f6, 64($sp)
+ l.d $f5, 56($sp)
+ l.d $f4, 48($sp)
+ l.d $f3, 40($sp)
+ l.d $f2, 32($sp)
+ l.d $f1, 24($sp)
+ l.d $f0, 16($sp)
+
+ // Restore core registers.
+ ld $ra, 488($sp)
+ .cfi_restore 31
+ ld $s8, 480($sp)
+ .cfi_restore 30
+ ld $gp, 472($sp)
+ .cfi_restore 28
+ ld $t9, 464($sp)
+ .cfi_restore 25
+ ld $t8, 456($sp)
+ .cfi_restore 24
+ ld $s7, 448($sp)
+ .cfi_restore 23
+ ld $s6, 440($sp)
+ .cfi_restore 22
+ ld $s5, 432($sp)
+ .cfi_restore 21
+ ld $s4, 424($sp)
+ .cfi_restore 20
+ ld $s3, 416($sp)
+ .cfi_restore 19
+ ld $s2, 408($sp)
+ .cfi_restore 18
+ ld $s1, 400($sp)
+ .cfi_restore 17
+ ld $s0, 392($sp)
+ .cfi_restore 16
+ ld $t3, 384($sp)
+ .cfi_restore 15
+ ld $t2, 376($sp)
+ .cfi_restore 14
+ ld $t1, 368($sp)
+ .cfi_restore 13
+ ld $t0, 360($sp)
+ .cfi_restore 12
+ ld $a7, 352($sp)
+ .cfi_restore 11
+ ld $a6, 344($sp)
+ .cfi_restore 10
+ ld $a5, 336($sp)
+ .cfi_restore 9
+ ld $a4, 328($sp)
+ .cfi_restore 8
+ ld $a3, 320($sp)
+ .cfi_restore 7
+ ld $a2, 312($sp)
+ .cfi_restore 6
+ ld $a1, 304($sp)
+ .cfi_restore 5
+ ld $a0, 296($sp)
+ .cfi_restore 4
+ ld $v1, 288($sp)
+ .cfi_restore 3
+ ld $v0, 280($sp)
+ .cfi_restore 2
+ ld $at, 272($sp)
+ .cfi_restore 1
+
+ .cpreturn
+ daddiu $sp, $sp, 496
+ .cfi_adjust_cfa_offset -496
+.endm
+
+ /*
 * Macro that sets up calls through to artDeliverPendingExceptionFromCode,
 * where the pending exception is Thread::Current()->exception_.
@@ -1673,17 +1894,19 @@
* Called by managed code when the value in rSUSPEND has been decremented to 0.
*/
.extern artTestSuspendFromCode
-ENTRY art_quick_test_suspend
- lh $a0, THREAD_FLAGS_OFFSET(rSELF)
- bne $a0, $zero, 1f
+ENTRY_NO_GP art_quick_test_suspend
+ lh rSUSPEND, THREAD_FLAGS_OFFSET(rSELF)
+ bne rSUSPEND, $zero, 1f
daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
jalr $zero, $ra
- .cpreturn # Restore gp from t8 in branch delay slot.
+ nop
1:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME # save everything for stack crawl
jal artTestSuspendFromCode # (Thread*)
move $a0, rSELF
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME
+ jalr $zero, $ra
+ nop
END art_quick_test_suspend
/*
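The suspend check is the motivating user of the new frame type: with kSaveEverything, artTestSuspendFromCode can walk the stack and observe every live register rather than only the callee-saves. A hedged C++ sketch of the control flow the rewritten art_quick_test_suspend implements (the accessor name is illustrative, not ART's API):

extern "C" void artTestSuspendFromCode(Thread* self);  // runtime entrypoint

void TestSuspendControlFlowSketch(Thread* self) {
  // lh rSUSPEND, THREAD_FLAGS_OFFSET(rSELF): any checkpoint/suspend pending?
  if (self->GetFlags() != 0) {  // GetFlags() is an illustrative stand-in
    // Spill everything (SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME), call the
    // runtime, then restore everything before returning.
    artTestSuspendFromCode(self);
  }
  // The branch delay slot resets rSUSPEND to SUSPEND_CHECK_INTERVAL on both
  // paths before control returns to managed code.
}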
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/quick_method_frame_info_mips64.h
index f967be0..d52945f 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/quick_method_frame_info_mips64.h
@@ -25,6 +25,8 @@
namespace art {
namespace mips64 {
+static constexpr uint32_t kMips64CalleeSaveAlwaysSpills =
+ (1 << art::mips64::RA);
static constexpr uint32_t kMips64CalleeSaveRefSpills =
(1 << art::mips64::S2) | (1 << art::mips64::S3) | (1 << art::mips64::S4) |
(1 << art::mips64::S5) | (1 << art::mips64::S6) | (1 << art::mips64::S7) |
@@ -35,6 +37,14 @@
(1 << art::mips64::A7);
static constexpr uint32_t kMips64CalleeSaveAllSpills =
(1 << art::mips64::S0) | (1 << art::mips64::S1);
+static constexpr uint32_t kMips64CalleeSaveEverythingSpills =
+ (1 << art::mips64::AT) | (1 << art::mips64::V0) | (1 << art::mips64::V1) |
+ (1 << art::mips64::A0) | (1 << art::mips64::A1) | (1 << art::mips64::A2) |
+ (1 << art::mips64::A3) | (1 << art::mips64::A4) | (1 << art::mips64::A5) |
+ (1 << art::mips64::A6) | (1 << art::mips64::A7) | (1 << art::mips64::T0) |
+ (1 << art::mips64::T1) | (1 << art::mips64::T2) | (1 << art::mips64::T3) |
+ (1 << art::mips64::S0) | (1 << art::mips64::S1) | (1 << art::mips64::T8) |
+ (1 << art::mips64::T9);
static constexpr uint32_t kMips64CalleeSaveFpRefSpills = 0;
static constexpr uint32_t kMips64CalleeSaveFpArgSpills =
@@ -46,23 +56,37 @@
(1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
(1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
(1 << art::mips64::F30) | (1 << art::mips64::F31);
+static constexpr uint32_t kMips64CalleeSaveFpEverythingSpills =
+ (1 << art::mips64::F0) | (1 << art::mips64::F1) | (1 << art::mips64::F2) |
+ (1 << art::mips64::F3) | (1 << art::mips64::F4) | (1 << art::mips64::F5) |
+ (1 << art::mips64::F6) | (1 << art::mips64::F7) | (1 << art::mips64::F8) |
+ (1 << art::mips64::F9) | (1 << art::mips64::F10) | (1 << art::mips64::F11) |
+ (1 << art::mips64::F12) | (1 << art::mips64::F13) | (1 << art::mips64::F14) |
+ (1 << art::mips64::F15) | (1 << art::mips64::F16) | (1 << art::mips64::F17) |
+ (1 << art::mips64::F18) | (1 << art::mips64::F19) | (1 << art::mips64::F20) |
+ (1 << art::mips64::F21) | (1 << art::mips64::F22) | (1 << art::mips64::F23) |
+ (1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
+ (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
+ (1 << art::mips64::F30) | (1 << art::mips64::F31);
constexpr uint32_t Mips64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kMips64CalleeSaveRefSpills |
+ return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kMips64CalleeSaveAllSpills : 0) | (1 << art::mips64::RA);
+ (type == Runtime::kSaveAll ? kMips64CalleeSaveAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t Mips64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kMips64CalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kMips64CalleeSaveFpArgSpills: 0) |
- (type == Runtime::kSaveAll ? kMips64CalleeSaveFpAllSpills : 0);
+ (type == Runtime::kSaveAll ? kMips64CalleeSaveFpAllSpills : 0) |
+ (type == Runtime::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t Mips64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(Mips64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(Mips64CalleeSaveFpSpills(type)) /* fprs */ +
- + 1 /* Method* */) * kMips64PointerSize, kStackAlignment);
+ + 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo Mips64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/mips64/thread_mips64.cc b/runtime/arch/mips64/thread_mips64.cc
index c55537c..3ce5e50 100644
--- a/runtime/arch/mips64/thread_mips64.cc
+++ b/runtime/arch/mips64/thread_mips64.cc
@@ -17,14 +17,15 @@
#include "thread.h"
#include "asm_support_mips64.h"
+#include "base/enums.h"
#include "base/logging.h"
namespace art {
void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<8>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<8>().Int32Value());
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<8>().Int32Value());
+ CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k64>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 09af373..80bb51d 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -529,11 +530,7 @@
static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
int32_t offset;
-#ifdef __LP64__
- offset = GetThreadOffset<8>(entrypoint).Int32Value();
-#else
- offset = GetThreadOffset<4>(entrypoint).Int32Value();
-#endif
+ offset = GetThreadOffset<kRuntimePointerSize>(entrypoint).Int32Value();
return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
}
@@ -1016,7 +1013,7 @@
// Use an arbitrary method from c to use as referrer
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
// arbitrary
- reinterpret_cast<size_t>(c->GetVirtualMethod(0, sizeof(void*))),
+ reinterpret_cast<size_t>(c->GetVirtualMethod(0, kRuntimePointerSize)),
0U,
StubTest::GetEntrypoint(self, kQuickAllocObject),
self);
@@ -1147,12 +1144,13 @@
if ((false)) {
// Use an arbitrary method from c to use as referrer
- size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
- 10U,
- // arbitrary
- reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, sizeof(void*))),
- StubTest::GetEntrypoint(self, kQuickAllocArray),
- self);
+ size_t result = Invoke3(
+ static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
+ 10U,
+ // arbitrary
+ reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, kRuntimePointerSize)),
+ StubTest::GetEntrypoint(self, kQuickAllocArray),
+ self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
@@ -1799,7 +1797,7 @@
Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
// Need a method as a referrer
- ArtMethod* m = c->GetDirectMethod(0, sizeof(void*));
+ ArtMethod* m = c->GetDirectMethod(0, kRuntimePointerSize);
// Play with it...
@@ -2015,10 +2013,10 @@
Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
void* data = linear_alloc->Alloc(
self,
- ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*)));
+ ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
ImtConflictTable* new_table = new (data) ImtConflictTable(
- empty_conflict_table, inf_contains, contains_amethod, sizeof(void*));
- conflict_method->SetImtConflictTable(new_table, sizeof(void*));
+ empty_conflict_table, inf_contains, contains_amethod, kRuntimePointerSize);
+ conflict_method->SetImtConflictTable(new_table, kRuntimePointerSize);
size_t result =
Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index b0a6017..ba5fd99 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -21,8 +21,7 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 32
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
-
-// 32 bytes for GPRs and 32 bytes for FPRs.
#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (32 + 32)
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (48 + 64)
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
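The (48 + 64) spelling decomposes against the spill masks added to quick_method_frame_info_x86.h below: 8 core spills (EBP/ESI/EDI, EAX/ECX/EDX/EBX, and the fake return-address slot), 8 XMM registers counted twice because each holds 8 bytes against a 4-byte word size, plus the ArtMethod* slot. With the RoundUp sketch from earlier:

static_assert(RoundUp((8 + 2 * 8 + 1) * 4, 16) == 48 + 64,
              "x86 kSaveEverything frame size");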
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 4e9756c..bdf11da 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -117,8 +117,6 @@
qpoints->pReadBarrierMarkReg27 = nullptr;
qpoints->pReadBarrierMarkReg28 = nullptr;
qpoints->pReadBarrierMarkReg29 = nullptr;
- qpoints->pReadBarrierMarkReg30 = nullptr;
- qpoints->pReadBarrierMarkReg31 = nullptr;
qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
qpoints->pReadBarrierForRootSlow = art_quick_read_barrier_for_root_slow;
};
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 24e3a0d..3efeb40 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -20,6 +20,7 @@
#include <sys/ucontext.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "globals.h"
#include "base/logging.h"
@@ -70,18 +71,9 @@
namespace art {
-#if defined(__APPLE__) && defined(__x86_64__)
-// mac symbols have a prefix of _ on x86_64
-extern "C" void _art_quick_throw_null_pointer_exception_from_signal();
-extern "C" void _art_quick_throw_stack_overflow();
-extern "C" void _art_quick_test_suspend();
-#define EXT_SYM(sym) _ ## sym
-#else
extern "C" void art_quick_throw_null_pointer_exception_from_signal();
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_test_suspend();
-#define EXT_SYM(sym) sym
-#endif
// Note this is different from the others (no underscore on 64 bit mac) due to
// the way the symbol is defined in the .S file.
@@ -319,7 +311,7 @@
uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
uc->CTX_EIP = reinterpret_cast<uintptr_t>(
- EXT_SYM(art_quick_throw_null_pointer_exception_from_signal));
+ art_quick_throw_null_pointer_exception_from_signal);
// Pass the faulting address as the first argument of
// art_quick_throw_null_pointer_exception_from_signal.
#if defined(__x86_64__)
@@ -347,11 +339,7 @@
bool SuspensionHandler::Action(int, siginfo_t*, void* context) {
// These are the instructions to check for. The first one is the mov eax, fs:[xxx]
// where xxx is the offset of the suspend trigger.
-#if defined(__x86_64__)
- uint32_t trigger = Thread::ThreadSuspendTriggerOffset<8>().Int32Value();
-#else
- uint32_t trigger = Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
-#endif
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<kRuntimePointerSize>().Int32Value();
VLOG(signals) << "Checking for suspension point";
#if defined(__x86_64__)
@@ -400,7 +388,7 @@
*next_sp = retaddr;
uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_test_suspend));
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
// Now remove the suspend trigger that caused this fault.
Thread::Current()->RemoveSuspendTrigger();
@@ -446,7 +434,7 @@
// the previous frame.
// Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_stack_overflow));
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
return true;
}
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 77e04e7..68ba0cf 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -222,6 +222,74 @@
END_MACRO
/*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+MACRO2(SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME, got_reg, temp_reg)
+ // Save core registers.
+ PUSH edi
+ PUSH esi
+ PUSH ebp
+ PUSH ebx
+ PUSH edx
+ PUSH ecx
+ PUSH eax
+ // Create space for FPRs and stack alignment padding.
+ subl MACRO_LITERAL(12 + 8 * 8), %esp
+ CFI_ADJUST_CFA_OFFSET(12 + 8 * 8)
+ // Save FPRs.
+ movsd %xmm0, 12(%esp)
+ movsd %xmm1, 20(%esp)
+ movsd %xmm2, 28(%esp)
+ movsd %xmm3, 36(%esp)
+ movsd %xmm4, 44(%esp)
+ movsd %xmm5, 52(%esp)
+ movsd %xmm6, 60(%esp)
+ movsd %xmm7, 68(%esp)
+
+ SETUP_GOT_NOSAVE RAW_VAR(got_reg)
+ // Load Runtime::instance_ from GOT.
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
+ movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
+ // Push save everything callee-save method.
+ pushl RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
+ CFI_ADJUST_CFA_OFFSET(4)
+ // Store esp as the top quick frame.
+ movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +4: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 7*4 + 8*8 + 12 + 4 + 4)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(X86) size not as expected."
+#endif
+END_MACRO
+
+MACRO0(RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+ // Restore FPRs. Method and padding are still on the stack.
+ movsd 16(%esp), %xmm0
+ movsd 24(%esp), %xmm1
+ movsd 32(%esp), %xmm2
+ movsd 40(%esp), %xmm3
+ movsd 48(%esp), %xmm4
+ movsd 56(%esp), %xmm5
+ movsd 64(%esp), %xmm6
+ movsd 72(%esp), %xmm7
+
+ // Remove save everything callee save method, stack alignment padding and FPRs.
+ addl MACRO_LITERAL(16 + 8 * 8), %esp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 8 * 8))
+
+ // Restore core registers.
+ POP eax
+ POP ecx
+ POP edx
+ POP ebx
+ POP ebp
+ POP esi
+ POP edi
+END_MACRO
+
+ /*
 * Macro that sets up calls through to artDeliverPendingExceptionFromCode, where the pending
* exception is Thread::Current()->exception_.
*/
@@ -661,22 +729,6 @@
ret
END_FUNCTION art_quick_invoke_static_stub
-MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
- // Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // push padding
- CFI_ADJUST_CFA_OFFSET(12)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- call CALLVAR(cxx_name) // cxx_name(Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro) // return or deliver exception
- END_FUNCTION VAR(c_name)
-END_MACRO
-
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
@@ -1028,7 +1080,13 @@
movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
// Read barrier for class load.
cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Null check so that we can load the lock word.
+ testl %edx, %edx
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Check the mark bit, if it is 1 return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
@@ -1065,7 +1123,7 @@
test LITERAL(LOCK_WORD_STATE_MASK), %ecx // test the 2 high bits.
jne .Lslow_lock // slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
test %ecx, %ecx
jnz .Lalready_thin // lock word contains a thin lock
// unlocked case - edx: original lock word, eax: obj.
@@ -1081,9 +1139,9 @@
cmpw %cx, %dx // do we hold the lock already?
jne .Lslow_lock
movl %edx, %ecx // copy the lock word to check count overflow.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx // increment recursion count for overflow check.
- test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if either of the upper two bits (28-29) are set.
+ test LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED), %ecx // overflowed if the first gc state bit is set.
jne .Lslow_lock // count overflowed so go slow
movl %eax, %ecx // save obj to use eax for cmpxchg.
movl %edx, %eax // copy the lock word as the old val for cmpxchg.
@@ -1137,13 +1195,13 @@
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
movl %ecx, %edx // copy the lock word to detect new count of 0.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %edx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %edx // zero the gc bits.
cmpl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx
jae .Lrecursive_thin_unlock
// update lockword, cmpxchg necessary for read barrier bits.
movl %eax, %edx // edx: obj
movl %ecx, %eax // eax: old lock word.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // ecx: new lock word zero except original rb bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED), %ecx // ecx: new lock word zero except original gc bits.
#ifndef USE_READ_BARRIER
movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
#else
@@ -1397,7 +1455,19 @@
ret
END_FUNCTION art_quick_memcpy
-NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
+DEFINE_FUNCTION art_quick_test_suspend
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME ebx, ebx // save everything for GC
+ // Outgoing argument set up
+ subl MACRO_LITERAL(12), %esp // push padding
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artTestSuspendFromCode) // (Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // restore frame up to return address
+ ret // return
+END_FUNCTION art_quick_test_suspend
DEFINE_FUNCTION art_quick_d2l
subl LITERAL(12), %esp // alignment padding, room for argument
@@ -1923,6 +1993,14 @@
// convention (e.g. standard callee-save registers are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
+ // Null check so that we can load the lock word.
+ test REG_VAR(reg), REG_VAR(reg)
+ jz .Lret_rb_\name
+ // Check the mark bit, if it is 1 return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg))
+ jz .Lslow_rb_\name
+ ret
+.Lslow_rb_\name:
// Save all potentially live caller-save core registers.
PUSH eax
PUSH ecx
@@ -1970,6 +2048,7 @@
POP_REG_NE edx, RAW_VAR(reg)
POP_REG_NE ecx, RAW_VAR(reg)
POP_REG_NE eax, RAW_VAR(reg)
+.Lret_rb_\name:
ret
END_FUNCTION VAR(name)
END_MACRO
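The same fast path is added to the x86-64 stubs later in this change. Expressed as C++, the new prologue is roughly the following (the lock-word accessor and slow-path names are illustrative; the real stub tests LOCK_WORD_MARK_BIT_MASK_SHIFTED against MIRROR_OBJECT_LOCK_WORD_OFFSET directly):

// Hedged sketch of the READ_BARRIER_MARK_REG fast path.
mirror::Object* ReadBarrierMarkSketch(mirror::Object* ref) {
  if (ref == nullptr) {
    return nullptr;  // Null references need no marking.
  }
  uint32_t lock_word = LoadLockWord(ref);  // illustrative accessor
  if ((lock_word & LOCK_WORD_MARK_BIT_MASK_SHIFTED) != 0) {
    return ref;  // Already marked: return without spilling any registers.
  }
  // Only unmarked references pay for spilling the caller-save registers and
  // calling into the runtime.
  return ReadBarrierMarkSlowPath(ref);  // illustrative name
}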
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
index ed1d860..a1612c3 100644
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ b/runtime/arch/x86/quick_method_frame_info_x86.h
@@ -36,27 +36,39 @@
XMM7 = 7,
};
+static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
+ (1 << art::x86::kNumberOfCpuRegisters); // Fake return address callee save.
static constexpr uint32_t kX86CalleeSaveRefSpills =
(1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
static constexpr uint32_t kX86CalleeSaveArgSpills =
(1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+static constexpr uint32_t kX86CalleeSaveEverythingSpills =
+ (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+
static constexpr uint32_t kX86CalleeSaveFpArgSpills =
(1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
(1 << art::x86::XMM2) | (1 << art::x86::XMM3);
+static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
+ (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+ (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
+ (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
+ (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
constexpr uint32_t X86CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kX86CalleeSaveRefSpills | (type == Runtime::kRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
- (1 << art::x86::kNumberOfCpuRegisters); // fake return address callee save
+ return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
+ (type == Runtime::kRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t X86CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
- return type == Runtime::kRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0;
+ return (type == Runtime::kRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
+ (type == Runtime::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t X86CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
2 * POPCOUNT(X86CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kX86PointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index c39d122..241650e 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include "asm_support_x86.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -136,7 +137,7 @@
// Sanity check that reads from %fs point to this Thread*.
Thread* self_check;
- CHECK_EQ(THREAD_SELF_OFFSET, SelfOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_SELF_OFFSET, SelfOffset<PointerSize::k32>().Int32Value());
__asm__ __volatile__("movl %%fs:(%1), %0"
: "=r"(self_check) // output
: "r"(THREAD_SELF_OFFSET) // input
@@ -144,9 +145,9 @@
CHECK_EQ(self_check, this);
// Sanity check other offsets.
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<4>().Int32Value());
- CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<4>().Int32Value());
- CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<4>().Int32Value());
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
+ CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k32>().Int32Value());
}
void Thread::CleanupCpu() {
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index c4e723c..0728f99 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -31,7 +31,8 @@
// Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't
// separate the backslash and parameter by a space. Everything just works.
#define RAW_VAR(name) \name
- #define VAR(name) SYMBOL(\name)
+ #define VAR(name) \name
+ #define CALLVAR(name) SYMBOL(\name)
#define PLT_VAR(name) \name@PLT
#define REG_VAR(name) %\name
#define CALL_MACRO(name) \name
@@ -45,6 +46,7 @@
.altmacro
#define RAW_VAR(name) name&
#define VAR(name) name&
+ #define CALLVAR(name) SYMBOL(name&)
#define PLT_VAR(name) name&@PLT
#define REG_VAR(name) %name
#define CALL_MACRO(name) name&
@@ -110,10 +112,10 @@
// for mac builds.
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(SYMBOL(\c_name))
- ASM_HIDDEN SYMBOL(\c_name)
- .globl VAR(c_name)
+ ASM_HIDDEN CALLVAR(c_name)
+ .globl CALLVAR(c_name)
ALIGN_FUNCTION_ENTRY
-VAR(c_name):
+CALLVAR(c_name):
CFI_STARTPROC
// Ensure we get a sane starting CFA.
CFI_DEF_CFA(rsp, 8)
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 48bec73..58dc2fe 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -21,6 +21,7 @@
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE (64 + 4*8)
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE (64 + 4*8)
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (176 + 4*8)
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE (112 + 12*8)
+#define FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE (144 + 16*8)
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
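As on the other architectures, the new constant agrees with the spill masks in quick_method_frame_info_x86_64.h below: 16 core spills (6 callee-save refs, 9 caller-save GPRs, and the fake return-address slot), all 16 XMM registers, plus the ArtMethod* slot, at 8 bytes each:

static_assert(RoundUp((16 + 16 + 1) * 8, 16) == 144 + 16 * 8,
              "x86-64 kSaveEverything frame size");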
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index c2e3023..42b9699 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -131,8 +131,6 @@
qpoints->pReadBarrierMarkReg27 = nullptr;
qpoints->pReadBarrierMarkReg28 = nullptr;
qpoints->pReadBarrierMarkReg29 = nullptr;
- qpoints->pReadBarrierMarkReg30 = nullptr;
- qpoints->pReadBarrierMarkReg31 = nullptr;
qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
qpoints->pReadBarrierForRootSlow = art_quick_read_barrier_for_root_slow;
#endif // __APPLE__
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 784ec39..4741ac0 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -165,8 +165,8 @@
PUSH rdx // Quick arg 2.
PUSH rcx // Quick arg 3.
// Create space for FPR args and create 2 slots for ArtMethod*.
- subq MACRO_LITERAL(80 + 4 * 8), %rsp
- CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
+ subq MACRO_LITERAL(16 + 12 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(16 + 12 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Save FPRs.
@@ -189,7 +189,7 @@
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11 * 8 + 4 * 8 + 80 + 8)
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11 * 8 + 12 * 8 + 16 + 8)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif // __APPLE__
@@ -260,6 +260,108 @@
POP r15
END_MACRO
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ */
+MACRO0(SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+#if defined(__APPLE__)
+ int3
+ int3
+#else
+ // Save core registers from highest to lowest to agree with core spills bitmap.
+ PUSH r15
+ PUSH r14
+ PUSH r13
+ PUSH r12
+ PUSH r11
+ PUSH r10
+ PUSH r9
+ PUSH r8
+ PUSH rdi
+ PUSH rsi
+ PUSH rbp
+ PUSH rbx
+ PUSH rdx
+ PUSH rcx
+ PUSH rax
+ // Create space for FPRs and stack alignment padding.
+ subq MACRO_LITERAL(8 + 16 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(8 + 16 * 8)
+ // R10 := Runtime::Current()
+ movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+ movq (%r10), %r10
+ // Save FPRs.
+ movq %xmm0, 8(%rsp)
+ movq %xmm1, 16(%rsp)
+ movq %xmm2, 24(%rsp)
+ movq %xmm3, 32(%rsp)
+ movq %xmm4, 40(%rsp)
+ movq %xmm5, 48(%rsp)
+ movq %xmm6, 56(%rsp)
+ movq %xmm7, 64(%rsp)
+ movq %xmm8, 72(%rsp)
+ movq %xmm9, 80(%rsp)
+ movq %xmm10, 88(%rsp)
+ movq %xmm11, 96(%rsp)
+ movq %xmm12, 104(%rsp)
+ movq %xmm13, 112(%rsp)
+ movq %xmm14, 120(%rsp)
+ movq %xmm15, 128(%rsp)
+ // Push ArtMethod* for save everything frame method.
+ pushq RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET(%r10)
+ CFI_ADJUST_CFA_OFFSET(8)
+ // Store rsp as the top quick frame.
+ movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_EVERYTHING_CALLEE_SAVE != 15 * 8 + 16 * 8 + 16 + 8)
+#error "SAVE_EVERYTHING_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#endif
+#endif // __APPLE__
+END_MACRO
+
+MACRO0(RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME)
+ // Restore FPRs. Method and padding are still on the stack.
+ movq 16(%rsp), %xmm0
+ movq 24(%rsp), %xmm1
+ movq 32(%rsp), %xmm2
+ movq 40(%rsp), %xmm3
+ movq 48(%rsp), %xmm4
+ movq 56(%rsp), %xmm5
+ movq 64(%rsp), %xmm6
+ movq 72(%rsp), %xmm7
+ movq 80(%rsp), %xmm8
+ movq 88(%rsp), %xmm9
+ movq 96(%rsp), %xmm10
+ movq 104(%rsp), %xmm11
+ movq 112(%rsp), %xmm12
+ movq 120(%rsp), %xmm13
+ movq 128(%rsp), %xmm14
+ movq 136(%rsp), %xmm15
+
+ // Remove save everything callee save method, stack alignment padding and FPRs.
+ addq MACRO_LITERAL(16 + 16 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 16 * 8))
+ // Restore core registers (callee- and caller-saves together), in core spills bitmap order.
+ POP rax
+ POP rcx
+ POP rdx
+ POP rbx
+ POP rbp
+ POP rsi
+ POP rdi
+ POP r8
+ POP r9
+ POP r10
+ POP r11
+ POP r12
+ POP r13
+ POP r14
+ POP r15
+END_MACRO
+
/*
 * Macro that sets up calls through to artDeliverPendingExceptionFromCode, where the pending
@@ -278,7 +380,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(Thread*)
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -288,7 +390,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -298,7 +400,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -371,7 +473,7 @@
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
movq %rsp, %rcx // pass SP
- call VAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP)
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP)
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
@@ -702,23 +804,12 @@
#endif // __APPLE__
END_FUNCTION art_quick_do_long_jump
-MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
- // Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro) // return or deliver exception
- END_FUNCTION VAR(c_name)
-END_MACRO
-
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -729,7 +820,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -740,7 +831,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -751,7 +842,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -763,7 +854,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -775,7 +866,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call VAR(cxx_name) // (arg0, arg1, referrer, Thread*)
+ call CALLVAR(cxx_name) // (arg0, arg1, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
@@ -787,7 +878,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
@@ -950,7 +1041,7 @@
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
@@ -989,7 +1080,13 @@
// Load the class
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Null check so that we can load the lock word.
+ testl %edx, %edx
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
+ // Check the mark bit, if it is 1 return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
+ jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
@@ -1022,7 +1119,7 @@
test LITERAL(LOCK_WORD_STATE_MASK), %ecx // Test the 2 high bits.
jne .Lslow_lock // Slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
test %ecx, %ecx
jnz .Lalready_thin // Lock word contains a thin lock.
// unlocked case - edx: original lock word, edi: obj.
@@ -1037,9 +1134,9 @@
cmpw %cx, %dx // do we hold the lock already?
jne .Lslow_lock
movl %edx, %ecx // copy the lock word to check count overflow.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx // increment recursion count
- test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if either of the upper two bits (28-29) are set
+ test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if the upper bit (28) is set
jne .Lslow_lock // count overflowed so go slow
movl %edx, %eax // copy the lock word as the old val for cmpxchg.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx // increment recursion count again for real.
@@ -1074,12 +1171,12 @@
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
movl %ecx, %edx // copy the lock word to detect new count of 0.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %edx // zero the read barrier bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %edx // zero the gc bits.
cmpl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx
jae .Lrecursive_thin_unlock
// update lockword, cmpxchg necessary for read barrier bits.
movl %ecx, %eax // eax: old lock word.
- andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // ecx: new lock word zero except original rb bits.
+ andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED), %ecx // ecx: new lock word zero except original gc bits.
#ifndef USE_READ_BARRIER
movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
#else
@@ -1329,7 +1426,14 @@
ret
END_FUNCTION art_quick_memcpy
-NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
+DEFINE_FUNCTION art_quick_test_suspend
+ SETUP_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // save everything for GC
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
+ call SYMBOL(artTestSuspendFromCode) // (Thread*)
+ RESTORE_SAVE_EVERYTHING_CALLEE_SAVE_FRAME // restore frame up to return address
+ ret
+END_FUNCTION art_quick_test_suspend
UNIMPLEMENTED art_quick_ldiv
UNIMPLEMENTED art_quick_lmod
@@ -1833,6 +1937,14 @@
// convention (e.g. standard callee-save registers are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
+ // Null check so that we can load the lock word.
+ testq REG_VAR(reg), REG_VAR(reg)
+ jz .Lret_rb_\name
+ // Check the mark bit, if it is 1 return.
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg))
+ jz .Lslow_rb_\name
+ ret
+.Lslow_rb_\name:
// Save all potentially live caller-save core registers.
PUSH rax
PUSH rcx
@@ -1897,6 +2009,7 @@
POP_REG_NE rdx, RAW_VAR(reg)
POP_REG_NE rcx, RAW_VAR(reg)
POP_REG_NE rax, RAW_VAR(reg)
+.Lret_rb_\name:
ret
END_FUNCTION VAR(name)
END_MACRO
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
index 72d7e99..aa75b56 100644
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
@@ -25,12 +25,19 @@
namespace art {
namespace x86_64 {
+static constexpr uint32_t kX86_64CalleeSaveAlwaysSpills =
+ (1 << art::x86_64::kNumberOfCpuRegisters); // Fake return address callee save.
static constexpr uint32_t kX86_64CalleeSaveRefSpills =
(1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
(1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
static constexpr uint32_t kX86_64CalleeSaveArgSpills =
(1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
(1 << art::x86_64::R8) | (1 << art::x86_64::R9);
+static constexpr uint32_t kX86_64CalleeSaveEverythingSpills =
+ (1 << art::x86_64::RAX) | (1 << art::x86_64::RCX) | (1 << art::x86_64::RDX) |
+ (1 << art::x86_64::RSI) | (1 << art::x86_64::RDI) | (1 << art::x86_64::R8) |
+ (1 << art::x86_64::R9) | (1 << art::x86_64::R10) | (1 << art::x86_64::R11);
+
static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
(1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
(1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
@@ -38,22 +45,30 @@
static constexpr uint32_t kX86_64CalleeSaveFpSpills =
(1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
(1 << art::x86_64::XMM14) | (1 << art::x86_64::XMM15);
+static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
+ (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) |
+ (1 << art::x86_64::XMM2) | (1 << art::x86_64::XMM3) |
+ (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+ (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7) |
+ (1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
+ (1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
constexpr uint32_t X86_64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kX86_64CalleeSaveRefSpills |
+ return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
- (1 << art::x86_64::kNumberOfCpuRegisters); // fake return address callee save;
+ (type == Runtime::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
}
constexpr uint32_t X86_64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
return kX86_64CalleeSaveFpSpills |
- (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0);
+ (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
+ (type == Runtime::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
}
constexpr uint32_t X86_64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
+ 1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
}
constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
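The truncated function above combines the three helpers in the usual per-architecture pattern; a sketch of its body, consistent with the other *CalleeSaveMethodFrameInfo functions (treat the exact constructor signature as an assumption):

constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(
    Runtime::CalleeSaveType type) {
  return QuickMethodFrameInfo(X86_64CalleeSaveFrameSize(type),
                              X86_64CalleeSaveCoreSpills(type),
                              X86_64CalleeSaveFpSpills(type));
}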
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 32425d8..2421246 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -90,10 +90,12 @@
if (kIsDebugBuild) {
Thread* self = Thread::Current();
if (!Locks::mutator_lock_->IsSharedHeld(self)) {
- ScopedObjectAccess soa(self);
- CHECK(IsRuntimeMethod() ||
- GetDeclaringClass<kReadBarrierOption>()->IsIdxLoaded() ||
- GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
+ if (self->IsThreadSuspensionAllowable()) {
+ ScopedObjectAccess soa(self);
+ CHECK(IsRuntimeMethod() ||
+ GetDeclaringClass<kReadBarrierOption>()->IsIdxLoaded() ||
+ GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
+ }
} else {
// We cannot use SOA in this case. We might be holding the lock, but may not be in the
// runnable state (e.g., during GC).
@@ -124,20 +126,21 @@
return GetDexMethodIndex() % ImTable::kSize;
}
-inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(size_t pointer_size) {
+inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(PointerSize pointer_size) {
return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
pointer_size);
}
-inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index, size_t ptr_size) {
+inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index,
+ PointerSize pointer_size) {
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(ptr_size)->GetDeclaringClass()
+ GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
->GetDexCache()->NumResolvedMethods());
- ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(ptr_size),
+ ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
- ptr_size);
+ pointer_size);
if (LIKELY(method != nullptr)) {
auto* declaring_class = method->GetDeclaringClass();
if (LIKELY(declaring_class == nullptr || !declaring_class->IsErroneous())) {
@@ -147,70 +150,72 @@
return nullptr;
}
-inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_index, ArtMethod* new_method,
- size_t ptr_size) {
+inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_index,
+ ArtMethod* new_method,
+ PointerSize pointer_size) {
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(ptr_size)->GetDeclaringClass()
+ GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
->GetDexCache()->NumResolvedMethods());
DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr);
- mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(ptr_size),
+ mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
new_method,
- ptr_size);
+ pointer_size);
}
-inline bool ArtMethod::HasDexCacheResolvedMethods(size_t pointer_size) {
+inline bool ArtMethod::HasDexCacheResolvedMethods(PointerSize pointer_size) {
return GetDexCacheResolvedMethods(pointer_size) != nullptr;
}
inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod** other_cache,
- size_t pointer_size) {
+ PointerSize pointer_size) {
return GetDexCacheResolvedMethods(pointer_size) == other_cache;
}
-inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, size_t pointer_size) {
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size) {
return GetDexCacheResolvedMethods(pointer_size) ==
other->GetDexCacheResolvedMethods(pointer_size);
}
-inline GcRoot<mirror::Class>* ArtMethod::GetDexCacheResolvedTypes(size_t pointer_size) {
+inline GcRoot<mirror::Class>* ArtMethod::GetDexCacheResolvedTypes(PointerSize pointer_size) {
return GetNativePointer<GcRoot<mirror::Class>*>(DexCacheResolvedTypesOffset(pointer_size),
pointer_size);
}
template <bool kWithCheck>
-inline mirror::Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index, size_t ptr_size) {
+inline mirror::Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index,
+ PointerSize pointer_size) {
if (kWithCheck) {
mirror::DexCache* dex_cache =
- GetInterfaceMethodIfProxy(ptr_size)->GetDeclaringClass()->GetDexCache();
+ GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()->GetDexCache();
if (UNLIKELY(type_index >= dex_cache->NumResolvedTypes())) {
ThrowArrayIndexOutOfBoundsException(type_index, dex_cache->NumResolvedTypes());
return nullptr;
}
}
- mirror::Class* klass = GetDexCacheResolvedTypes(ptr_size)[type_index].Read();
+ mirror::Class* klass = GetDexCacheResolvedTypes(pointer_size)[type_index].Read();
return (klass != nullptr && !klass->IsErroneous()) ? klass : nullptr;
}
-inline bool ArtMethod::HasDexCacheResolvedTypes(size_t pointer_size) {
+inline bool ArtMethod::HasDexCacheResolvedTypes(PointerSize pointer_size) {
return GetDexCacheResolvedTypes(pointer_size) != nullptr;
}
inline bool ArtMethod::HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache,
- size_t pointer_size) {
+ PointerSize pointer_size) {
return GetDexCacheResolvedTypes(pointer_size) == other_cache;
}
-inline bool ArtMethod::HasSameDexCacheResolvedTypes(ArtMethod* other, size_t pointer_size) {
+inline bool ArtMethod::HasSameDexCacheResolvedTypes(ArtMethod* other, PointerSize pointer_size) {
return GetDexCacheResolvedTypes(pointer_size) == other->GetDexCacheResolvedTypes(pointer_size);
}
inline mirror::Class* ArtMethod::GetClassFromTypeIndex(uint16_t type_idx,
bool resolve,
- size_t ptr_size) {
- mirror::Class* type = GetDexCacheResolvedType(type_idx, ptr_size);
+ PointerSize pointer_size) {
+ mirror::Class* type = GetDexCacheResolvedType(type_idx, pointer_size);
if (type == nullptr && resolve) {
type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
@@ -332,9 +337,9 @@
return GetDeclaringClass()->GetDexFile().GetCodeItem(GetCodeItemOffset());
}
-inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx, size_t ptr_size) {
+inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx, PointerSize pointer_size) {
DCHECK(!IsProxyMethod());
- return GetDexCacheResolvedType(type_idx, ptr_size) != nullptr;
+ return GetDexCacheResolvedType(type_idx, pointer_size) != nullptr;
}
inline int32_t ArtMethod::GetLineNumFromDexPC(uint32_t dex_pc) {
@@ -404,7 +409,7 @@
return GetDeclaringClass<kReadBarrierOption>()->IsProxyClass();
}
-inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) {
+inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(PointerSize pointer_size) {
if (LIKELY(!IsProxyMethod())) {
return this;
}
@@ -420,22 +425,24 @@
}
inline void ArtMethod::SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
- size_t ptr_size) {
- SetNativePointer(DexCacheResolvedMethodsOffset(ptr_size), new_dex_cache_methods, ptr_size);
+ PointerSize pointer_size) {
+ SetNativePointer(DexCacheResolvedMethodsOffset(pointer_size),
+ new_dex_cache_methods,
+ pointer_size);
}
inline void ArtMethod::SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types,
- size_t ptr_size) {
- SetNativePointer(DexCacheResolvedTypesOffset(ptr_size), new_dex_cache_types, ptr_size);
+ PointerSize pointer_size) {
+ SetNativePointer(DexCacheResolvedTypesOffset(pointer_size), new_dex_cache_types, pointer_size);
}
-inline mirror::Class* ArtMethod::GetReturnType(bool resolve, size_t ptr_size) {
+inline mirror::Class* ArtMethod::GetReturnType(bool resolve, PointerSize pointer_size) {
DCHECK(!IsProxyMethod());
const DexFile* dex_file = GetDexFile();
const DexFile::MethodId& method_id = dex_file->GetMethodId(GetDexMethodIndex());
const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
uint16_t return_type_idx = proto_id.return_type_idx_;
- mirror::Class* type = GetDexCacheResolvedType(return_type_idx, ptr_size);
+ mirror::Class* type = GetDexCacheResolvedType(return_type_idx, pointer_size);
if (type == nullptr && resolve) {
type = Runtime::Current()->GetClassLinker()->ResolveType(return_type_idx, this);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
@@ -444,7 +451,7 @@
}
template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
-void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
+void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
if (LIKELY(!declaring_class_.IsNull())) {
visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
mirror::Class* klass = declaring_class_.Read<kReadBarrierOption>();
@@ -480,7 +487,7 @@
template <typename Visitor>
inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor,
- size_t pointer_size) {
+ PointerSize pointer_size) {
mirror::Class* old_class = GetDeclaringClassUnchecked<kWithoutReadBarrier>();
mirror::Class* new_class = visitor(old_class);
if (old_class != new_class) {
@@ -499,7 +506,7 @@
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, size_t pointer_size) {
+inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size) {
if (IsNative<kReadBarrierOption>()) {
const void* old_native_code = GetEntryPointFromJniPtrSize(pointer_size);
const void* new_native_code = visitor(old_native_code);
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 113827a..60975d4 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -122,7 +122,7 @@
return dex_file->GetMethodSignature(mid) == dex_file2->GetMethodSignature(mid2);
}
-ArtMethod* ArtMethod::FindOverriddenMethod(size_t pointer_size) {
+ArtMethod* ArtMethod::FindOverriddenMethod(PointerSize pointer_size) {
if (IsStatic()) {
return nullptr;
}
@@ -196,7 +196,7 @@
// Default to handler not found.
uint32_t found_dex_pc = DexFile::kDexNoIndex;
// Iterate over the catch handlers associated with dex_pc.
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (CatchHandlerIterator it(*code_item, dex_pc); it.HasNext(); it.Next()) {
uint16_t iter_type_idx = it.GetHandlerTypeIndex();
// Catch all case
@@ -245,7 +245,7 @@
if (kIsDebugBuild) {
self->AssertThreadSuspensionIsAllowable();
CHECK_EQ(kRunnable, self->GetState());
- CHECK_STREQ(GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(), shorty);
+ CHECK_STREQ(GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(), shorty);
}
// Push a transition back into managed code onto the linked list in thread.
@@ -268,7 +268,7 @@
self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true);
}
} else {
- DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
constexpr bool kLogInvocationStartAndReturn = false;
bool have_quick_code = GetEntryPointFromQuickCompiledCode() != nullptr;
@@ -476,7 +476,7 @@
return Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(this) != nullptr;
}
-void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) {
+void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
Size(image_pointer_size));
declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
@@ -499,18 +499,20 @@
hotness_count_ = 0;
}
-bool ArtMethod::IsImagePointerSize(size_t pointer_size) {
+bool ArtMethod::IsImagePointerSize(PointerSize pointer_size) {
// Hijack this function to get access to PtrSizedFieldsOffset.
//
// Ensure that PtrSizedFieldsOffset is correct. We rely here on usually having both 32-bit and
// 64-bit builds.
static_assert(std::is_standard_layout<ArtMethod>::value, "ArtMethod is not standard layout.");
- static_assert((sizeof(void*) != 4) ||
- (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(4)),
- "Unexpected 32-bit class layout.");
- static_assert((sizeof(void*) != 8) ||
- (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(8)),
- "Unexpected 64-bit class layout.");
+ static_assert(
+ (sizeof(void*) != 4) ||
+ (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(PointerSize::k32)),
+ "Unexpected 32-bit class layout.");
+ static_assert(
+ (sizeof(void*) != 8) ||
+ (offsetof(ArtMethod, ptr_sized_fields_) == PtrSizedFieldsOffset(PointerSize::k64)),
+ "Unexpected 64-bit class layout.");
Runtime* runtime = Runtime::Current();
if (runtime == nullptr) {
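The rewritten static_asserts can pass `PointerSize::k32`/`PointerSize::k64` straight to `PtrSizedFieldsOffset` because that helper is constexpr and casts the enum back to bytes. A standalone sketch of the same check against a simplified struct, assuming the enum mirrors `base/enums.h`:

```cpp
#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

struct Method {
  unsigned hotness;        // stand-in for the last fixed-width field
  void* ptr_sized_fields;  // stand-in for the trailing pointer-sized block
};

constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

constexpr size_t PtrSizedFieldsOffset(PointerSize ps) {
  return RoundUp(offsetof(Method, hotness) + sizeof(unsigned),
                 static_cast<size_t>(ps));
}

// Each build can only verify its own width; having both 32- and 64-bit
// builds in practice is what makes the pair of asserts sufficient.
static_assert(sizeof(void*) != 8 ||
              PtrSizedFieldsOffset(PointerSize::k64) == offsetof(Method, ptr_sized_fields),
              "Unexpected 64-bit layout.");
static_assert(sizeof(void*) != 4 ||
              PtrSizedFieldsOffset(PointerSize::k32) == offsetof(Method, ptr_sized_fields),
              "Unexpected 32-bit layout.");
```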
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 1d14203..acf06fd 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -21,6 +21,7 @@
#include "base/bit_utils.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "dex_file.h"
#include "gc_root.h"
#include "invoke_type.h"
@@ -65,7 +66,7 @@
ImtConflictTable(ImtConflictTable* other,
ArtMethod* interface_method,
ArtMethod* implementation_method,
- size_t pointer_size) {
+ PointerSize pointer_size) {
const size_t count = other->NumEntries(pointer_size);
for (size_t i = 0; i < count; ++i) {
SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size));
@@ -79,30 +80,30 @@
}
// num_entries excludes the header.
- ImtConflictTable(size_t num_entries, size_t pointer_size) {
+ ImtConflictTable(size_t num_entries, PointerSize pointer_size) {
SetInterfaceMethod(num_entries, pointer_size, nullptr);
SetImplementationMethod(num_entries, pointer_size, nullptr);
}
// Set an entry at an index.
- void SetInterfaceMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ void SetInterfaceMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method);
}
- void SetImplementationMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+ void SetImplementationMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method);
}
- ArtMethod* GetInterfaceMethod(size_t index, size_t pointer_size) const {
+ ArtMethod* GetInterfaceMethod(size_t index, PointerSize pointer_size) const {
return GetMethod(index * kMethodCount + kMethodInterface, pointer_size);
}
- ArtMethod* GetImplementationMethod(size_t index, size_t pointer_size) const {
+ ArtMethod* GetImplementationMethod(size_t index, PointerSize pointer_size) const {
return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
}
// Return true if two conflict tables are the same.
- bool Equals(ImtConflictTable* other, size_t pointer_size) const {
+ bool Equals(ImtConflictTable* other, PointerSize pointer_size) const {
size_t num = NumEntries(pointer_size);
if (num != other->NumEntries(pointer_size)) {
return false;
@@ -121,7 +122,7 @@
// NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
// and also returns one. The order is <interface, implementation>.
template<typename Visitor>
- void Visit(const Visitor& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS {
+ void Visit(const Visitor& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS {
uint32_t table_index = 0;
for (;;) {
ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size);
@@ -143,7 +144,7 @@
// Lookup the implementation ArtMethod associated to `interface_method`. Return null
// if not found.
- ArtMethod* Lookup(ArtMethod* interface_method, size_t pointer_size) const {
+ ArtMethod* Lookup(ArtMethod* interface_method, PointerSize pointer_size) const {
uint32_t table_index = 0;
for (;;) {
ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size);
@@ -159,7 +160,7 @@
}
// Compute the number of entries in this table.
- size_t NumEntries(size_t pointer_size) const {
+ size_t NumEntries(PointerSize pointer_size) const {
uint32_t table_index = 0;
while (GetInterfaceMethod(table_index, pointer_size) != nullptr) {
++table_index;
@@ -168,41 +169,39 @@
}
// Compute the size in bytes taken by this table.
- size_t ComputeSize(size_t pointer_size) const {
+ size_t ComputeSize(PointerSize pointer_size) const {
// Add the end marker.
return ComputeSize(NumEntries(pointer_size), pointer_size);
}
// Compute the size in bytes needed for copying the given `table` and add
// one more entry.
- static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, size_t pointer_size) {
+ static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, PointerSize pointer_size) {
return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
}
// Compute size with a fixed number of entries.
- static size_t ComputeSize(size_t num_entries, size_t pointer_size) {
+ static size_t ComputeSize(size_t num_entries, PointerSize pointer_size) {
return (num_entries + 1) * EntrySize(pointer_size); // Add one for null terminator.
}
- static size_t EntrySize(size_t pointer_size) {
- return pointer_size * static_cast<size_t>(kMethodCount);
+ static size_t EntrySize(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size) * static_cast<size_t>(kMethodCount);
}
private:
- ArtMethod* GetMethod(size_t index, size_t pointer_size) const {
- if (pointer_size == 8) {
+ ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
+ if (pointer_size == PointerSize::k64) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
} else {
- DCHECK_EQ(pointer_size, 4u);
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
}
}
- void SetMethod(size_t index, size_t pointer_size, ArtMethod* method) {
- if (pointer_size == 8) {
+ void SetMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
+ if (pointer_size == PointerSize::k64) {
data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
} else {
- DCHECK_EQ(pointer_size, 4u);
data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
}
}
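Typing the parameter is also why the `DCHECK_EQ(pointer_size, 4u)` lines can simply be deleted: with exactly two enumerators, the else branch is already pinned to `PointerSize::k32`. A minimal analogue of `GetMethod`/`SetMethod` over the same dual-width backing store:

```cpp
#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// One table, read as 32- or 64-bit slots depending on the typed width.
union Slots {
  uint32_t data32[8];
  uint64_t data64[4];
};

void* GetSlot(const Slots& s, size_t index, PointerSize ps) {
  return ps == PointerSize::k64
      ? reinterpret_cast<void*>(static_cast<uintptr_t>(s.data64[index]))
      : reinterpret_cast<void*>(static_cast<uintptr_t>(s.data32[index]));
}
```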
@@ -223,7 +222,7 @@
ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
method_index_(0), hotness_count_(0) { }
- ArtMethod(ArtMethod* src, size_t image_pointer_size) {
+ ArtMethod(ArtMethod* src, PointerSize image_pointer_size) {
CopyFrom(src, image_pointer_size);
}
@@ -428,42 +427,45 @@
dex_method_index_ = new_idx;
}
- ALWAYS_INLINE ArtMethod** GetDexCacheResolvedMethods(size_t pointer_size)
+ ALWAYS_INLINE ArtMethod** GetDexCacheResolvedMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index, size_t ptr_size)
+ ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_index,
ArtMethod* new_method,
- size_t ptr_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE void SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods, size_t ptr_size)
+ ALWAYS_INLINE void SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedMethods(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedMethods(ArtMethod* other, size_t pointer_size)
+ bool HasDexCacheResolvedMethods(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedMethods(ArtMethod** other_cache, size_t pointer_size)
+ bool HasSameDexCacheResolvedMethods(ArtMethod** other_cache, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template <bool kWithCheck = true>
- mirror::Class* GetDexCacheResolvedType(uint32_t type_idx, size_t ptr_size)
+ mirror::Class* GetDexCacheResolvedType(uint32_t type_idx, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types, size_t ptr_size)
+ void SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedTypes(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedTypes(ArtMethod* other, size_t pointer_size)
+ bool HasDexCacheResolvedTypes(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedTypes(ArtMethod* other, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache, size_t pointer_size)
+ bool HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Get the Class* from the type index into this method's dex cache.
- mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve, size_t ptr_size)
+ mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if this method has the same name and signature of the other method.
bool HasSameNameAndSignature(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method that this method overrides.
- ArtMethod* FindOverriddenMethod(size_t pointer_size)
+ ArtMethod* FindOverriddenMethod(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
@@ -478,21 +480,22 @@
SHARED_REQUIRES(Locks::mutator_lock_);
const void* GetEntryPointFromQuickCompiledCode() {
- return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
+ return GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
}
- ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size) {
+ ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(PointerSize pointer_size) {
return GetNativePointer<const void*>(
EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
}
void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
- sizeof(void*));
+ kRuntimePointerSize);
}
ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
- const void* entry_point_from_quick_compiled_code, size_t pointer_size) {
+ const void* entry_point_from_quick_compiled_code, PointerSize pointer_size) {
SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
- entry_point_from_quick_compiled_code, pointer_size);
+ entry_point_from_quick_compiled_code,
+ pointer_size);
}
void RegisterNative(const void* native_method, bool is_fast)
@@ -500,81 +503,84 @@
void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_);
- static MemberOffset DexCacheResolvedMethodsOffset(size_t pointer_size) {
+ static MemberOffset DexCacheResolvedMethodsOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, dex_cache_resolved_methods_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, dex_cache_resolved_methods_) / sizeof(void*)
+ * static_cast<size_t>(pointer_size));
}
- static MemberOffset DexCacheResolvedTypesOffset(size_t pointer_size) {
+ static MemberOffset DexCacheResolvedTypesOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, dex_cache_resolved_types_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, dex_cache_resolved_types_) / sizeof(void*)
+ * static_cast<size_t>(pointer_size));
}
- static MemberOffset DataOffset(size_t pointer_size) {
+ static MemberOffset DataOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, data_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, data_) / sizeof(void*) * static_cast<size_t>(pointer_size));
}
- static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
+ static MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
return DataOffset(pointer_size);
}
- static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
+ static MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
+ PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
+ * static_cast<size_t>(pointer_size));
}
- ImtConflictTable* GetImtConflictTable(size_t pointer_size) {
+ ImtConflictTable* GetImtConflictTable(PointerSize pointer_size) {
DCHECK(IsRuntimeMethod());
return reinterpret_cast<ImtConflictTable*>(GetDataPtrSize(pointer_size));
}
- ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, size_t pointer_size) {
+ ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, PointerSize pointer_size) {
DCHECK(IsRuntimeMethod());
SetDataPtrSize(table, pointer_size);
}
- ProfilingInfo* GetProfilingInfo(size_t pointer_size) {
+ ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) {
return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
}
ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
- SetDataPtrSize(info, sizeof(void*));
+ SetDataPtrSize(info, kRuntimePointerSize);
}
- ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, size_t pointer_size) {
+ ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, PointerSize pointer_size) {
SetDataPtrSize(info, pointer_size);
}
static MemberOffset ProfilingInfoOffset() {
- DCHECK(IsImagePointerSize(sizeof(void*)));
- return DataOffset(sizeof(void*));
+ DCHECK(IsImagePointerSize(kRuntimePointerSize));
+ return DataOffset(kRuntimePointerSize);
}
void* GetEntryPointFromJni() {
DCHECK(IsNative());
- return GetEntryPointFromJniPtrSize(sizeof(void*));
+ return GetEntryPointFromJniPtrSize(kRuntimePointerSize);
}
- ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) {
+ ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(PointerSize pointer_size) {
return GetDataPtrSize(pointer_size);
}
void SetEntryPointFromJni(const void* entrypoint) {
DCHECK(IsNative());
- SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
+ SetEntryPointFromJniPtrSize(entrypoint, kRuntimePointerSize);
}
- ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
+ ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, PointerSize pointer_size) {
SetDataPtrSize(entrypoint, pointer_size);
}
- ALWAYS_INLINE void* GetDataPtrSize(size_t pointer_size) {
+ ALWAYS_INLINE void* GetDataPtrSize(PointerSize pointer_size) {
DCHECK(IsImagePointerSize(pointer_size));
return GetNativePointer<void*>(DataOffset(pointer_size), pointer_size);
}
- ALWAYS_INLINE void SetDataPtrSize(const void* data, size_t pointer_size) {
+ ALWAYS_INLINE void SetDataPtrSize(const void* data, PointerSize pointer_size) {
DCHECK(IsImagePointerSize(pointer_size));
SetNativePointer(DataOffset(pointer_size), data, pointer_size);
}
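The offset helpers above all scale the same way: `OFFSETOF_MEMBER` yields a *host* byte offset into `PtrSizedFields`, so dividing by `sizeof(void*)` recovers the field's slot index, which is then re-scaled by the *target* width. A sketch with a worked example:

```cpp
#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// host_offset is measured on the compiling host; target is the image width.
constexpr size_t ScaledOffset(size_t base, size_t host_offset, PointerSize target) {
  return base + (host_offset / sizeof(void*)) * static_cast<size_t>(target);
}

// On a 64-bit host, a field at host offset 16 is slot 2: targeting 32-bit it
// lands at base + 8, targeting 64-bit at base + 16.
static_assert(sizeof(void*) != 8 || ScaledOffset(0, 16, PointerSize::k32) == 8,
              "host-offset scaling example");
```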
@@ -603,7 +609,7 @@
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS;
+ void VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -624,7 +630,8 @@
const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsResolvedTypeIdx(uint16_t type_idx, size_t ptr_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsResolvedTypeIdx(uint16_t type_idx, PointerSize pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -645,14 +652,14 @@
// May cause thread suspension due to GetClassFromTypeIndex calling ResolveType; this caused a
// large number of bugs at call sites.
- mirror::Class* GetReturnType(bool resolve, size_t ptr_size)
+ mirror::Class* GetReturnType(bool resolve, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// May cause thread suspension due to class resolution.
@@ -660,22 +667,22 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Size of an instance of this native class.
- static size_t Size(size_t pointer_size) {
+ static size_t Size(PointerSize pointer_size) {
return PtrSizedFieldsOffset(pointer_size) +
- (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
+ (sizeof(PtrSizedFields) / sizeof(void*)) * static_cast<size_t>(pointer_size);
}
// Alignment of an instance of this native class.
- static size_t Alignment(size_t pointer_size) {
+ static size_t Alignment(PointerSize pointer_size) {
// The ArtMethod alignment is the same as image pointer size. This differs from
// alignof(ArtMethod) if cross-compiling with pointer_size != sizeof(void*).
- return pointer_size;
+ return static_cast<size_t>(pointer_size);
}
- void CopyFrom(ArtMethod* src, size_t image_pointer_size)
+ void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(size_t pointer_size)
+ ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Note, hotness_counter_ updates are non-atomic but it doesn't need to be precise. Also,
@@ -711,12 +718,13 @@
// Update heap objects and non-entrypoint pointers by the passed in visitor for image relocation.
// Does not use read barrier.
template <typename Visitor>
- ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor, size_t pointer_size)
+ ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Update entry points by passing them through the visitor.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
- ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, size_t pointer_size);
+ ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size);
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
@@ -765,20 +773,20 @@
} ptr_sized_fields_;
private:
- static constexpr size_t PtrSizedFieldsOffset(size_t pointer_size) {
+ static constexpr size_t PtrSizedFieldsOffset(PointerSize pointer_size) {
// Round up to pointer size for padding field. Tested in art_method.cc.
- return RoundUp(offsetof(ArtMethod, hotness_count_) + sizeof(hotness_count_), pointer_size);
+ return RoundUp(offsetof(ArtMethod, hotness_count_) + sizeof(hotness_count_),
+ static_cast<size_t>(pointer_size));
}
// Compare given pointer size to the image pointer size.
- static bool IsImagePointerSize(size_t pointer_size);
+ static bool IsImagePointerSize(PointerSize pointer_size);
template<typename T>
- ALWAYS_INLINE T GetNativePointer(MemberOffset offset, size_t pointer_size) const {
+ ALWAYS_INLINE T GetNativePointer(MemberOffset offset, PointerSize pointer_size) const {
static_assert(std::is_pointer<T>::value, "T must be a pointer type");
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
- if (pointer_size == sizeof(uint32_t)) {
+ if (pointer_size == PointerSize::k32) {
return reinterpret_cast<T>(*reinterpret_cast<const uint32_t*>(addr));
} else {
auto v = *reinterpret_cast<const uint64_t*>(addr);
@@ -787,11 +795,10 @@
}
template<typename T>
- ALWAYS_INLINE void SetNativePointer(MemberOffset offset, T new_value, size_t pointer_size) {
+ ALWAYS_INLINE void SetNativePointer(MemberOffset offset, T new_value, PointerSize pointer_size) {
static_assert(std::is_pointer<T>::value, "T must be a pointer type");
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
- if (pointer_size == sizeof(uint32_t)) {
+ if (pointer_size == PointerSize::k32) {
uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
*reinterpret_cast<uint32_t*>(addr) = dchecked_integral_cast<uint32_t>(ptr);
} else {
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 50a786f..0619af8 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -57,112 +57,70 @@
#if defined(__LP64__)
#define POINTER_SIZE_SHIFT 3
+#define POINTER_SIZE art::PointerSize::k64
#else
#define POINTER_SIZE_SHIFT 2
+#define POINTER_SIZE art::PointerSize::k32
#endif
ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
static_cast<size_t>(__SIZEOF_POINTER__))
-// Size of references to the heap on the stack.
-#define STACK_REFERENCE_SIZE 4
-ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReference<art::mirror::Object>))
-
-// Size of heap references
-#define COMPRESSED_REFERENCE_SIZE 4
-ADD_TEST_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE),
- sizeof(art::mirror::CompressedReference<art::mirror::Object>))
-
-#define COMPRESSED_REFERENCE_SIZE_SHIFT 2
-ADD_TEST_EQ(static_cast<size_t>(1U << COMPRESSED_REFERENCE_SIZE_SHIFT),
- static_cast<size_t>(COMPRESSED_REFERENCE_SIZE))
-
-// Note: these callee save methods loads require read barriers.
-// Offset of field Runtime::callee_save_methods_[kSaveAll]
-#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
-ADD_TEST_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET),
- art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kSaveAll))
-
-// Offset of field Runtime::callee_save_methods_[kRefsOnly]
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
-ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET),
- art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsOnly))
-
-// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
-#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * 8)
-ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET),
- art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsAndArgs))
-
-// Offset of field Thread::tls32_.state_and_flags.
-#define THREAD_FLAGS_OFFSET 0
-ADD_TEST_EQ(THREAD_FLAGS_OFFSET,
- art::Thread::ThreadFlagsOffset<__SIZEOF_POINTER__>().Int32Value())
-
-// Offset of field Thread::tls32_.thin_lock_thread_id.
-#define THREAD_ID_OFFSET 12
-ADD_TEST_EQ(THREAD_ID_OFFSET,
- art::Thread::ThinLockIdOffset<__SIZEOF_POINTER__>().Int32Value())
-
-// Offset of field Thread::tls32_.is_gc_marking.
-#define THREAD_IS_GC_MARKING_OFFSET 52
-ADD_TEST_EQ(THREAD_IS_GC_MARKING_OFFSET,
- art::Thread::IsGcMarkingOffset<__SIZEOF_POINTER__>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.card_table.
-#define THREAD_CARD_TABLE_OFFSET 128
-ADD_TEST_EQ(THREAD_CARD_TABLE_OFFSET,
- art::Thread::CardTableOffset<__SIZEOF_POINTER__>().Int32Value())
+// Import platform-independent constant defines from our autogenerated list.
+// Export new defines (for assembly use) by editing cpp-define-generator def files.
+#define DEFINE_CHECK_EQ ADD_TEST_EQ
+#include "generated/asm_support_gen.h"
// Offset of field Thread::tlsPtr_.exception.
#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_EXCEPTION_OFFSET,
- art::Thread::ExceptionOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ExceptionOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_.
#define THREAD_TOP_QUICK_FRAME_OFFSET (THREAD_CARD_TABLE_OFFSET + (3 * __SIZEOF_POINTER__))
ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET,
- art::Thread::TopOfManagedStackOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::TopOfManagedStackOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.self.
#define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (9 * __SIZEOF_POINTER__))
ADD_TEST_EQ(THREAD_SELF_OFFSET,
- art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::SelfOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 199 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 197 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
- art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
#define THREAD_LOCAL_POS_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
- art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalPosOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
- art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
- art::Thread::MterpCurrentIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_CURRENT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
- art::Thread::MterpDefaultIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::MterpDefaultIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
#define THREAD_ALT_IBASE_OFFSET (THREAD_DEFAULT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
- art::Thread::MterpAltIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::MterpAltIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.rosalloc_runs.
#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_ALT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
- art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::RosAllocRunsOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 16 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
- art::Thread::ThreadLocalAllocStackTopOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalAllocStackTopOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 17 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
- art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
+ art::Thread::ThreadLocalAllocStackEndOffset<POINTER_SIZE>().Int32Value())
// Offsets within ShadowFrame.
#define SHADOWFRAME_LINK_OFFSET 0
@@ -199,17 +157,6 @@
ADD_TEST_EQ(SHADOWFRAME_VREGS_OFFSET,
static_cast<int32_t>(art::ShadowFrame::VRegsOffset()))
-// Offsets within CodeItem
-#define CODEITEM_INSNS_OFFSET 16
-ADD_TEST_EQ(CODEITEM_INSNS_OFFSET,
- static_cast<int32_t>(OFFSETOF_MEMBER(art::DexFile::CodeItem, insns_)))
-
-// Offsets within java.lang.Object.
-#define MIRROR_OBJECT_CLASS_OFFSET 0
-ADD_TEST_EQ(MIRROR_OBJECT_CLASS_OFFSET, art::mirror::Object::ClassOffset().Int32Value())
-#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
-ADD_TEST_EQ(MIRROR_OBJECT_LOCK_WORD_OFFSET, art::mirror::Object::MonitorOffset().Int32Value())
-
#if defined(USE_BROOKS_READ_BARRIER)
#define MIRROR_OBJECT_HEADER_SIZE 16
#else
@@ -231,16 +178,6 @@
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_INITIALIZED 10
-ADD_TEST_EQ(static_cast<uint32_t>(MIRROR_CLASS_STATUS_INITIALIZED),
- static_cast<uint32_t>(art::mirror::Class::kStatusInitialized))
-#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
-ADD_TEST_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE),
- static_cast<uint32_t>(art::kAccClassIsFinalizable))
-#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 31
-ADD_TEST_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE),
- static_cast<uint32_t>(1U << ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT))
-
// Array offsets.
#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32Value())
@@ -289,118 +226,7 @@
#define MIRROR_STRING_VALUE_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
-// Offsets within java.lang.reflect.ArtMethod.
-#define ART_METHOD_DEX_CACHE_METHODS_OFFSET_32 20
-ADD_TEST_EQ(ART_METHOD_DEX_CACHE_METHODS_OFFSET_32,
- art::ArtMethod::DexCacheResolvedMethodsOffset(4).Int32Value())
-#define ART_METHOD_DEX_CACHE_METHODS_OFFSET_64 24
-ADD_TEST_EQ(ART_METHOD_DEX_CACHE_METHODS_OFFSET_64,
- art::ArtMethod::DexCacheResolvedMethodsOffset(8).Int32Value())
-
-#define ART_METHOD_DEX_CACHE_TYPES_OFFSET_32 24
-ADD_TEST_EQ(ART_METHOD_DEX_CACHE_TYPES_OFFSET_32,
- art::ArtMethod::DexCacheResolvedTypesOffset(4).Int32Value())
-
-#define ART_METHOD_DEX_CACHE_TYPES_OFFSET_64 32
-ADD_TEST_EQ(ART_METHOD_DEX_CACHE_TYPES_OFFSET_64,
- art::ArtMethod::DexCacheResolvedTypesOffset(8).Int32Value())
-
-#define ART_METHOD_JNI_OFFSET_32 28
-ADD_TEST_EQ(ART_METHOD_JNI_OFFSET_32,
- art::ArtMethod::EntryPointFromJniOffset(4).Int32Value())
-
-#define ART_METHOD_JNI_OFFSET_64 40
-ADD_TEST_EQ(ART_METHOD_JNI_OFFSET_64,
- art::ArtMethod::EntryPointFromJniOffset(8).Int32Value())
-
-#define ART_METHOD_QUICK_CODE_OFFSET_32 32
-ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_32,
- art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
-
-#define ART_METHOD_QUICK_CODE_OFFSET_64 48
-ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_64,
- art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value())
-
-#define LOCK_WORD_STATE_SHIFT 30
-ADD_TEST_EQ(LOCK_WORD_STATE_SHIFT, static_cast<int32_t>(art::LockWord::kStateShift))
-
-#define LOCK_WORD_STATE_MASK 0xC0000000
-ADD_TEST_EQ(LOCK_WORD_STATE_MASK, static_cast<uint32_t>(art::LockWord::kStateMaskShifted))
-
-#define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
-ADD_TEST_EQ(LOCK_WORD_READ_BARRIER_STATE_SHIFT,
- static_cast<int32_t>(art::LockWord::kReadBarrierStateShift))
-
-#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x30000000
-ADD_TEST_EQ(LOCK_WORD_READ_BARRIER_STATE_MASK,
- static_cast<int32_t>(art::LockWord::kReadBarrierStateMaskShifted))
-
-#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xCFFFFFFF
-ADD_TEST_EQ(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED,
- static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled))
-
-#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
-ADD_TEST_EQ(LOCK_WORD_THIN_LOCK_COUNT_ONE, static_cast<int32_t>(art::LockWord::kThinLockCountOne))
-
-#define OBJECT_ALIGNMENT_MASK 7
-ADD_TEST_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), art::kObjectAlignment - 1)
-
-#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xFFFFFFF8
-ADD_TEST_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED),
- ~static_cast<uint32_t>(art::kObjectAlignment - 1))
-
-#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
-ADD_TEST_EQ(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE,
- static_cast<int32_t>(art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))
-
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 3
-ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT,
- static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift))
-
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 7
-ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK,
- static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff8
-ADD_TEST_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32),
- ~static_cast<uint32_t>(
- art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff8
-ADD_TEST_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64),
- ~static_cast<uint64_t>(
- art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-
-#define ROSALLOC_RUN_FREE_LIST_OFFSET 8
-ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_OFFSET,
- static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListOffset()))
-
-#define ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET 0
-ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET,
- static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListHeadOffset()))
-
-#define ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET 16
-ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET,
- static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListSizeOffset()))
-
-#define ROSALLOC_SLOT_NEXT_OFFSET 0
-ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET,
- static_cast<int32_t>(art::gc::allocator::RosAlloc::RunSlotNextOffset()))
-// Assert this so that we can avoid zeroing the next field by installing the class pointer.
-ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET)
-
-#define THREAD_SUSPEND_REQUEST 1
-ADD_TEST_EQ(THREAD_SUSPEND_REQUEST, static_cast<int32_t>(art::kSuspendRequest))
-
-#define THREAD_CHECKPOINT_REQUEST 2
-ADD_TEST_EQ(THREAD_CHECKPOINT_REQUEST, static_cast<int32_t>(art::kCheckpointRequest))
-
-#define JIT_CHECK_OSR (-1)
-ADD_TEST_EQ(JIT_CHECK_OSR, static_cast<int32_t>(art::jit::kJitCheckForOSR))
-
-#define JIT_HOTNESS_DISABLE (-2)
-ADD_TEST_EQ(JIT_HOTNESS_DISABLE, static_cast<int32_t>(art::jit::kJitHotnessDisabled))
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index e2d4c24..68cacd5 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -20,6 +20,7 @@
#include <deque>
#include <queue>
#include <set>
+#include <stack>
#include <utility>
#include "arena_allocator.h"
@@ -54,6 +55,12 @@
using ArenaVector = dchecked_vector<T, ArenaAllocatorAdapter<T>>;
template <typename T, typename Comparator = std::less<T>>
+using ArenaPriorityQueue = std::priority_queue<T, ArenaVector<T>, Comparator>;
+
+template <typename T>
+using ArenaStdStack = std::stack<T, ArenaDeque<T>>;
+
+template <typename T, typename Comparator = std::less<T>>
using ArenaSet = std::set<T, Comparator, ArenaAllocatorAdapter<T>>;
template <typename K, typename V, typename Comparator = std::less<K>>
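The two new adapters only swap the backing container; the `std::stack` and `std::priority_queue` interfaces are untouched, so all pushed elements come from the arena and need no individual frees. A hypothetical usage sketch, assuming an `ArenaPool`/`ArenaAllocator` and the `Adapter()` call as used elsewhere in ART:

```cpp
#include "base/arena_allocator.h"
#include "base/arena_containers.h"

namespace art {

void Sketch() {
  ArenaPool pool;
  ArenaAllocator allocator(&pool);

  // Stack over an arena-backed deque.
  ArenaStdStack<int> stack(ArenaDeque<int>(allocator.Adapter(kArenaAllocMisc)));
  stack.push(1);

  // Priority queue over an arena-backed vector.
  ArenaPriorityQueue<int> queue(std::less<int>(),
                                ArenaVector<int>(allocator.Adapter(kArenaAllocMisc)));
  queue.push(2);
}

}  // namespace art
```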
diff --git a/runtime/base/enums.h b/runtime/base/enums.h
new file mode 100644
index 0000000..51b86ea
--- /dev/null
+++ b/runtime/base/enums.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ENUMS_H_
+#define ART_RUNTIME_BASE_ENUMS_H_
+
+#include <cstddef>
+#include <ostream>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace art {
+
+enum class PointerSize : size_t {
+ k32 = 4,
+ k64 = 8
+};
+std::ostream& operator<<(std::ostream& os, const PointerSize& rhs);
+
+static constexpr PointerSize kRuntimePointerSize = sizeof(void*) == 8U
+ ? PointerSize::k64
+ : PointerSize::k32;
+
+template <typename T>
+static constexpr PointerSize ConvertToPointerSize(T any) {
+ if (any == 4 || any == 8) {
+ return static_cast<PointerSize>(any);
+ } else {
+ LOG(FATAL);
+ UNREACHABLE();
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_ENUMS_H_
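With the header in place, `kRuntimePointerSize` replaces the scattered `sizeof(void*)` call sites, byte counts require an explicit cast, and raw width values are validated once at system boundaries. A sketch of the intended call-site style (hypothetical function names):

```cpp
#include <cstddef>
#include <cstdint>
#include "base/enums.h"

size_t EntryBytes(art::PointerSize ps, size_t slots) {
  // The explicit cast is the only path from a width to a byte count.
  return static_cast<size_t>(ps) * slots;  // e.g. k64 * 3 == 24 bytes
}

void Example(uint32_t raw_header_width) {
  // Aborts via LOG(FATAL) on anything other than 4 or 8.
  art::PointerSize ps = art::ConvertToPointerSize(raw_header_width);
  (void)EntryBytes(ps, 3);
  static_assert(art::kRuntimePointerSize == art::PointerSize::k32 ||
                art::kRuntimePointerSize == art::PointerSize::k64,
                "exactly two widths exist");
}
```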
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d0dad64..3c64c81 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -323,7 +323,7 @@
quick_imt_conflict_trampoline_(nullptr),
quick_generic_jni_trampoline_(nullptr),
quick_to_interpreter_bridge_trampoline_(nullptr),
- image_pointer_size_(sizeof(void*)) {
+ image_pointer_size_(kRuntimePointerSize) {
CHECK(intern_table_ != nullptr);
static_assert(kFindArrayCacheSize == arraysize(find_array_class_cache_),
"Array cache size wrong.");
@@ -361,10 +361,6 @@
// Use the pointer size from the runtime since we are probably creating the image.
image_pointer_size_ = InstructionSetPointerSize(runtime->GetInstructionSet());
- if (!ValidPointerSize(image_pointer_size_)) {
- *error_msg = StringPrintf("Invalid image pointer size: %zu", image_pointer_size_);
- return false;
- }
// java_lang_Class comes first, it's needed for AllocClass
// The GC can't handle an object with a null class since we can't get the size of this object.
@@ -791,7 +787,7 @@
static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr,
mirror::Class* expected_class,
- size_t pointer_size,
+ PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(arr != nullptr);
@@ -809,7 +805,7 @@
static void SanityCheckArtMethodPointerArray(ArtMethod** arr,
size_t size,
- size_t pointer_size,
+ PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK_EQ(arr != nullptr, size != 0u);
@@ -883,7 +879,7 @@
// Set image methods' entry point to interpreter.
class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor {
public:
- explicit SetInterpreterEntrypointArtMethodVisitor(size_t image_pointer_size)
+ explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
: image_pointer_size_(image_pointer_size) {}
void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -897,7 +893,7 @@
}
private:
- const size_t image_pointer_size_;
+ const PointerSize image_pointer_size_;
DISALLOW_COPY_AND_ASSIGN(SetInterpreterEntrypointArtMethodVisitor);
};
@@ -907,7 +903,7 @@
const void* quick_imt_conflict_trampoline;
const void* quick_generic_jni_trampoline;
const void* quick_to_interpreter_bridge_trampoline;
- size_t pointer_size;
+ PointerSize pointer_size;
ArtMethod* m;
bool error;
};
@@ -939,18 +935,19 @@
gc::Heap* const heap = runtime->GetHeap();
std::vector<gc::space::ImageSpace*> spaces = heap->GetBootImageSpaces();
CHECK(!spaces.empty());
- image_pointer_size_ = spaces[0]->GetImageHeader().GetPointerSize();
- if (!ValidPointerSize(image_pointer_size_)) {
- *error_msg = StringPrintf("Invalid image pointer size: %zu", image_pointer_size_);
+ uint32_t pointer_size_unchecked = spaces[0]->GetImageHeader().GetPointerSizeUnchecked();
+ if (!ValidPointerSize(pointer_size_unchecked)) {
+ *error_msg = StringPrintf("Invalid image pointer size: %u", pointer_size_unchecked);
return false;
}
+ image_pointer_size_ = spaces[0]->GetImageHeader().GetPointerSize();
if (!runtime->IsAotCompiler()) {
// Only the Aot compiler supports having an image with a different pointer size than the
// runtime. This happens on the host for compiling 32 bit tests since we use a 64 bit libart
// compiler. We may also use 32 bit dex2oat on a system with 64 bit apps.
- if (image_pointer_size_ != sizeof(void*)) {
+ if (image_pointer_size_ != kRuntimePointerSize) {
*error_msg = StringPrintf("Runtime must use current image pointer size: %zu vs %zu",
- image_pointer_size_,
+ static_cast<size_t>(image_pointer_size_),
sizeof(void*));
return false;
}
@@ -1150,7 +1147,7 @@
explicit FixupArtMethodArrayVisitor(const ImageHeader& header) : header_(header) {}
virtual void Visit(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
- GcRoot<mirror::Class>* resolved_types = method->GetDexCacheResolvedTypes(sizeof(void*));
+ GcRoot<mirror::Class>* resolved_types = method->GetDexCacheResolvedTypes(kRuntimePointerSize);
const bool is_copied = method->IsCopied();
if (resolved_types != nullptr) {
bool in_image_space = false;
@@ -1165,10 +1162,10 @@
if (!is_copied || in_image_space) {
// Go through the array so that we don't need to do a slow map lookup.
method->SetDexCacheResolvedTypes(*reinterpret_cast<GcRoot<mirror::Class>**>(resolved_types),
- sizeof(void*));
+ kRuntimePointerSize);
}
}
- ArtMethod** resolved_methods = method->GetDexCacheResolvedMethods(sizeof(void*));
+ ArtMethod** resolved_methods = method->GetDexCacheResolvedMethods(kRuntimePointerSize);
if (resolved_methods != nullptr) {
bool in_image_space = false;
if (kIsDebugBuild || is_copied) {
@@ -1182,7 +1179,7 @@
if (!is_copied || in_image_space) {
// Go through the array so that we don't need to do a slow map lookup.
method->SetDexCacheResolvedMethods(*reinterpret_cast<ArtMethod***>(resolved_methods),
- sizeof(void*));
+ kRuntimePointerSize);
}
}
}
@@ -1382,11 +1379,11 @@
VLOG(image) << "From " << klass->GetDexCache()->GetDexFile()->GetBaseLocation();
}
VLOG(image) << "Direct methods";
- for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
VLOG(image) << PrettyMethod(&m);
}
VLOG(image) << "Virtual methods";
- for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
VLOG(image) << PrettyMethod(&m);
}
}
@@ -1422,7 +1419,7 @@
}
}
if (kIsDebugBuild) {
- for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
if (!IsQuickResolutionStub(code) &&
@@ -1432,7 +1429,7 @@
DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
- for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
if (!IsQuickResolutionStub(code) &&
@@ -1451,14 +1448,14 @@
if (*out_forward_dex_cache_array) {
ScopedTrace timing("Fixup ArtMethod dex cache arrays");
FixupArtMethodArrayVisitor visitor(header);
- header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
}
if (kVerifyArtMethodDeclaringClasses) {
ScopedTrace timing("Verify declaring classes");
ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
VerifyDeclaringClassVisitor visitor;
- header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*));
+ header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
}
return true;
}
@@ -1810,7 +1807,7 @@
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
VerifyClassInTableArtMethodVisitor visitor2(class_table);
- header.VisitPackedArtMethods(&visitor2, space->Begin(), sizeof(void*));
+ header.VisitPackedArtMethods(&visitor2, space->Begin(), kRuntimePointerSize);
}
VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
return true;
@@ -2054,9 +2051,10 @@
}
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
- return down_cast<mirror::PointerArray*>(image_pointer_size_ == 8u ?
- static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length)) :
- static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
+ return down_cast<mirror::PointerArray*>(
+ image_pointer_size_ == PointerSize::k64
+ ? static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length))
+ : static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
}
mirror::DexCache* ClassLinker::AllocDexCache(Thread* self,
@@ -2081,8 +2079,6 @@
raw_arrays = dex_file.GetOatDexFile()->GetDexCacheArrays();
} else if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
- // NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
- DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
// Zero-initialized.
raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
@@ -2179,20 +2175,37 @@
}
// Wait for the class if it has not already been linked.
- if (!klass->IsResolved() && !klass->IsErroneous()) {
+ size_t index = 0;
+ // Maximum number of yield iterations until we start sleeping.
+ static const size_t kNumYieldIterations = 1000;
+ // How long each sleep is, in microseconds.
+ static const size_t kSleepDurationUS = 1000; // 1 ms.
+ while (!klass->IsResolved() && !klass->IsErroneous()) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Class> h_class(hs.NewHandleWrapper(&klass));
- ObjectLock<mirror::Class> lock(self, h_class);
- // Check for circular dependencies between classes.
- if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
- ThrowClassCircularityError(h_class.Get());
- mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self);
- return nullptr;
+ {
+ ObjectTryLock<mirror::Class> lock(self, h_class);
+ // Cannot use a monitor wait here since it may block when returning and deadlock if another
+ // thread has locked klass.
+ if (lock.Acquired()) {
+ // Check for circular dependencies between classes, the lock is required for SetStatus.
+ if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
+ ThrowClassCircularityError(h_class.Get());
+ mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self);
+ return nullptr;
+ }
+ }
}
- // Wait for the pending initialization to complete.
- while (!h_class->IsResolved() && !h_class->IsErroneous()) {
- lock.WaitIgnoringInterrupts();
+ {
+ // Handle wrapper deals with klass moving.
+ ScopedThreadSuspension sts(self, kSuspended);
+ if (index < kNumYieldIterations) {
+ sched_yield();
+ } else {
+ usleep(kSleepDurationUS);
+ }
}
+ ++index;
}
if (klass->IsErroneous()) {
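The try-lock plus polling above replaces a monitor wait that could deadlock against a thread holding the class lock. The backoff schedule itself is simple: yield the time slice for the first thousand passes, then switch to 1 ms sleeps so a long wait does not spin. A standalone sketch:

```cpp
#include <cstddef>
#include <sched.h>
#include <unistd.h>

void BackoffWait(bool (*done)()) {
  static const size_t kNumYieldIterations = 1000;
  static const size_t kSleepDurationUS = 1000;  // 1 ms.
  for (size_t index = 0; !done(); ++index) {
    if (index < kNumYieldIterations) {
      sched_yield();             // cheap: stay runnable, give up the slice
    } else {
      usleep(kSleepDurationUS);  // patient: stop consuming CPU
    }
  }
}
```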
@@ -4813,7 +4826,7 @@
}
static bool HasSameSignatureWithDifferentClassLoaders(Thread* self,
- size_t pointer_size,
+ PointerSize pointer_size,
Handle<mirror::Class> klass,
Handle<mirror::Class> super_klass,
ArtMethod* method1,
@@ -5029,7 +5042,7 @@
return class_loader == nullptr ? &boot_class_table_ : class_loader->GetClassTable();
}
-static ImTable* FindSuperImt(mirror::Class* klass, size_t pointer_size)
+static ImTable* FindSuperImt(mirror::Class* klass, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
while (klass->HasSuperClass()) {
klass = klass->GetSuperClass();
@@ -5567,7 +5580,7 @@
LinkVirtualHashTable(Handle<mirror::Class> klass,
size_t hash_size,
uint32_t* hash_table,
- size_t image_pointer_size)
+ PointerSize image_pointer_size)
: klass_(klass),
hash_size_(hash_size),
hash_table_(hash_table),
@@ -5629,13 +5642,20 @@
Handle<mirror::Class> klass_;
const size_t hash_size_;
uint32_t* const hash_table_;
- const size_t image_pointer_size_;
+ const PointerSize image_pointer_size_;
};
const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
-bool ClassLinker::LinkVirtualMethods(
+// b/30419309
+#if defined(__i386__)
+#define X86_OPTNONE __attribute__((optnone))
+#else
+#define X86_OPTNONE
+#endif
+
+X86_OPTNONE bool ClassLinker::LinkVirtualMethods(
Thread* self,
Handle<mirror::Class> klass,
/*out*/std::unordered_map<size_t, ClassLinker::MethodTranslation>* default_translations) {
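The `X86_OPTNONE` macro is the general pattern for scoping a compiler-bug mitigation (here b/30419309) to the affected target, so every other architecture keeps full optimization of the annotated function. The same shape, with a stand-in body:

```cpp
#if defined(__i386__)
#define X86_OPTNONE __attribute__((optnone))  // clang: skip optimizations
#else
#define X86_OPTNONE
#endif

X86_OPTNONE static int MiscompiledOnX86Only(int x) {
  return x * 2;  // body stands in for the function the bug affects
}
```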
@@ -5883,7 +5903,7 @@
Handle<mirror::IfTable> iftable,
size_t ifstart,
Handle<mirror::Class> iface,
- size_t image_pointer_size)
+ PointerSize image_pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(self != nullptr);
DCHECK(iface.Get() != nullptr);
@@ -6032,7 +6052,7 @@
ArtMethod* interface_method,
ArtMethod* method,
bool force_new_conflict_method) {
- ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
+ ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
Runtime* const runtime = Runtime::Current();
LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
bool new_entry = conflict_method == runtime->GetImtConflictMethod() || force_new_conflict_method;
@@ -6161,7 +6181,7 @@
ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
LinearAlloc* linear_alloc,
- size_t image_pointer_size) {
+ PointerSize image_pointer_size) {
void* data = linear_alloc->Alloc(Thread::Current(),
ImtConflictTable::ComputeSize(count,
image_pointer_size));
@@ -6494,7 +6514,7 @@
return nullptr;
}
-static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size)
+static void SanityCheckVTable(Handle<mirror::Class> klass, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::PointerArray* check_vtable = klass->GetVTableDuringLinking();
mirror::Class* superclass = (klass->HasSuperClass()) ? klass->GetSuperClass() : nullptr;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d6822c5..fcc6b23 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -25,6 +25,7 @@
#include <vector>
#include "base/allocator.h"
+#include "base/enums.h"
#include "base/hash_set.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -566,8 +567,7 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
- size_t GetImagePointerSize() const {
- DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_;
+ PointerSize GetImagePointerSize() const {
return image_pointer_size_;
}
@@ -630,7 +630,7 @@
// Static version for when the class linker is not yet created.
static ImtConflictTable* CreateImtConflictTable(size_t count,
LinearAlloc* linear_alloc,
- size_t pointer_size);
+ PointerSize pointer_size);
// Create the IMT and conflict tables for a class.
@@ -1166,7 +1166,7 @@
const void* quick_to_interpreter_bridge_trampoline_;
// Image pointer size.
- size_t image_pointer_size_;
+ PointerSize image_pointer_size_;
friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
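`GetImagePointerSize` loses its DCHECK because validity is now established once, where the raw header value is read (the `ValidPointerSize(pointer_size_unchecked)` check in class_linker.cc above), and the enum type carries the guarantee from there. A sketch of that boundary, with hypothetical names:

```cpp
#include <cstddef>
#include <cstdint>
#include <string>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

// Validate exactly once at the boundary; afterwards the type is the proof.
bool ReadImagePointerSize(uint32_t raw, PointerSize* out, std::string* error) {
  if (raw != 4u && raw != 8u) {
    *error = "Invalid image pointer size: " + std::to_string(raw);
    return false;
  }
  *out = static_cast<PointerSize>(raw);
  return true;
}
```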
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 48b6316..5031cf3 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -21,6 +21,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -147,7 +148,7 @@
EXPECT_EQ(0U, JavaLangObject->NumStaticFields());
EXPECT_EQ(0U, JavaLangObject->NumDirectInterfaces());
- size_t pointer_size = class_linker_->GetImagePointerSize();
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
ArtMethod* unimplemented = runtime_->GetImtUnimplementedMethod();
ImTable* imt = JavaLangObject->GetImt(pointer_size);
ASSERT_NE(nullptr, imt);
@@ -216,7 +217,7 @@
mirror::Class* array_ptr = array->GetComponentType();
EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
- size_t pointer_size = class_linker_->GetImagePointerSize();
+ PointerSize pointer_size = class_linker_->GetImagePointerSize();
mirror::Class* JavaLangObject =
class_linker_->FindSystemClass(self, "Ljava/lang/Object;");
ImTable* JavaLangObject_imt = JavaLangObject->GetImt(pointer_size);
@@ -230,14 +231,14 @@
EXPECT_TRUE(method->GetName() != nullptr);
EXPECT_TRUE(method->GetSignature() != Signature::NoSignature());
- EXPECT_TRUE(method->HasDexCacheResolvedMethods(sizeof(void*)));
- EXPECT_TRUE(method->HasDexCacheResolvedTypes(sizeof(void*)));
+ EXPECT_TRUE(method->HasDexCacheResolvedMethods(kRuntimePointerSize));
+ EXPECT_TRUE(method->HasDexCacheResolvedTypes(kRuntimePointerSize));
EXPECT_TRUE(method->HasSameDexCacheResolvedMethods(
method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods(),
- sizeof(void*)));
+ kRuntimePointerSize));
EXPECT_TRUE(method->HasSameDexCacheResolvedTypes(
method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes(),
- sizeof(void*)));
+ kRuntimePointerSize));
}
void AssertField(mirror::Class* klass, ArtField* field)
@@ -275,7 +276,7 @@
if (klass->IsInterface()) {
EXPECT_TRUE(klass->IsAbstract());
// Check that all direct methods are static (either <clinit> or a regular static method).
- for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
EXPECT_TRUE(m.IsStatic());
EXPECT_TRUE(m.IsDirect());
}
@@ -312,19 +313,19 @@
EXPECT_FALSE(klass->IsPrimitive());
EXPECT_TRUE(klass->CanAccess(klass.Get()));
- for (ArtMethod& method : klass->GetDirectMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetDirectMethods(kRuntimePointerSize)) {
AssertMethod(&method);
EXPECT_TRUE(method.IsDirect());
EXPECT_EQ(klass.Get(), method.GetDeclaringClass());
}
- for (ArtMethod& method : klass->GetDeclaredVirtualMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetDeclaredVirtualMethods(kRuntimePointerSize)) {
AssertMethod(&method);
EXPECT_FALSE(method.IsDirect());
EXPECT_EQ(klass.Get(), method.GetDeclaringClass());
}
- for (ArtMethod& method : klass->GetCopiedMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetCopiedMethods(kRuntimePointerSize)) {
AssertMethod(&method);
EXPECT_FALSE(method.IsDirect());
EXPECT_TRUE(method.IsCopied());
@@ -435,7 +436,7 @@
auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0, num_methods = dex_cache->NumResolvedMethods(); i != num_methods; ++i) {
EXPECT_TRUE(
- mirror::DexCache::GetElementPtrSize(resolved_methods, i, sizeof(void*)) != nullptr)
+ mirror::DexCache::GetElementPtrSize(resolved_methods, i, kRuntimePointerSize) != nullptr)
<< dex.GetLocation() << " i=" << i;
}
}
@@ -929,7 +930,7 @@
// Static final primitives that are initialized by a compile-time constant
// expression resolve to a copy of a constant value from the constant pool.
// So <clinit> should be null.
- ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V", sizeof(void*));
+ ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V", kRuntimePointerSize);
EXPECT_TRUE(clinit == nullptr);
EXPECT_EQ(9U, statics->NumStaticFields());
@@ -1016,15 +1017,15 @@
EXPECT_TRUE(J->IsAssignableFrom(B.Get()));
const Signature void_sig = I->GetDexCache()->GetDexFile()->CreateSignature("()V");
- ArtMethod* Ii = I->FindVirtualMethod("i", void_sig, sizeof(void*));
- ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig, sizeof(void*));
- ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig, sizeof(void*));
- ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig, sizeof(void*));
- ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig, sizeof(void*));
- ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig, sizeof(void*));
- ArtMethod* Ai = A->FindVirtualMethod("i", void_sig, sizeof(void*));
- ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig, sizeof(void*));
- ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig, sizeof(void*));
+ ArtMethod* Ii = I->FindVirtualMethod("i", void_sig, kRuntimePointerSize);
+ ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig, kRuntimePointerSize);
+ ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig, kRuntimePointerSize);
+ ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig, kRuntimePointerSize);
+ ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig, kRuntimePointerSize);
+ ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig, kRuntimePointerSize);
+ ArtMethod* Ai = A->FindVirtualMethod("i", void_sig, kRuntimePointerSize);
+ ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig, kRuntimePointerSize);
+ ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig, kRuntimePointerSize);
ASSERT_TRUE(Ii != nullptr);
ASSERT_TRUE(Jj1 != nullptr);
ASSERT_TRUE(Jj2 != nullptr);
@@ -1039,12 +1040,12 @@
EXPECT_NE(Jj2, Aj2);
EXPECT_EQ(Kj1, Jj1);
EXPECT_EQ(Kj2, Jj2);
- EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii, sizeof(void*)));
- EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1, sizeof(void*)));
- EXPECT_EQ(Aj2, A->FindVirtualMethodForInterface(Jj2, sizeof(void*)));
- EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii, sizeof(void*)));
- EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1, sizeof(void*)));
- EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2, sizeof(void*)));
+ EXPECT_EQ(Ai, A->FindVirtualMethodForInterface(Ii, kRuntimePointerSize));
+ EXPECT_EQ(Aj1, A->FindVirtualMethodForInterface(Jj1, kRuntimePointerSize));
+ EXPECT_EQ(Aj2, A->FindVirtualMethodForInterface(Jj2, kRuntimePointerSize));
+ EXPECT_EQ(Ai, A->FindVirtualMethodForVirtualOrInterface(Ii, kRuntimePointerSize));
+ EXPECT_EQ(Aj1, A->FindVirtualMethodForVirtualOrInterface(Jj1, kRuntimePointerSize));
+ EXPECT_EQ(Aj2, A->FindVirtualMethodForVirtualOrInterface(Jj2, kRuntimePointerSize));
ArtField* Afoo = mirror::Class::FindStaticField(soa.Self(), A, "foo", "Ljava/lang/String;");
ArtField* Bfoo = mirror::Class::FindStaticField(soa.Self(), B, "foo", "Ljava/lang/String;");
@@ -1069,8 +1070,8 @@
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
- ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
- ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", sizeof(void*));
+ ArtMethod* clinit = klass->FindClassInitializer(kRuntimePointerSize);
+ ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", kRuntimePointerSize);
const DexFile::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id);
@@ -1134,19 +1135,19 @@
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Class;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::Class::ClassClassSize(kRuntimePointerSize));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Object;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::Object::ClassSize(kRuntimePointerSize));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/String;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::String::ClassSize(kRuntimePointerSize));
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/DexCache;", class_loader);
ASSERT_TRUE(c != nullptr);
- EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize(sizeof(void*)));
+ EXPECT_EQ(c->GetClassSize(), mirror::DexCache::ClassSize(kRuntimePointerSize));
}
static void CheckMethod(ArtMethod* method, bool verified)
@@ -1161,7 +1162,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
EXPECT_EQ((c->GetAccessFlags() & kAccVerificationAttempted) != 0U, preverified)
<< "Class " << PrettyClass(c) << " not as expected";
- for (auto& m : c->GetMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(kRuntimePointerSize)) {
CheckMethod(&m, preverified);
}
}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index b68eb19..f445e52 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -141,11 +141,13 @@
std::unique_ptr<CompilerCallbacks> callbacks_;
- void SetUp();
+ virtual void SetUp();
- void TearDown();
+ virtual void TearDown();
- void FinalizeSetup();
+ // Called to finish up runtime creation and fill in test fields. By default this runs root
+ // initializers, initializes well-known classes, and creates the heap thread pool.
+ virtual void FinalizeSetup();
private:
static std::string GetCoreFileLocation(const char* suffix);
@@ -160,19 +162,13 @@
virtual ~CommonRuntimeTestBase() {}
protected:
- virtual void SetUp() {
+ virtual void SetUp() OVERRIDE {
CommonRuntimeTestImpl::SetUp();
}
- virtual void TearDown() {
+ virtual void TearDown() OVERRIDE {
CommonRuntimeTestImpl::TearDown();
}
-
- // Called to finish up runtime creation and filling test fields. By default runs root
- // initializers, initialize well-known classes, and creates the heap thread pool.
- virtual void FinalizeSetup() {
- CommonRuntimeTestImpl::FinalizeSetup();
- }
};
using CommonRuntimeTest = CommonRuntimeTestBase<testing::Test>;
@@ -205,6 +201,12 @@
return; \
}
+#define TEST_DISABLED_FOR_READ_BARRIER_ON_X86() \
+ if (kUseReadBarrier && kRuntimeISA == kX86) { \
+ printf("WARNING: TEST DISABLED FOR READ BARRIER ON X86\n"); \
+ return; \
+ }
+
} // namespace art
namespace std {
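Like the existing TEST_DISABLED_FOR_* guards, the new macro is intended to sit at the top of a test body and early-return when the unsupported configuration is detected. A hypothetical use (the test name is invented for illustration):

TEST_F(CommonRuntimeTest, SomeReadBarrierSensitiveBehavior) {
  // Prints a warning and skips the rest of the body when running with read
  // barriers on x86; a no-op in every other configuration.
  TEST_DISABLED_FOR_READ_BARRIER_ON_X86();
  // ... assertions that only hold in the supported configurations ...
}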
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5b54f7d..9f3ff3f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -23,6 +23,7 @@
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "class_linker-inl.h"
@@ -79,7 +80,7 @@
mirror::Class* declaring_class = m->GetDeclaringClass();
return declaring_class->FindDeclaredVirtualMethod(declaring_class->GetDexCache(),
m->GetDexMethodIndex(),
- sizeof(void*));
+ kRuntimePointerSize);
}
}
@@ -1406,7 +1407,7 @@
if (m == nullptr) {
return "null";
}
- return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
+ return m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
}
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
@@ -1526,9 +1527,9 @@
auto ptr_size = cl->GetImagePointerSize();
for (ArtMethod& m : c->GetMethods(ptr_size)) {
expandBufAddMethodId(pReply, ToMethodId(&m));
- expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
- expandBufAddUtf8String(pReply,
- m.GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
+ expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName());
+ expandBufAddUtf8String(
+ pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetSignature().ToString());
if (with_generic) {
const char* generic_signature = "";
expandBufAddUtf8String(pReply, generic_signature);
@@ -3934,7 +3935,7 @@
mirror::Class* parameter_type =
m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_,
true /* resolve */,
- sizeof(void*));
+ kRuntimePointerSize);
mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
if (error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
@@ -4025,7 +4026,7 @@
// Translate the method through the vtable, unless the debugger wants to suppress it.
ArtMethod* m = pReq->method;
- size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
ArtMethod* actual_method =
pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
@@ -5068,7 +5069,7 @@
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
for (Breakpoint& breakpoint : gBreakpoints) {
- breakpoint.Method()->VisitRoots(root_visitor, sizeof(void*));
+ breakpoint.Method()->VisitRoots(root_visitor, kRuntimePointerSize);
}
}
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 5d9ae14..a6eb5f6 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -29,6 +29,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/file_magic.h"
#include "base/hash_map.h"
#include "base/logging.h"
@@ -62,7 +63,9 @@
{'0', '3', '5', '\0'},
// Dex version 036 skipped because of an old Dalvik bug on some versions of Android where dex
// files with that version number would erroneously be accepted and run.
- {'0', '3', '7', '\0'}
+ {'0', '3', '7', '\0'},
+ // Dex version 038: Android "O" and beyond.
+ {'0', '3', '8', '\0'}
};
bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
@@ -335,6 +338,11 @@
*error_code = ZipOpenErrorCode::kEntryNotFound;
return nullptr;
}
+ if (zip_entry->GetUncompressedLength() == 0) {
+ *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ return nullptr;
+ }
std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
@@ -432,6 +440,8 @@
MemMap* mem_map,
const OatDexFile* oat_dex_file,
std::string* error_msg) {
+ DCHECK(base != nullptr);
+ DCHECK_NE(size, 0U);
CHECK_ALIGNED(base, 4); // various dex file structures must be word aligned
std::unique_ptr<DexFile> dex_file(
new DexFile(base, size, location, location_checksum, mem_map, oat_dex_file));
@@ -1328,7 +1338,7 @@
AnnotationValue annotation_value;
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::Class> h_klass(hs.NewHandle(klass));
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Handle<mirror::Class> return_type(hs.NewHandle(
method->GetReturnType(true /* resolve */, pointer_size)));
if (!ProcessAnnotationValue(h_klass, &annotation, &annotation_value, return_type, kAllObjects)) {
@@ -1620,12 +1630,12 @@
Handle<mirror::String> string_name(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, name)));
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ArtMethod* annotation_method =
- annotation_class->FindDeclaredVirtualMethodByName(name, sizeof(void*));
+ annotation_class->FindDeclaredVirtualMethodByName(name, pointer_size);
if (annotation_method == nullptr) {
return nullptr;
}
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Handle<mirror::Class> method_return(hs.NewHandle(
annotation_method->GetReturnType(true /* resolve */, pointer_size)));
@@ -1638,8 +1648,16 @@
mirror::Class* annotation_member_class =
WellKnownClasses::ToClass(WellKnownClasses::libcore_reflect_AnnotationMember);
Handle<mirror::Object> new_member(hs.NewHandle(annotation_member_class->AllocObject(self)));
- Handle<mirror::Method> method_object(
- hs.NewHandle(mirror::Method::CreateFromArtMethod(self, annotation_method)));
+ mirror::Method* method_obj_ptr;
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ if (pointer_size == PointerSize::k64) {
+ method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k64, false>(
+ self, annotation_method);
+ } else {
+ method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k32, false>(
+ self, annotation_method);
+ }
+ Handle<mirror::Method> method_object(hs.NewHandle(method_obj_ptr));
if (new_member.Get() == nullptr || string_name.Get() == nullptr ||
method_object.Get() == nullptr || method_return.Get() == nullptr) {
@@ -1947,16 +1965,31 @@
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
- ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType(
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
klass->GetDexFile(), index, dex_cache, class_loader);
if (method == nullptr) {
return false;
}
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
set_object = true;
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
if (method->IsConstructor()) {
- element_object = mirror::Constructor::CreateFromArtMethod(self, method);
+ if (pointer_size == PointerSize::k64) {
+ element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k64,
+ false>(self, method);
+ } else {
+ element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k32,
+ false>(self, method);
+ }
} else {
- element_object = mirror::Method::CreateFromArtMethod(self, method);
+ if (pointer_size == PointerSize::k64) {
+ element_object = mirror::Method::CreateFromArtMethod<PointerSize::k64,
+ false>(self, method);
+ } else {
+ element_object = mirror::Method::CreateFromArtMethod<PointerSize::k32,
+ false>(self, method);
+ }
}
if (element_object == nullptr) {
return false;
@@ -1978,7 +2011,12 @@
return false;
}
set_object = true;
- element_object = mirror::Field::CreateFromArtField(self, field, true);
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ if (pointer_size == PointerSize::k64) {
+ element_object = mirror::Field::CreateFromArtField<PointerSize::k64>(self, field, true);
+ } else {
+ element_object = mirror::Field::CreateFromArtField<PointerSize::k32>(self, field, true);
+ }
if (element_object == nullptr) {
return false;
}
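The same k64/k32 branch now appears three times in this file because CreateFromArtMethod and CreateFromArtField take the pointer size (and transaction mode) as template parameters rather than runtime arguments. A condensed sketch of the idiom; the helper name is hypothetical and not part of the change:

// Hypothetical helper showing the runtime-value-to-template dispatch used
// above. kTransactionActive is hard-coded to false because each call site
// DCHECKs that no transaction is active.
static mirror::Method* CreateMethodObject(Thread* self,
                                          ArtMethod* method,
                                          PointerSize pointer_size) {
  return (pointer_size == PointerSize::k64)
      ? mirror::Method::CreateFromArtMethod<PointerSize::k64, false>(self, method)
      : mirror::Method::CreateFromArtMethod<PointerSize::k32, false>(self, method);
}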
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 3dffe4b..2eca495 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -63,7 +63,7 @@
static const uint32_t kClassDefinitionOrderEnforcedVersion = 37;
static const uint8_t kDexMagic[];
- static constexpr size_t kNumDexVersions = 2;
+ static constexpr size_t kNumDexVersions = 3;
static constexpr size_t kDexVersionLen = 4;
static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
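With kNumDexVersions raised to 3, the header check walks the extended kDexMagicVersions table, so version 038 is now accepted while anything unlisted (such as the 039 exercised by the test below) is still rejected. A sketch of that lookup, assuming the check is a linear scan of the table:

#include <cstring>  // memcmp

// Sketch (assumed) of the version check against the extended table: a dex
// header version is valid iff its four version bytes match a table entry.
static bool IsDexVersionValid(const uint8_t* version) {
  for (size_t i = 0; i < DexFile::kNumDexVersions; ++i) {
    if (memcmp(version, DexFile::kDexMagicVersions[i],
               DexFile::kDexVersionLen) == 0) {
      return true;
    }
  }
  return false;
}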
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 4f8e6f1..2704d8a 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -133,8 +133,46 @@
"AAACAAAAQAEAAAEgAAACAAAAVAEAAAYgAAACAAAAiAEAAAEQAAABAAAAqAEAAAIgAAAPAAAArgEA"
"AAMgAAACAAAAiAIAAAQgAAADAAAAlAIAAAAgAAACAAAAqwIAAAAQAAABAAAAxAIAAA==";
-static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
- const char* location) {
+// kRawDex38 and kRawDex39 are dexed versions of the following Java source:
+//
+// public class Main {
+// public static void main(String[] foo) {
+// }
+// }
+//
+// Each dex file was manually edited to change its dex version to 38 or 39,
+// respectively.
+static const char kRawDex38[] =
+ "ZGV4CjAzOAC4OovJlJ1089ikzK6asMf/f8qp3Kve5VsgAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAwAQAA8AAAACIB"
+ "AAAqAQAAMgEAAEYBAABRAQAAVAEAAFgBAABtAQAAAQAAAAIAAAAEAAAABgAAAAQAAAACAAAAAAAA"
+ "AAUAAAACAAAAHAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAADAAAA"
+ "AAAAAH4BAAAAAAAAAQABAAEAAABzAQAABAAAAHAQAgAAAA4AAQABAAAAAAB4AQAAAQAAAA4AAAAB"
+ "AAAAAwAGPGluaXQ+AAZMTWFpbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwAJTWFpbi5qYXZhAAFWAAJW"
+ "TAATW0xqYXZhL2xhbmcvU3RyaW5nOwAEbWFpbgABAAcOAAMBAAcOAAAAAgAAgYAE8AEBCYgCDAAA"
+ "AAAAAAABAAAAAAAAAAEAAAAIAAAAcAAAAAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAA"
+ "uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
+ "AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+
+static const char kRawDex39[] =
+ "ZGV4CjAzOQC4OovJlJ1089ikzK6asMf/f8qp3Kve5VsgAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAwAQAA8AAAACIB"
+ "AAAqAQAAMgEAAEYBAABRAQAAVAEAAFgBAABtAQAAAQAAAAIAAAAEAAAABgAAAAQAAAACAAAAAAAA"
+ "AAUAAAACAAAAHAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAADAAAA"
+ "AAAAAH4BAAAAAAAAAQABAAEAAABzAQAABAAAAHAQAgAAAA4AAQABAAAAAAB4AQAAAQAAAA4AAAAB"
+ "AAAAAwAGPGluaXQ+AAZMTWFpbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwAJTWFpbi5qYXZhAAFWAAJW"
+ "TAATW0xqYXZhL2xhbmcvU3RyaW5nOwAEbWFpbgABAAcOAAMBAAcOAAAAAgAAgYAE8AEBCYgCDAAA"
+ "AAAAAAABAAAAAAAAAAEAAAAIAAAAcAAAAAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAA"
+ "uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
+ "AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+
+static const char kRawDexZeroLength[] =
+ "UEsDBAoAAAAAAOhxAkkAAAAAAAAAAAAAAAALABwAY2xhc3Nlcy5kZXhVVAkAA2QNoVdnDaFXdXgL"
+ "AAEE5AMBAASIEwAAUEsBAh4DCgAAAAAA6HECSQAAAAAAAAAAAAAAAAsAGAAAAAAAAAAAAKCBAAAA"
+ "AGNsYXNzZXMuZGV4VVQFAANkDaFXdXgLAAEE5AMBAASIEwAAUEsFBgAAAAABAAEAUQAAAEUAAAAA"
+ "AA==";
+
+static void DecodeAndWriteDexFile(const char* base64, const char* location) {
// decode base64
CHECK(base64 != nullptr);
size_t length;
@@ -150,7 +188,11 @@
if (file->FlushCloseOrErase() != 0) {
PLOG(FATAL) << "Could not flush and close test file.";
}
- file.reset();
+}
+
+static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
+ const char* location) {
+ DecodeAndWriteDexFile(base64, location);
// read dex file
ScopedObjectAccess soa(Thread::Current());
@@ -197,6 +239,39 @@
EXPECT_EQ(header.checksum_, raw->GetLocationChecksum());
}
+TEST_F(DexFileTest, Version38Accepted) {
+ ScratchFile tmp;
+ std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex38, tmp.GetFilename().c_str()));
+ ASSERT_TRUE(raw.get() != nullptr);
+
+ const DexFile::Header& header = raw->GetHeader();
+ EXPECT_EQ(38u, header.GetVersion());
+}
+
+TEST_F(DexFileTest, Version39Rejected) {
+ ScratchFile tmp;
+ const char* location = tmp.GetFilename().c_str();
+ DecodeAndWriteDexFile(kRawDex39, location);
+
+ ScopedObjectAccess soa(Thread::Current());
+ static constexpr bool kVerifyChecksum = true;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+}
+
+TEST_F(DexFileTest, ZeroLengthDexRejected) {
+ ScratchFile tmp;
+ const char* location = tmp.GetFilename().c_str();
+ DecodeAndWriteDexFile(kRawDexZeroLength, location);
+
+ ScopedObjectAccess soa(Thread::Current());
+ static constexpr bool kVerifyChecksum = true;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+}
+
TEST_F(DexFileTest, GetLocationChecksum) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> raw(OpenTestDexFile("Main"));
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 300e618..d04087a 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -28,7 +28,7 @@
namespace art {
const char* const Instruction::kInstructionNames[] = {
-#define INSTRUCTION_NAME(o, c, pname, f, r, i, a, v) pname,
+#define INSTRUCTION_NAME(o, c, pname, f, i, a, v) pname,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_NAME)
#undef DEX_INSTRUCTION_LIST
@@ -36,7 +36,7 @@
};
Instruction::Format const Instruction::kInstructionFormats[] = {
-#define INSTRUCTION_FORMAT(o, c, p, format, r, i, a, v) format,
+#define INSTRUCTION_FORMAT(o, c, p, format, i, a, v) format,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_FORMAT)
#undef DEX_INSTRUCTION_LIST
@@ -44,7 +44,7 @@
};
Instruction::IndexType const Instruction::kInstructionIndexTypes[] = {
-#define INSTRUCTION_INDEX_TYPE(o, c, p, f, r, index, a, v) index,
+#define INSTRUCTION_INDEX_TYPE(o, c, p, f, index, a, v) index,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_INDEX_TYPE)
#undef DEX_INSTRUCTION_LIST
@@ -52,7 +52,7 @@
};
int const Instruction::kInstructionFlags[] = {
-#define INSTRUCTION_FLAGS(o, c, p, f, r, i, flags, v) flags,
+#define INSTRUCTION_FLAGS(o, c, p, f, i, flags, v) flags,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_FLAGS)
#undef DEX_INSTRUCTION_LIST
@@ -60,7 +60,7 @@
};
int const Instruction::kInstructionVerifyFlags[] = {
-#define INSTRUCTION_VERIFY_FLAGS(o, c, p, f, r, i, a, vflags) vflags,
+#define INSTRUCTION_VERIFY_FLAGS(o, c, p, f, i, a, vflags) vflags,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_VERIFY_FLAGS)
#undef DEX_INSTRUCTION_LIST
@@ -68,7 +68,7 @@
};
int const Instruction::kInstructionSizeInCodeUnits[] = {
-#define INSTRUCTION_SIZE(opcode, c, p, format, r, i, a, v) \
+#define INSTRUCTION_SIZE(opcode, c, p, format, i, a, v) \
(((opcode) == NOP) ? -1 : \
(((format) >= k10x) && ((format) <= k10t)) ? 1 : \
(((format) >= k20t) && ((format) <= k25x)) ? 2 : \
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 89c3db6..c7856f0 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -80,7 +80,7 @@
};
enum Code { // private marker to avoid generate-operator-out.py from processing.
-#define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = (opcode),
+#define INSTRUCTION_ENUM(opcode, cname, p, f, i, a, v) cname = (opcode),
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM)
#undef DEX_INSTRUCTION_LIST
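All of the per-instruction tables in dex_instruction.cc are generated from one list via the X-macro pattern, which is why deleting the unused boolean column means rewriting every V() row in dex_instruction_list.h below and every column-picking macro above in lockstep. A self-contained sketch of the pattern with the new seven-parameter shape:

// Minimal X-macro example using the new seven-column V() signature: each
// table is produced by redefining V to select one column, so dropping a
// column changes the arity of every selector macro at once.
#define EXAMPLE_INSTRUCTION_LIST(V) \
  V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, kVerifyNone) \
  V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, kVerifyNone)

#define PICK_NAME(opcode, cname, pname, format, index, flags, vflags) pname,
static const char* const kExampleNames[] = {
  EXAMPLE_INSTRUCTION_LIST(PICK_NAME)
};
#undef PICK_NAME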
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index 9d7e0c4..acdffd9 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -17,264 +17,264 @@
#ifndef ART_RUNTIME_DEX_INSTRUCTION_LIST_H_
#define ART_RUNTIME_DEX_INSTRUCTION_LIST_H_
+// V(opcode, instruction_code, name, format, index, flags, verifier_flags);
#define DEX_INSTRUCTION_LIST(V) \
- V(0x00, NOP, "nop", k10x, false, kIndexNone, kContinue, kVerifyNone) \
- V(0x01, MOVE, "move", k12x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x02, MOVE_FROM16, "move/from16", k22x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x03, MOVE_16, "move/16", k32x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x04, MOVE_WIDE, "move-wide", k12x, true, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x05, MOVE_WIDE_FROM16, "move-wide/from16", k22x, true, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x06, MOVE_WIDE_16, "move-wide/16", k32x, true, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x07, MOVE_OBJECT, "move-object", k12x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x08, MOVE_OBJECT_FROM16, "move-object/from16", k22x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x09, MOVE_OBJECT_16, "move-object/16", k32x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x0A, MOVE_RESULT, "move-result", k11x, true, kIndexNone, kContinue, kVerifyRegA) \
- V(0x0B, MOVE_RESULT_WIDE, "move-result-wide", k11x, true, kIndexNone, kContinue, kVerifyRegAWide) \
- V(0x0C, MOVE_RESULT_OBJECT, "move-result-object", k11x, true, kIndexNone, kContinue, kVerifyRegA) \
- V(0x0D, MOVE_EXCEPTION, "move-exception", k11x, true, kIndexNone, kContinue, kVerifyRegA) \
- V(0x0E, RETURN_VOID, "return-void", k10x, false, kIndexNone, kReturn, kVerifyNone) \
- V(0x0F, RETURN, "return", k11x, false, kIndexNone, kReturn, kVerifyRegA) \
- V(0x10, RETURN_WIDE, "return-wide", k11x, false, kIndexNone, kReturn, kVerifyRegAWide) \
- V(0x11, RETURN_OBJECT, "return-object", k11x, false, kIndexNone, kReturn, kVerifyRegA) \
- V(0x12, CONST_4, "const/4", k11n, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x13, CONST_16, "const/16", k21s, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x14, CONST, "const", k31i, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x15, CONST_HIGH16, "const/high16", k21h, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
- V(0x16, CONST_WIDE_16, "const-wide/16", k21s, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x17, CONST_WIDE_32, "const-wide/32", k31i, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x18, CONST_WIDE, "const-wide", k51l, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x19, CONST_WIDE_HIGH16, "const-wide/high16", k21h, true, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
- V(0x1A, CONST_STRING, "const-string", k21c, true, kIndexStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
- V(0x1B, CONST_STRING_JUMBO, "const-string/jumbo", k31c, true, kIndexStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
- V(0x1C, CONST_CLASS, "const-class", k21c, true, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
- V(0x1D, MONITOR_ENTER, "monitor-enter", k11x, false, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA) \
- V(0x1E, MONITOR_EXIT, "monitor-exit", k11x, false, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA) \
- V(0x1F, CHECK_CAST, "check-cast", k21c, true, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
- V(0x20, INSTANCE_OF, "instance-of", k22c, true, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
- V(0x21, ARRAY_LENGTH, "array-length", k12x, true, kIndexNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
- V(0x22, NEW_INSTANCE, "new-instance", k21c, true, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegBNewInstance) \
- V(0x23, NEW_ARRAY, "new-array", k22c, true, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegB | kVerifyRegCNewArray) \
- V(0x24, FILLED_NEW_ARRAY, "filled-new-array", k35c, false, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArg) \
- V(0x25, FILLED_NEW_ARRAY_RANGE, "filled-new-array/range", k3rc, false, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArgRange) \
- V(0x26, FILL_ARRAY_DATA, "fill-array-data", k31t, false, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyArrayData) \
- V(0x27, THROW, "throw", k11x, false, kIndexNone, kThrow, kVerifyRegA) \
- V(0x28, GOTO, "goto", k10t, false, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
- V(0x29, GOTO_16, "goto/16", k20t, false, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
- V(0x2A, GOTO_32, "goto/32", k30t, false, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
- V(0x2B, PACKED_SWITCH, "packed-switch", k31t, false, kIndexNone, kContinue | kSwitch, kVerifyRegA | kVerifySwitchTargets) \
- V(0x2C, SPARSE_SWITCH, "sparse-switch", k31t, false, kIndexNone, kContinue | kSwitch, kVerifyRegA | kVerifySwitchTargets) \
- V(0x2D, CMPL_FLOAT, "cmpl-float", k23x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x2E, CMPG_FLOAT, "cmpg-float", k23x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x2F, CMPL_DOUBLE, "cmpl-double", k23x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x30, CMPG_DOUBLE, "cmpg-double", k23x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x31, CMP_LONG, "cmp-long", k23x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x32, IF_EQ, "if-eq", k22t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x33, IF_NE, "if-ne", k22t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x34, IF_LT, "if-lt", k22t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x35, IF_GE, "if-ge", k22t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x36, IF_GT, "if-gt", k22t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x37, IF_LE, "if-le", k22t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
- V(0x38, IF_EQZ, "if-eqz", k21t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x39, IF_NEZ, "if-nez", k21t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3A, IF_LTZ, "if-ltz", k21t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3B, IF_GEZ, "if-gez", k21t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3C, IF_GTZ, "if-gtz", k21t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3D, IF_LEZ, "if-lez", k21t, false, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
- V(0x3E, UNUSED_3E, "unused-3e", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x3F, UNUSED_3F, "unused-3f", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x40, UNUSED_40, "unused-40", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x41, UNUSED_41, "unused-41", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x42, UNUSED_42, "unused-42", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x43, UNUSED_43, "unused-43", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x44, AGET, "aget", k23x, true, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x45, AGET_WIDE, "aget-wide", k23x, true, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
- V(0x46, AGET_OBJECT, "aget-object", k23x, true, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x47, AGET_BOOLEAN, "aget-boolean", k23x, true, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x48, AGET_BYTE, "aget-byte", k23x, true, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x49, AGET_CHAR, "aget-char", k23x, true, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4A, AGET_SHORT, "aget-short", k23x, true, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4B, APUT, "aput", k23x, false, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4C, APUT_WIDE, "aput-wide", k23x, false, kIndexNone, kContinue | kThrow | kStore, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
- V(0x4D, APUT_OBJECT, "aput-object", k23x, false, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4E, APUT_BOOLEAN, "aput-boolean", k23x, false, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x4F, APUT_BYTE, "aput-byte", k23x, false, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x50, APUT_CHAR, "aput-char", k23x, false, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x51, APUT_SHORT, "aput-short", k23x, false, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x52, IGET, "iget", k22c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x53, IGET_WIDE, "iget-wide", k22c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
- V(0x54, IGET_OBJECT, "iget-object", k22c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x55, IGET_BOOLEAN, "iget-boolean", k22c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x56, IGET_BYTE, "iget-byte", k22c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x57, IGET_CHAR, "iget-char", k22c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x58, IGET_SHORT, "iget-short", k22c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x59, IPUT, "iput", k22c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5A, IPUT_WIDE, "iput-wide", k22c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
- V(0x5B, IPUT_OBJECT, "iput-object", k22c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5C, IPUT_BOOLEAN, "iput-boolean", k22c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5D, IPUT_BYTE, "iput-byte", k22c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5E, IPUT_CHAR, "iput-char", k22c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x5F, IPUT_SHORT, "iput-short", k22c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
- V(0x60, SGET, "sget", k21c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x61, SGET_WIDE, "sget-wide", k21c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
- V(0x62, SGET_OBJECT, "sget-object", k21c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x63, SGET_BOOLEAN, "sget-boolean", k21c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x64, SGET_BYTE, "sget-byte", k21c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x65, SGET_CHAR, "sget-char", k21c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x66, SGET_SHORT, "sget-short", k21c, true, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x67, SPUT, "sput", k21c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x68, SPUT_WIDE, "sput-wide", k21c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
- V(0x69, SPUT_OBJECT, "sput-object", k21c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6A, SPUT_BOOLEAN, "sput-boolean", k21c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6B, SPUT_BYTE, "sput-byte", k21c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6C, SPUT_CHAR, "sput-char", k21c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6D, SPUT_SHORT, "sput-short", k21c, false, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6E, INVOKE_VIRTUAL, "invoke-virtual", k35c, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x6F, INVOKE_SUPER, "invoke-super", k35c, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x71, INVOKE_STATIC, "invoke-static", k35c, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
- V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
- V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, false, kIndexNone, kReturn, kVerifyNone) \
- V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x77, INVOKE_STATIC_RANGE, "invoke-static/range", k3rc, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
- V(0x78, INVOKE_INTERFACE_RANGE, "invoke-interface/range", k3rc, false, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
- V(0x79, UNUSED_79, "unused-79", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x7A, UNUSED_7A, "unused-7a", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0x7B, NEG_INT, "neg-int", k12x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x7C, NOT_INT, "not-int", k12x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x7D, NEG_LONG, "neg-long", k12x, true, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x7E, NOT_LONG, "not-long", k12x, true, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x7F, NEG_FLOAT, "neg-float", k12x, true, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
- V(0x80, NEG_DOUBLE, "neg-double", k12x, true, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x81, INT_TO_LONG, "int-to-long", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x82, INT_TO_FLOAT, "int-to-float", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x83, INT_TO_DOUBLE, "int-to-double", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x84, LONG_TO_INT, "long-to-int", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x85, LONG_TO_FLOAT, "long-to-float", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x86, LONG_TO_DOUBLE, "long-to-double", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x87, FLOAT_TO_INT, "float-to-int", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x88, FLOAT_TO_LONG, "float-to-long", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x89, FLOAT_TO_DOUBLE, "float-to-double", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
- V(0x8A, DOUBLE_TO_INT, "double-to-int", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x8B, DOUBLE_TO_LONG, "double-to-long", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
- V(0x8C, DOUBLE_TO_FLOAT, "double-to-float", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
- V(0x8D, INT_TO_BYTE, "int-to-byte", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x8E, INT_TO_CHAR, "int-to-char", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x8F, INT_TO_SHORT, "int-to-short", k12x, true, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
- V(0x90, ADD_INT, "add-int", k23x, true, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x91, SUB_INT, "sub-int", k23x, true, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x92, MUL_INT, "mul-int", k23x, true, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x93, DIV_INT, "div-int", k23x, true, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x94, REM_INT, "rem-int", k23x, true, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x95, AND_INT, "and-int", k23x, true, kIndexNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x96, OR_INT, "or-int", k23x, true, kIndexNone, kContinue | kOr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x97, XOR_INT, "xor-int", k23x, true, kIndexNone, kContinue | kXor, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x98, SHL_INT, "shl-int", k23x, true, kIndexNone, kContinue | kShl, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x99, SHR_INT, "shr-int", k23x, true, kIndexNone, kContinue | kShr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x9A, USHR_INT, "ushr-int", k23x, true, kIndexNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0x9B, ADD_LONG, "add-long", k23x, true, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9C, SUB_LONG, "sub-long", k23x, true, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9D, MUL_LONG, "mul-long", k23x, true, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9E, DIV_LONG, "div-long", k23x, true, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0x9F, REM_LONG, "rem-long", k23x, true, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA0, AND_LONG, "and-long", k23x, true, kIndexNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA1, OR_LONG, "or-long", k23x, true, kIndexNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA2, XOR_LONG, "xor-long", k23x, true, kIndexNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xA3, SHL_LONG, "shl-long", k23x, true, kIndexNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
- V(0xA4, SHR_LONG, "shr-long", k23x, true, kIndexNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
- V(0xA5, USHR_LONG, "ushr-long", k23x, true, kIndexNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
- V(0xA6, ADD_FLOAT, "add-float", k23x, true, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xA7, SUB_FLOAT, "sub-float", k23x, true, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xA8, MUL_FLOAT, "mul-float", k23x, true, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xA9, DIV_FLOAT, "div-float", k23x, true, kIndexNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xAA, REM_FLOAT, "rem-float", k23x, true, kIndexNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
- V(0xAB, ADD_DOUBLE, "add-double", k23x, true, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAC, SUB_DOUBLE, "sub-double", k23x, true, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAD, MUL_DOUBLE, "mul-double", k23x, true, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAE, DIV_DOUBLE, "div-double", k23x, true, kIndexNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xAF, REM_DOUBLE, "rem-double", k23x, true, kIndexNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
- V(0xB0, ADD_INT_2ADDR, "add-int/2addr", k12x, true, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
- V(0xB1, SUB_INT_2ADDR, "sub-int/2addr", k12x, true, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
- V(0xB2, MUL_INT_2ADDR, "mul-int/2addr", k12x, true, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
- V(0xB3, DIV_INT_2ADDR, "div-int/2addr", k12x, true, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB) \
- V(0xB4, REM_INT_2ADDR, "rem-int/2addr", k12x, true, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB) \
- V(0xB5, AND_INT_2ADDR, "and-int/2addr", k12x, true, kIndexNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB) \
- V(0xB6, OR_INT_2ADDR, "or-int/2addr", k12x, true, kIndexNone, kContinue | kOr, kVerifyRegA | kVerifyRegB) \
- V(0xB7, XOR_INT_2ADDR, "xor-int/2addr", k12x, true, kIndexNone, kContinue | kXor, kVerifyRegA | kVerifyRegB) \
- V(0xB8, SHL_INT_2ADDR, "shl-int/2addr", k12x, true, kIndexNone, kContinue | kShl, kVerifyRegA | kVerifyRegB) \
- V(0xB9, SHR_INT_2ADDR, "shr-int/2addr", k12x, true, kIndexNone, kContinue | kShr, kVerifyRegA | kVerifyRegB) \
- V(0xBA, USHR_INT_2ADDR, "ushr-int/2addr", k12x, true, kIndexNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB) \
- V(0xBB, ADD_LONG_2ADDR, "add-long/2addr", k12x, true, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBC, SUB_LONG_2ADDR, "sub-long/2addr", k12x, true, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBD, MUL_LONG_2ADDR, "mul-long/2addr", k12x, true, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBE, DIV_LONG_2ADDR, "div-long/2addr", k12x, true, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xBF, REM_LONG_2ADDR, "rem-long/2addr", k12x, true, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC0, AND_LONG_2ADDR, "and-long/2addr", k12x, true, kIndexNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC1, OR_LONG_2ADDR, "or-long/2addr", k12x, true, kIndexNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC2, XOR_LONG_2ADDR, "xor-long/2addr", k12x, true, kIndexNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xC3, SHL_LONG_2ADDR, "shl-long/2addr", k12x, true, kIndexNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegB) \
- V(0xC4, SHR_LONG_2ADDR, "shr-long/2addr", k12x, true, kIndexNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegB) \
- V(0xC5, USHR_LONG_2ADDR, "ushr-long/2addr", k12x, true, kIndexNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegB) \
- V(0xC6, ADD_FLOAT_2ADDR, "add-float/2addr", k12x, true, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
- V(0xC7, SUB_FLOAT_2ADDR, "sub-float/2addr", k12x, true, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
- V(0xC8, MUL_FLOAT_2ADDR, "mul-float/2addr", k12x, true, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
- V(0xC9, DIV_FLOAT_2ADDR, "div-float/2addr", k12x, true, kIndexNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB) \
- V(0xCA, REM_FLOAT_2ADDR, "rem-float/2addr", k12x, true, kIndexNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB) \
- V(0xCB, ADD_DOUBLE_2ADDR, "add-double/2addr", k12x, true, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCC, SUB_DOUBLE_2ADDR, "sub-double/2addr", k12x, true, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCD, MUL_DOUBLE_2ADDR, "mul-double/2addr", k12x, true, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCE, DIV_DOUBLE_2ADDR, "div-double/2addr", k12x, true, kIndexNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xCF, REM_DOUBLE_2ADDR, "rem-double/2addr", k12x, true, kIndexNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
- V(0xD0, ADD_INT_LIT16, "add-int/lit16", k22s, true, kIndexNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD1, RSUB_INT, "rsub-int", k22s, true, kIndexNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD2, MUL_INT_LIT16, "mul-int/lit16", k22s, true, kIndexNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD3, DIV_INT_LIT16, "div-int/lit16", k22s, true, kIndexNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD4, REM_INT_LIT16, "rem-int/lit16", k22s, true, kIndexNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD5, AND_INT_LIT16, "and-int/lit16", k22s, true, kIndexNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD6, OR_INT_LIT16, "or-int/lit16", k22s, true, kIndexNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD7, XOR_INT_LIT16, "xor-int/lit16", k22s, true, kIndexNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD8, ADD_INT_LIT8, "add-int/lit8", k22b, true, kIndexNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xD9, RSUB_INT_LIT8, "rsub-int/lit8", k22b, true, kIndexNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDA, MUL_INT_LIT8, "mul-int/lit8", k22b, true, kIndexNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDB, DIV_INT_LIT8, "div-int/lit8", k22b, true, kIndexNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDC, REM_INT_LIT8, "rem-int/lit8", k22b, true, kIndexNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDD, AND_INT_LIT8, "and-int/lit8", k22b, true, kIndexNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDE, OR_INT_LIT8, "or-int/lit8", k22b, true, kIndexNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xDF, XOR_INT_LIT8, "xor-int/lit8", k22b, true, kIndexNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE0, SHL_INT_LIT8, "shl-int/lit8", k22b, true, kIndexNone, kContinue | kShl | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE1, SHR_INT_LIT8, "shr-int/lit8", k22b, true, kIndexNone, kContinue | kShr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE2, USHR_INT_LIT8, "ushr-int/lit8", k22b, true, kIndexNone, kContinue | kUshr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
- V(0xE3, IGET_QUICK, "iget-quick", k22c, true, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE4, IGET_WIDE_QUICK, "iget-wide-quick", k22c, true, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE5, IGET_OBJECT_QUICK, "iget-object-quick", k22c, true, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE6, IPUT_QUICK, "iput-quick", k22c, false, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, false, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kIndexVtableOffset, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
- V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kIndexVtableOffset, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
- V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEF, IGET_BOOLEAN_QUICK, "iget-boolean-quick", k22c, true, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, true, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, true, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, true, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xF3, INVOKE_LAMBDA, "invoke-lambda", k25x, false, kIndexNone, kContinue | kThrow | kInvoke | kExperimental, kVerifyRegC /*TODO: | kVerifyVarArg*/) \
- V(0xF4, UNUSED_F4, "unused-f4", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0xF5, CAPTURE_VARIABLE, "capture-variable", k21c, false, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegBString) \
- /* TODO(iam): get rid of the unused 'false' column */ \
- V(0xF6, CREATE_LAMBDA, "create-lambda", k21c, false_UNUSED, kIndexMethodRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegBMethod) \
- V(0xF7, LIBERATE_VARIABLE, "liberate-variable", k22c, false, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCString) \
- V(0xF8, BOX_LAMBDA, "box-lambda", k22x, true, kIndexNone, kContinue | kExperimental, kVerifyRegA | kVerifyRegB) \
- V(0xF9, UNBOX_LAMBDA, "unbox-lambda", k22c, true, kIndexTypeRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
- V(0xFA, UNUSED_FA, "unused-fa", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0xFB, UNUSED_FB, "unused-fb", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0xFC, UNUSED_FC, "unused-fc", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0xFD, UNUSED_FD, "unused-fd", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0xFE, UNUSED_FE, "unused-fe", k10x, false, kIndexUnknown, 0, kVerifyError) \
- V(0xFF, UNUSED_FF, "unused-ff", k10x, false, kIndexUnknown, 0, kVerifyError)
+ V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, kVerifyNone) \
+ V(0x01, MOVE, "move", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x02, MOVE_FROM16, "move/from16", k22x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x03, MOVE_16, "move/16", k32x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x04, MOVE_WIDE, "move-wide", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x05, MOVE_WIDE_FROM16, "move-wide/from16", k22x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x06, MOVE_WIDE_16, "move-wide/16", k32x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x07, MOVE_OBJECT, "move-object", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x08, MOVE_OBJECT_FROM16, "move-object/from16", k22x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x09, MOVE_OBJECT_16, "move-object/16", k32x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x0A, MOVE_RESULT, "move-result", k11x, kIndexNone, kContinue, kVerifyRegA) \
+ V(0x0B, MOVE_RESULT_WIDE, "move-result-wide", k11x, kIndexNone, kContinue, kVerifyRegAWide) \
+ V(0x0C, MOVE_RESULT_OBJECT, "move-result-object", k11x, kIndexNone, kContinue, kVerifyRegA) \
+ V(0x0D, MOVE_EXCEPTION, "move-exception", k11x, kIndexNone, kContinue, kVerifyRegA) \
+ V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, kVerifyNone) \
+ V(0x0F, RETURN, "return", k11x, kIndexNone, kReturn, kVerifyRegA) \
+ V(0x10, RETURN_WIDE, "return-wide", k11x, kIndexNone, kReturn, kVerifyRegAWide) \
+ V(0x11, RETURN_OBJECT, "return-object", k11x, kIndexNone, kReturn, kVerifyRegA) \
+ V(0x12, CONST_4, "const/4", k11n, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x13, CONST_16, "const/16", k21s, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x14, CONST, "const", k31i, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x15, CONST_HIGH16, "const/high16", k21h, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegA) \
+ V(0x16, CONST_WIDE_16, "const-wide/16", k21s, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x17, CONST_WIDE_32, "const-wide/32", k31i, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x18, CONST_WIDE, "const-wide", k51l, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x19, CONST_WIDE_HIGH16, "const-wide/high16", k21h, kIndexNone, kContinue | kRegBFieldOrConstant, kVerifyRegAWide) \
+ V(0x1A, CONST_STRING, "const-string", k21c, kIndexStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
+ V(0x1B, CONST_STRING_JUMBO, "const-string/jumbo", k31c, kIndexStringRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBString) \
+ V(0x1C, CONST_CLASS, "const-class", k21c, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
+ V(0x1D, MONITOR_ENTER, "monitor-enter", k11x, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA) \
+ V(0x1E, MONITOR_EXIT, "monitor-exit", k11x, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA) \
+ V(0x1F, CHECK_CAST, "check-cast", k21c, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegBType) \
+ V(0x20, INSTANCE_OF, "instance-of", k22c, kIndexTypeRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
+ V(0x21, ARRAY_LENGTH, "array-length", k12x, kIndexNone, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \
+ V(0x22, NEW_INSTANCE, "new-instance", k21c, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegBNewInstance) \
+ V(0x23, NEW_ARRAY, "new-array", k22c, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyRegB | kVerifyRegCNewArray) \
+ V(0x24, FILLED_NEW_ARRAY, "filled-new-array", k35c, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArg) \
+ V(0x25, FILLED_NEW_ARRAY_RANGE, "filled-new-array/range", k3rc, kIndexTypeRef, kContinue | kThrow | kClobber, kVerifyRegBType | kVerifyVarArgRange) \
+ V(0x26, FILL_ARRAY_DATA, "fill-array-data", k31t, kIndexNone, kContinue | kThrow | kClobber, kVerifyRegA | kVerifyArrayData) \
+ V(0x27, THROW, "throw", k11x, kIndexNone, kThrow, kVerifyRegA) \
+ V(0x28, GOTO, "goto", k10t, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
+ V(0x29, GOTO_16, "goto/16", k20t, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
+ V(0x2A, GOTO_32, "goto/32", k30t, kIndexNone, kBranch | kUnconditional, kVerifyBranchTarget) \
+ V(0x2B, PACKED_SWITCH, "packed-switch", k31t, kIndexNone, kContinue | kSwitch, kVerifyRegA | kVerifySwitchTargets) \
+ V(0x2C, SPARSE_SWITCH, "sparse-switch", k31t, kIndexNone, kContinue | kSwitch, kVerifyRegA | kVerifySwitchTargets) \
+ V(0x2D, CMPL_FLOAT, "cmpl-float", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x2E, CMPG_FLOAT, "cmpg-float", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x2F, CMPL_DOUBLE, "cmpl-double", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x30, CMPG_DOUBLE, "cmpg-double", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x31, CMP_LONG, "cmp-long", k23x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x32, IF_EQ, "if-eq", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x33, IF_NE, "if-ne", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x34, IF_LT, "if-lt", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x35, IF_GE, "if-ge", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x36, IF_GT, "if-gt", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x37, IF_LE, "if-le", k22t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyRegB | kVerifyBranchTarget) \
+ V(0x38, IF_EQZ, "if-eqz", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x39, IF_NEZ, "if-nez", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3A, IF_LTZ, "if-ltz", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3B, IF_GEZ, "if-gez", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3C, IF_GTZ, "if-gtz", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3D, IF_LEZ, "if-lez", k21t, kIndexNone, kContinue | kBranch, kVerifyRegA | kVerifyBranchTarget) \
+ V(0x3E, UNUSED_3E, "unused-3e", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x3F, UNUSED_3F, "unused-3f", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x40, UNUSED_40, "unused-40", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x41, UNUSED_41, "unused-41", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x42, UNUSED_42, "unused-42", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x43, UNUSED_43, "unused-43", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x44, AGET, "aget", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x45, AGET_WIDE, "aget-wide", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
+ V(0x46, AGET_OBJECT, "aget-object", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x47, AGET_BOOLEAN, "aget-boolean", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x48, AGET_BYTE, "aget-byte", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x49, AGET_CHAR, "aget-char", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4A, AGET_SHORT, "aget-short", k23x, kIndexNone, kContinue | kThrow | kLoad, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4B, APUT, "aput", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4C, APUT_WIDE, "aput-wide", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegAWide | kVerifyRegB | kVerifyRegC) \
+ V(0x4D, APUT_OBJECT, "aput-object", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4E, APUT_BOOLEAN, "aput-boolean", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x4F, APUT_BYTE, "aput-byte", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x50, APUT_CHAR, "aput-char", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x51, APUT_SHORT, "aput-short", k23x, kIndexNone, kContinue | kThrow | kStore, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x52, IGET, "iget", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x53, IGET_WIDE, "iget-wide", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
+ V(0x54, IGET_OBJECT, "iget-object", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x55, IGET_BOOLEAN, "iget-boolean", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x56, IGET_BYTE, "iget-byte", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x57, IGET_CHAR, "iget-char", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x58, IGET_SHORT, "iget-short", k22c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x59, IPUT, "iput", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5A, IPUT_WIDE, "iput-wide", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRegCField) \
+ V(0x5B, IPUT_OBJECT, "iput-object", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5C, IPUT_BOOLEAN, "iput-boolean", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5D, IPUT_BYTE, "iput-byte", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5E, IPUT_CHAR, "iput-char", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x5F, IPUT_SHORT, "iput-short", k22c, kIndexFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRegCField) \
+ V(0x60, SGET, "sget", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x61, SGET_WIDE, "sget-wide", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
+ V(0x62, SGET_OBJECT, "sget-object", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x63, SGET_BOOLEAN, "sget-boolean", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x64, SGET_BYTE, "sget-byte", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x65, SGET_CHAR, "sget-char", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x66, SGET_SHORT, "sget-short", k21c, kIndexFieldRef, kContinue | kThrow | kLoad | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x67, SPUT, "sput", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x68, SPUT_WIDE, "sput-wide", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegAWide | kVerifyRegBField) \
+ V(0x69, SPUT_OBJECT, "sput-object", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6A, SPUT_BOOLEAN, "sput-boolean", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6B, SPUT_BYTE, "sput-byte", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6C, SPUT_CHAR, "sput-char", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6D, SPUT_SHORT, "sput-short", k21c, kIndexFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
+ V(0x6E, INVOKE_VIRTUAL, "invoke-virtual", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x6F, INVOKE_SUPER, "invoke-super", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x71, INVOKE_STATIC, "invoke-static", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
+ V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, kIndexNone, kReturn, kVerifyNone) \
+ V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x77, INVOKE_STATIC_RANGE, "invoke-static/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
+ V(0x78, INVOKE_INTERFACE_RANGE, "invoke-interface/range", k3rc, kIndexMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x79, UNUSED_79, "unused-79", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x7A, UNUSED_7A, "unused-7a", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0x7B, NEG_INT, "neg-int", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x7C, NOT_INT, "not-int", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x7D, NEG_LONG, "neg-long", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x7E, NOT_LONG, "not-long", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x7F, NEG_FLOAT, "neg-float", k12x, kIndexNone, kContinue, kVerifyRegA | kVerifyRegB) \
+ V(0x80, NEG_DOUBLE, "neg-double", k12x, kIndexNone, kContinue, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x81, INT_TO_LONG, "int-to-long", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x82, INT_TO_FLOAT, "int-to-float", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x83, INT_TO_DOUBLE, "int-to-double", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x84, LONG_TO_INT, "long-to-int", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x85, LONG_TO_FLOAT, "long-to-float", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x86, LONG_TO_DOUBLE, "long-to-double", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x87, FLOAT_TO_INT, "float-to-int", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x88, FLOAT_TO_LONG, "float-to-long", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x89, FLOAT_TO_DOUBLE, "float-to-double", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegB) \
+ V(0x8A, DOUBLE_TO_INT, "double-to-int", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x8B, DOUBLE_TO_LONG, "double-to-long", k12x, kIndexNone, kContinue | kCast, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0x8C, DOUBLE_TO_FLOAT, "double-to-float", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegBWide) \
+ V(0x8D, INT_TO_BYTE, "int-to-byte", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x8E, INT_TO_CHAR, "int-to-char", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x8F, INT_TO_SHORT, "int-to-short", k12x, kIndexNone, kContinue | kCast, kVerifyRegA | kVerifyRegB) \
+ V(0x90, ADD_INT, "add-int", k23x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x91, SUB_INT, "sub-int", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x92, MUL_INT, "mul-int", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x93, DIV_INT, "div-int", k23x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x94, REM_INT, "rem-int", k23x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x95, AND_INT, "and-int", k23x, kIndexNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x96, OR_INT, "or-int", k23x, kIndexNone, kContinue | kOr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x97, XOR_INT, "xor-int", k23x, kIndexNone, kContinue | kXor, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x98, SHL_INT, "shl-int", k23x, kIndexNone, kContinue | kShl, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x99, SHR_INT, "shr-int", k23x, kIndexNone, kContinue | kShr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x9A, USHR_INT, "ushr-int", k23x, kIndexNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0x9B, ADD_LONG, "add-long", k23x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9C, SUB_LONG, "sub-long", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9D, MUL_LONG, "mul-long", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9E, DIV_LONG, "div-long", k23x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0x9F, REM_LONG, "rem-long", k23x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA0, AND_LONG, "and-long", k23x, kIndexNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA1, OR_LONG, "or-long", k23x, kIndexNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA2, XOR_LONG, "xor-long", k23x, kIndexNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xA3, SHL_LONG, "shl-long", k23x, kIndexNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+ V(0xA4, SHR_LONG, "shr-long", k23x, kIndexNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+ V(0xA5, USHR_LONG, "ushr-long", k23x, kIndexNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegC) \
+ V(0xA6, ADD_FLOAT, "add-float", k23x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xA7, SUB_FLOAT, "sub-float", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xA8, MUL_FLOAT, "mul-float", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xA9, DIV_FLOAT, "div-float", k23x, kIndexNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xAA, REM_FLOAT, "rem-float", k23x, kIndexNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB | kVerifyRegC) \
+ V(0xAB, ADD_DOUBLE, "add-double", k23x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAC, SUB_DOUBLE, "sub-double", k23x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAD, MUL_DOUBLE, "mul-double", k23x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAE, DIV_DOUBLE, "div-double", k23x, kIndexNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xAF, REM_DOUBLE, "rem-double", k23x, kIndexNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide | kVerifyRegCWide) \
+ V(0xB0, ADD_INT_2ADDR, "add-int/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
+ V(0xB1, SUB_INT_2ADDR, "sub-int/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
+ V(0xB2, MUL_INT_2ADDR, "mul-int/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
+ V(0xB3, DIV_INT_2ADDR, "div-int/2addr", k12x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegA | kVerifyRegB) \
+ V(0xB4, REM_INT_2ADDR, "rem-int/2addr", k12x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegA | kVerifyRegB) \
+ V(0xB5, AND_INT_2ADDR, "and-int/2addr", k12x, kIndexNone, kContinue | kAnd, kVerifyRegA | kVerifyRegB) \
+ V(0xB6, OR_INT_2ADDR, "or-int/2addr", k12x, kIndexNone, kContinue | kOr, kVerifyRegA | kVerifyRegB) \
+ V(0xB7, XOR_INT_2ADDR, "xor-int/2addr", k12x, kIndexNone, kContinue | kXor, kVerifyRegA | kVerifyRegB) \
+ V(0xB8, SHL_INT_2ADDR, "shl-int/2addr", k12x, kIndexNone, kContinue | kShl, kVerifyRegA | kVerifyRegB) \
+ V(0xB9, SHR_INT_2ADDR, "shr-int/2addr", k12x, kIndexNone, kContinue | kShr, kVerifyRegA | kVerifyRegB) \
+ V(0xBA, USHR_INT_2ADDR, "ushr-int/2addr", k12x, kIndexNone, kContinue | kUshr, kVerifyRegA | kVerifyRegB) \
+ V(0xBB, ADD_LONG_2ADDR, "add-long/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBC, SUB_LONG_2ADDR, "sub-long/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBD, MUL_LONG_2ADDR, "mul-long/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBE, DIV_LONG_2ADDR, "div-long/2addr", k12x, kIndexNone, kContinue | kThrow | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xBF, REM_LONG_2ADDR, "rem-long/2addr", k12x, kIndexNone, kContinue | kThrow | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC0, AND_LONG_2ADDR, "and-long/2addr", k12x, kIndexNone, kContinue | kAnd, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC1, OR_LONG_2ADDR, "or-long/2addr", k12x, kIndexNone, kContinue | kOr, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC2, XOR_LONG_2ADDR, "xor-long/2addr", k12x, kIndexNone, kContinue | kXor, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xC3, SHL_LONG_2ADDR, "shl-long/2addr", k12x, kIndexNone, kContinue | kShl, kVerifyRegAWide | kVerifyRegB) \
+ V(0xC4, SHR_LONG_2ADDR, "shr-long/2addr", k12x, kIndexNone, kContinue | kShr, kVerifyRegAWide | kVerifyRegB) \
+ V(0xC5, USHR_LONG_2ADDR, "ushr-long/2addr", k12x, kIndexNone, kContinue | kUshr, kVerifyRegAWide | kVerifyRegB) \
+ V(0xC6, ADD_FLOAT_2ADDR, "add-float/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegA | kVerifyRegB) \
+ V(0xC7, SUB_FLOAT_2ADDR, "sub-float/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegA | kVerifyRegB) \
+ V(0xC8, MUL_FLOAT_2ADDR, "mul-float/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegA | kVerifyRegB) \
+ V(0xC9, DIV_FLOAT_2ADDR, "div-float/2addr", k12x, kIndexNone, kContinue | kDivide, kVerifyRegA | kVerifyRegB) \
+ V(0xCA, REM_FLOAT_2ADDR, "rem-float/2addr", k12x, kIndexNone, kContinue | kRemainder, kVerifyRegA | kVerifyRegB) \
+ V(0xCB, ADD_DOUBLE_2ADDR, "add-double/2addr", k12x, kIndexNone, kContinue | kAdd, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCC, SUB_DOUBLE_2ADDR, "sub-double/2addr", k12x, kIndexNone, kContinue | kSubtract, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCD, MUL_DOUBLE_2ADDR, "mul-double/2addr", k12x, kIndexNone, kContinue | kMultiply, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCE, DIV_DOUBLE_2ADDR, "div-double/2addr", k12x, kIndexNone, kContinue | kDivide, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xCF, REM_DOUBLE_2ADDR, "rem-double/2addr", k12x, kIndexNone, kContinue | kRemainder, kVerifyRegAWide | kVerifyRegBWide) \
+ V(0xD0, ADD_INT_LIT16, "add-int/lit16", k22s, kIndexNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD1, RSUB_INT, "rsub-int", k22s, kIndexNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD2, MUL_INT_LIT16, "mul-int/lit16", k22s, kIndexNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD3, DIV_INT_LIT16, "div-int/lit16", k22s, kIndexNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD4, REM_INT_LIT16, "rem-int/lit16", k22s, kIndexNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD5, AND_INT_LIT16, "and-int/lit16", k22s, kIndexNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD6, OR_INT_LIT16, "or-int/lit16", k22s, kIndexNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD7, XOR_INT_LIT16, "xor-int/lit16", k22s, kIndexNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD8, ADD_INT_LIT8, "add-int/lit8", k22b, kIndexNone, kContinue | kAdd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xD9, RSUB_INT_LIT8, "rsub-int/lit8", k22b, kIndexNone, kContinue | kSubtract | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDA, MUL_INT_LIT8, "mul-int/lit8", k22b, kIndexNone, kContinue | kMultiply | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDB, DIV_INT_LIT8, "div-int/lit8", k22b, kIndexNone, kContinue | kThrow | kDivide | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDC, REM_INT_LIT8, "rem-int/lit8", k22b, kIndexNone, kContinue | kThrow | kRemainder | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDD, AND_INT_LIT8, "and-int/lit8", k22b, kIndexNone, kContinue | kAnd | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDE, OR_INT_LIT8, "or-int/lit8", k22b, kIndexNone, kContinue | kOr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xDF, XOR_INT_LIT8, "xor-int/lit8", k22b, kIndexNone, kContinue | kXor | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE0, SHL_INT_LIT8, "shl-int/lit8", k22b, kIndexNone, kContinue | kShl | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE1, SHR_INT_LIT8, "shr-int/lit8", k22b, kIndexNone, kContinue | kShr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE2, USHR_INT_LIT8, "ushr-int/lit8", k22b, kIndexNone, kContinue | kUshr | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB) \
+ V(0xE3, IGET_QUICK, "iget-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE4, IGET_WIDE_QUICK, "iget-wide-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE5, IGET_OBJECT_QUICK, "iget-object-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE6, IPUT_QUICK, "iput-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, kIndexVtableOffset, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
+ V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, kIndexVtableOffset, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
+ V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xEF, IGET_BOOLEAN_QUICK, "iget-boolean-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, kIndexFieldOffset, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF3, INVOKE_LAMBDA, "invoke-lambda", k25x, kIndexNone, kContinue | kThrow | kInvoke | kExperimental, kVerifyRegC /*TODO: | kVerifyVarArg*/) \
+ V(0xF4, UNUSED_F4, "unused-f4", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xF5, CAPTURE_VARIABLE, "capture-variable", k21c, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegBString) \
+ V(0xF6, CREATE_LAMBDA, "create-lambda", k21c, kIndexMethodRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegBMethod) \
+ V(0xF7, LIBERATE_VARIABLE, "liberate-variable", k22c, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCString) \
+ V(0xF8, BOX_LAMBDA, "box-lambda", k22x, kIndexNone, kContinue | kExperimental, kVerifyRegA | kVerifyRegB) \
+ V(0xF9, UNBOX_LAMBDA, "unbox-lambda", k22c, kIndexTypeRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \
+ V(0xFA, UNUSED_FA, "unused-fa", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xFB, UNUSED_FB, "unused-fb", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xFC, UNUSED_FC, "unused-fc", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xFD, UNUSED_FD, "unused-fd", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xFE, UNUSED_FE, "unused-fe", k10x, kIndexUnknown, 0, kVerifyError) \
+ V(0xFF, UNUSED_FF, "unused-ff", k10x, kIndexUnknown, 0, kVerifyError)
#define DEX_INSTRUCTION_FORMAT_LIST(V) \
V(k10x) \
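For context on the seven-argument shape the instruction list now has: dex_instruction_list.h is an X-macro, and every consumer supplies its own V to stamp out enums, name tables, and dispatch code from the single list. A minimal, self-contained sketch of that pattern follows; the DEMO_* names and the two-entry list are illustrative stand-ins, not the tree's actual headers.

#include <cstdint>

// Two-entry stand-in for DEX_INSTRUCTION_LIST, using the new seven-argument
// shape: opcode, enum name, printable name, format, index type, flags,
// verify flags.
#define DEMO_INSTRUCTION_LIST(V) \
  V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, kVerifyNone) \
  V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, kVerifyNone)

// Consumer 1: the opcode enum. Unused macro arguments are simply dropped.
enum class DemoOpcode : uint8_t {
#define ENUM_ENTRY(opcode, cname, p, format, index, flags, verify) cname = (opcode),
  DEMO_INSTRUCTION_LIST(ENUM_ENTRY)
#undef ENUM_ENTRY
};

// Consumer 2: opcode -> printable name, generated from the same list.
inline const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define NAME_CASE(opcode, cname, p, format, index, flags, verify) \
    case DemoOpcode::cname: return p;
    DEMO_INSTRUCTION_LIST(NAME_CASE)
#undef NAME_CASE
  }
  return "unknown";
}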
diff --git a/runtime/dex_instruction_visitor.h b/runtime/dex_instruction_visitor.h
index 795b95b..42af6a9 100644
--- a/runtime/dex_instruction_visitor.h
+++ b/runtime/dex_instruction_visitor.h
@@ -32,7 +32,7 @@
while (i < size_in_code_units) {
const Instruction* inst = Instruction::At(&code[i]);
switch (inst->Opcode()) {
-#define INSTRUCTION_CASE(o, cname, p, f, r, i, a, v) \
+#define INSTRUCTION_CASE(o, cname, p, f, i, a, v) \
case Instruction::cname: { \
derived->Do_ ## cname(inst); \
break; \
@@ -50,7 +50,7 @@
private:
// Specific handlers for each instruction.
-#define INSTRUCTION_VISITOR(o, cname, p, f, r, i, a, v) \
+#define INSTRUCTION_VISITOR(o, cname, p, f, i, a, v) \
void Do_ ## cname(const Instruction* inst) { \
T* derived = static_cast<T*>(this); \
derived->Do_Default(inst); \
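The visitor change above is mechanical: both macros drop the old unused `r` parameter and keep expanding one switch case and one overridable handler per opcode. A self-contained sketch of the CRTP dispatch shape those macros build; DemoVisitorBase, DEMO_LIST, and CountingVisitor are hypothetical stand-ins for the real templates.

#include <cstdint>

#define DEMO_LIST(V) \
  V(0x00, NOP, "nop", k10x, kIndexNone, kContinue, kVerifyNone) \
  V(0x0E, RETURN_VOID, "return-void", k10x, kIndexNone, kReturn, kVerifyNone)

template <typename Derived>
class DemoVisitorBase {
 public:
  void Dispatch(uint8_t opcode) {
    Derived* derived = static_cast<Derived*>(this);
    switch (opcode) {
      // One case per opcode, forwarding to the per-instruction handler.
#define DEMO_CASE(o, cname, p, f, i, a, v) \
      case (o): derived->Do_ ## cname(); break;
      DEMO_LIST(DEMO_CASE)
#undef DEMO_CASE
      default: derived->Do_Default(); break;
    }
  }
  // Generated default handlers; a derived visitor overrides only what it needs.
#define DEMO_HANDLER(o, cname, p, f, i, a, v) \
  void Do_ ## cname() { static_cast<Derived*>(this)->Do_Default(); }
  DEMO_LIST(DEMO_HANDLER)
#undef DEMO_HANDLER
  void Do_Default() {}
};

struct CountingVisitor : DemoVisitorBase<CountingVisitor> {
  int nops = 0;
  void Do_NOP() { ++nops; }  // Hides the generated default for NOP only.
};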
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 7ecd595..08fec91 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -20,6 +20,7 @@
#include "entrypoint_utils.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file.h"
@@ -52,7 +53,8 @@
uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, inlining_depth);
InvokeType invoke_type = static_cast<InvokeType>(
inline_info.GetInvokeTypeAtDepth(encoding, inlining_depth));
- ArtMethod* inlined_method = outer_method->GetDexCacheResolvedMethod(method_index, sizeof(void*));
+ ArtMethod* inlined_method = outer_method->GetDexCacheResolvedMethod(method_index,
+ kRuntimePointerSize);
if (!inlined_method->IsRuntimeMethod()) {
return inlined_method;
}
@@ -89,7 +91,7 @@
Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangString);
// Update the dex cache for future lookups.
caller->GetDexCache()->SetResolvedType(method_id.class_idx_, cls);
- inlined_method = cls->FindVirtualMethod("charAt", "(I)C", sizeof(void*));
+ inlined_method = cls->FindVirtualMethod("charAt", "(I)C", kRuntimePointerSize);
} else {
mirror::Class* klass = caller->GetDexCache()->GetResolvedType(method_id.class_idx_);
DCHECK_EQ(klass->GetDexCache(), caller->GetDexCache())
@@ -98,12 +100,12 @@
case kDirect:
case kStatic:
inlined_method =
- klass->FindDirectMethod(klass->GetDexCache(), method_index, sizeof(void*));
+ klass->FindDirectMethod(klass->GetDexCache(), method_index, kRuntimePointerSize);
break;
case kSuper:
case kVirtual:
inlined_method =
- klass->FindVirtualMethod(klass->GetDexCache(), method_index, sizeof(void*));
+ klass->FindVirtualMethod(klass->GetDexCache(), method_index, kRuntimePointerSize);
break;
default:
LOG(FATAL) << "Unimplemented inlined invocation type: " << invoke_type;
@@ -114,7 +116,7 @@
// Update the dex cache for future lookups. Note that for static methods, this is safe
// when the class is being initialized, as the entrypoint for the ArtMethod is
// still the resolution trampoline at this point.
- outer_method->SetDexCacheResolvedMethod(method_index, inlined_method, sizeof(void*));
+ outer_method->SetDexCacheResolvedMethod(method_index, inlined_method, kRuntimePointerSize);
return inlined_method;
}
@@ -130,7 +132,7 @@
ArtMethod* method,
Thread* self, bool* slow_path) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size);
if (UNLIKELY(klass == nullptr)) {
klass = class_linker->ResolveType(type_idx, method);
@@ -148,6 +150,11 @@
*slow_path = true;
return nullptr; // Failure
}
+ if (UNLIKELY(klass->IsClassClass())) {
+ ThrowIllegalAccessError(nullptr, "Class %s is inaccessible", PrettyDescriptor(klass).c_str());
+ *slow_path = true;
+ return nullptr; // Failure
+ }
mirror::Class* referrer = method->GetDeclaringClass();
if (UNLIKELY(!referrer->CanAccess(klass))) {
ThrowIllegalAccessErrorClass(referrer, klass);
@@ -275,7 +282,7 @@
return nullptr; // Failure
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size);
if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = class_linker->ResolveType(type_idx, method);
@@ -381,7 +388,7 @@
//
// In particular, don't assume the dex instruction already correctly knows if the
// real field is static or not. The resolution must not be aware of this.
- ArtMethod* method = referrer->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* method = referrer->GetInterfaceMethodIfProxy(kRuntimePointerSize);
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(method->GetDexCache()));
@@ -601,7 +608,7 @@
}
case kInterface: {
uint32_t imt_index = resolved_method->GetImtIndex();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
Get(imt_index, pointer_size);
if (!imt_method->IsRuntimeMethod()) {
@@ -655,7 +662,8 @@
inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFieldType type,
size_t expected_size) {
ArtField* resolved_field =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx, sizeof(void*));
+ referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx,
+ kRuntimePointerSize);
if (UNLIKELY(resolved_field == nullptr)) {
return nullptr;
}
@@ -710,7 +718,7 @@
}
mirror::Class* referring_class = referrer->GetDeclaringClass();
ArtMethod* resolved_method =
- referring_class->GetDexCache()->GetResolvedMethod(method_idx, sizeof(void*));
+ referring_class->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (UNLIKELY(resolved_method == nullptr)) {
return nullptr;
}
@@ -729,7 +737,8 @@
}
}
if (type == kInterface) { // Most common form of slow path dispatch.
- return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method, sizeof(void*));
+ return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method,
+ kRuntimePointerSize);
} else if (type == kStatic || type == kDirect) {
return resolved_method;
} else if (type == kSuper) {
@@ -752,15 +761,15 @@
// The super class does not have the method.
return nullptr;
}
- return super_class->GetVTableEntry(resolved_method->GetMethodIndex(), sizeof(void*));
+ return super_class->GetVTableEntry(resolved_method->GetMethodIndex(), kRuntimePointerSize);
} else {
return method_reference_class->FindVirtualMethodForInterfaceSuper(
- resolved_method, sizeof(void*));
+ resolved_method, kRuntimePointerSize);
}
} else {
DCHECK(type == kVirtual);
return this_object->GetClass()->GetVTableEntry(
- resolved_method->GetMethodIndex(), sizeof(void*));
+ resolved_method->GetMethodIndex(), kRuntimePointerSize);
}
}
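The recurring edit through this file — sizeof(void*) becoming kRuntimePointerSize — swaps a raw byte count for the scoped PointerSize enum from base/enums.h, so callers must state the pointer width as a type rather than smuggle it in as an integer. A toy illustration of the type safety that buys; DemoPointerSize and both accessor functions are hypothetical, not ART APIs.

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the PointerSize scoped enum.
enum class DemoPointerSize : size_t { k32 = 4, k64 = 8 };

// Old shape: any size_t is accepted, so sizeof(void*) slips in even when the
// image pointer size differs from the host's (e.g. a 32-bit target image
// handled by a 64-bit host build).
uintptr_t OldGetEntry(const uint8_t* table, size_t index, size_t pointer_size) {
  return reinterpret_cast<uintptr_t>(table + index * pointer_size);
}

// New shape: the width must be named explicitly; there is no implicit
// conversion from size_t to the scoped enum.
uintptr_t NewGetEntry(const uint8_t* table, size_t index, DemoPointerSize pointer_size) {
  return reinterpret_cast<uintptr_t>(table + index * static_cast<size_t>(pointer_size));
}

int main() {
  uint8_t table[64] = {};
  OldGetEntry(table, 2, sizeof(void*));         // Compiles, but wrong for a 32-bit image.
  // NewGetEntry(table, 2, sizeof(void*));      // Error: size_t does not convert.
  NewGetEntry(table, 2, DemoPointerSize::k32);  // The width is an explicit, checked choice.
  return 0;
}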
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 197caa1..fd1c02f 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/mutex.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -48,7 +49,7 @@
return nullptr; // Failure
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- size_t pointer_size = class_linker->GetImagePointerSize();
+ PointerSize pointer_size = class_linker->GetImagePointerSize();
mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx, pointer_size);
if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = class_linker->ResolveType(type_idx, referrer);
@@ -125,7 +126,7 @@
}
// Make sure that the result is an instance of the type this method was expected to return.
mirror::Class* return_type = self->GetCurrentMethod(nullptr)->GetReturnType(true /* resolve */,
- sizeof(void*));
+ kRuntimePointerSize);
if (!o->InstanceOf(return_type)) {
Runtime::Current()->GetJavaVM()->JniAbortF(nullptr,
@@ -188,7 +189,7 @@
StackHandleScope<1> hs(soa.Self());
auto h_interface_method(hs.NewHandle(soa.Decode<mirror::Method*>(interface_method_jobj)));
// This can cause thread suspension.
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* result_type =
h_interface_method->GetArtMethod()->GetReturnType(true /* resolve */, pointer_size);
mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
@@ -208,10 +209,10 @@
mirror::Class* proxy_class = rcvr->GetClass();
mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj);
ArtMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(
- interface_method->GetArtMethod(), sizeof(void*));
- auto virtual_methods = proxy_class->GetVirtualMethodsSlice(sizeof(void*));
+ interface_method->GetArtMethod(), kRuntimePointerSize);
+ auto virtual_methods = proxy_class->GetVirtualMethodsSlice(kRuntimePointerSize);
size_t num_virtuals = proxy_class->NumVirtualMethods();
- size_t method_size = ArtMethod::Size(sizeof(void*));
+ size_t method_size = ArtMethod::Size(kRuntimePointerSize);
// Rely on the fact that the methods are contiguous to determine the index of the method in
// the slice.
int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
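The throws_index computation above (truncated by the hunk) divides a raw pointer difference by ArtMethod::Size(kRuntimePointerSize), relying on the proxy class's virtual methods being laid out contiguously. A freestanding sketch of that indexing idiom, with DemoMethod as an illustrative fixed-size stand-in:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct DemoMethod { uint64_t payload[3]; };  // Fixed-size stand-in for ArtMethod.

// Recover an element's index in a contiguous array from its address,
// mirroring the throws_index computation above.
size_t IndexOf(const DemoMethod* first, const DemoMethod* target, size_t method_size) {
  return (reinterpret_cast<uintptr_t>(target) - reinterpret_cast<uintptr_t>(first)) /
         method_size;
}

int main() {
  DemoMethod methods[8] = {};
  assert(IndexOf(&methods[0], &methods[5], sizeof(DemoMethod)) == 5);
  return 0;
}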
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 331de91..a81a7e7 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "base/mutex.h"
#include "runtime.h"
#include "thread-inl.h"
@@ -86,7 +87,7 @@
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr size_t GetConstExprPointerSize(InstructionSet isa) {
+static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
// In C++11, a constexpr function body must be a single return statement.
return (isa == kArm || isa == kThumb2) ? kArmPointerSize :
isa == kArm64 ? kArm64PointerSize :
@@ -94,14 +95,14 @@
isa == kMips64 ? kMips64PointerSize :
isa == kX86 ? kX86PointerSize :
isa == kX86_64 ? kX86_64PointerSize :
- isa == kNone ? (LOG(FATAL) << "kNone has no pointer size", 0) :
- (LOG(FATAL) << "Unknown instruction set" << isa, 0);
+ isa == kNone ? (LOG(FATAL) << "kNone has no pointer size", PointerSize::k32) :
+ (LOG(FATAL) << "Unknown instruction set" << isa, PointerSize::k32);
}
// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa,
Runtime::CalleeSaveType type) {
- return GetCalleeSaveFrameSize(isa, type) - GetConstExprPointerSize(isa);
+ return GetCalleeSaveFrameSize(isa, type) - static_cast<size_t>(GetConstExprPointerSize(isa));
}
} // namespace art
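GetConstExprPointerSize keeps its chained-ternary shape because a C++11 constexpr function body must be a single return statement; what changes is that the LOG(FATAL) fallback arms now yield PointerSize::k32 through the comma operator instead of the ill-typed literal 0. A compact, self-contained illustration of the idiom, with Fail() as a hypothetical stand-in for LOG(FATAL):

#include <cstdio>
#include <cstdlib>

enum class DemoPointerSize { k32 = 4, k64 = 8 };
enum DemoIsa { kDemoArm, kDemoArm64, kDemoNone };

// Deliberately not constexpr: reaching it during constant evaluation is a
// compile error, which is exactly what the fallback arm wants.
DemoPointerSize Fail(const char* msg) {
  std::fputs(msg, stderr);
  std::abort();
}

constexpr DemoPointerSize DemoPointerSizeOf(DemoIsa isa) {
  // C++11: single return; the error path rides the comma operator and
  // still has the function's return type.
  return isa == kDemoArm ? DemoPointerSize::k32 :
         isa == kDemoArm64 ? DemoPointerSize::k64 :
         (Fail("no pointer size"), DemoPointerSize::k32);
}

static_assert(DemoPointerSizeOf(kDemoArm64) == DemoPointerSize::k64, "sanity");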
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index c3b3ac0..4686a51 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -17,6 +17,7 @@
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "callee_save_frame.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "mirror/class-inl.h"
@@ -33,7 +34,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, sizeof(void*)); \
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, kRuntimePointerSize); \
if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
size_t byte_count = klass->GetObjectSize(); \
byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
index 5a95491..8de1137 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -36,7 +36,7 @@
std::ostream& operator<<(std::ostream& os, const QuickEntrypointEnum& kind);
// Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
-template <size_t pointer_size>
+template <PointerSize pointer_size>
static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
switch (trampoline)
{ // NOLINT(whitespace/braces)
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index e0ec68e..07f0394 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -194,8 +194,6 @@
V(ReadBarrierMarkReg27, mirror::Object*, mirror::Object*) \
V(ReadBarrierMarkReg28, mirror::Object*, mirror::Object*) \
V(ReadBarrierMarkReg29, mirror::Object*, mirror::Object*) \
- V(ReadBarrierMarkReg30, mirror::Object*, mirror::Object*) \
- V(ReadBarrierMarkReg31, mirror::Object*, mirror::Object*) \
V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t) \
V(ReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*)
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 8e660a2..b5e560f 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/enums.h"
#include "callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "instrumentation.h"
@@ -37,7 +38,7 @@
if (instrumentation->IsDeoptimized(method)) {
result = GetQuickToInterpreterBridge();
} else {
- result = instrumentation->GetQuickCodeFor(method, sizeof(void*));
+ result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize);
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
}
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 49043f6..9678079 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/enums.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -366,7 +367,7 @@
// next register is even.
static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
"Number of Quick FPR arguments not even");
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
}
virtual ~QuickArgumentVisitor() {}
@@ -659,7 +660,7 @@
DCHECK(!method->IsNative()) << PrettyMethod(method);
uint32_t shorty_len = 0;
- ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem();
DCHECK(code_item != nullptr) << PrettyMethod(method);
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
@@ -859,7 +860,7 @@
jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
// Place the arguments into the args vector and remove the receiver.
- ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
<< PrettyMethod(non_proxy_method);
std::vector<jvalue> args;
@@ -872,12 +873,15 @@
args.erase(args.begin());
// Convert proxy method into expected interface method.
- ArtMethod* interface_method = proxy_method->FindOverriddenMethod(sizeof(void*));
+ ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
self->EndAssertNoThreadSuspension(old_cause);
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
jobject interface_method_jobj = soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod(soa.Self(), interface_method));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(),
+ interface_method));
// All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
// that performs allocations.
@@ -1035,10 +1039,10 @@
ArtMethod* orig_called = called;
if (invoke_type == kVirtual) {
CHECK(receiver != nullptr) << invoke_type;
- called = receiver->GetClass()->FindVirtualMethodForVirtual(called, sizeof(void*));
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
} else if (invoke_type == kInterface) {
CHECK(receiver != nullptr) << invoke_type;
- called = receiver->GetClass()->FindVirtualMethodForInterface(called, sizeof(void*));
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
} else {
DCHECK_EQ(invoke_type, kSuper);
CHECK(caller != nullptr) << invoke_type;
@@ -1051,10 +1055,10 @@
mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod(
called_method.dex_method_index, dex_cache, class_loader);
if (ref_class->IsInterface()) {
- called = ref_class->FindVirtualMethodForInterfaceSuper(called, sizeof(void*));
+ called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
} else {
called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
- called->GetMethodIndex(), sizeof(void*));
+ called->GetMethodIndex(), kRuntimePointerSize);
}
}
@@ -1068,7 +1072,7 @@
// FindVirtualMethodFor... This is fine for FindDexMethodIndexInOtherDexFile, which cares
// only about the name and signature.
uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
- if (!called->HasSameDexCacheResolvedMethods(caller, sizeof(void*))) {
+ if (!called->HasSameDexCacheResolvedMethods(caller, kRuntimePointerSize)) {
// Calling from one dex file to another, we need to compute the method index appropriate to
// the caller's dex file. Since we get here only if the original called was a runtime
// method, we've got the correct dex_file and a dex_method_idx from above.
@@ -1082,8 +1086,10 @@
}
if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
(caller->GetDexCacheResolvedMethod(
- update_dex_cache_method_index, sizeof(void*)) != called)) {
- caller->SetDexCacheResolvedMethod(update_dex_cache_method_index, called, sizeof(void*));
+ update_dex_cache_method_index, kRuntimePointerSize) != called)) {
+ caller->SetDexCacheResolvedMethod(update_dex_cache_method_index,
+ called,
+ kRuntimePointerSize);
}
} else if (invoke_type == kStatic) {
const auto called_dex_method_idx = called->GetDexMethodIndex();
@@ -1093,7 +1099,9 @@
// b/19175856
if (called->GetDexFile() == called_method.dex_file &&
called_method.dex_method_index != called_dex_method_idx) {
- called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called, sizeof(void*));
+ called->GetDexCache()->SetResolvedMethod(called_dex_method_idx,
+ called,
+ kRuntimePointerSize);
}
}
@@ -1627,7 +1635,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = **m;
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
@@ -2162,22 +2170,22 @@
}
ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
- dex_method_idx, sizeof(void*));
+ dex_method_idx, kRuntimePointerSize);
DCHECK(interface_method != nullptr) << dex_method_idx << " " << PrettyMethod(caller_method);
ArtMethod* method = nullptr;
- ImTable* imt = cls->GetImt(sizeof(void*));
+ ImTable* imt = cls->GetImt(kRuntimePointerSize);
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
// If the dex cache already resolved the interface method, check whether we have
// a match in the ImtConflictTable.
- ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), sizeof(void*));
+ ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), kRuntimePointerSize);
if (LIKELY(conflict_method->IsRuntimeMethod())) {
- ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
+ ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
DCHECK(current_table != nullptr);
- method = current_table->Lookup(interface_method, sizeof(void*));
+ method = current_table->Lookup(interface_method, kRuntimePointerSize);
} else {
// It seems this isn't really a conflict method!
- method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
+ method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
}
if (method != nullptr) {
return GetTwoWordSuccessValue(
@@ -2186,7 +2194,7 @@
}
// No match, use the IfTable.
- method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*));
+ method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
if (UNLIKELY(method == nullptr)) {
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
interface_method, this_object, caller_method);
@@ -2215,14 +2223,15 @@
CHECK(self->IsExceptionPending());
return GetTwoWordFailureValue(); // Failure.
}
- interface_method = caller_method->GetDexCacheResolvedMethod(dex_method_idx, sizeof(void*));
+ interface_method =
+ caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize);
DCHECK(!interface_method->IsRuntimeMethod());
}
// We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
// We create a new table with the new pair { interface_method, method }.
uint32_t imt_index = interface_method->GetImtIndex();
- ArtMethod* conflict_method = imt->Get(imt_index, sizeof(void*));
+ ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
if (conflict_method->IsRuntimeMethod()) {
ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
cls.Get(),
@@ -2235,7 +2244,7 @@
// data is consistent.
imt->Set(imt_index,
new_conflict_method,
- sizeof(void*));
+ kRuntimePointerSize);
}
}
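For orientation in the interface-dispatch slow path above: a class's IMT slot holds either the unique implementation directly or a runtime "conflict" method whose table maps interface methods to implementations; only when both miss does the code fall back to the IfTable walk and, on success, grow the table. A stripped-down sketch of that lookup shape — all Demo* types are illustrative, and the real ImtConflictTable is a linear-alloc array, not a std::vector:

#include <utility>
#include <vector>

struct DemoMethod { bool is_runtime_method; };

// Heap-backed stand-in for ImtConflictTable.
struct DemoConflictTable {
  std::vector<std::pair<const DemoMethod*, DemoMethod*>> entries;
  DemoMethod* Lookup(const DemoMethod* interface_method) const {
    for (const auto& e : entries) {
      if (e.first == interface_method) return e.second;
    }
    return nullptr;  // Miss: the caller falls back to the IfTable search.
  }
};

// Mirrors the dispatch above: a runtime method in the IMT slot marks a
// conflict; consult its table before the slower IfTable walk.
DemoMethod* DemoDispatch(DemoMethod* imt_slot,
                         const DemoConflictTable& table,
                         const DemoMethod* interface_method) {
  if (imt_slot->is_runtime_method) {
    return table.Lookup(interface_method);
  }
  return imt_slot;  // The slot holds the one implementation directly.
}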
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index ffe4109..e3203dc 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -383,11 +383,7 @@
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMarkReg28, pReadBarrierMarkReg29,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMarkReg29, pReadBarrierMarkReg30,
- sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMarkReg30, pReadBarrierMarkReg31,
- sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMarkReg31, pReadBarrierSlow, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMarkReg29, pReadBarrierSlow, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierSlow, pReadBarrierForRootSlow,
sizeof(void*));
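The two deleted assertions mirror the pReadBarrierMarkReg30/31 entrypoints removed from the list earlier in this change: the order test encodes the invariant that consecutive QuickEntryPoints members sit exactly one pointer apart, since assembly stubs address them by fixed offset. A minimal version of that layout check with a stand-in struct:

#include <cstddef>

struct DemoEntryPoints {
  void* pReadBarrierMarkReg29;
  void* pReadBarrierSlow;
  void* pReadBarrierForRootSlow;
};

// Adjacent members must be exactly one pointer apart, or offsets baked
// into assembly would silently disagree with the C++ struct layout.
static_assert(offsetof(DemoEntryPoints, pReadBarrierSlow) -
                  offsetof(DemoEntryPoints, pReadBarrierMarkReg29) == sizeof(void*),
              "entrypoint order");
static_assert(offsetof(DemoEntryPoints, pReadBarrierForRootSlow) -
                  offsetof(DemoEntryPoints, pReadBarrierSlow) == sizeof(void*),
              "entrypoint order");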
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 9f073a6..f86921c 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -341,7 +341,7 @@
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
// TODO: Check linear alloc and image.
- DCHECK_ALIGNED(ArtMethod::Size(sizeof(void*)), sizeof(void*))
+ DCHECK_ALIGNED(ArtMethod::Size(kRuntimePointerSize), sizeof(void*))
<< "ArtMethod is not pointer aligned";
if (method_obj == nullptr || !IsAligned<sizeof(void*)>(method_obj)) {
VLOG(signals) << "no method";
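The fault handler only treats a candidate value as a potential ArtMethod* if it is pointer-aligned; the DCHECK now sizes methods via kRuntimePointerSize, while the alignment test itself is unchanged. For reference, a small sketch of the power-of-two alignment check an IsAligned-style helper reduces to (DemoIsAligned is hypothetical):

#include <cassert>
#include <cstdint>

// For power-of-two alignments, the check reduces to a single mask test.
template <uintptr_t kAlignment>
bool DemoIsAligned(const void* p) {
  static_assert((kAlignment & (kAlignment - 1)) == 0, "power of two");
  return (reinterpret_cast<uintptr_t>(p) & (kAlignment - 1)) == 0;
}

int main() {
  alignas(8) char buf[16];
  assert(DemoIsAligned<8>(&buf[0]));
  assert(!DemoIsAligned<8>(&buf[1]));
  return 0;
}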
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 4e40aea..4e83913 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -318,6 +318,18 @@
const std::set<mirror::Object*>& references_;
};
+class EmptyMarkObjectVisitor : public MarkObjectVisitor {
+ public:
+ mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {}
+};
+
+void ModUnionTable::FilterCards() {
+ EmptyMarkObjectVisitor visitor;
+ // Use an empty visitor, since filtering is done automatically by UpdateAndMarkReferences.
+ UpdateAndMarkReferences(&visitor);
+}
+
void ModUnionTableReferenceCache::Verify() {
// Start by checking that everything in the mod union table is marked.
for (const auto& ref_pair : references_) {
@@ -364,6 +376,31 @@
}
}
+void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* arg) {
+ CardTable* const card_table = heap_->GetCardTable();
+ ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
+ for (uint8_t* card : cleared_cards_) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ live_bitmap->VisitMarkedRange(start,
+ end,
+ [this, callback, arg](mirror::Object* obj) {
+ callback(obj, arg);
+ });
+ }
+ // This may visit the same card twice; TODO: avoid this.
+ for (const auto& pair : references_) {
+ const uint8_t* card = pair.first;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ uintptr_t end = start + CardTable::kCardSize;
+ live_bitmap->VisitMarkedRange(start,
+ end,
+ [this, callback, arg](mirror::Object* obj) {
+ callback(obj, arg);
+ });
+ }
+}
+
void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
CardTable* const card_table = heap_->GetCardTable();
std::vector<mirror::HeapReference<mirror::Object>*> cards_references;
@@ -502,6 +539,22 @@
0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, bit_visitor);
}
+void ModUnionTableCardCache::VisitObjects(ObjectCallback* callback, void* arg) {
+ card_bitmap_->VisitSetBits(
+ 0,
+ RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize,
+ [this, callback, arg](size_t bit_index) {
+ const uintptr_t start = card_bitmap_->AddrFromBitIndex(bit_index);
+ DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start)))
+ << start << " " << *space_;
+ space_->GetLiveBitmap()->VisitMarkedRange(start,
+ start + CardTable::kCardSize,
+ [this, callback, arg](mirror::Object* obj) {
+ callback(obj, arg);
+ });
+ });
+}
+
void ModUnionTableCardCache::Dump(std::ostream& os) {
os << "ModUnionTable dirty cards: [";
// TODO: Find cleaner way of doing this.
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index a7a4246..bf665c5 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -68,6 +68,9 @@
// spaces which are stored in the mod-union table.
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0;
+ // Visit all of the objects that may contain references to other spaces.
+ virtual void VisitObjects(ObjectCallback* callback, void* arg) = 0;
+
// Verification: sanity-checks that we don't have clean cards which conflict with our cached data
// for said cards. Exclusive lock is required since verify sometimes uses
// SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
@@ -78,6 +81,9 @@
// doesn't need to be aligned.
virtual bool ContainsCardFor(uintptr_t addr) = 0;
+ // Filter out cards that don't need to be marked. Done automatically by UpdateAndMarkReferences.
+ void FilterCards();
+
virtual void Dump(std::ostream& os) = 0;
space::ContinuousSpace* GetSpace() {
@@ -115,6 +121,10 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
+ virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
void Verify() OVERRIDE
@@ -156,6 +166,10 @@
REQUIRES(Locks::heap_bitmap_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Nothing to verify.
virtual void Verify() OVERRIDE {}
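
For context, the new VisitObjects hook follows the C-style callback-plus-void*-arg convention used elsewhere in the runtime. A hypothetical caller; Object and ObjectCounter below are illustrative stand-ins, not mirror::Object or ART code:

#include <cstddef>

struct Object {};  // stand-in for mirror::Object
using ObjectCallback = void(Object* obj, void* arg);

// The static callback recovers its typed state from the void* argument,
// which is exactly what table->VisitObjects(callback, arg) expects.
struct ObjectCounter {
  size_t count = 0;
  static void Callback(Object* /* obj */, void* arg) {
    ++static_cast<ObjectCounter*>(arg)->count;
  }
};

// Usage, given some ModUnionTable* table:
//   ObjectCounter counter;
//   table->VisitObjects(ObjectCounter::Callback, &counter);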
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 6489a39..522f236 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -17,6 +17,7 @@
#include "allocation_record.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "stack.h"
@@ -112,7 +113,7 @@
for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
const AllocRecordStackTraceElement& element = record.StackElement(i);
DCHECK(element.GetMethod() != nullptr);
- element.GetMethod()->VisitRoots(buffered_visitor, sizeof(void*));
+ element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
}
}
}
@@ -200,7 +201,7 @@
ArtMethod* m = GetMethod();
// m may be null if we have inlined methods of unresolved classes. b/27858645
if (m != nullptr && !m->IsRuntimeMethod()) {
- m = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
}
return true;
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 4019a5b..fb774a4 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -154,11 +154,30 @@
}
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
+ mirror::Object* ret;
+ // TODO: Delete the GetMarkBit check when all of the callers properly check the bit. The
+ // remaining caller is array allocation.
+ if (from_ref == nullptr || from_ref->GetMarkBit()) {
+ return from_ref;
+ }
// TODO: Consider removing this check when we are done investigating slow paths. b/30162165
if (UNLIKELY(mark_from_read_barrier_measurements_)) {
- return MarkFromReadBarrierWithMeasurements(from_ref);
+ ret = MarkFromReadBarrierWithMeasurements(from_ref);
+ } else {
+ ret = Mark(from_ref);
}
- return Mark(from_ref);
+ // Only set the mark bit for baker barrier.
+ if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
+ // If the mark stack is full, we may temporarily go from marked back to unmarked. Seeing either
+ // value is OK since the only race is doing an unnecessary Mark.
+ if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
+ // Mark stack is full, set the bit back to zero.
+ CHECK(ret->AtomicSetMarkBit(1, 0));
+ // Set rb_mark_bit_stack_full_; this is racy but OK since AtomicPushBack is thread-safe.
+ rb_mark_bit_stack_full_ = true;
+ }
+ }
+ return ret;
}
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
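
The new fast path above tests a per-object mark bit before falling into the slow path, and remembers each newly marked object so the bit can be undone at the end of the cycle. A simplified sketch of the pattern, using std::atomic and a plain vector in place of ART's lock-word bit and bounded atomic stack:

#include <atomic>
#include <cstdint>
#include <vector>

struct Obj { std::atomic<uint32_t> mark_bit{0}; };

std::vector<Obj*> rb_mark_bit_stack;   // drained when the GC cycle finishes
bool rb_mark_bit_stack_full = false;

Obj* Mark(Obj* ref) { return ref; }    // placeholder for the real slow path

Obj* MarkFromReadBarrier(Obj* from_ref) {
  if (from_ref == nullptr ||
      from_ref->mark_bit.load(std::memory_order_relaxed) != 0u) {
    return from_ref;  // fast path: bit already set, skip the slow path
  }
  Obj* ret = Mark(from_ref);
  uint32_t expected = 0u;
  if (!rb_mark_bit_stack_full &&
      ret->mark_bit.compare_exchange_strong(expected, 1u)) {
    // Remember the object so the bit can be cleared later. A bounded stack
    // would unset the bit and set rb_mark_bit_stack_full on push failure,
    // mirroring the AtomicPushBack branch above.
    rb_mark_bit_stack.push_back(ret);
  }
  return ret;
}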
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d2d2f23..071537d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -17,11 +17,13 @@
#include "concurrent_copying.h"
#include "art_field-inl.h"
+#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
@@ -40,6 +42,16 @@
namespace collector {
static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
+// If kFilterModUnionCards is true, we attempt to filter out cards that don't need to be dirty in
+// the mod-union table. Disabled since it does not seem to help the pause much.
+static constexpr bool kFilterModUnionCards = kIsDebugBuild;
+// If kDisallowReadBarrierDuringScan is true, the GC aborts if any read barriers occur during
+// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
+// Only enabled for kIsDebugBuild to avoid the performance hit.
+static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
+// Slow-path mark stack size; increase this if the stack is getting full and causing
+// performance problems.
+static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
ConcurrentCopying::ConcurrentCopying(Heap* heap,
const std::string& name_prefix,
@@ -51,6 +63,10 @@
gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
kDefaultGcMarkStackSize,
kDefaultGcMarkStackSize)),
+ rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
+ kReadBarrierMarkStackSize,
+ kReadBarrierMarkStackSize)),
+ rb_mark_bit_stack_full_(false),
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
@@ -175,6 +191,7 @@
CHECK(false_gray_stack_.empty());
}
+ rb_mark_bit_stack_full_ = false;
mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
if (measure_read_barrier_slow_path_) {
rb_slow_path_ns_.StoreRelaxed(0);
@@ -315,12 +332,87 @@
TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
Runtime::Current()->VisitTransactionRoots(cc);
}
+ if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
+ cc->GrayAllDirtyImmuneObjects();
+ if (kIsDebugBuild) {
+ // Check that all non-gray immune objects only reference immune objects.
+ cc->VerifyGrayImmuneObjects();
+ }
+ }
}
private:
ConcurrentCopying* const concurrent_copying_;
};
+class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
+ public:
+ explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
+ : collector_(collector) {}
+
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
+ const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
+ obj, offset);
+ }
+
+ void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ CHECK(klass->IsTypeOfReferenceClass());
+ CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
+ ref,
+ mirror::Reference::ReferentOffset());
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+
+ void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (ref != nullptr) {
+ CHECK(collector_->immune_spaces_.ContainsObject(ref))
+ << "Non gray object references non immune object "<< ref << " " << PrettyTypeOf(ref)
+ << " in holder " << holder << " " << PrettyTypeOf(holder) << " offset="
+ << offset.Uint32Value();
+ }
+ }
+};
+
+void ConcurrentCopying::VerifyGrayImmuneObjects() {
+ TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ VerifyGrayImmuneObjectsVisitor visitor(this);
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->Limit()),
+ [&visitor](mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // If an object is not gray, it should only have references to things in the immune spaces.
+ if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
+ obj->VisitReferences</*kVisitNativeRoots*/true,
+ kDefaultVerifyFlags,
+ kWithoutReadBarrier>(visitor, visitor);
+ }
+ });
+ }
+}
+
// Switch threads from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
@@ -350,6 +442,52 @@
}
}
+class ConcurrentCopying::GrayImmuneObjectVisitor {
+ public:
+ explicit GrayImmuneObjectVisitor() {}
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kUseBakerReadBarrier) {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
+ }
+ }
+
+ static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
+ }
+};
+
+void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
+ TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ accounting::CardTable* const card_table = heap->GetCardTable();
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ GrayImmuneObjectVisitor visitor;
+ accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
+ // Mark all the objects on dirty cards since these may point to objects in other spaces.
+ // Once these are marked, the GC will eventually clear the cards.
+ // The table is non-null for the boot image and zygote spaces. It is only null for application
+ // image spaces.
+ if (table != nullptr) {
+ // TODO: Add preclean outside the pause.
+ table->ClearCards();
+ table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
+ } else {
+ // TODO: Consider having a mark bitmap for app image spaces and avoiding scanning during the
+ // pause, because app image spaces are all dirty pages anyway.
+ card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
+ }
+ }
+ // Since all of the objects that may point to other spaces are marked, we can avoid all the read
+ // barriers in the immune spaces.
+ updated_all_immune_objects_.StoreRelaxed(true);
+}
+
void ConcurrentCopying::SwapStacks() {
heap_->SwapStacks();
}
@@ -392,8 +530,22 @@
explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
: collector_(cc) {}
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
- collector_->ScanImmuneObject(obj);
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
+ if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ collector_->ScanImmuneObject(obj);
+ // Done scanning the object, go back to white.
+ bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
+ ReadBarrier::WhitePtr());
+ CHECK(success);
+ }
+ } else {
+ collector_->ScanImmuneObject(obj);
+ }
+ }
+
+ static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
}
private:
@@ -415,13 +567,21 @@
if (kUseBakerReadBarrier) {
gc_grays_immune_objects_ = false;
}
- for (auto& space : immune_spaces_.GetSpaces()) {
- DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
- accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- ImmuneSpaceScanObjVisitor visitor(this);
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
+ {
+ TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ ImmuneSpaceScanObjVisitor visitor(this);
+ if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
+ table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
+ } else {
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->Limit()),
+ visitor);
+ }
+ }
}
if (kUseBakerReadBarrier) {
// This release fence makes the field updates in the above loop visible before allowing mutator
@@ -759,9 +919,9 @@
}
collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
if (kUseBakerReadBarrier) {
- CHECK(ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
<< "Ref " << ref << " " << PrettyTypeOf(ref)
- << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
+ << " has non-white rb_ptr ";
}
}
@@ -827,7 +987,7 @@
VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
if (kUseBakerReadBarrier) {
- CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
+ CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
<< "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
}
}
@@ -1438,7 +1598,7 @@
ArtMethod* method = gc_root_source->GetArtMethod();
LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
RootPrinter root_printer;
- method->VisitRoots(root_printer, sizeof(void*));
+ method->VisitRoots(root_printer, kRuntimePointerSize);
}
ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
@@ -1573,12 +1733,19 @@
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
+ if (kDisallowReadBarrierDuringScan) {
+ // Avoid all read barriers while visiting references, to help performance.
+ Thread::Current()->ModifyDebugDisallowReadBarrier(1);
+ }
DCHECK(!region_space_->IsInFromSpace(to_ref));
DCHECK_EQ(Thread::Current(), thread_running_gc_);
RefFieldsVisitor visitor(this);
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
+ if (kDisallowReadBarrierDuringScan) {
+ Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
+ }
}
// Process a field.
@@ -1692,29 +1859,33 @@
ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
CHECK_ALIGNED(byte_size, kObjectAlignment);
memset(dummy_obj, 0, byte_size);
- mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
+ // Avoid going through the read barrier, since kDisallowReadBarrierDuringScan may be enabled.
+ // Explicitly mark to make sure we get an object in the to-space.
+ mirror::Class* int_array_class = down_cast<mirror::Class*>(
+ Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
CHECK(int_array_class != nullptr);
AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
- size_t component_size = int_array_class->GetComponentSize();
+ size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
CHECK_EQ(component_size, sizeof(int32_t));
size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
if (data_offset > byte_size) {
// An int array is too big. Use java.lang.Object.
mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
- CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
+ CHECK_EQ(byte_size, (java_lang_Object->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
dummy_obj->SetClass(java_lang_Object);
- CHECK_EQ(byte_size, dummy_obj->SizeOf());
+ CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()));
} else {
// Use an int array.
dummy_obj->SetClass(int_array_class);
- CHECK(dummy_obj->IsArrayInstance());
+ CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
int32_t length = (byte_size - data_offset) / component_size;
- dummy_obj->AsArray()->SetLength(length);
- CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
+ mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
+ dummy_arr->SetLength(length);
+ CHECK_EQ(dummy_arr->GetLength(), length)
<< "byte_size=" << byte_size << " length=" << length
<< " component_size=" << component_size << " data_offset=" << data_offset;
- CHECK_EQ(byte_size, dummy_obj->SizeOf())
+ CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone, kWithoutReadBarrier>()))
<< "byte_size=" << byte_size << " length=" << length
<< " component_size=" << component_size << " data_offset=" << data_offset;
}
@@ -1726,14 +1897,16 @@
CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Thread* self = Thread::Current();
size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
- MutexLock mu(self, skipped_blocks_lock_);
- auto it = skipped_blocks_map_.lower_bound(alloc_size);
- if (it == skipped_blocks_map_.end()) {
- // Not found.
- return nullptr;
- }
+ size_t byte_size;
+ uint8_t* addr;
{
- size_t byte_size = it->first;
+ MutexLock mu(self, skipped_blocks_lock_);
+ auto it = skipped_blocks_map_.lower_bound(alloc_size);
+ if (it == skipped_blocks_map_.end()) {
+ // Not found.
+ return nullptr;
+ }
+ byte_size = it->first;
CHECK_GE(byte_size, alloc_size);
if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
// If remainder would be too small for a dummy object, retry with a larger request size.
@@ -1746,27 +1919,33 @@
CHECK_GE(it->first - alloc_size, min_object_size)
<< "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
}
+ // Found a block.
+ CHECK(it != skipped_blocks_map_.end());
+ byte_size = it->first;
+ addr = it->second;
+ CHECK_GE(byte_size, alloc_size);
+ CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
+ CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
+ if (kVerboseMode) {
+ LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
+ }
+ skipped_blocks_map_.erase(it);
}
- // Found a block.
- CHECK(it != skipped_blocks_map_.end());
- size_t byte_size = it->first;
- uint8_t* addr = it->second;
- CHECK_GE(byte_size, alloc_size);
- CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
- CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
- if (kVerboseMode) {
- LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
- }
- skipped_blocks_map_.erase(it);
memset(addr, 0, byte_size);
if (byte_size > alloc_size) {
// Return the remainder to the map.
CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
CHECK_GE(byte_size - alloc_size, min_object_size);
+ // FillWithDummyObject may mark an object, so avoid holding skipped_blocks_lock_ to prevent a
+ // lock-level violation and possible deadlock. The deadlock is recursive:
+ // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
byte_size - alloc_size);
CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
- skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
+ {
+ MutexLock mu(self, skipped_blocks_lock_);
+ skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
+ }
}
return reinterpret_cast<mirror::Object*>(addr);
}
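
The reshuffling above is a lock-narrowing pattern: look up and erase the block under skipped_blocks_lock_, release the lock for work that can re-enter the allocator, then re-acquire it only to return the remainder. A generic sketch with std::mutex and std::multimap standing in for ART's Mutex and member map:

#include <cstddef>
#include <cstdint>
#include <map>
#include <mutex>

std::mutex skipped_blocks_lock;                  // illustrative stand-ins
std::multimap<size_t, uint8_t*> skipped_blocks;

uint8_t* TakeBlock(size_t alloc_size) {
  size_t byte_size;
  uint8_t* addr;
  {
    std::lock_guard<std::mutex> guard(skipped_blocks_lock);
    auto it = skipped_blocks.lower_bound(alloc_size);
    if (it == skipped_blocks.end()) {
      return nullptr;
    }
    byte_size = it->first;
    addr = it->second;
    skipped_blocks.erase(it);
  }
  // Anything that may recurse into allocation runs with the lock released;
  // the remainder goes back under a fresh critical section.
  if (byte_size > alloc_size) {
    std::lock_guard<std::mutex> guard(skipped_blocks_lock);
    skipped_blocks.insert({byte_size - alloc_size, addr + alloc_size});
  }
  return addr;
}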
@@ -2052,8 +2231,32 @@
}
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
- WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
- heap_->ClearMarkedObjects();
+ {
+ WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
+ heap_->ClearMarkedObjects();
+ }
+ if (kUseBakerReadBarrier && kFilterModUnionCards) {
+ TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
+ ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
+ // Filter out cards that don't need to be set.
+ if (table != nullptr) {
+ table->FilterCards();
+ }
+ }
+ }
+ if (kUseBakerReadBarrier) {
+ TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
+ DCHECK(rb_mark_bit_stack_.get() != nullptr);
+ const auto* limit = rb_mark_bit_stack_->End();
+ for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
+ CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0));
+ }
+ rb_mark_bit_stack_->Reset();
+ }
}
if (measure_read_barrier_slow_path_) {
MutexLock mu(self, rb_slow_path_histogram_lock_);
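
The EmptyRBMarkBitStack block above is the cleanup half of the mark-bit fast path: every object recorded on rb_mark_bit_stack_ has its bit cleared so the next cycle starts clean. A self-contained sketch, under the same simplified types as the earlier note:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <vector>

struct Obj { std::atomic<uint32_t> mark_bit{0}; };

void EmptyRBMarkBitStack(std::vector<Obj*>* rb_mark_bit_stack) {
  for (Obj* obj : *rb_mark_bit_stack) {
    uint32_t expected = 1u;
    // The bit must still be set here, matching CHECK(...->AtomicSetMarkBit(1, 0)).
    bool cleared = obj->mark_bit.compare_exchange_strong(expected, 0u);
    assert(cleared);
    (void)cleared;
  }
  rb_mark_bit_stack->clear();  // corresponds to rb_mark_bit_stack_->Reset()
}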
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 6a8d052..a862802 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -57,6 +57,9 @@
static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
// Enable verbose mode.
static constexpr bool kVerboseMode = false;
+ // If kGrayDirtyImmuneObjects is true, we gray dirty objects in the GC pause to avoid
+ // unnecessary dirty pages.
+ static constexpr bool kGrayDirtyImmuneObjects = true;
ConcurrentCopying(Heap* heap,
const std::string& name_prefix = "",
@@ -127,7 +130,7 @@
void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void Process(mirror::Object* obj, MemberOffset offset)
@@ -152,6 +155,12 @@
bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ void GrayAllDirtyImmuneObjects()
+ REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ void VerifyGrayImmuneObjects()
+ REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
@@ -179,9 +188,11 @@
void SweepLargeObjects(bool swap_bitmaps)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_);
+ REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -222,6 +233,8 @@
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
+ std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
+ bool rb_mark_bit_stack_full_;
std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<accounting::ObjectStack*> revoked_mark_stacks_
@@ -294,12 +307,14 @@
class ComputeUnevacFromSpaceLiveRatioVisitor;
class DisableMarkingCheckpoint;
class FlipCallback;
+ class GrayImmuneObjectVisitor;
class ImmuneSpaceScanObjVisitor;
class LostCopyVisitor;
class RefFieldsVisitor;
class RevokeThreadLocalMarkStackCheckpoint;
class ScopedGcGraysImmuneObjects;
class ThreadFlipVisitor;
+ class VerifyGrayImmuneObjectsVisitor;
class VerifyNoFromSpaceRefsFieldVisitor;
class VerifyNoFromSpaceRefsObjectVisitor;
class VerifyNoFromSpaceRefsVisitor;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 9f54f1c..e276137 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -23,6 +23,7 @@
#include <vector>
#include "base/bounded_fifo.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
@@ -430,7 +431,7 @@
<< " first_ref_field_offset="
<< (holder_->IsClass()
? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
- sizeof(void*))
+ kRuntimePointerSize)
: holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
<< " num_of_ref_fields="
<< (holder_->IsClass()
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index c602081..7899a7c 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -51,6 +51,8 @@
kCollectorTypeHomogeneousSpaceCompact,
// Class linker fake collector.
kCollectorTypeClassLinker,
+ // JIT Code cache fake collector.
+ kCollectorTypeJitCodeCache,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index ad9bb92..1d377a4 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -37,6 +37,7 @@
case kGcCauseAddRemoveAppImageSpace: return "AddRemoveAppImageSpace";
case kGcCauseDebugger: return "Debugger";
case kGcCauseClassLinker: return "ClassLinker";
+ case kGcCauseJitCodeCache: return "JitCodeCache";
default:
LOG(FATAL) << "Unreachable";
UNREACHABLE();
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 797ec34..4348a41 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -49,6 +49,8 @@
kGcCauseHomogeneousSpaceCompact,
// Class linker cause, used to guard filling art methods with special values.
kGcCauseClassLinker,
+ // Not a real GC cause, used to implement exclusion between code cache metadata and GC.
+ kGcCauseJitCodeCache,
};
const char* PrettyCause(GcCause cause);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6f4767e..5485cd2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2538,6 +2538,17 @@
AddSpace(zygote_space_);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
AddSpace(non_moving_space_);
+ if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
+ // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
+ // safe since we mark all of the objects that may reference non-immune objects as gray.
+ zygote_space_->GetLiveBitmap()->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
+ reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
+ [](mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ CHECK(obj->AtomicSetMarkBit(0, 1));
+ });
+ }
+
// Create the zygote space mod union table.
accounting::ModUnionTable* mod_union_table =
new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
@@ -2546,6 +2557,9 @@
// Set all the cards in the mod-union table since we don't know which objects contain references
// to large objects.
mod_union_table->SetCards();
+ // Filter out cards that do not need to be dirty. This is mostly for the CC collector, so that
+ // it does not gray the objects on all the cards in the zygote space.
+ mod_union_table->FilterCards();
AddModUnionTable(mod_union_table);
large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
if (collector::SemiSpace::kUseRememberedSet) {
@@ -2717,7 +2731,8 @@
}
// It's time to clear all inline caches, in case some classes can be unloaded.
- if ((gc_type == collector::kGcTypeFull) && (runtime->GetJit() != nullptr)) {
+ if (((gc_type == collector::kGcTypeFull) || (gc_type == collector::kGcTypePartial)) &&
+ (runtime->GetJit() != nullptr)) {
runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
}
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 6088a43..62625c4 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -44,7 +44,9 @@
// 1-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
list_ = ref;
} else {
- mirror::Reference* head = list_->GetPendingNext();
+ // The list is owned by the GC; everything that has been inserted must already be at least
+ // gray.
+ mirror::Reference* head = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(head != nullptr);
ref->SetPendingNext(head);
}
@@ -54,14 +56,14 @@
mirror::Reference* ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- mirror::Reference* ref = list_->GetPendingNext();
+ mirror::Reference* ref = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(ref != nullptr);
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
if (list_ == ref) {
list_ = nullptr;
} else {
- mirror::Reference* next = ref->GetPendingNext();
+ mirror::Reference* next = ref->GetPendingNext<kWithoutReadBarrier>();
list_->SetPendingNext(next);
}
ref->SetPendingNext(nullptr);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 8cadc2e..8ade185 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -23,6 +23,7 @@
#include <unistd.h>
#include "art_method.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/scoped_flock.h"
@@ -754,7 +755,7 @@
public:
template<typename... Args>
explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
- const size_t pointer_size,
+ const PointerSize pointer_size,
Args... args)
: FixupVisitor(args...),
pointer_size_(pointer_size),
@@ -874,7 +875,7 @@
}
private:
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
gc::accounting::ContinuousSpaceBitmap* const visited_;
};
@@ -908,7 +909,7 @@
class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor {
public:
template<typename... Args>
- explicit FixupArtMethodVisitor(bool fixup_heap_objects, size_t pointer_size, Args... args)
+ explicit FixupArtMethodVisitor(bool fixup_heap_objects, PointerSize pointer_size, Args... args)
: FixupVisitor(args...),
fixup_heap_objects_(fixup_heap_objects),
pointer_size_(pointer_size) {}
@@ -938,7 +939,7 @@
private:
const bool fixup_heap_objects_;
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
};
class FixupArtFieldVisitor : public FixupVisitor, public ArtFieldVisitor {
@@ -974,7 +975,7 @@
uint32_t boot_image_end = 0;
uint32_t boot_oat_begin = 0;
uint32_t boot_oat_end = 0;
- const size_t pointer_size = image_header.GetPointerSize();
+ const PointerSize pointer_size = image_header.GetPointerSize();
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
if (boot_image_begin == boot_image_end) {
@@ -1435,6 +1436,8 @@
image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod));
CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs),
image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(Runtime::kSaveEverything),
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
} else if (!runtime->HasResolutionMethod()) {
runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
runtime->SetResolutionMethod(image_header->GetImageMethod(ImageHeader::kResolutionMethod));
@@ -1447,6 +1450,8 @@
image_header->GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
runtime->SetCalleeSaveMethod(
image_header->GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
+ runtime->SetCalleeSaveMethod(
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod), Runtime::kSaveEverything);
}
VLOG(image) << "ImageSpace::Init exiting " << *space.get();
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
new file mode 100644
index 0000000..c66029d
--- /dev/null
+++ b/runtime/generated/asm_support_gen.h
@@ -0,0 +1,127 @@
+
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
+#define ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
+
+// This file has been auto-generated by cpp-define-generator; do not edit directly.
+
+#define STACK_REFERENCE_SIZE 0x4
+DEFINE_CHECK_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::StackReference<art::mirror::Object>))))
+#define COMPRESSED_REFERENCE_SIZE 0x4
+DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::mirror::CompressedReference<art::mirror::Object>))))
+#define COMPRESSED_REFERENCE_SIZE_SHIFT 0x2
+DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE_SHIFT), (static_cast<size_t>(art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))))
+#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveAll))))
+#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 0x8
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kRefsOnly))))
+#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 0x10
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kRefsAndArgs))))
+#define RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET 0x18
+DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_CALLEE_SAVE_FRAME_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: kSaveEverything))))
+#define THREAD_FLAGS_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread:: ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
+#define THREAD_ID_OFFSET 12
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread:: ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
+#define THREAD_IS_GC_MARKING_OFFSET 52
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread:: IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
+#define THREAD_CARD_TABLE_OFFSET 128
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread:: CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
+#define CODEITEM_INSNS_OFFSET 16
+DEFINE_CHECK_EQ(static_cast<int32_t>(CODEITEM_INSNS_OFFSET), (static_cast<int32_t>(__builtin_offsetof(art::DexFile::CodeItem, insns_))))
+#define MIRROR_OBJECT_CLASS_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_CLASS_OFFSET), (static_cast<int32_t>(art::mirror::Object:: ClassOffset().Int32Value())))
+#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
+DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_LOCK_WORD_OFFSET), (static_cast<int32_t>(art::mirror::Object:: MonitorOffset().Int32Value())))
+#define MIRROR_CLASS_STATUS_INITIALIZED 0xa
+DEFINE_CHECK_EQ(static_cast<uint32_t>(MIRROR_CLASS_STATUS_INITIALIZED), (static_cast<uint32_t>((art::mirror::Class::kStatusInitialized))))
+#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), (static_cast<uint32_t>((art::kAccClassIsFinalizable))))
+#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 0x1f
+DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT), (static_cast<uint32_t>((art::MostSignificantBit(art::kAccClassIsFinalizable)))))
+#define ART_METHOD_DEX_CACHE_METHODS_OFFSET_32 20
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_METHODS_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedMethodsOffset(art::PointerSize::k32).Int32Value())))
+#define ART_METHOD_DEX_CACHE_METHODS_OFFSET_64 24
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_METHODS_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedMethodsOffset(art::PointerSize::k64).Int32Value())))
+#define ART_METHOD_DEX_CACHE_TYPES_OFFSET_32 24
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_TYPES_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedTypesOffset(art::PointerSize::k32).Int32Value())))
+#define ART_METHOD_DEX_CACHE_TYPES_OFFSET_64 32
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DEX_CACHE_TYPES_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: DexCacheResolvedTypesOffset(art::PointerSize::k64).Int32Value())))
+#define ART_METHOD_JNI_OFFSET_32 28
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
+#define ART_METHOD_JNI_OFFSET_64 40
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
+#define ART_METHOD_QUICK_CODE_OFFSET_32 32
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
+#define ART_METHOD_QUICK_CODE_OFFSET_64 48
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+#define LOCK_WORD_STATE_SHIFT 30
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
+#define LOCK_WORD_STATE_MASK 0xc0000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
+#define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
+#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
+#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
+#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_GC_STATE_MASK_SHIFTED 0x30000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
+#define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
+#define LOCK_WORD_GC_STATE_SHIFT 28
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
+#define LOCK_WORD_MARK_BIT_SHIFT 29
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_MARK_BIT_SHIFT), (static_cast<int32_t>(art::LockWord::kMarkBitStateShift)))
+#define LOCK_WORD_MARK_BIT_MASK_SHIFTED 0x20000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_MARK_BIT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kMarkBitStateMaskShifted)))
+#define OBJECT_ALIGNMENT_MASK 0x7
+DEFINE_CHECK_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), (static_cast<size_t>(art::kObjectAlignment - 1)))
+#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8
+DEFINE_CHECK_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED), (static_cast<uint32_t>(~static_cast<uint32_t>(art::kObjectAlignment - 1))))
+#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
+DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), (static_cast<int32_t>((art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))))
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 3
+DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), (static_cast<int32_t>((art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift))))
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 7
+DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK), (static_cast<int32_t>((static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff8
+DEFINE_CHECK_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32), (static_cast<uint32_t>((~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff8
+DEFINE_CHECK_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64), (static_cast<uint64_t>((~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
+#define ROSALLOC_RUN_FREE_LIST_OFFSET 8
+DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListOffset()))))
+#define ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListHeadOffset()))))
+#define ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET 16
+DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListSizeOffset()))))
+#define ROSALLOC_SLOT_NEXT_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_SLOT_NEXT_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunSlotNextOffset()))))
+#define THREAD_SUSPEND_REQUEST 1
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_REQUEST), (static_cast<int32_t>((art::kSuspendRequest))))
+#define THREAD_CHECKPOINT_REQUEST 2
+DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kCheckpointRequest))))
+#define JIT_CHECK_OSR (-1)
+DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_CHECK_OSR), (static_cast<int16_t>((art::jit::kJitCheckForOSR))))
+#define JIT_HOTNESS_DISABLE (-2)
+DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_HOTNESS_DISABLE), (static_cast<int16_t>((art::jit::kJitHotnessDisabled))))
+
+#endif // ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
+
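
Each #define in the generated header is paired with a DEFINE_CHECK_EQ so that a C++ translation unit including it can verify the assembly-visible constant against the real declaration. The macro itself is supplied by the includer; a static_assert is one plausible (assumed, not the generator's actual) implementation:

#include <cstddef>
#include <cstdint>

// Hypothetical expansion of the check macro used by the generated header.
#define DEFINE_CHECK_EQ(lhs, rhs) \
  static_assert((lhs) == (rhs), "asm constant drifted from C++ definition");

#define EXAMPLE_STACK_REFERENCE_SIZE 0x4
// sizeof(int32_t) stands in for sizeof(art::StackReference<art::mirror::Object>).
DEFINE_CHECK_EQ(static_cast<size_t>(EXAMPLE_STACK_REFERENCE_SIZE), sizeof(int32_t))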
diff --git a/runtime/globals.h b/runtime/globals.h
index 0b44c47..9045d40 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -47,7 +47,8 @@
}
// Required object alignment
-static constexpr size_t kObjectAlignment = 8;
+static constexpr size_t kObjectAlignmentShift = 3;
+static constexpr size_t kObjectAlignment = 1u << kObjectAlignmentShift;
static constexpr size_t kLargeObjectAlignment = kPageSize;
// Whether or not this is a debug build. Useful in conditionals where NDEBUG isn't.
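
Spelling kObjectAlignment as a shift keeps it in sync with the OBJECT_ALIGNMENT_MASK constants in the generated header above. A quick check of the identities:

#include <cstddef>
#include <cstdint>

static constexpr size_t kObjectAlignmentShift = 3;
static constexpr size_t kObjectAlignment = 1u << kObjectAlignmentShift;

static_assert(kObjectAlignment == 8, "same value, new spelling");
static_assert(kObjectAlignment - 1 == 0x7, "OBJECT_ALIGNMENT_MASK");
static_assert(~static_cast<uint32_t>(kObjectAlignment - 1) == 0xfffffff8u,
              "OBJECT_ALIGNMENT_MASK_TOGGLED");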
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index ca206ef..2e1b8ed 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -57,9 +57,9 @@
return header_size + data_size;
}
-inline size_t HandleScope::SizeOf(size_t pointer_size, uint32_t num_references) {
+inline size_t HandleScope::SizeOf(PointerSize pointer_size, uint32_t num_references) {
// Assume that the layout is packed.
- size_t header_size = pointer_size + sizeof(number_of_references_);
+ size_t header_size = ReferencesOffset(pointer_size);
size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
return header_size + data_size;
}
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index d53a0e4..67d7054 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -19,6 +19,7 @@
#include <stack>
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "handle.h"
@@ -52,7 +53,7 @@
static size_t SizeOf(uint32_t num_references);
// Returns the size of a HandleScope containing num_references handles.
- static size_t SizeOf(size_t pointer_size, uint32_t num_references);
+ static size_t SizeOf(PointerSize pointer_size, uint32_t num_references);
// Link to previous HandleScope or null.
HandleScope* GetLink() const {
@@ -73,18 +74,18 @@
ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
// Offset of link within HandleScope, used by generated code.
- static size_t LinkOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
+ static constexpr size_t LinkOffset(PointerSize pointer_size ATTRIBUTE_UNUSED) {
return 0;
}
// Offset of length within handle scope, used by generated code.
- static size_t NumberOfReferencesOffset(size_t pointer_size) {
- return pointer_size;
+ static constexpr size_t NumberOfReferencesOffset(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size);
}
// Offset of link within handle scope, used by generated code.
- static size_t ReferencesOffset(size_t pointer_size) {
- return pointer_size + sizeof(number_of_references_);
+ static constexpr size_t ReferencesOffset(PointerSize pointer_size) {
+ return NumberOfReferencesOffset(pointer_size) + sizeof(number_of_references_);
}
// Placement new creation.
@@ -96,7 +97,7 @@
protected:
// Return backing storage used for references.
ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
- uintptr_t address = reinterpret_cast<uintptr_t>(this) + ReferencesOffset(sizeof(void*));
+ uintptr_t address = reinterpret_cast<uintptr_t>(this) + ReferencesOffset(kRuntimePointerSize);
return reinterpret_cast<StackReference<mirror::Object>*>(address);
}
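
The offset helpers above encode a packed layout: link pointer first, then the 32-bit reference count, then the StackReference array. A sketch of the arithmetic, assuming number_of_references_ is a uint32_t as the surrounding code suggests:

#include <cstddef>
#include <cstdint>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };  // as in base/enums.h

constexpr size_t LinkOffset(PointerSize) { return 0; }
constexpr size_t NumberOfReferencesOffset(PointerSize pointer_size) {
  return static_cast<size_t>(pointer_size);
}
constexpr size_t ReferencesOffset(PointerSize pointer_size) {
  return NumberOfReferencesOffset(pointer_size) + sizeof(uint32_t);
}

// 64-bit runtime: link at 0, count at 8, references start at 12.
static_assert(ReferencesOffset(PointerSize::k64) == 12, "packed 64-bit layout");
static_assert(ReferencesOffset(PointerSize::k32) == 8, "packed 32-bit layout");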
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index dc99987..58f3800 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/enums.h"
#include "gtest/gtest.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
@@ -48,13 +49,13 @@
{
uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
- HandleScope::LinkOffset(sizeof(void*)));
+ HandleScope::LinkOffset(kRuntimePointerSize));
EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
}
{
uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- HandleScope::NumberOfReferencesOffset(sizeof(void*)));
+ HandleScope::NumberOfReferencesOffset(kRuntimePointerSize));
EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
}
@@ -64,7 +65,7 @@
EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- HandleScope::ReferencesOffset(sizeof(void*)));
+ HandleScope::ReferencesOffset(kRuntimePointerSize));
EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
}
}
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index cd0557a..28620db 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -48,7 +48,7 @@
template <typename Visitor>
inline void ImageHeader::VisitPackedImTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const {
+ PointerSize pointer_size) const {
const ImageSection& section = GetImageSection(kSectionImTables);
for (size_t pos = 0; pos < section.Size();) {
ImTable* imt = reinterpret_cast<ImTable*>(base + section.Offset() + pos);
@@ -66,7 +66,7 @@
template <typename Visitor>
inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const {
+ PointerSize pointer_size) const {
const ImageSection& section = GetImageSection(kSectionIMTConflictTables);
for (size_t pos = 0; pos < section.Size(); ) {
auto* table = reinterpret_cast<ImtConflictTable*>(base + section.Offset() + pos);
diff --git a/runtime/image.cc b/runtime/image.cc
index 2362a92..6888183 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -160,7 +160,7 @@
void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
uint8_t* base,
- size_t pointer_size) const {
+ PointerSize pointer_size) const {
const size_t method_alignment = ArtMethod::Alignment(pointer_size);
const size_t method_size = ArtMethod::Size(pointer_size);
const ImageSection& methods = GetMethodsSection();
diff --git a/runtime/image.h b/runtime/image.h
index 06f06ee..207a818 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -19,6 +19,7 @@
#include <string.h>
+#include "base/enums.h"
#include "globals.h"
#include "mirror/object.h"
@@ -156,7 +157,11 @@
return reinterpret_cast<uint8_t*>(oat_file_end_);
}
- uint32_t GetPointerSize() const {
+ PointerSize GetPointerSize() const {
+ return ConvertToPointerSize(pointer_size_);
+ }
+
+ uint32_t GetPointerSizeUnchecked() const {
return pointer_size_;
}
@@ -181,6 +186,7 @@
kCalleeSaveMethod,
kRefsOnlySaveMethod,
kRefsAndArgsSaveMethod,
+ kSaveEverythingMethod,
kImageMethodsCount, // Number of elements in enum.
};
@@ -273,7 +279,9 @@
// Visit ArtMethods in the section starting at base. Includes runtime methods.
// TODO: Delete base parameter if it is always equal to GetImageBegin.
- void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const;
+ void VisitPackedArtMethods(ArtMethodVisitor* visitor,
+ uint8_t* base,
+ PointerSize pointer_size) const;
// Visit ArtMethods in the section starting at base.
// TODO: Delete base parameter if it is always equal to GetImageBegin.
@@ -282,12 +290,12 @@
template <typename Visitor>
void VisitPackedImTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const;
+ PointerSize pointer_size) const;
template <typename Visitor>
void VisitPackedImtConflictTables(const Visitor& visitor,
uint8_t* base,
- size_t pointer_size) const;
+ PointerSize pointer_size) const;
private:
static const uint8_t kImageMagic[4];
diff --git a/runtime/imtable.h b/runtime/imtable.h
index 51faf70..2416621 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -32,10 +32,10 @@
// (non-marker) interfaces.
static constexpr size_t kSize = IMT_SIZE;
- ArtMethod* Get(size_t index, size_t pointer_size) {
+ ArtMethod* Get(size_t index, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
return reinterpret_cast<ArtMethod*>(value);
} else {
@@ -44,10 +44,10 @@
}
}
- void Set(size_t index, ArtMethod* method, size_t pointer_size) {
+ void Set(size_t index, ArtMethod* method, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
uintptr_t value = reinterpret_cast<uintptr_t>(method);
DCHECK_EQ(static_cast<uint32_t>(value), value); // Check that we dont lose any non 0 bits.
*reinterpret_cast<uint32_t*>(ptr) = static_cast<uint32_t>(value);
@@ -56,18 +56,18 @@
}
}
- static size_t OffsetOfElement(size_t index, size_t pointer_size) {
- return index * pointer_size;
+ static size_t OffsetOfElement(size_t index, PointerSize pointer_size) {
+ return index * static_cast<size_t>(pointer_size);
}
- void Populate(ArtMethod** data, size_t pointer_size) {
+ void Populate(ArtMethod** data, PointerSize pointer_size) {
for (size_t i = 0; i < kSize; ++i) {
Set(i, data[i], pointer_size);
}
}
- constexpr static size_t SizeInBytes(size_t pointer_size) {
- return kSize * pointer_size;
+ constexpr static size_t SizeInBytes(PointerSize pointer_size) {
+ return kSize * static_cast<size_t>(pointer_size);
}
};
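
ImTable packs each slot into pointer_size bytes, so a 32-bit image stores truncated 32-bit values while a 64-bit image stores full pointers. A self-contained sketch of the same read-side packing; Method is a stand-in for ArtMethod, and memcpy replaces the raw casts for clarity:

#include <cstddef>
#include <cstdint>
#include <cstring>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

struct Method {};  // stand-in for ArtMethod

// Each entry occupies static_cast<size_t>(pointer_size) bytes.
Method* GetSlot(const uint8_t* table, size_t index, PointerSize pointer_size) {
  const uint8_t* ptr = table + index * static_cast<size_t>(pointer_size);
  if (pointer_size == PointerSize::k32) {
    uint32_t value;
    std::memcpy(&value, ptr, sizeof(value));
    return reinterpret_cast<Method*>(static_cast<uintptr_t>(value));
  }
  uint64_t value;
  std::memcpy(&value, ptr, sizeof(value));
  return reinterpret_cast<Method*>(static_cast<uintptr_t>(value));
}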
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 7dfc83f..61ffe44 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -94,7 +94,7 @@
// We need the class to be resolved to install/uninstall stubs. Otherwise its methods
// could not be initialized or linked with regards to class inheritance.
} else {
- for (ArtMethod& method : klass->GetMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
InstallStubsForMethod(&method);
}
}
@@ -886,7 +886,7 @@
ConfigureStubs(key, InstrumentationLevel::kInstrumentNothing);
}
-const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const {
+const void* Instrumentation::GetQuickCodeFor(ArtMethod* method, PointerSize pointer_size) const {
Runtime* runtime = Runtime::Current();
if (LIKELY(!instrumentation_stubs_installed_)) {
const void* code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
@@ -1063,7 +1063,7 @@
ArtMethod* method = instrumentation_frame.method_;
uint32_t length;
- const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
char return_shorty = method->GetInterfaceMethodIfProxy(pointer_size)->GetShorty(&length)[0];
JValue return_value;
if (return_shorty == 'V') {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 49dd060..757be8e 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -22,6 +22,7 @@
#include <unordered_set>
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
@@ -234,7 +235,7 @@
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
- const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const
+ const void* GetQuickCodeFor(ArtMethod* method, PointerSize pointer_size) const
SHARED_REQUIRES(Locks::mutator_lock_);
void ForceInterpretOnly() {
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 56e3bc5..684c471 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -16,6 +16,7 @@
#include "instrumentation.h"
+#include "base/enums.h"
#include "common_runtime_test.h"
#include "common_throws.h"
#include "class_linker-inl.h"
@@ -461,7 +462,7 @@
mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
ASSERT_TRUE(klass != nullptr);
ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
- sizeof(void*));
+ kRuntimePointerSize);
ASSERT_TRUE(method_to_deoptimize != nullptr);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
@@ -508,7 +509,7 @@
mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
ASSERT_TRUE(klass != nullptr);
ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
- sizeof(void*));
+ kRuntimePointerSize);
ASSERT_TRUE(method_to_deoptimize != nullptr);
EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 53d5e43..11b7ef4 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -18,6 +18,7 @@
#include <cmath>
+#include "base/enums.h"
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "jit/jit.h"
@@ -537,7 +538,7 @@
}
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
- result, method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty());
+ result, method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty());
}
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
@@ -656,7 +657,8 @@
// As a special case for proxy methods, which are not dex-backed,
// we have to retrieve type information from the proxy's method
// interface method instead (which is dex backed since proxies are never interfaces).
- ArtMethod* method = new_shadow_frame->GetMethod()->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* method =
+ new_shadow_frame->GetMethod()->GetInterfaceMethodIfProxy(kRuntimePointerSize);
// We need to do runtime check on reference assignment. We need to load the shorty
// to get the exact type of each reference argument.
@@ -686,7 +688,7 @@
case 'L': {
Object* o = shadow_frame.GetVRegReference(src_reg);
if (do_assignability_check && o != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Class* arg_type =
method->GetClassFromTypeIndex(
params->GetTypeItem(shorty_pos).type_idx_, true /* resolve */, pointer_size);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 7dfa6e2..174d4e0 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -26,6 +26,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker-inl.h"
@@ -681,7 +682,7 @@
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
CHECK(receiver->GetClass()->ShouldHaveEmbeddedVTable());
ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
- vtable_idx, sizeof(void*));
+ vtable_idx, kRuntimePointerSize);
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index f03036b..3b6e015 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -163,14 +163,14 @@
static const void* const handlersTable[instrumentation::kNumHandlerTables][kNumPackedOpcodes] = {
{
// Main handler table.
-#define INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) &&op_##code,
+#define INSTRUCTION_HANDLER(o, code, n, f, i, a, v) &&op_##code,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
#undef DEX_INSTRUCTION_LIST
#undef INSTRUCTION_HANDLER
}, {
// Alternative handler table.
-#define INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) &&alt_op_##code,
+#define INSTRUCTION_HANDLER(o, code, n, f, i, a, v) &&alt_op_##code,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_HANDLER)
#undef DEX_INSTRUCTION_LIST
@@ -2597,7 +2597,7 @@
// Note: we do not use the kReturn instruction flag here (to test the instruction is a return). The
// compiler seems to not evaluate "(Instruction::FlagsOf(Instruction::code) & kReturn) != 0" to
// a constant condition that would remove the "if" statement so the test is free.
-#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
+#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, i, a, v) \
alt_op_##code: { \
if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_); \
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 18330ba..8bfc10c 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/enums.h"
#include "base/stl_util.h" // MakeUnique
#include "experimental_flags.h"
#include "interpreter_common.h"
@@ -283,7 +284,7 @@
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
Class* return_type = shadow_frame.GetMethod()->GetReturnType(true /* resolve */,
pointer_size);
// Re-load since it might have moved.
diff --git a/runtime/interpreter/mterp/arm64/binopLit8.S b/runtime/interpreter/mterp/arm64/binopLit8.S
index 326c657..0b7c68a 100644
--- a/runtime/interpreter/mterp/arm64/binopLit8.S
+++ b/runtime/interpreter/mterp/arm64/binopLit8.S
@@ -13,7 +13,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
index 4257200..c791eb5 100644
--- a/runtime/interpreter/mterp/arm64/header.S
+++ b/runtime/interpreter/mterp/arm64/header.S
@@ -272,6 +272,14 @@
.endm
/*
+ * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
+ * Used to avoid an extra instruction in int-to-long.
+ */
+.macro GET_VREG_S reg, vreg
+ ldrsw \reg, [xFP, \vreg, uxtw #2]
+.endm
+
+/*
* Convert a virtual register index into an address.
*/
.macro VREG_INDEX_TO_ADDR reg, vreg
diff --git a/runtime/interpreter/mterp/arm64/op_const_16.S b/runtime/interpreter/mterp/arm64/op_const_16.S
index 27f5273..f0e8192 100644
--- a/runtime/interpreter/mterp/arm64/op_const_16.S
+++ b/runtime/interpreter/mterp/arm64/op_const_16.S
@@ -1,5 +1,5 @@
/* const/16 vAA, #+BBBB */
- FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended
+ FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
lsr w3, wINST, #8 // w3<- AA
FETCH_ADVANCE_INST 2 // advance xPC, load wINST
SET_VREG w0, w3 // vAA<- w0
diff --git a/runtime/interpreter/mterp/arm64/op_const_4.S b/runtime/interpreter/mterp/arm64/op_const_4.S
index 04cd4f8..9a36115 100644
--- a/runtime/interpreter/mterp/arm64/op_const_4.S
+++ b/runtime/interpreter/mterp/arm64/op_const_4.S
@@ -1,8 +1,7 @@
/* const/4 vA, #+B */
- lsl w1, wINST, #16 // w1<- Bxxx0000
+ sbfx w1, wINST, #12, #4 // w1<- sssssssB
ubfx w0, wINST, #8, #4 // w0<- A
FETCH_ADVANCE_INST 1 // advance xPC, load wINST
- asr w1, w1, #28 // w1<- sssssssB (sign-extended)
GET_INST_OPCODE ip // ip<- opcode from xINST
SET_VREG w1, w0 // fp[A]<- w1
GOTO_OPCODE ip // execute next instruction
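
The const/4 rewrite collapses the lsl/asr pair into one signed bit-field extract. A small sketch of the equivalence, assuming the usual arithmetic right shift for signed values (true of the compilers ART targets):

#include <cassert>
#include <cstdint>

// sbfx rd, rn, #lsb, #width extracts <width> bits at <lsb> and sign-extends.
int32_t Sbfx(uint32_t value, unsigned lsb, unsigned width) {
  // Shift the field to the top of the word, then arithmetic-shift back down.
  return static_cast<int32_t>(value << (32u - lsb - width)) >> (32u - width);
}

int main() {
  uint32_t inst = 0xF012;  // const/4 with B = 0xF in bits [15:12], i.e. -1.
  // Old: lsl w1, wINST, #16 then asr w1, w1, #28 (two instructions).
  // New: sbfx w1, wINST, #12, #4 (one instruction, same value).
  assert(Sbfx(inst, 12, 4) == -1);
  assert(Sbfx(inst, 12, 4) == static_cast<int32_t>(inst << 16) >> 28);
  return 0;
}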
diff --git a/runtime/interpreter/mterp/arm64/op_const_high16.S b/runtime/interpreter/mterp/arm64/op_const_high16.S
index dd51ce1..3a9edff 100644
--- a/runtime/interpreter/mterp/arm64/op_const_high16.S
+++ b/runtime/interpreter/mterp/arm64/op_const_high16.S
@@ -1,5 +1,5 @@
/* const/high16 vAA, #+BBBB0000 */
- FETCH w0, 1 // r0<- 0000BBBB (zero-extended
+ FETCH w0, 1 // r0<- 0000BBBB (zero-extended)
lsr w3, wINST, #8 // r3<- AA
lsl w0, w0, #16 // r0<- BBBB0000
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm64/op_if_eqz.S b/runtime/interpreter/mterp/arm64/op_if_eqz.S
index 1d3202e1..47c1dee 100644
--- a/runtime/interpreter/mterp/arm64/op_if_eqz.S
+++ b/runtime/interpreter/mterp/arm64/op_if_eqz.S
@@ -1 +1 @@
-%include "arm64/zcmp.S" { "condition":"eq" }
+%include "arm64/zcmp.S" { "compare":"0", "branch":"cbz w2," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gez.S b/runtime/interpreter/mterp/arm64/op_if_gez.S
index 8e3abd3..087e094 100644
--- a/runtime/interpreter/mterp/arm64/op_if_gez.S
+++ b/runtime/interpreter/mterp/arm64/op_if_gez.S
@@ -1 +1 @@
-%include "arm64/zcmp.S" { "condition":"ge" }
+%include "arm64/zcmp.S" { "compare":"0", "branch":"tbz w2, #31," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gtz.S b/runtime/interpreter/mterp/arm64/op_if_gtz.S
index a4f2f6b..476b265 100644
--- a/runtime/interpreter/mterp/arm64/op_if_gtz.S
+++ b/runtime/interpreter/mterp/arm64/op_if_gtz.S
@@ -1 +1 @@
-%include "arm64/zcmp.S" { "condition":"gt" }
+%include "arm64/zcmp.S" { "branch":"b.gt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_lez.S b/runtime/interpreter/mterp/arm64/op_if_lez.S
index c1425fdd..2717a60 100644
--- a/runtime/interpreter/mterp/arm64/op_if_lez.S
+++ b/runtime/interpreter/mterp/arm64/op_if_lez.S
@@ -1 +1 @@
-%include "arm64/zcmp.S" { "condition":"le" }
+%include "arm64/zcmp.S" { "branch":"b.le" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ltz.S b/runtime/interpreter/mterp/arm64/op_if_ltz.S
index 03cd3d6..86089c1 100644
--- a/runtime/interpreter/mterp/arm64/op_if_ltz.S
+++ b/runtime/interpreter/mterp/arm64/op_if_ltz.S
@@ -1 +1 @@
-%include "arm64/zcmp.S" { "condition":"lt" }
+%include "arm64/zcmp.S" { "compare":"0", "branch":"tbnz w2, #31," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_nez.S b/runtime/interpreter/mterp/arm64/op_if_nez.S
index 21e1bc2..efacc88 100644
--- a/runtime/interpreter/mterp/arm64/op_if_nez.S
+++ b/runtime/interpreter/mterp/arm64/op_if_nez.S
@@ -1 +1 @@
-%include "arm64/zcmp.S" { "condition":"ne" }
+%include "arm64/zcmp.S" { "compare":"0", "branch":"cbnz w2," }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
index 30b30c2..e9388e4 100644
--- a/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
@@ -4,8 +4,7 @@
GET_VREG w3, w2 // w3<- object we're operating on
ubfx w2, wINST, #8, #4 // w2<- A
cbz w3, common_errNullObject // object was null
- add x4, x3, x4 // create direct pointer
- ldr x0, [x4]
+ ldr x0, [x3, x4] // x0<- obj.field
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
SET_VREG_WIDE x0, w2
GET_INST_OPCODE ip // extract opcode from wINST
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_long.S b/runtime/interpreter/mterp/arm64/op_int_to_long.S
index 35830f3..45e3112 100644
--- a/runtime/interpreter/mterp/arm64/op_int_to_long.S
+++ b/runtime/interpreter/mterp/arm64/op_int_to_long.S
@@ -1 +1,8 @@
-%include "arm64/funopWider.S" {"instr":"sxtw x0, w0", "srcreg":"w0", "tgtreg":"x0"}
+ /* int-to-long vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_S x0, w3 // x0<- sign_extend(fp[B])
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4 // fp[A]<- x0
+ GOTO_OPCODE ip // jump to next instruction
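
This is where the new GET_VREG_S macro from header.S pays off: ldrsw performs the 32-bit load and the sign extension in one instruction, so the separate sxtw from the old funopWider expansion disappears. In C terms (illustrative sketch):

#include <cstdint>

// ldrsw \reg, [xFP, \vreg, uxtw #2] is a single sign-extending 32-bit load:
// index the 32-bit vreg array, then widen the value to 64 bits.
int64_t GetVRegSigned(const uint32_t* fp, uint32_t vreg) {
  return static_cast<int64_t>(static_cast<int32_t>(fp[vreg]));
}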
diff --git a/runtime/interpreter/mterp/arm64/op_iput_quick.S b/runtime/interpreter/mterp/arm64/op_iput_quick.S
index 2afc51b..e95da76 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_quick.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_quick.S
@@ -5,7 +5,6 @@
FETCH w1, 1 // w1<- field byte offset
GET_VREG w3, w2 // w3<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- cmp w3, #0 // check object for null
cbz w3, common_errNullObject // object was null
GET_VREG w0, w2 // w0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
index 566e2bf..6cec363 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
@@ -6,7 +6,6 @@
cbz w2, common_errNullObject // object was null
GET_VREG_WIDE x0, w0 // x0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- add x1, x2, x3 // create a direct pointer
- str x0, [x1]
+ str x0, [x2, x3] // obj.field<- x0
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
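
Both wide-quick hunks above fold the explicit add into the register-offset addressing mode of the load/store, saving an instruction and a temporary. A sketch of what the folded form computes (hypothetical helper, not from the patch):

#include <cstdint>
#include <cstring>

// was: add x4, x3, x4 ; ldr x0, [x4]   (build a direct pointer, then load)
// now: ldr x0, [x3, x4]                (base + register offset, one step)
uint64_t GetWideField(const uint8_t* obj, uint64_t offset) {
  uint64_t value;
  std::memcpy(&value, obj + offset, sizeof(value));
  return value;
}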
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_int.S b/runtime/interpreter/mterp/arm64/op_long_to_int.S
index 360a69b..73f58d8 100644
--- a/runtime/interpreter/mterp/arm64/op_long_to_int.S
+++ b/runtime/interpreter/mterp/arm64/op_long_to_int.S
@@ -1 +1,2 @@
-%include "arm64/funopNarrower.S" {"instr":"", "srcreg":"x0", "tgtreg":"w0"}
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "arm64/op_move.S"
diff --git a/runtime/interpreter/mterp/arm64/op_neg_double.S b/runtime/interpreter/mterp/arm64/op_neg_double.S
index e9064c4..d77859d 100644
--- a/runtime/interpreter/mterp/arm64/op_neg_double.S
+++ b/runtime/interpreter/mterp/arm64/op_neg_double.S
@@ -1 +1 @@
-%include "arm64/unopWide.S" {"preinstr":"mov x1, #0x8000000000000000", "instr":"add x0, x0, x1"}
+%include "arm64/unopWide.S" {"instr":"eor x0, x0, #0x8000000000000000"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_float.S b/runtime/interpreter/mterp/arm64/op_neg_float.S
index 49d51af..6652aec 100644
--- a/runtime/interpreter/mterp/arm64/op_neg_float.S
+++ b/runtime/interpreter/mterp/arm64/op_neg_float.S
@@ -1 +1 @@
-%include "arm64/unop.S" {"preinstr":"mov w4, #0x80000000", "instr":"add w0, w0, w4"}
+%include "arm64/unop.S" {"instr":"eor w0, w0, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm64/unop.S b/runtime/interpreter/mterp/arm64/unop.S
index 474a961..e681968 100644
--- a/runtime/interpreter/mterp/arm64/unop.S
+++ b/runtime/interpreter/mterp/arm64/unop.S
@@ -1,4 +1,3 @@
-%default {"preinstr":""}
/*
* Generic 32-bit unary operation. Provide an "instr" line that
* specifies an instruction that performs "result = op w0".
@@ -11,7 +10,6 @@
lsr w3, wINST, #12 // w3<- B
GET_VREG w0, w3 // w0<- vB
ubfx w9, wINST, #8, #4 // w9<- A
- $preinstr // optional op; may set condition codes
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
$instr // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm64/unopWide.S b/runtime/interpreter/mterp/arm64/unopWide.S
index 109302a..6ee4f92 100644
--- a/runtime/interpreter/mterp/arm64/unopWide.S
+++ b/runtime/interpreter/mterp/arm64/unopWide.S
@@ -1,4 +1,4 @@
-%default {"instr":"sub x0, xzr, x0", "preinstr":""}
+%default {"instr":"sub x0, xzr, x0"}
/*
* Generic 64-bit unary operation. Provide an "instr" line that
* specifies an instruction that performs "result = op x0".
@@ -10,7 +10,6 @@
ubfx w4, wINST, #8, #4 // w4<- A
GET_VREG_WIDE x0, w3
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- $preinstr
$instr
GET_INST_OPCODE ip // extract opcode from wINST
SET_VREG_WIDE x0, w4
diff --git a/runtime/interpreter/mterp/arm64/zcmp.S b/runtime/interpreter/mterp/arm64/zcmp.S
index b303e6a..510a3c1 100644
--- a/runtime/interpreter/mterp/arm64/zcmp.S
+++ b/runtime/interpreter/mterp/arm64/zcmp.S
@@ -1,3 +1,4 @@
+%default { "compare":"1" }
/*
* Generic one-operand compare-and-branch operation. Provide a "branch"
* fragment that performs the comparison-and-branch; set "compare" to "0"
* when the branch instruction itself embeds the zero test.
@@ -8,8 +9,10 @@
lsr w0, wINST, #8 // w0<- AA
GET_VREG w2, w0 // w2<- vAA
FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if ${compare}
cmp w2, #0 // compare (vA, 0)
- b.${condition} MterpCommonTakenBranchNoFlags
+ .endif
+ ${branch} MterpCommonTakenBranchNoFlags
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_not_taken_osr
FETCH_ADVANCE_INST 2
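
With the template parameterized on "compare" and "branch", each if-*z op can pick the cheapest AArch64 idiom: eq/ne use cbz/cbnz, lt/ge test the sign bit with tbnz/tbz, and only gt/le still need cmp plus a conditional branch. A sketch of the mapping (illustrative; the expanded handlers appear in mterp_arm64.S below):

#include <cstdint>

bool BranchTaken(int32_t v, char op) {
  switch (op) {
    case 'e': return v == 0;  // if-eqz: cbz  w2, target       (cmp elided)
    case 'n': return v != 0;  // if-nez: cbnz w2, target       (cmp elided)
    case 'l': return v <  0;  // if-ltz: tbnz w2, #31, target  (sign bit set)
    case 'g': return v >= 0;  // if-gez: tbz  w2, #31, target  (sign bit clear)
    case 'G': return v >  0;  // if-gtz: cmp w2, #0; b.gt      (flags needed)
    default:  return v <= 0;  // if-lez: cmp w2, #0; b.le      (flags needed)
  }
}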
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
index 57206d2..6427ead 100644
--- a/runtime/interpreter/mterp/config_arm64
+++ b/runtime/interpreter/mterp/config_arm64
@@ -20,9 +20,6 @@
handler-style computed-goto
handler-size 128
-# source for alternate entry stub
-asm-alt-stub arm64/alt_stub.S
-
# file header and basic definitions
import arm64/header.S
@@ -295,5 +292,12 @@
# op op_unused_ff FALLBACK
op-end
-# common subroutines for asm
+# common subroutines for asm; we emit the footer before alternate
+# entry stubs, so that TBZ/TBNZ from ops can reach targets in footer
import arm64/footer.S
+
+# source for alternate entry stub
+asm-alt-stub arm64/alt_stub.S
+
+# emit alternate entry stubs
+alt-ops
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index e318782..de37e07 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -279,6 +279,14 @@
.endm
/*
+ * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
+ * Used to avoid an extra instruction in int-to-long.
+ */
+.macro GET_VREG_S reg, vreg
+ ldrsw \reg, [xFP, \vreg, uxtw #2]
+.endm
+
+/*
* Convert a virtual register index into an address.
*/
.macro VREG_INDEX_TO_ADDR reg, vreg
@@ -695,10 +703,9 @@
.L_op_const_4: /* 0x12 */
/* File: arm64/op_const_4.S */
/* const/4 vA, #+B */
- lsl w1, wINST, #16 // w1<- Bxxx0000
+ sbfx w1, wINST, #12, #4 // w1<- sssssssB
ubfx w0, wINST, #8, #4 // w0<- A
FETCH_ADVANCE_INST 1 // advance xPC, load wINST
- asr w1, w1, #28 // w1<- sssssssB (sign-extended)
GET_INST_OPCODE ip // ip<- opcode from xINST
SET_VREG w1, w0 // fp[A]<- w1
GOTO_OPCODE ip // execute next instruction
@@ -708,7 +715,7 @@
.L_op_const_16: /* 0x13 */
/* File: arm64/op_const_16.S */
/* const/16 vAA, #+BBBB */
- FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended
+ FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
lsr w3, wINST, #8 // w3<- AA
FETCH_ADVANCE_INST 2 // advance xPC, load wINST
SET_VREG w0, w3 // vAA<- w0
@@ -734,7 +741,7 @@
.L_op_const_high16: /* 0x15 */
/* File: arm64/op_const_high16.S */
/* const/high16 vAA, #+BBBB0000 */
- FETCH w0, 1 // r0<- 0000BBBB (zero-extended
+ FETCH w0, 1 // r0<- 0000BBBB (zero-extended)
lsr w3, wINST, #8 // r3<- AA
lsl w0, w0, #16 // r0<- BBBB0000
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -1465,8 +1472,10 @@
lsr w0, wINST, #8 // w0<- AA
GET_VREG w2, w0 // w2<- vAA
FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if 0
cmp w2, #0 // compare (vA, 0)
- b.eq MterpCommonTakenBranchNoFlags
+ .endif
+ cbz w2, MterpCommonTakenBranchNoFlags
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_not_taken_osr
FETCH_ADVANCE_INST 2
@@ -1489,8 +1498,10 @@
lsr w0, wINST, #8 // w0<- AA
GET_VREG w2, w0 // w2<- vAA
FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if 0
cmp w2, #0 // compare (vA, 0)
- b.ne MterpCommonTakenBranchNoFlags
+ .endif
+ cbnz w2, MterpCommonTakenBranchNoFlags
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_not_taken_osr
FETCH_ADVANCE_INST 2
@@ -1513,8 +1524,10 @@
lsr w0, wINST, #8 // w0<- AA
GET_VREG w2, w0 // w2<- vAA
FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if 0
cmp w2, #0 // compare (vA, 0)
- b.lt MterpCommonTakenBranchNoFlags
+ .endif
+ tbnz w2, #31, MterpCommonTakenBranchNoFlags
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_not_taken_osr
FETCH_ADVANCE_INST 2
@@ -1537,8 +1550,10 @@
lsr w0, wINST, #8 // w0<- AA
GET_VREG w2, w0 // w2<- vAA
FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if 0
cmp w2, #0 // compare (vA, 0)
- b.ge MterpCommonTakenBranchNoFlags
+ .endif
+ tbz w2, #31, MterpCommonTakenBranchNoFlags
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_not_taken_osr
FETCH_ADVANCE_INST 2
@@ -1561,7 +1576,9 @@
lsr w0, wINST, #8 // w0<- AA
GET_VREG w2, w0 // w2<- vAA
FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if 1
cmp w2, #0 // compare (vA, 0)
+ .endif
b.gt MterpCommonTakenBranchNoFlags
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_not_taken_osr
@@ -1585,7 +1602,9 @@
lsr w0, wINST, #8 // w0<- AA
GET_VREG w2, w0 // w2<- vAA
FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if 1
cmp w2, #0 // compare (vA, 0)
+ .endif
b.le MterpCommonTakenBranchNoFlags
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_not_taken_osr
@@ -3192,7 +3211,6 @@
lsr w3, wINST, #12 // w3<- B
GET_VREG w0, w3 // w0<- vB
ubfx w9, wINST, #8, #4 // w9<- A
- // optional op; may set condition codes
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
sub w0, wzr, w0 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
@@ -3218,7 +3236,6 @@
lsr w3, wINST, #12 // w3<- B
GET_VREG w0, w3 // w0<- vB
ubfx w9, wINST, #8, #4 // w9<- A
- // optional op; may set condition codes
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
mvn w0, w0 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
@@ -3243,7 +3260,6 @@
ubfx w4, wINST, #8, #4 // w4<- A
GET_VREG_WIDE x0, w3
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
-
sub x0, xzr, x0
GET_INST_OPCODE ip // extract opcode from wINST
SET_VREG_WIDE x0, w4
@@ -3267,7 +3283,6 @@
ubfx w4, wINST, #8, #4 // w4<- A
GET_VREG_WIDE x0, w3
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
-
mvn x0, x0
GET_INST_OPCODE ip // extract opcode from wINST
SET_VREG_WIDE x0, w4
@@ -3292,9 +3307,8 @@
lsr w3, wINST, #12 // w3<- B
GET_VREG w0, w3 // w0<- vB
ubfx w9, wINST, #8, #4 // w9<- A
- mov w4, #0x80000000 // optional op; may set condition codes
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- add w0, w0, w4 // w0<- op, w0-w3 changed
+ eor w0, w0, #0x80000000 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG w0, w9 // vAA<- w0
GOTO_OPCODE ip // jump to next instruction
@@ -3317,8 +3331,7 @@
ubfx w4, wINST, #8, #4 // w4<- A
GET_VREG_WIDE x0, w3
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- mov x1, #0x8000000000000000
- add x0, x0, x1
+ eor x0, x0, #0x8000000000000000
GET_INST_OPCODE ip // extract opcode from wINST
SET_VREG_WIDE x0, w4
GOTO_OPCODE ip // jump to next instruction
@@ -3329,24 +3342,15 @@
.balign 128
.L_op_int_to_long: /* 0x81 */
/* File: arm64/op_int_to_long.S */
-/* File: arm64/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "x0 = op w0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
+ /* int-to-long vA, vB */
lsr w3, wINST, #12 // w3<- B
ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG w0, w3
+ GET_VREG_S x0, w3 // x0<- sign_extend(fp[B])
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- sxtw x0, w0 // d0<- op
GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // vA<- d0
+ SET_VREG_WIDE x0, w4 // fp[A]<- x0
GOTO_OPCODE ip // jump to next instruction
-
/* ------------------------------ */
.balign 128
.L_op_int_to_float: /* 0x82 */
@@ -3396,22 +3400,21 @@
.balign 128
.L_op_long_to_int: /* 0x84 */
/* File: arm64/op_long_to_int.S */
-/* File: arm64/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "w0 = op x0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: arm64/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ lsr w1, wINST, #12 // x1<- B from 15:12
+ ubfx w0, wINST, #8, #4 // x0<- A from 11:8
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
+ GET_VREG w2, w1 // x2<- fp[B]
+ GET_INST_OPCODE ip // ip<- opcode from wINST
+ .if 0
+ SET_VREG_OBJECT w2, w0 // fp[A]<- x2
+ .else
+ SET_VREG w2, w0 // fp[A]<- x2
+ .endif
+ GOTO_OPCODE ip // execute next instruction
/* ------------------------------ */
@@ -3608,7 +3611,6 @@
lsr w3, wINST, #12 // w3<- B
GET_VREG w0, w3 // w0<- vB
ubfx w9, wINST, #8, #4 // w9<- A
- // optional op; may set condition codes
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
sxtb w0, w0 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
@@ -3634,7 +3636,6 @@
lsr w3, wINST, #12 // w3<- B
GET_VREG w0, w3 // w0<- vB
ubfx w9, wINST, #8, #4 // w9<- A
- // optional op; may set condition codes
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
uxth w0, w0 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
@@ -3660,7 +3661,6 @@
lsr w3, wINST, #12 // w3<- B
GET_VREG w0, w3 // w0<- vB
ubfx w9, wINST, #8, #4 // w9<- A
- // optional op; may set condition codes
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
sxth w0, w0 // w0<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
@@ -6052,7 +6052,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6088,7 +6088,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6125,7 +6125,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6161,7 +6161,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6197,7 +6197,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6233,7 +6233,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6269,7 +6269,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6305,7 +6305,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6341,7 +6341,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6377,7 +6377,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6413,7 +6413,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
@@ -6458,8 +6458,7 @@
GET_VREG w3, w2 // w3<- object we're operating on
ubfx w2, wINST, #8, #4 // w2<- A
cbz w3, common_errNullObject // object was null
- add x4, x3, x4 // create direct pointer
- ldr x0, [x4]
+ ldr x0, [x3, x4] // x0<- obj.field
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
SET_VREG_WIDE x0, w2
GET_INST_OPCODE ip // extract opcode from wINST
@@ -6495,7 +6494,6 @@
FETCH w1, 1 // w1<- field byte offset
GET_VREG w3, w2 // w3<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- cmp w3, #0 // check object for null
cbz w3, common_errNullObject // object was null
GET_VREG w0, w2 // w0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -6515,8 +6513,7 @@
cbz w2, common_errNullObject // object was null
GET_VREG_WIDE x0, w0 // x0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- add x1, x2, x3 // create a direct pointer
- str x0, [x1]
+ str x0, [x2, x3] // obj.field<- x0
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
@@ -6597,7 +6594,6 @@
FETCH w1, 1 // w1<- field byte offset
GET_VREG w3, w2 // w3<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- cmp w3, #0 // check object for null
cbz w3, common_errNullObject // object was null
GET_VREG w0, w2 // w0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -6617,7 +6613,6 @@
FETCH w1, 1 // w1<- field byte offset
GET_VREG w3, w2 // w3<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- cmp w3, #0 // check object for null
cbz w3, common_errNullObject // object was null
GET_VREG w0, w2 // w0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -6637,7 +6632,6 @@
FETCH w1, 1 // w1<- field byte offset
GET_VREG w3, w2 // w3<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- cmp w3, #0 // check object for null
cbz w3, common_errNullObject // object was null
GET_VREG w0, w2 // w0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -6657,7 +6651,6 @@
FETCH w1, 1 // w1<- field byte offset
GET_VREG w3, w2 // w3<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- cmp w3, #0 // check object for null
cbz w3, common_errNullObject // object was null
GET_VREG w0, w2 // w0<- fp[A]
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -6885,6 +6878,321 @@
.global artMterpAsmSisterEnd
artMterpAsmSisterEnd:
+/* File: arm64/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ cbz x0, MterpFallback // If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException // (self, shadow_frame)
+ cbz w0, MterpExceptionReturn // no local catch, back to caller.
+ ldr x0, [xFP, #OFF_FP_CODE_ITEM]
+ ldr w1, [xFP, #OFF_FP_DEX_PC]
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add xPC, x0, #CODEITEM_INSNS_OFFSET
+ add xPC, xPC, x1, lsl #1 // generate new dex_pc_ptr
+ /* Do we need to switch interpreters? */
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ /* resume execution at catch block */
+ EXPORT_PC
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ * wINST <= signed offset
+ * wPROFILE <= signed hotness countdown (expanded to 32 bits)
+ * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ * If profiling active, do hotness countdown and report if we hit zero.
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ * Is there a pending suspend request? If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranchNoFlags:
+ cmp wINST, #0
+ b.gt .L_forward_branch // don't add forward branches to hotness
+ tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
+ subs wPROFILE, wPROFILE, #1 // countdown
+ b.eq .L_add_batch // counted down to zero - report
+.L_resume_backward_branch:
+ ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
+ add w2, wINST, wINST // w2<- byte offset
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ REFRESH_IBASE
+ ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne .L_suspend_request_pending
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+.L_suspend_request_pending:
+ EXPORT_PC
+ mov x0, xSELF
+ bl MterpSuspendCheck // (self)
+ cbnz x0, MterpFallback
+ REFRESH_IBASE // might have changed during suspend
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+.L_no_count_backwards:
+ cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
+ b.ne .L_resume_backward_branch
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xINST
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
+ cbnz x0, MterpOnStackReplacement
+ b .L_resume_backward_branch
+
+.L_forward_branch:
+ cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
+ b.eq .L_check_osr_forward
+.L_resume_forward_branch:
+ add w2, wINST, wINST // w2<- byte offset
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+.L_check_osr_forward:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xINST
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
+ cbnz x0, MterpOnStackReplacement
+ b .L_resume_forward_branch
+
+.L_add_batch:
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+ ldr x0, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl MterpAddHotnessBatch // (method, shadow_frame, self)
+ mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
+ b .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, #2
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
+ cbnz x0, MterpOnStackReplacement
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+
+/*
+ * Check for suspend check request. Assumes wINST already loaded, xPC advanced and
+ * still needs to get the opcode and branch to it, and flags are in lr.
+ */
+MterpCheckSuspendAndContinue:
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
+ ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.ne check1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+check1:
+ EXPORT_PC
+ mov x0, xSELF
+ bl MterpSuspendCheck // (self)
+ cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ sxtw x2, wINST
+ bl MterpLogOSR
+#endif
+ mov x0, #1 // Signal normal return
+ b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov x0, #0 // signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* xFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov x0, #1 // signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
+ ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
+ str x0, [x2]
+ mov x0, xSELF
+ ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ b.eq check2
+ bl MterpSuspendCheck // (self)
+check2:
+ mov x0, #1 // signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
+ * checking for OSR. If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+ cmp wPROFILE, #0
+ bgt MterpProfileActive // if > 0, we may have some counts to report.
+ ldp fp, lr, [sp, #64]
+ ldp xPC, xFP, [sp, #48]
+ ldp xSELF, xINST, [sp, #32]
+ ldp xIBASE, xREFS, [sp, #16]
+ ldp xPROFILE, x27, [sp], #80
+ ret
+
+MterpProfileActive:
+ mov xINST, x0 // stash return value
+ /* Report cached hotness counts */
+ ldr x0, [xFP, #OFF_FP_METHOD]
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xSELF
+ strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+ bl MterpAddHotnessBatch // (method, shadow_frame, self)
+ mov x0, xINST // restore return value
+ ldp fp, lr, [sp, #64]
+ ldp xPC, xFP, [sp, #48]
+ ldp xSELF, xINST, [sp, #32]
+ ldp xIBASE, xREFS, [sp, #16]
+ ldp xPROFILE, x27, [sp], #80
+ ret
+
+ .cfi_endproc
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
+
.global artMterpAsmAltInstructionStart
.type artMterpAsmAltInstructionStart, %function
@@ -11247,318 +11555,3 @@
.size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
.global artMterpAsmAltInstructionEnd
artMterpAsmAltInstructionEnd:
-/* File: arm64/footer.S */
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
- cbz x0, MterpFallback // If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException // (self, shadow_frame)
- cbz w0, MterpExceptionReturn // no local catch, back to caller.
- ldr x0, [xFP, #OFF_FP_CODE_ITEM]
- ldr w1, [xFP, #OFF_FP_DEX_PC]
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add xPC, x0, #CODEITEM_INSNS_OFFSET
- add xPC, xPC, x1, lsl #1 // generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * wINST <= signed offset
- * wPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp wINST, #0
- b.gt .L_forward_branch // don't add forward branches to hotness
- tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
- subs wPROFILE, wPROFILE, #1 // countdown
- b.eq .L_add_batch // counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- REFRESH_IBASE
- ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
- b.ne .L_suspend_request_pending
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback
- REFRESH_IBASE // might have changed during suspend
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_no_count_backwards:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.ne .L_resume_backward_branch
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_osr_forward
-.L_resume_forward_branch:
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_check_osr_forward:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add x1, xFP, #OFF_FP_SHADOWFRAME
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr x0, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/*
- * Check for suspend check request. Assumes wINST already loaded, xPC advanced and
- * still needs to get the opcode and branch to it, and flags are in lr.
- */
-MterpCheckSuspendAndContinue:
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
- ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
- b.ne check1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-check1:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- sxtw x2, wINST
- bl MterpLogOSR
-#endif
- mov x0, #1 // Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov x0, #0 // signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* xFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov x0, #1 // signal return to caller.
- b MterpDone
-MterpReturn:
- ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
- ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
- str x0, [x2]
- mov x0, xSELF
- ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
- b.eq check2
- bl MterpSuspendCheck // (self)
-check2:
- mov x0, #1 // signal return to caller.
-MterpDone:
-/*
- * At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp wPROFILE, #0
- bgt MterpProfileActive // if > 0, we may have some counts to report.
- ldp fp, lr, [sp, #64]
- ldp xPC, xFP, [sp, #48]
- ldp xSELF, xINST, [sp, #32]
- ldp xIBASE, xREFS, [sp, #16]
- ldp xPROFILE, x27, [sp], #80
- ret
-
-MterpProfileActive:
- mov xINST, x0 // stash return value
- /* Report cached hotness counts */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov x0, xINST // restore return value
- ldp fp, lr, [sp, #64]
- ldp xPC, xFP, [sp, #48]
- ldp xSELF, xINST, [sp, #32]
- ldp xIBASE, xREFS, [sp, #16]
- ldp xPROFILE, x27, [sp], #80
- ret
-
- .cfi_endproc
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
-
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 1f473e4..57443f1 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -29,6 +29,7 @@
#include "art_method-inl.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "class_linker.h"
@@ -300,11 +301,27 @@
PrettyDescriptor(klass).c_str());
return;
}
- if (Runtime::Current()->IsActiveTransaction()) {
- result->SetL(mirror::Field::CreateFromArtField<true>(self, found, true));
+ Runtime* runtime = Runtime::Current();
+ PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+ mirror::Field* field;
+ if (runtime->IsActiveTransaction()) {
+ if (pointer_size == PointerSize::k64) {
+ field = mirror::Field::CreateFromArtField<PointerSize::k64, true>(
+ self, found, true);
+ } else {
+ field = mirror::Field::CreateFromArtField<PointerSize::k32, true>(
+ self, found, true);
+ }
} else {
- result->SetL(mirror::Field::CreateFromArtField<false>(self, found, true));
+ if (pointer_size == PointerSize::k64) {
+ field = mirror::Field::CreateFromArtField<PointerSize::k64, false>(
+ self, found, true);
+ } else {
+ field = mirror::Field::CreateFromArtField<PointerSize::k32, false>(
+ self, found, true);
+ }
}
+ result->SetL(field);
}
// This is required for Enum(Set) code, as that uses reflection to inspect enum classes.
@@ -319,11 +336,28 @@
mirror::String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
mirror::ObjectArray<mirror::Class>* args =
shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<mirror::Class>();
- if (Runtime::Current()->IsActiveTransaction()) {
- result->SetL(mirror::Class::GetDeclaredMethodInternal<true>(self, klass, name, args));
+ Runtime* runtime = Runtime::Current();
+ bool transaction = runtime->IsActiveTransaction();
+ PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+ mirror::Method* method;
+ if (transaction) {
+ if (pointer_size == PointerSize::k64) {
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
+ self, klass, name, args);
+ } else {
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
+ self, klass, name, args);
+ }
} else {
- result->SetL(mirror::Class::GetDeclaredMethodInternal<false>(self, klass, name, args));
+ if (pointer_size == PointerSize::k64) {
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
+ self, klass, name, args);
+ } else {
+ method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
+ self, klass, name, args);
+ }
}
+ result->SetL(method);
}
// Special managed code cut-out to allow constructor lookup in a un-started runtime.
@@ -336,11 +370,28 @@
}
mirror::ObjectArray<mirror::Class>* args =
shadow_frame->GetVRegReference(arg_offset + 1)->AsObjectArray<mirror::Class>();
- if (Runtime::Current()->IsActiveTransaction()) {
- result->SetL(mirror::Class::GetDeclaredConstructorInternal<true>(self, klass, args));
+ Runtime* runtime = Runtime::Current();
+ bool transaction = runtime->IsActiveTransaction();
+ PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+ mirror::Constructor* constructor;
+ if (transaction) {
+ if (pointer_size == PointerSize::k64) {
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k64,
+ true>(self, klass, args);
+ } else {
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k32,
+ true>(self, klass, args);
+ }
} else {
- result->SetL(mirror::Class::GetDeclaredConstructorInternal<false>(self, klass, args));
+ if (pointer_size == PointerSize::k64) {
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k64,
+ false>(self, klass, args);
+ } else {
+ constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k32,
+ false>(self, klass, args);
+ }
}
+ result->SetL(constructor);
}
void UnstartedRuntime::UnstartedClassGetEnclosingClass(
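
Because pointer size is now a non-type template parameter on CreateFromArtField, GetDeclaredMethodInternal, and GetDeclaredConstructorInternal, the runtime value has to be dispatched to a compile-time constant, hence the if/else ladders above. The pattern, reduced to a minimal sketch (Create is a hypothetical stand-in):

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };

template <PointerSize kPointerSize, bool kTransactionActive>
int Create() {  // stand-in for e.g. mirror::Field::CreateFromArtField
  return (static_cast<int>(kPointerSize) << 1) | (kTransactionActive ? 1 : 0);
}

int Dispatch(PointerSize pointer_size, bool transaction) {
  // Each branch pins both template arguments to constants; the if/else
  // ladder is the cost of promoting runtime state to non-type template
  // parameters, which the compiler instantiates separately.
  if (transaction) {
    return pointer_size == PointerSize::k64 ? Create<PointerSize::k64, true>()
                                            : Create<PointerSize::k32, true>();
  }
  return pointer_size == PointerSize::k64 ? Create<PointerSize::k64, false>()
                                          : Create<PointerSize::k32, false>();
}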
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 814b001..7e1f795 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -20,6 +20,7 @@
#include <locale>
#include "base/casts.h"
+#include "base/enums.h"
#include "base/memory_tool.h"
#include "class_linker.h"
#include "common_runtime_test.h"
@@ -383,7 +384,7 @@
ScopedObjectAccess soa(self);
mirror::Class* klass = mirror::String::GetJavaLangString();
ArtMethod* method = klass->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V",
- sizeof(void*));
+ kRuntimePointerSize);
// create instruction data for invoke-direct {v0, v1} of method with fake index
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index cfe6cd1..d52030f 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -19,6 +19,7 @@
#include <dlfcn.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
@@ -258,7 +259,7 @@
// If we get a request to compile a proxy method, we pass the actual Java method
// of that proxy method, as the compiler does not expect a proxy method.
- ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
return false;
}
@@ -410,7 +411,7 @@
// Get the actual Java method if this method is from a proxy class. The compiler
// and the JIT code cache do not expect methods from proxy classes.
- method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
// Cheap check if the method has been compiled already. That's an indicator that we should
// osr into it.
@@ -616,7 +617,7 @@
int32_t new_count = starting_count + count; // int32 here to avoid wrap-around;
if (starting_count < warm_method_threshold_) {
if ((new_count >= warm_method_threshold_) &&
- (method->GetProfilingInfo(sizeof(void*)) == nullptr)) {
+ (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
if (success) {
VLOG(jit) << "Start profiling " << PrettyMethod(method);
@@ -671,7 +672,7 @@
return;
}
- ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
// Update the entrypoint if the ProfilingInfo has one. The interpreter will call it
// instead of interpreting the method.
if ((profiling_info != nullptr) && (profiling_info->GetSavedEntryPoint() != nullptr)) {
@@ -689,7 +690,7 @@
ArtMethod* callee ATTRIBUTE_UNUSED) {
ScopedAssertNoThreadSuspension ants(thread, __FUNCTION__);
DCHECK(this_object != nullptr);
- ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
// Since the instrumentation is marked from the declaring class we need to mark the card so
// that mod-union tables and card rescanning know about the update.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6b6f5a5..b1079dd 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,12 +19,14 @@
#include <sstream>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "debugger_interface.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
+#include "gc/scoped_gc_critical_section.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
#include "linear_alloc.h"
@@ -727,6 +729,9 @@
RemoveUnmarkedCode(self);
if (collect_profiling_info) {
+ ScopedThreadSuspension sts(self, kSuspended);
+ gc::ScopedGCCriticalSection gcs(
+ self, gc::kGcCauseJitCodeCache, gc::kCollectorTypeJitCodeCache);
MutexLock mu(self, lock_);
// Free all profiling infos of methods that are neither compiled nor being compiled.
auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
@@ -738,13 +743,14 @@
// a method has compiled code but no ProfilingInfo.
// We make sure compiled methods have a ProfilingInfo object. It is needed for
// code cache collection.
- if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ if (ContainsPc(ptr) &&
+ info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
// We clear the inline caches, as the classes in them might be stale.
info->ClearGcRootsInInlineCaches();
// Do a fence to make sure the clearing is seen before attaching to the method.
QuasiAtomic::ThreadFenceRelease();
info->GetMethod()->SetProfilingInfo(info);
- } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) {
+ } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
// No need for this ProfilingInfo object anymore.
FreeData(reinterpret_cast<uint8_t*>(info));
return true;
@@ -762,7 +768,7 @@
// have memory leaks of compiled code otherwise.
for (const auto& it : method_code_map_) {
ArtMethod* method = it.second;
- if (method->GetProfilingInfo(sizeof(void*)) == nullptr) {
+ if (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
const void* code_ptr = it.first;
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
@@ -851,7 +857,7 @@
sizeof(void*));
// Check whether some other thread has concurrently created it.
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
return info;
}
@@ -919,7 +925,7 @@
return false;
}
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info == nullptr) {
VLOG(jit) << PrettyMethod(method) << " needs a ProfilingInfo to be compiled";
// Because the counter is not atomic, there are some rare cases where we may not
@@ -939,7 +945,7 @@
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
MutexLock mu(self, lock_);
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
info->IncrementInlineUse();
}
@@ -948,13 +954,13 @@
void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
MutexLock mu(self, lock_);
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
DCHECK(info != nullptr);
info->DecrementInlineUse();
}
void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED, bool osr) {
- ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
DCHECK(info->IsMethodBeingCompiled(osr));
info->SetIsMethodBeingCompiled(false, osr);
}
@@ -966,7 +972,7 @@
void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
const OatQuickMethodHeader* header) {
- ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*));
+ ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
if ((profiling_info != nullptr) &&
(profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
// Prevent future uses of the compiled code.
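
Worth noting in the GarbageCollectCache hunk above: the thread drops to a suspended state and enters a GC critical section before taking lock_, so a concurrent collection can neither wait on this thread nor run while profiling infos are being freed. A sketch of the nesting order as I read the change, with the surrounding logic elided:

    {
      // 1) Let the GC treat this thread as suspended so a suspend-all
      //    request cannot deadlock against us.
      ScopedThreadSuspension sts(self, kSuspended);
      // 2) Keep a collection from starting while profiling infos are freed.
      gc::ScopedGCCriticalSection gcs(
          self, gc::kGcCauseJitCodeCache, gc::kCollectorTypeJitCodeCache);
      // 3) Only now take the code cache lock and prune profiling_infos_.
      MutexLock mu(self, lock_);
    }
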
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 4df6762..6dc1578 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -255,8 +255,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
bool CheckLiveCompiledCodeHasProfilingInfo()
- REQUIRES(lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(lock_);
void FreeCode(uint8_t* code) REQUIRES(lock_);
uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index 5039d2d..aa606a2 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -637,7 +637,7 @@
os << "\n\tclasses: ";
for (const auto class_it : dex_data.class_set) {
if (dex_file != nullptr) {
- os << "\n\t\t" << PrettyType(class_it, *dex_file);
+ os << "\n\t\t" << dex_file->GetClassDescriptor(dex_file->GetClassDef(class_it));
} else {
os << class_it << ",";
}
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 4d4d1ea..5a469e5 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -21,6 +21,7 @@
#include <fcntl.h>
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "compiler_filter.h"
@@ -206,12 +207,13 @@
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
}
- for (ArtMethod& method : klass->GetMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
if (!method.IsNative()) {
if (method.GetCounter() >= startup_method_samples_ ||
- method.GetProfilingInfo(sizeof(void*)) != nullptr) {
+ method.GetProfilingInfo(kRuntimePointerSize) != nullptr) {
// Have samples, add to profile.
- const DexFile* dex_file = method.GetInterfaceMethodIfProxy(sizeof(void*))->GetDexFile();
+ const DexFile* dex_file =
+ method.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetDexFile();
methods_->push_back(MethodReference(dex_file, method.GetDexMethodIndex()));
}
}
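
The loop above now records a method when either its interpreter counter reached startup_method_samples_ or it already owns a ProfilingInfo. Pulled out as a standalone predicate for readability; this helper is hypothetical, the field and accessor names come from the hunk:

    // Hypothetical extraction of the per-method filter used by the saver.
    bool ShouldRecordStartupMethod(ArtMethod& method,
                                   uint16_t startup_method_samples) {
      if (method.IsNative()) {
        return false;  // Native methods never accumulate interpreter samples.
      }
      // Hot enough, or the JIT already attached profiling state to it.
      return method.GetCounter() >= startup_method_samples ||
             method.GetProfilingInfo(kRuntimePointerSize) != nullptr;
    }
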
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 8cdf96d..e1a4e2a 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -27,6 +27,7 @@
#include "art_method-inl.h"
#include "atomic.h"
#include "base/allocator.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/stl_util.h"
@@ -375,10 +376,12 @@
ScopedObjectAccess soa(env);
ArtMethod* m = soa.DecodeMethod(mid);
mirror::AbstractMethod* method;
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
if (m->IsConstructor()) {
- method = mirror::Constructor::CreateFromArtMethod(soa.Self(), m);
+ method = mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), m);
} else {
- method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
+ method = mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), m);
}
return soa.AddLocalReference<jobject>(method);
}
@@ -387,7 +390,8 @@
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
ArtField* f = soa.DecodeField(fid);
- return soa.AddLocalReference<jobject>(mirror::Field::CreateFromArtField(soa.Self(), f, true));
+ return soa.AddLocalReference<jobject>(
+ mirror::Field::CreateFromArtField<kRuntimePointerSize>(soa.Self(), f, true));
}
static jclass GetObjectClass(JNIEnv* env, jobject java_object) {
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index 341501b..4a2a293 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -43,17 +43,15 @@
inline size_t LockWord::ForwardingAddress() const {
DCHECK_EQ(GetState(), kForwardingAddress);
- return value_ << kStateSize;
+ return value_ << kForwardingAddressShift;
}
inline LockWord::LockWord() : value_(0) {
DCHECK_EQ(GetState(), kUnlocked);
}
-inline LockWord::LockWord(Monitor* mon, uint32_t rb_state)
- : value_(mon->GetMonitorId() | (rb_state << kReadBarrierStateShift) |
- (kStateFat << kStateShift)) {
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
+inline LockWord::LockWord(Monitor* mon, uint32_t gc_state)
+ : value_(mon->GetMonitorId() | (gc_state << kGCStateShift) | (kStateFat << kStateShift)) {
#ifndef __LP64__
DCHECK_ALIGNED(mon, kMonitorIdAlignment);
#endif
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 5d0d204..538b6eb 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -35,27 +35,27 @@
* the state. The four possible states are fat locked, thin/unlocked, hash code, and forwarding
* address. When the lock word is in the "thin" state, its bits are formatted as follows:
*
- * |33|22|222222221111|1111110000000000|
- * |10|98|765432109876|5432109876543210|
- * |00|rb| lock count |thread id owner |
+ * |33|2|2|222222221111|1111110000000000|
+ * |10|9|8|765432109876|5432109876543210|
+ * |00|m|r| lock count |thread id owner |
*
* When the lock word is in the "fat" state and its bits are formatted as follows:
*
- * |33|22|2222222211111111110000000000|
- * |10|98|7654321098765432109876543210|
- * |01|rb| MonitorId |
+ * |33|2|2|2222222211111111110000000000|
+ * |10|9|8|7654321098765432109876543210|
+ * |01|m|r| MonitorId |
*
* When the lock word is in the hash state, its bits are formatted as follows:
*
- * |33|22|2222222211111111110000000000|
- * |10|98|7654321098765432109876543210|
- * |10|rb| HashCode |
+ * |33|2|2|2222222211111111110000000000|
+ * |10|9|8|7654321098765432109876543210|
+ * |10|m|r| HashCode |
*
- * When the lock word is in fowarding address state and its bits are formatted as follows:
+ * When the lock word is in the forwarding address state, its bits are formatted as follows:
*
- * |33|22|2222222211111111110000000000|
- * |10|98|7654321098765432109876543210|
- * |11| ForwardingAddress |
+ * |33|2|22222222211111111110000000000|
+ * |10|9|87654321098765432109876543210|
+ * |11|0| ForwardingAddress |
*
* The r bit stores the read barrier state and the m bit stores the mark bit state.
*/
@@ -64,11 +64,13 @@
enum SizeShiftsAndMasks { // private marker to avoid generate-operator-out.py from processing.
// Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
kStateSize = 2,
- kReadBarrierStateSize = 2,
+ kReadBarrierStateSize = 1,
+ kMarkBitStateSize = 1,
// Number of bits to encode the thin lock owner.
kThinLockOwnerSize = 16,
// Remaining bits are the recursive lock count.
- kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize,
+ kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize -
+ kMarkBitStateSize,
// Thin lock bits. Owner in lowest bits.
kThinLockOwnerShift = 0,
@@ -81,25 +83,43 @@
kThinLockCountOne = 1 << kThinLockCountShift, // == 65536 (0x10000)
// State in the highest bits.
- kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift,
+ kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift +
+ kMarkBitStateSize,
kStateMask = (1 << kStateSize) - 1,
kStateMaskShifted = kStateMask << kStateShift,
kStateThinOrUnlocked = 0,
kStateFat = 1,
kStateHash = 2,
kStateForwardingAddress = 3,
+
+ // Read barrier bit.
kReadBarrierStateShift = kThinLockCountSize + kThinLockCountShift,
kReadBarrierStateMask = (1 << kReadBarrierStateSize) - 1,
kReadBarrierStateMaskShifted = kReadBarrierStateMask << kReadBarrierStateShift,
kReadBarrierStateMaskShiftedToggled = ~kReadBarrierStateMaskShifted,
+ // Mark bit.
+ kMarkBitStateShift = kReadBarrierStateSize + kReadBarrierStateShift,
+ kMarkBitStateMask = (1 << kMarkBitStateSize) - 1,
+ kMarkBitStateMaskShifted = kMarkBitStateMask << kMarkBitStateShift,
+ kMarkBitStateMaskShiftedToggled = ~kMarkBitStateMaskShifted,
+
+ // GC state is mark bit and read barrier state.
+ kGCStateSize = kReadBarrierStateSize + kMarkBitStateSize,
+ kGCStateShift = kReadBarrierStateShift,
+ kGCStateMaskShifted = kReadBarrierStateMaskShifted | kMarkBitStateMaskShifted,
+ kGCStateMaskShiftedToggled = ~kGCStateMaskShifted,
+
// When the state is kHashCode, the non-state bits hold the hashcode.
// Note Object.hashCode() has the hash code layout hardcoded.
kHashShift = 0,
- kHashSize = 32 - kStateSize - kReadBarrierStateSize,
+ kHashSize = 32 - kStateSize - kReadBarrierStateSize - kMarkBitStateSize,
kHashMask = (1 << kHashSize) - 1,
kMaxHash = kHashMask,
+ // Forwarding address shift.
+ kForwardingAddressShift = kObjectAlignmentShift,
+
kMonitorIdShift = kHashShift,
kMonitorIdSize = kHashSize,
kMonitorIdMask = kHashMask,
@@ -108,31 +128,31 @@
kMaxMonitorId = kMaxHash
};
- static LockWord FromThinLockId(uint32_t thread_id, uint32_t count, uint32_t rb_state) {
+ static LockWord FromThinLockId(uint32_t thread_id, uint32_t count, uint32_t gc_state) {
CHECK_LE(thread_id, static_cast<uint32_t>(kThinLockMaxOwner));
CHECK_LE(count, static_cast<uint32_t>(kThinLockMaxCount));
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
- return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift) |
- (rb_state << kReadBarrierStateShift) |
+ // DCHECK_EQ(gc_bits & kGCStateMaskToggled, 0U);
+ return LockWord((thread_id << kThinLockOwnerShift) |
+ (count << kThinLockCountShift) |
+ (gc_state << kGCStateShift) |
(kStateThinOrUnlocked << kStateShift));
}
static LockWord FromForwardingAddress(size_t target) {
DCHECK_ALIGNED(target, (1 << kStateSize));
- return LockWord((target >> kStateSize) | (kStateForwardingAddress << kStateShift));
+ return LockWord((target >> kForwardingAddressShift) | (kStateForwardingAddress << kStateShift));
}
- static LockWord FromHashCode(uint32_t hash_code, uint32_t rb_state) {
+ static LockWord FromHashCode(uint32_t hash_code, uint32_t gc_state) {
CHECK_LE(hash_code, static_cast<uint32_t>(kMaxHash));
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
+ // DCHECK_EQ(gc_bits & kGCStateMaskToggled, 0U);
return LockWord((hash_code << kHashShift) |
- (rb_state << kReadBarrierStateShift) |
+ (gc_state << kGCStateShift) |
(kStateHash << kStateShift));
}
- static LockWord FromDefault(uint32_t rb_state) {
- DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
- return LockWord(rb_state << kReadBarrierStateShift);
+ static LockWord FromDefault(uint32_t gc_state) {
+ return LockWord(gc_state << kGCStateShift);
}
static bool IsDefault(LockWord lw) {
@@ -154,7 +174,7 @@
LockState GetState() const {
CheckReadBarrierState();
if ((!kUseReadBarrier && UNLIKELY(value_ == 0)) ||
- (kUseReadBarrier && UNLIKELY((value_ & kReadBarrierStateMaskShiftedToggled) == 0))) {
+ (kUseReadBarrier && UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0))) {
return kUnlocked;
} else {
uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
@@ -176,6 +196,10 @@
return (value_ >> kReadBarrierStateShift) & kReadBarrierStateMask;
}
+ uint32_t GCState() const {
+ return (value_ & kGCStateMaskShifted) >> kGCStateShift;
+ }
+
void SetReadBarrierState(uint32_t rb_state) {
DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
@@ -184,6 +208,19 @@
value_ |= (rb_state & kReadBarrierStateMask) << kReadBarrierStateShift;
}
+
+ uint32_t MarkBitState() const {
+ return (value_ >> kMarkBitStateShift) & kMarkBitStateMask;
+ }
+
+ void SetMarkBitState(uint32_t mark_bit) {
+ DCHECK_EQ(mark_bit & ~kMarkBitStateMask, 0U);
+ DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
+ // Clear and or the bits.
+ value_ &= kMarkBitStateMaskShiftedToggled;
+ value_ |= mark_bit << kMarkBitStateShift;
+ }
+
// Return the owner thin lock thread id.
uint32_t ThinLockOwner() const;
@@ -197,7 +234,7 @@
size_t ForwardingAddress() const;
// Construct a lock word for inflation to use a Monitor.
- LockWord(Monitor* mon, uint32_t rb_state);
+ LockWord(Monitor* mon, uint32_t gc_state);
// Return the hash code stored in the lock word, must be kHashCode state.
int32_t GetHashCode() const;
@@ -207,7 +244,7 @@
if (kIncludeReadBarrierState) {
return lw1.GetValue() == lw2.GetValue();
}
- return lw1.GetValueWithoutReadBarrierState() == lw2.GetValueWithoutReadBarrierState();
+ return lw1.GetValueWithoutGCState() == lw2.GetValueWithoutGCState();
}
void Dump(std::ostream& os) {
@@ -248,9 +285,9 @@
return value_;
}
- uint32_t GetValueWithoutReadBarrierState() const {
+ uint32_t GetValueWithoutGCState() const {
CheckReadBarrierState();
- return value_ & ~(kReadBarrierStateMask << kReadBarrierStateShift);
+ return value_ & kGCStateMaskShiftedToggled;
}
// Only Object should be converting LockWords to/from uints.
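
With the mark bit squeezed in, the 32-bit lock word budget becomes: 2 state bits, 1 mark bit, 1 read barrier bit, and 28 payload bits (or 16 owner + 12 count bits in the thin state). A self-contained check of that arithmetic, using the constants from the enum above:

    #include <cstdint>

    constexpr uint32_t kStateSize = 2;
    constexpr uint32_t kReadBarrierStateSize = 1;  // was 2 before this change
    constexpr uint32_t kMarkBitStateSize = 1;
    constexpr uint32_t kThinLockOwnerSize = 16;

    constexpr uint32_t kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize -
        kReadBarrierStateSize - kMarkBitStateSize;
    static_assert(kThinLockCountSize == 12, "recursive lock count keeps 12 bits");

    constexpr uint32_t kHashSize =
        32 - kStateSize - kReadBarrierStateSize - kMarkBitStateSize;
    static_assert(kHashSize == 28, "hash codes and monitor ids keep 28 bits");

Forwarding addresses now shift by kForwardingAddressShift (equal to kObjectAlignmentShift) rather than kStateSize, which is why FromForwardingAddress and ForwardingAddress in lock_word-inl.h were updated in lockstep.
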
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index fbb0441..f018c1f 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -178,9 +178,9 @@
}
// Is `address` aligned on a machine word?
- template<typename T> static bool IsWordAligned(const T* address) {
+ template<typename T> static constexpr bool IsWordAligned(const T* address) {
// Word alignment in bytes.
- size_t kWordAlignment = GetInstructionSetPointerSize(kRuntimeISA);
+ size_t kWordAlignment = static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA));
return IsAlignedParam(address, kWordAlignment);
}
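
The explicit static_cast above is forced by the enum change: a scoped enum such as PointerSize no longer converts implicitly to size_t. A minimal illustration, with the enum restated for self-containment:

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    constexpr size_t WordAlignment(PointerSize ps) {
      // 'return ps;' would not compile: no implicit enum-class conversion.
      return static_cast<size_t>(ps);
    }
    static_assert(WordAlignment(PointerSize::k64) == 8, "sanity");
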
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 5a07dee..b4dce58 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -21,12 +21,9 @@
namespace art {
namespace mirror {
-template <bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
- auto* interface_method = method->GetInterfaceMethodIfProxy(
- kTransactionActive
- ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
- : sizeof(void*));
+ auto* interface_method = method->GetInterfaceMethodIfProxy(kPointerSize);
SetArtMethod<kTransactionActive>(method);
SetFieldObject<kTransactionActive>(DeclaringClassOffset(), method->GetDeclaringClass());
SetFieldObject<kTransactionActive>(
@@ -36,8 +33,14 @@
return true;
}
-template bool AbstractMethod::CreateFromArtMethod<false>(ArtMethod* method);
-template bool AbstractMethod::CreateFromArtMethod<true>(ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, false>(
+ ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, true>(
+ ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, false>(
+ ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, true>(
+ ArtMethod* method);
ArtMethod* AbstractMethod::GetArtMethod() {
return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
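
Because CreateFromArtMethod is defined in the .cc file, every (pointer size, transaction) combination a caller can name must be explicitly instantiated there, which is what the four template lines above do. The generic shape of the pattern, reduced to a toy example:

    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };

    // Header: declaration only, definition hidden in one .cc file.
    template <PointerSize kPointerSize, bool kTransactionActive>
    bool CreateFromMethod(const void* method);

    // .cc file: the definition...
    template <PointerSize kPointerSize, bool kTransactionActive>
    bool CreateFromMethod(const void* method) {
      return method != nullptr;  // toy body
    }

    // ...plus one explicit instantiation per combination, or callers in
    // other translation units fail to link.
    template bool CreateFromMethod<PointerSize::k32, false>(const void*);
    template bool CreateFromMethod<PointerSize::k32, true>(const void*);
    template bool CreateFromMethod<PointerSize::k64, false>(const void*);
    template bool CreateFromMethod<PointerSize::k64, true>(const void*);
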
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index a39f94d..cfbe492 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,7 +34,7 @@
class MANAGED AbstractMethod : public AccessibleObject {
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
- template <bool kTransactionActive = false>
+ template <PointerSize kPointerSize, bool kTransactionActive>
bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index c6fa15d..014e54b 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -30,7 +30,7 @@
namespace art {
namespace mirror {
-inline uint32_t Array::ClassSize(size_t pointer_size) {
+inline uint32_t Array::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
@@ -371,25 +371,23 @@
}
template<typename T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline T PointerArray::GetElementPtrSize(uint32_t idx, size_t ptr_size) {
+inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
// C-style casts here since T is sometimes a pointer and sometimes an integer
// (for stack traces).
- if (ptr_size == 8) {
+ if (ptr_size == PointerSize::k64) {
return (T)static_cast<uintptr_t>(
AsLongArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
}
- DCHECK_EQ(ptr_size, 4u);
return (T)static_cast<uintptr_t>(
AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
}
template<bool kTransactionActive, bool kUnchecked>
-inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, size_t ptr_size) {
- if (ptr_size == 8) {
+inline void PointerArray::SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
(kUnchecked ? down_cast<LongArray*>(static_cast<Object*>(this)) : AsLongArray())->
SetWithoutChecks<kTransactionActive>(idx, element);
} else {
- DCHECK_EQ(ptr_size, 4u);
DCHECK_LE(element, static_cast<uint64_t>(0xFFFFFFFFu));
(kUnchecked ? down_cast<IntArray*>(static_cast<Object*>(this)) : AsIntArray())
->SetWithoutChecks<kTransactionActive>(idx, static_cast<uint32_t>(element));
@@ -397,7 +395,7 @@
}
template<bool kTransactionActive, bool kUnchecked, typename T>
-inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, size_t ptr_size) {
+inline void PointerArray::SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size) {
SetElementPtrSize<kTransactionActive, kUnchecked>(idx,
reinterpret_cast<uintptr_t>(element),
ptr_size);
@@ -405,7 +403,7 @@
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void PointerArray::Fixup(mirror::PointerArray* dest,
- size_t pointer_size,
+ PointerSize pointer_size,
const Visitor& visitor) {
for (size_t i = 0, count = GetLength(); i < count; ++i) {
void* ptr = GetElementPtrSize<void*, kVerifyFlags, kReadBarrierOption>(i, pointer_size);
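
A PointerArray is physically an IntArray or a LongArray depending on the image pointer width, and the accessors above hide that. A hedged usage sketch of a store/load round trip, assuming the mutator lock is held; 'table' and 'm' are hypothetical:

    void CopyEntry(mirror::PointerArray* table,
                   uint32_t idx,
                   ArtMethod* m,
                   PointerSize ptr_size) {
      // Widens to a 64-bit slot on 64-bit images, 32-bit slot otherwise.
      table->SetElementPtrSize(idx, m, ptr_size);
      // T may be a pointer or an integer type; here we read the pointer back.
      ArtMethod* read = table->GetElementPtrSize<ArtMethod*>(idx, ptr_size);
      DCHECK_EQ(read, m);
    }
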
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 9a21ec2..ec10a43 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_ARRAY_H_
#define ART_RUNTIME_MIRROR_ARRAY_H_
+#include "base/enums.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
#include "object.h"
@@ -31,7 +32,7 @@
class MANAGED Array : public Object {
public:
// The size of a java.lang.Class representing an array.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Allocates an array with the given properties, if kFillUsable is true the array will be of at
// least component_count size, however, if there's usable space at the end of the allocation the
@@ -162,9 +163,10 @@
array_class_ = GcRoot<Class>(array_class);
}
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(!array_class_.IsNull());
- return array_class_.Read();
+ return array_class_.Read<kReadBarrierOption>();
}
static void ResetArrayClass() {
@@ -186,14 +188,14 @@
template<typename T,
VerifyObjectFlags kVerifyFlags = kVerifyNone,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- T GetElementPtrSize(uint32_t idx, size_t ptr_size)
+ T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false>
- void SetElementPtrSize(uint32_t idx, uint64_t element, size_t ptr_size)
+ void SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
- void SetElementPtrSize(uint32_t idx, T* element, size_t ptr_size)
+ void SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
@@ -201,7 +203,7 @@
template <VerifyObjectFlags kVerifyFlags = kVerifyNone,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void Fixup(mirror::PointerArray* dest, size_t pointer_size, const Visitor& visitor)
+ void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
};
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index b783a01..8f5419c 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -80,13 +80,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetDirectMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDirectMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetDirectMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(PointerSize pointer_size) {
return ArraySlice<ArtMethod>(GetMethodsPtr(),
GetDirectMethodsStartOffset(),
GetVirtualMethodsStartOffset(),
@@ -95,13 +94,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetDeclaredMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size) {
return ArraySlice<ArtMethod>(GetMethodsPtr(),
GetDirectMethodsStartOffset(),
GetCopiedMethodsStartOffset(),
@@ -109,13 +107,13 @@
ArtMethod::Alignment(pointer_size));
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSliceUnchecked(
+ PointerSize pointer_size) {
return ArraySlice<ArtMethod>(GetMethodsPtr(),
GetVirtualMethodsStartOffset(),
GetCopiedMethodsStartOffset(),
@@ -124,13 +122,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetVirtualMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSliceUnchecked(PointerSize pointer_size) {
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
return ArraySlice<ArtMethod>(methods,
GetVirtualMethodsStartOffset(),
@@ -140,13 +137,12 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
return GetCopiedMethodsSliceUnchecked(pointer_size);
}
-inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSliceUnchecked(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSliceUnchecked(PointerSize pointer_size) {
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
return ArraySlice<ArtMethod>(methods,
GetCopiedMethodsStartOffset(),
@@ -161,7 +157,7 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArraySlice<ArtMethod> Class::GetMethodsSlice(size_t pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
return ArraySlice<ArtMethod>(methods,
@@ -177,12 +173,12 @@
return (methods == nullptr) ? 0 : methods->size();
}
-inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return &GetDirectMethodsSliceUnchecked(pointer_size).At(i);
}
-inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetDirectMethod(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return &GetDirectMethodsSlice(pointer_size).At(i);
}
@@ -212,20 +208,20 @@
}
template<VerifyObjectFlags kVerifyFlags>
-inline ArtMethod* Class::GetVirtualMethod(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVirtualMethod(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>())
<< PrettyClass(this) << " status=" << GetStatus();
return GetVirtualMethodUnchecked(i, pointer_size);
}
-inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
DCHECK(IsLoaded() || IsErroneous());
return GetVirtualMethodUnchecked(i, pointer_size);
}
-inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return &GetVirtualMethodsSliceUnchecked(pointer_size).At(i);
}
@@ -258,7 +254,7 @@
return GetVTable() != nullptr ? GetVTable()->GetLength() : 0;
}
-inline ArtMethod* Class::GetVTableEntry(uint32_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetVTableEntry(uint32_t i, PointerSize pointer_size) {
if (ShouldHaveEmbeddedVTable()) {
return GetEmbeddedVTableEntry(i, pointer_size);
}
@@ -275,29 +271,29 @@
SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len);
}
-inline ImTable* Class::GetImt(size_t pointer_size) {
+inline ImTable* Class::GetImt(PointerSize pointer_size) {
return GetFieldPtrWithSize<ImTable*>(MemberOffset(ImtPtrOffset(pointer_size)), pointer_size);
}
-inline void Class::SetImt(ImTable* imt, size_t pointer_size) {
+inline void Class::SetImt(ImTable* imt, PointerSize pointer_size) {
return SetFieldPtrWithSize<false>(MemberOffset(ImtPtrOffset(pointer_size)), imt, pointer_size);
}
-inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size) {
+inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size) {
return MemberOffset(
EmbeddedVTableOffset(pointer_size).Uint32Value() + i * VTableEntrySize(pointer_size));
}
-inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size) {
+inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i, PointerSize pointer_size) {
return GetFieldPtrWithSize<ArtMethod*>(EmbeddedVTableEntryOffset(i, pointer_size), pointer_size);
}
inline void Class::SetEmbeddedVTableEntryUnchecked(
- uint32_t i, ArtMethod* method, size_t pointer_size) {
+ uint32_t i, ArtMethod* method, PointerSize pointer_size) {
SetFieldPtrWithSize<false>(EmbeddedVTableEntryOffset(i, pointer_size), method, pointer_size);
}
-inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) {
+inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size) {
auto* vtable = GetVTableDuringLinking();
CHECK_EQ(method, vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size));
SetEmbeddedVTableEntryUnchecked(i, method, pointer_size);
@@ -453,10 +449,12 @@
return false;
}
-inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size) {
+inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method,
+ PointerSize pointer_size) {
Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
+ DCHECK(!method->IsCopied());
// TODO cache to improve lookup speed
const int32_t iftable_count = GetIfTableCount();
IfTable* iftable = GetIfTable();
@@ -469,7 +467,7 @@
return nullptr;
}
-inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) {
+inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, PointerSize pointer_size) {
// Only miranda or default methods may come from interfaces and be used as a virtual.
DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsDefault() || method->IsMiranda());
// The argument method may come from a super class.
@@ -477,13 +475,13 @@
return GetVTableEntry(method->GetMethodIndex(), pointer_size);
}
-inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size) {
+inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method, PointerSize pointer_size) {
DCHECK(!method->GetDeclaringClass()->IsInterface());
return GetSuperClass()->GetVTableEntry(method->GetMethodIndex(), pointer_size);
}
inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method,
- size_t pointer_size) {
+ PointerSize pointer_size) {
if (method->IsDirect()) {
return method;
}
@@ -527,7 +525,7 @@
}
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
-inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(size_t pointer_size) {
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointer_size) {
DCHECK(IsResolved());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
@@ -538,7 +536,8 @@
return MemberOffset(base);
}
-inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) {
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(
+ PointerSize pointer_size) {
DCHECK(IsLoaded());
uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable()) {
@@ -707,13 +706,13 @@
uint32_t num_32bit_static_fields,
uint32_t num_64bit_static_fields,
uint32_t num_ref_static_fields,
- size_t pointer_size) {
+ PointerSize pointer_size) {
// Space used by java.lang.Class and its instance fields.
uint32_t size = sizeof(Class);
// Space used by embedded tables.
if (has_embedded_vtable) {
- size = RoundUp(size + sizeof(uint32_t), pointer_size);
- size += pointer_size; // size of pointer to IMT
+ size = RoundUp(size + sizeof(uint32_t), static_cast<size_t>(pointer_size));
+ size += static_cast<size_t>(pointer_size); // size of pointer to IMT
size += num_vtable_entries * VTableEntrySize(pointer_size);
}
@@ -907,7 +906,7 @@
}
template<ReadBarrierOption kReadBarrierOption, class Visitor>
-void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) {
+void mirror::Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
for (ArtField& field : GetSFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
@@ -927,35 +926,34 @@
}
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return GetDirectMethodsSliceUnchecked(pointer_size).AsRange();
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredMethods(
- size_t pointer_size) {
- CheckPointerSize(pointer_size);
+ PointerSize pointer_size) {
return GetDeclaredMethodsSliceUnchecked(pointer_size).AsRange();
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredVirtualMethods(
- size_t pointer_size) {
- CheckPointerSize(pointer_size);
+ PointerSize pointer_size) {
return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size).AsRange();
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(
+ PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return GetVirtualMethodsSliceUnchecked(pointer_size).AsRange();
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetCopiedMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetCopiedMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return GetCopiedMethodsSliceUnchecked(pointer_size).AsRange();
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetMethods(size_t pointer_size) {
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
return MakeIterationRangeFromLengthPrefixedArray(GetMethodsPtr(),
ArtMethod::Size(pointer_size),
@@ -978,13 +976,12 @@
return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
}
-inline MemberOffset Class::EmbeddedVTableOffset(size_t pointer_size) {
+inline MemberOffset Class::EmbeddedVTableOffset(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + pointer_size);
+ return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
}
-inline void Class::CheckPointerSize(size_t pointer_size) {
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+inline void Class::CheckPointerSize(PointerSize pointer_size) {
DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
@@ -1039,7 +1036,7 @@
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void Class::FixupNativePointers(mirror::Class* dest,
- size_t pointer_size,
+ PointerSize pointer_size,
const Visitor& visitor) {
// Update the field arrays.
LengthPrefixedArray<ArtField>* const sfields = GetSFieldsPtr();
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 1c31c57..f948be7 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -334,8 +334,9 @@
}
}
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
// Check the current class before checking the interfaces.
ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -353,8 +354,9 @@
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
// Check the current class before checking the interfaces.
ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -372,8 +374,9 @@
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size) {
+ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
// Check the current class before checking the interfaces.
ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
@@ -392,8 +395,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
for (auto& method : GetDirectMethods(pointer_size)) {
if (name == method.GetName() && method.GetSignature() == signature) {
return &method;
@@ -402,8 +406,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
for (auto& method : GetDirectMethods(pointer_size)) {
if (name == method.GetName() && signature == method.GetSignature()) {
return &method;
@@ -412,8 +417,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
for (auto& method : GetDirectMethods(pointer_size)) {
if (method.GetDexMethodIndex() == dex_method_idx) {
@@ -424,8 +430,9 @@
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -435,8 +442,9 @@
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDirectMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -447,7 +455,7 @@
}
ArtMethod* Class::FindDirectMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
+ const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
@@ -457,7 +465,8 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethodByName(const StringPiece& name, size_t pointer_size) {
+ArtMethod* Class::FindDeclaredDirectMethodByName(const StringPiece& name,
+ PointerSize pointer_size) {
for (auto& method : GetDirectMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName()) {
@@ -471,8 +480,9 @@
// because they do not only find 'declared' methods but will also return copied methods. This
// behavior is desired and correct, but the naming can lead to confusion because in the Java
// language 'declared' excludes interface methods, which might be found by this.
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName() && np_method->GetSignature() == signature) {
@@ -482,8 +492,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName() && signature == np_method->GetSignature()) {
@@ -493,8 +504,9 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
for (auto& method : GetDeclaredVirtualMethods(pointer_size)) {
if (method.GetDexMethodIndex() == dex_method_idx) {
@@ -505,7 +517,8 @@
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name, size_t pointer_size) {
+ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name,
+ PointerSize pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
if (name == np_method->GetName()) {
@@ -516,7 +529,7 @@
}
ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const StringPiece& signature, size_t pointer_size) {
+ const StringPiece& name, const StringPiece& signature, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -527,7 +540,7 @@
}
ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const Signature& signature, size_t pointer_size) {
+ const StringPiece& name, const Signature& signature, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -538,7 +551,7 @@
}
ArtMethod* Class::FindVirtualMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) {
+ const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
@@ -548,7 +561,7 @@
return nullptr;
}
-ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, size_t pointer_size) {
+ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size) {
DCHECK(method->GetDeclaringClass()->IsInterface());
DCHECK(IsInterface()) << "Should only be called on an interface class";
// Check if we have one defined on this interface first. This includes searching copied ones to
@@ -613,7 +626,7 @@
return abstract_methods.empty() ? nullptr : abstract_methods[0];
}
-ArtMethod* Class::FindClassInitializer(size_t pointer_size) {
+ArtMethod* Class::FindClassInitializer(PointerSize pointer_size) {
for (ArtMethod& method : GetDirectMethods(pointer_size)) {
if (method.IsClassInitializer()) {
DCHECK_STREQ(method.GetName(), "<clinit>");
@@ -803,7 +816,7 @@
return nullptr;
}
-void Class::SetSkipAccessChecksFlagOnAllMethods(size_t pointer_size) {
+void Class::SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size) {
DCHECK(IsVerified());
for (auto& m : GetMethods(pointer_size)) {
if (!m.IsNative() && m.IsInvokable()) {
@@ -917,7 +930,7 @@
return GetDexFile().GetInterfacesList(*class_def);
}
-void Class::PopulateEmbeddedVTable(size_t pointer_size) {
+void Class::PopulateEmbeddedVTable(PointerSize pointer_size) {
PointerArray* table = GetVTableDuringLinking();
CHECK(table != nullptr) << PrettyClass(this);
const size_t table_length = table->GetLength();
@@ -963,9 +976,12 @@
// The pre-fence visitor for Class::CopyOf().
class CopyClassVisitor {
public:
- CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length,
- size_t copy_bytes, ImTable* imt,
- size_t pointer_size)
+ CopyClassVisitor(Thread* self,
+ Handle<mirror::Class>* orig,
+ size_t new_length,
+ size_t copy_bytes,
+ ImTable* imt,
+ PointerSize pointer_size)
: self_(self), orig_(orig), new_length_(new_length),
copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
}
@@ -991,12 +1007,11 @@
const size_t new_length_;
const size_t copy_bytes_;
ImTable* imt_;
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
};
-Class* Class::CopyOf(Thread* self, int32_t new_length,
- ImTable* imt, size_t pointer_size) {
+Class* Class::CopyOf(Thread* self, int32_t new_length, ImTable* imt, PointerSize pointer_size) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
@@ -1022,14 +1037,14 @@
// TODO: Move this to java_lang_Class.cc?
ArtMethod* Class::GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, size_t pointer_size) {
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size) {
for (auto& m : GetDirectMethods(pointer_size)) {
// Skip <clinit> which is a static constructor, as well as non constructors.
if (m.IsStatic() || !m.IsConstructor()) {
continue;
}
// May cause thread suspension and exceptions.
- if (m.GetInterfaceMethodIfProxy(sizeof(void*))->EqualParameters(args)) {
+ if (m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->EqualParameters(args)) {
return &m;
}
if (UNLIKELY(self->IsExceptionPending())) {
@@ -1053,7 +1068,7 @@
return (type_id == nullptr) ? DexFile::kDexNoIndex : dex_file.GetIndexForTypeId(*type_id);
}
-template <bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
mirror::Class* klass,
mirror::String* name,
@@ -1074,11 +1089,8 @@
auto h_args = hs.NewHandle(args);
Handle<mirror::Class> h_klass = hs.NewHandle(klass);
ArtMethod* result = nullptr;
- const size_t pointer_size = kTransactionActive
- ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
- : sizeof(void*);
- for (auto& m : h_klass->GetDeclaredVirtualMethods(pointer_size)) {
- auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+ for (auto& m : h_klass->GetDeclaredVirtualMethods(kPointerSize)) {
+ auto* np_method = m.GetInterfaceMethodIfProxy(kPointerSize);
// May cause thread suspension.
mirror::String* np_name = np_method->GetNameAsString(self);
if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
@@ -1089,19 +1101,19 @@
}
auto modifiers = m.GetAccessFlags();
if ((modifiers & kSkipModifiers) == 0) {
- return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+ return mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
if ((modifiers & kAccMiranda) == 0) {
result = &m; // Remember as potential result if it's not a miranda method.
}
}
if (result == nullptr) {
- for (auto& m : h_klass->GetDirectMethods(pointer_size)) {
+ for (auto& m : h_klass->GetDirectMethods(kPointerSize)) {
auto modifiers = m.GetAccessFlags();
if ((modifiers & kAccConstructor) != 0) {
continue;
}
- auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+ auto* np_method = m.GetInterfaceMethodIfProxy(kPointerSize);
// May cause thread suspension.
mirror::String* np_name = np_method->GetNameAsString(self);
if (np_name == nullptr) {
@@ -1115,50 +1127,73 @@
continue;
}
if ((modifiers & kSkipModifiers) == 0) {
- return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+ return mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
// Direct methods cannot be miranda methods, so this potential result must be synthetic.
result = &m;
}
}
return result != nullptr
- ? mirror::Method::CreateFromArtMethod<kTransactionActive>(self, result)
+ ? mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
: nullptr;
}
template
-mirror::Method* Class::GetDeclaredMethodInternal<false>(Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<true>(Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::String* name,
+ mirror::ObjectArray<mirror::Class>* args);
-template <bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
mirror::Constructor* Class::GetDeclaredConstructorInternal(
Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args) {
StackHandleScope<1> hs(self);
- const size_t pointer_size = kTransactionActive
- ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
- : sizeof(void*);
- ArtMethod* result = klass->GetDeclaredConstructor(self, hs.NewHandle(args), pointer_size);
+ ArtMethod* result = klass->GetDeclaredConstructor(self, hs.NewHandle(args), kPointerSize);
return result != nullptr
- ? mirror::Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
+ ? mirror::Constructor::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
: nullptr;
}
-template mirror::Constructor* Class::GetDeclaredConstructorInternal<false>(
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, false>(
Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args);
-template mirror::Constructor* Class::GetDeclaredConstructorInternal<true>(
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, true>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, false>(
+ Thread* self,
+ mirror::Class* klass,
+ mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, true>(
Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args);
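
The explicit instantiation lists above are the cost of making kPointerSize a template parameter: a caller that only learns the image pointer size at runtime must branch once and forward to the matching instantiation. A minimal, self-contained sketch of that dispatch pattern (PointerSize, EntrySize and EntrySizeFor here are illustrative stand-ins, not the actual ART declarations):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for art::PointerSize from base/enums.h.
    enum class PointerSize : std::size_t { k32 = 4, k64 = 8 };

    // Templated on the pointer size, like
    // GetDeclaredMethodInternal<kPointerSize, kTransactionActive>;
    // the body sees a compile-time constant.
    template <PointerSize kPointerSize, bool kTransactionActive>
    std::uint32_t EntrySize() {
      return static_cast<std::uint32_t>(kPointerSize) * (kTransactionActive ? 2u : 1u);
    }

    // Runtime-to-compile-time dispatch: branch once on the dynamic values,
    // then call the matching explicit instantiation. All four combinations
    // must exist somewhere, which is what the lists above provide.
    std::uint32_t EntrySizeFor(PointerSize ps, bool transaction_active) {
      if (ps == PointerSize::k64) {
        return transaction_active ? EntrySize<PointerSize::k64, true>()
                                  : EntrySize<PointerSize::k64, false>();
      }
      return transaction_active ? EntrySize<PointerSize::k32, true>()
                                : EntrySize<PointerSize::k32, false>();
    }

    int main() {
      std::printf("%u\n", EntrySizeFor(PointerSize::k64, /*transaction_active=*/false));
    }

Two pointer sizes times two transaction states is why four instantiations of GetDeclaredConstructorInternal now appear where two used to.
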
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 9be9f01..5c490de 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_H_
#define ART_RUNTIME_MIRROR_CLASS_H_
+#include "base/enums.h"
#include "base/iteration_range.h"
#include "dex_file.h"
#include "class_flags.h"
@@ -471,7 +472,7 @@
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) {
- return 1U << GetComponentSizeShift();
+ return 1U << GetComponentSizeShift<kReadBarrierOption>();
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -556,17 +557,17 @@
uint32_t num_32bit_static_fields,
uint32_t num_64bit_static_fields,
uint32_t num_ref_static_fields,
- size_t pointer_size);
+ PointerSize pointer_size);
// The size of java.lang.Class.class.
- static uint32_t ClassClassSize(size_t pointer_size) {
+ static uint32_t ClassClassSize(PointerSize pointer_size) {
// The number of vtable entries in java.lang.Class.
uint32_t vtable_entries = Object::kVTableLength + 72;
return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
}
// The size of a java.lang.Class representing a primitive such as int.class.
- static uint32_t PrimitiveClassSize(size_t pointer_size) {
+ static uint32_t PrimitiveClassSize(PointerSize pointer_size) {
return ComputeClassSize(false, 0, 0, 0, 0, 0, 0, pointer_size);
}
@@ -703,7 +704,7 @@
// Also updates the dex_cache_strings_ variable from new_dex_cache.
void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
@@ -713,7 +714,7 @@
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
@@ -727,65 +728,66 @@
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Use only when we are allocating or populating the method arrays.
- ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size)
+ ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of static, private, and constructor methods.
ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- template <bool kTransactionActive = false>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Method* GetDeclaredMethodInternal(Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args)
SHARED_REQUIRES(Locks::mutator_lock_);
- template <bool kTransactionActive = false>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* GetDeclaredConstructorInternal(Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods (sum of declared and copied methods).
@@ -800,10 +802,10 @@
ALWAYS_INLINE uint32_t NumMethods() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size)
+ ArtMethod* GetVirtualMethod(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size)
+ ArtMethod* GetVirtualMethodDuringLinking(size_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -822,9 +824,10 @@
return MemberOffset(sizeof(Class));
}
- static MemberOffset ImtPtrOffset(size_t pointer_size) {
+ static MemberOffset ImtPtrOffset(PointerSize pointer_size) {
return MemberOffset(
- RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t), pointer_size));
+ RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t),
+ static_cast<size_t>(pointer_size)));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -841,124 +844,126 @@
bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
- static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size);
+ static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size);
int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size)
+ ArtMethod* GetVTableEntry(uint32_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
- ImTable* GetImt(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ImTable* GetImt(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetImt(ImTable* imt, size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImt(ImTable* imt, PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size)
+ ArtMethod* GetEmbeddedVTableEntry(uint32_t i, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size)
+ void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size)
+ inline void SetEmbeddedVTableEntryUnchecked(uint32_t i,
+ ArtMethod* method,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void PopulateEmbeddedVTable(size_t pointer_size)
+ void PopulateEmbeddedVTable(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
- ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class' super class, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method from some implementor of this interface, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
- ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
- ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size)
+ ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, size_t pointer_size)
+ ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, size_t pointer_size)
+ ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* FindClassInitializer(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
bool HasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccHasDefaultMethod) != 0;
@@ -1040,11 +1045,11 @@
// Get the offset of the first reference static field. Other reference static fields follow.
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size)
+ MemberOffset GetFirstReferenceStaticFieldOffset(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Get the offset of the first reference static field. Other reference static fields follow.
- MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size)
+ MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Gets the static fields of the class.
@@ -1154,11 +1159,11 @@
// Visit native roots visits roots which are keyed off the native pointers such as ArtFields and
// ArtMethods.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
- void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
+ void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// When class is verified, set the kAccSkipAccessChecks flag on each method.
- void SetSkipAccessChecksFlagOnAllMethods(size_t pointer_size)
+ void SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Get the descriptor of the class. In a few cases a std::string is required, rather than
@@ -1193,7 +1198,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
- size_t pointer_size)
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// For proxy class only.
@@ -1217,7 +1222,7 @@
// May cause thread suspension due to EqualParameters.
ArtMethod* GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, size_t pointer_size)
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
static int32_t GetInnerClassFlags(Handle<Class> h_this, int32_t default_value)
@@ -1244,27 +1249,28 @@
return GetClassLoader() == nullptr;
}
- static size_t ImTableEntrySize(size_t pointer_size) {
- return pointer_size;
+ static size_t ImTableEntrySize(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size);
}
- static size_t VTableEntrySize(size_t pointer_size) {
- return pointer_size;
+ static size_t VTableEntrySize(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size);
}
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(size_t pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Fix up all of the native pointers in the class by running them through the visitor. Only sets
@@ -1274,7 +1280,7 @@
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void FixupNativePointers(mirror::Class* dest, size_t pointer_size, const Visitor& visitor)
+ void FixupNativePointers(mirror::Class* dest, PointerSize pointer_size, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
private:
@@ -1318,8 +1324,9 @@
bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
// Check that the pointer size matches the one in the class linker.
- ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
- static MemberOffset EmbeddedVTableOffset(size_t pointer_size);
+ ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
+
+ static MemberOffset EmbeddedVTableOffset(PointerSize pointer_size);
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
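
Most of the header churn above is the mechanical size_t to PointerSize substitution. The point of the enum class is that a pointer width no longer converts to or from an integer silently; the few places that genuinely need byte arithmetic, such as ImtPtrOffset and ImTableEntrySize, must now spell out static_cast<size_t>. A compilable sketch of that trade-off (RoundUp and the constants are illustrative stand-ins, not the real ART definitions):

    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : std::size_t { k32 = 4, k64 = 8 };

    // Stand-in for ART's RoundUp: rounds x up to a multiple of n, n a power of two.
    constexpr std::size_t RoundUp(std::size_t x, std::size_t n) {
      return (x + n - 1) & ~(n - 1);
    }

    // Mirrors Class::ImTableEntrySize: the enum's value is the byte width,
    // but extracting it now takes an explicit cast.
    constexpr std::size_t ImTableEntrySize(PointerSize pointer_size) {
      return static_cast<std::size_t>(pointer_size);
    }

    // Mirrors the reworked ImtPtrOffset: align the end of the embedded
    // vtable-length field to the pointer width.
    constexpr std::size_t ImtPtrOffset(std::size_t vtable_length_offset,
                                       PointerSize pointer_size) {
      return RoundUp(vtable_length_offset + sizeof(std::uint32_t),
                     static_cast<std::size_t>(pointer_size));
    }

    static_assert(ImtPtrOffset(10, PointerSize::k64) == 16, "aligned to 8 bytes");
    // std::size_t n = PointerSize::k32;  // would not compile: no implicit conversion
    int main() {}
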
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 0b3461f..84469ea 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -22,6 +22,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "mirror/class.h"
#include "runtime.h"
@@ -29,7 +30,7 @@
namespace art {
namespace mirror {
-inline uint32_t DexCache::ClassSize(size_t pointer_size) {
+inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 5;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
@@ -60,7 +61,7 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
-inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, size_t ptr_size) {
+inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
ArtField* field = GetElementPtrSize(GetResolvedFields(), field_idx, ptr_size);
@@ -70,13 +71,13 @@
return field;
}
-inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, size_t ptr_size) {
+inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
SetElementPtrSize(GetResolvedFields(), field_idx, field, ptr_size);
}
-inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, size_t ptr_size) {
+inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(method_idx, NumResolvedMethods()); // NOTE: Unchecked, i.e. not throwing AIOOB.
ArtMethod* method = GetElementPtrSize<ArtMethod*>(GetResolvedMethods(), method_idx, ptr_size);
@@ -88,19 +89,20 @@
return method;
}
-inline void DexCache::SetResolvedMethod(uint32_t method_idx, ArtMethod* method, size_t ptr_size) {
+inline void DexCache::SetResolvedMethod(uint32_t method_idx,
+ ArtMethod* method,
+ PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(method_idx, NumResolvedMethods()); // NOTE: Unchecked, i.e. not throwing AIOOB.
SetElementPtrSize(GetResolvedMethods(), method_idx, method, ptr_size);
}
template <typename PtrType>
-inline PtrType DexCache::GetElementPtrSize(PtrType* ptr_array, size_t idx, size_t ptr_size) {
- if (ptr_size == 8u) {
+inline PtrType DexCache::GetElementPtrSize(PtrType* ptr_array, size_t idx, PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
uint64_t element = reinterpret_cast<const uint64_t*>(ptr_array)[idx];
return reinterpret_cast<PtrType>(dchecked_integral_cast<uintptr_t>(element));
} else {
- DCHECK_EQ(ptr_size, 4u);
uint32_t element = reinterpret_cast<const uint32_t*>(ptr_array)[idx];
return reinterpret_cast<PtrType>(dchecked_integral_cast<uintptr_t>(element));
}
@@ -110,12 +112,11 @@
inline void DexCache::SetElementPtrSize(PtrType* ptr_array,
size_t idx,
PtrType ptr,
- size_t ptr_size) {
- if (ptr_size == 8u) {
+ PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
reinterpret_cast<uint64_t*>(ptr_array)[idx] =
dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
} else {
- DCHECK_EQ(ptr_size, 4u);
reinterpret_cast<uint32_t*>(ptr_array)[idx] =
dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
}
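
GetElementPtrSize and SetElementPtrSize above treat the resolved-method and resolved-field storage as raw arrays of 32- or 64-bit slots and move pointers through uintptr_t, which is what lets a 64-bit compiler (e.g. dex2oat cross-compiling a 32-bit image) manipulate target-width entries. A standalone sketch of the same packing, with dchecked_integral_cast replaced by plain asserts:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : std::size_t { k32 = 4, k64 = 8 };

    template <typename PtrType>
    PtrType GetElementPtrSize(const void* ptr_array, std::size_t idx, PointerSize ptr_size) {
      if (ptr_size == PointerSize::k64) {
        std::uint64_t element = static_cast<const std::uint64_t*>(ptr_array)[idx];
        // Stands in for dchecked_integral_cast: the value must round-trip.
        assert(static_cast<std::uintptr_t>(element) == element);
        return reinterpret_cast<PtrType>(static_cast<std::uintptr_t>(element));
      }
      std::uint32_t element = static_cast<const std::uint32_t*>(ptr_array)[idx];
      return reinterpret_cast<PtrType>(static_cast<std::uintptr_t>(element));
    }

    template <typename PtrType>
    void SetElementPtrSize(void* ptr_array, std::size_t idx, PtrType ptr, PointerSize ptr_size) {
      std::uintptr_t raw = reinterpret_cast<std::uintptr_t>(ptr);
      if (ptr_size == PointerSize::k64) {
        static_cast<std::uint64_t*>(ptr_array)[idx] = raw;
      } else {
        assert(raw == static_cast<std::uint32_t>(raw));  // must fit in 32 bits
        static_cast<std::uint32_t*>(ptr_array)[idx] = static_cast<std::uint32_t>(raw);
      }
    }

    int main() {
      int value = 42;
      std::uint64_t slots[4] = {};
      SetElementPtrSize(slots, 1, &value, PointerSize::k64);
      assert(GetElementPtrSize<int*>(slots, 1, PointerSize::k64) == &value);
    }

Note that the old DCHECK_EQ(ptr_size, 4u) in the else branch disappears: with a two-valued enum the check is vacuous.
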
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 692c6cb..57066d8 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -41,7 +41,7 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
- size_t pointer_size) {
+ PointerSize pointer_size) {
CHECK(dex_file != nullptr);
CHECK(location != nullptr);
CHECK_EQ(num_strings != 0u, strings != nullptr);
@@ -67,7 +67,7 @@
}
}
-void DexCache::Fixup(ArtMethod* trampoline, size_t pointer_size) {
+void DexCache::Fixup(ArtMethod* trampoline, PointerSize pointer_size) {
// Fixup the resolve methods array to contain trampoline for resolution.
CHECK(trampoline != nullptr);
CHECK(trampoline->IsRuntimeMethod());
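
DexCache::Fixup, whose signature changes above, points unresolved method slots at the resolution trampoline so that the first call through the cache triggers actual resolution. A sketch of that shape, not the verbatim body (the real code walks the pointer-size-packed array via the accessors shown earlier, and ArtMethodStub is a stand-in):

    #include <cstddef>

    struct ArtMethodStub {};  // illustrative stand-in for ArtMethod

    // Every still-null entry is pointed at the resolution trampoline, so
    // invoking it resolves the real method lazily.
    void FixupResolvedMethods(ArtMethodStub** methods, std::size_t count,
                              ArtMethodStub* trampoline) {
      for (std::size_t i = 0; i != count; ++i) {
        if (methods[i] == nullptr) {
          methods[i] = trampoline;
        }
      }
    }

    int main() {
      ArtMethodStub trampoline;
      ArtMethodStub* methods[3] = {nullptr, nullptr, nullptr};
      FixupResolvedMethods(methods, 3, &trampoline);
    }
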
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 7912510..d02a0d8 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -39,7 +39,7 @@
class MANAGED DexCache FINAL : public Object {
public:
// Size of java.lang.DexCache.class.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.DexCache not including referenced values.
static constexpr uint32_t InstanceSize() {
@@ -56,9 +56,9 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
- size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
- void Fixup(ArtMethod* trampoline, size_t pointer_size)
+ void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
@@ -119,18 +119,20 @@
void SetResolvedType(uint32_t type_idx, Class* resolved) SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size)
+ ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size)
+ ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
+ ArtMethod* resolved,
+ PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
- ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size)
+ ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
- ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size)
+ ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
SHARED_REQUIRES(Locks::mutator_lock_);
GcRoot<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -202,10 +204,10 @@
// so they need to be public.
template <typename PtrType>
- static PtrType GetElementPtrSize(PtrType* ptr_array, size_t idx, size_t ptr_size);
+ static PtrType GetElementPtrSize(PtrType* ptr_array, size_t idx, PointerSize ptr_size);
template <typename PtrType>
- static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, size_t ptr_size);
+ static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);
private:
// Visit instance fields of the dex cache as well as its associated arrays.
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 8a0daec..8b0f8ce 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -27,9 +27,8 @@
namespace mirror {
-template <bool kTransactionActive>
-inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field,
- bool force_resolve) {
+template <PointerSize kPointerSize, bool kTransactionActive>
+inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, bool force_resolve) {
StackHandleScope<2> hs(self);
// Try to resolve type before allocating since this is a thread suspension point.
Handle<mirror::Class> type = hs.NewHandle(field->GetType<true>());
@@ -54,10 +53,8 @@
self->AssertPendingOOMException();
return nullptr;
}
- const auto pointer_size = kTransactionActive ?
- Runtime::Current()->GetClassLinker()->GetImagePointerSize() : sizeof(void*);
auto dex_field_index = field->GetDexFieldIndex();
- auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, pointer_size);
+ auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, kPointerSize);
if (field->GetDeclaringClass()->IsProxyClass()) {
DCHECK(field->IsStatic());
DCHECK_LT(dex_field_index, 2U);
@@ -70,7 +67,7 @@
} else {
// We rely on the field being resolved so that we can get back to the ArtField
// (i.e. FromReflectedMethod).
- field->GetDexCache()->SetResolvedField(dex_field_index, field, pointer_size);
+ field->GetDexCache()->SetResolvedField(dex_field_index, field, kPointerSize);
}
}
ret->SetType<kTransactionActive>(type.Get());
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index ff6847c..65f6b16 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -68,7 +68,7 @@
}
}
mirror::DexCache* const dex_cache = declaring_class->GetDexCache();
- ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), sizeof(void*));
+ ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), kRuntimePointerSize);
CHECK(art_field != nullptr);
CHECK_EQ(declaring_class, art_field->GetDeclaringClass());
return art_field;
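
The sizeof(void*) to kRuntimePointerSize swaps above read naturally if kRuntimePointerSize is the enum value for the running process's own pointer width, as opposed to the image pointer size of a cross-compiled target, which can differ inside dex2oat. A definition along these lines (an assumption here, presumably living in base/enums.h) preserves the old behavior exactly:

    #include <cstddef>

    enum class PointerSize : std::size_t { k32 = 4, k64 = 8 };

    // Presumed shape of kRuntimePointerSize: the build's native pointer width.
    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8u ? PointerSize::k64 : PointerSize::k32;

    static_assert(static_cast<std::size_t>(kRuntimePointerSize) == sizeof(void*),
                  "matches the old sizeof(void*) call sites");
    int main() {}
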
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index edaddbd..93fd7f1 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_FIELD_H_
#include "accessible_object.h"
+#include "base/enums.h"
#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
@@ -92,7 +93,7 @@
// Slow, try to use only for PrettyField and such.
ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
- template <bool kTransactionActive = false>
+ template <PointerSize kPointerSize, bool kTransactionActive = false>
static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 9838b71..ef16719 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -51,18 +51,25 @@
array_class_ = GcRoot<Class>(nullptr);
}
-template <bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<AbstractMethod*>(ret)->CreateFromArtMethod<kTransactionActive>(method);
+ static_cast<AbstractMethod*>(ret)->
+ CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
return ret;
}
-template Method* Method::CreateFromArtMethod<false>(Thread* self, ArtMethod* method);
-template Method* Method::CreateFromArtMethod<true>(Thread* self, ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k32, false>(Thread* self,
+ ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k32, true>(Thread* self,
+ ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k64, false>(Thread* self,
+ ArtMethod* method);
+template Method* Method::CreateFromArtMethod<PointerSize::k64, true>(Thread* self,
+ ArtMethod* method);
void Method::VisitRoots(RootVisitor* visitor) {
static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
@@ -96,18 +103,25 @@
array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-template <bool kTransactionActive>
+template <PointerSize kPointerSize, bool kTransactionActive>
Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<AbstractMethod*>(ret)->CreateFromArtMethod<kTransactionActive>(method);
+ static_cast<AbstractMethod*>(ret)->
+ CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
return ret;
}
-template Constructor* Constructor::CreateFromArtMethod<false>(Thread* self, ArtMethod* method);
-template Constructor* Constructor::CreateFromArtMethod<true>(Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, false>(
+ Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, true>(
+ Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k64, false>(
+ Thread* self, ArtMethod* method);
+template Constructor* Constructor::CreateFromArtMethod<PointerSize::k64, true>(
+ Thread* self, ArtMethod* method);
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 0b56964..be51784 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -28,7 +28,7 @@
// C++ mirror of java.lang.reflect.Method.
class MANAGED Method : public AbstractMethod {
public:
- template <bool kTransactionActive = false>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -60,7 +60,7 @@
// C++ mirror of java.lang.reflect.Constructor.
class MANAGED Constructor: public AbstractMethod {
public:
- template <bool kTransactionActive = false>
+ template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 76a36ac..0495c95 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -40,7 +40,7 @@
namespace art {
namespace mirror {
-inline uint32_t Object::ClassSize(size_t pointer_size) {
+inline uint32_t Object::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
@@ -106,7 +106,11 @@
}
inline mirror::Object* Object::MonitorEnter(Thread* self) {
- return Monitor::MonitorEnter(self, this);
+ return Monitor::MonitorEnter(self, this, /*trylock*/false);
+}
+
+inline mirror::Object* Object::MonitorTryEnter(Thread* self) {
+ return Monitor::MonitorEnter(self, this, /*trylock*/true);
}
inline bool Object::MonitorExit(Thread* self) {
@@ -143,10 +147,20 @@
#endif
}
+inline uint32_t Object::GetMarkBit() {
+#ifdef USE_READ_BARRIER
+ return GetLockWord(false).MarkBitState();
+#else
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+#endif
+}
+
inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
#ifdef USE_BAKER_READ_BARRIER
DCHECK(kUseBakerReadBarrier);
DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
+ DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
LockWord lw = GetLockWord(false);
lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
SetLockWord(lw, false);
@@ -169,6 +183,8 @@
DCHECK(kUseBakerReadBarrier);
DCHECK_EQ(reinterpret_cast<uint64_t>(expected_rb_ptr) >> 32, 0U);
DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
+ DCHECK_NE(expected_rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
+ DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
LockWord expected_lw;
LockWord new_lw;
do {
@@ -212,6 +228,24 @@
#endif
}
+inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
+ LockWord expected_lw;
+ LockWord new_lw;
+ do {
+ LockWord lw = GetLockWord(false);
+ if (UNLIKELY(lw.MarkBitState() != expected_mark_bit)) {
+ // Lost the race.
+ return false;
+ }
+ expected_lw = lw;
+ new_lw = lw;
+ new_lw.SetMarkBitState(mark_bit);
+ // Since this is only set from the mutator, we can use the non-release CAS.
+ } while (!CasLockWordWeakRelaxed(expected_lw, new_lw));
+ return true;
+}
+
+
inline void Object::AssertReadBarrierPointer() const {
if (kUseBakerReadBarrier) {
Object* obj = const_cast<Object*>(this);
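
AtomicSetMarkBit above is the usual lose-the-race CAS loop: reload the lock word, bail out if another thread already changed the field of interest, otherwise retry the weak compare-and-swap until it lands. The same shape in isolation over a std::atomic word (the bit position is illustrative, not ART's actual LockWord layout):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    constexpr std::uint32_t kMarkBitShift = 30;  // illustrative, not ART's layout
    constexpr std::uint32_t kMarkBitMask = 1u << kMarkBitShift;

    // Returns false if the observed mark bit no longer matches
    // expected_mark_bit (another thread won the race); true once our value is
    // published. Relaxed ordering suffices when, as with the mutator-set mark
    // bit, no other data is ordered against this write.
    bool AtomicSetMarkBit(std::atomic<std::uint32_t>& lock_word,
                          std::uint32_t expected_mark_bit,
                          std::uint32_t mark_bit) {
      std::uint32_t lw = lock_word.load(std::memory_order_relaxed);
      while (true) {
        if (((lw >> kMarkBitShift) & 1u) != expected_mark_bit) {
          return false;  // Lost the race.
        }
        std::uint32_t new_lw = (lw & ~kMarkBitMask) | (mark_bit << kMarkBitShift);
        if (lock_word.compare_exchange_weak(lw, new_lw, std::memory_order_relaxed)) {
          return true;
        }
        // On failure compare_exchange_weak reloads lw; loop and re-check.
      }
    }

    int main() {
      std::atomic<std::uint32_t> word{0};
      assert(AtomicSetMarkBit(word, 0, 1));
      assert(!AtomicSetMarkBit(word, 0, 1));  // bit already set: race "lost"
    }
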
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 701c600..13c536e 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -163,8 +163,7 @@
case LockWord::kUnlocked: {
// Try to compare and swap in a new hash; if we succeed, we will return the hash on the next
// loop iteration.
- LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(),
- lw.ReadBarrierState());
+ LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
if (const_cast<Object*>(this)->CasLockWordWeakRelaxed(lw, hash_word)) {
return hash_word.GetHashCode();
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 0ee46c3..5b129bf 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_OBJECT_H_
#include "base/casts.h"
+#include "base/enums.h"
#include "globals.h"
#include "object_reference.h"
#include "offsets.h"
@@ -74,7 +75,7 @@
static constexpr size_t kVTableLength = 11;
// The size of the java.lang.Class representing a java.lang.Object.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.Object.
static constexpr uint32_t InstanceSize() {
@@ -92,6 +93,7 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ // TODO: Clean this up and change to return int32_t
Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
@@ -102,6 +104,12 @@
template<bool kCasRelease = false>
ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE uint32_t GetMarkBit() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
@@ -140,6 +148,11 @@
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
+ // Try to enter the monitor; returns non-null if we succeeded.
+ mirror::Object* MonitorTryEnter(Thread* self)
+ EXCLUSIVE_LOCK_FUNCTION()
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* MonitorEnter(Thread* self)
EXCLUSIVE_LOCK_FUNCTION()
REQUIRES(!Roles::uninterruptible_)
@@ -468,7 +481,7 @@
void SetFieldPtr(MemberOffset field_offset, T new_value)
SHARED_REQUIRES(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, new_value, sizeof(void*));
+ field_offset, new_value, kRuntimePointerSize);
}
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
@@ -480,11 +493,11 @@
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
- ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
- size_t pointer_size)
+ ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset,
+ T new_value,
+ PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we don't lose any non-zero bits.
SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
@@ -516,19 +529,19 @@
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, kRuntimePointerSize);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr64(MemberOffset field_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, 8u);
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset,
+ PointerSize::k64);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
- ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
+ ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
- if (pointer_size == 4) {
+ if (pointer_size == PointerSize::k32) {
return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
} else {
int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 4257396..a99d616 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -26,7 +26,7 @@
class MANAGED ObjectArray: public Array {
public:
// The size of Object[].class.
- static uint32_t ClassSize(size_t pointer_size) {
+ static uint32_t ClassSize(PointerSize pointer_size) {
return Array::ClassSize(pointer_size);
}
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index c1284a6..0034220 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -24,6 +24,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "asm_support.h"
+#include "base/enums.h"
#include "class-inl.h"
#include "class_linker.h"
#include "class_linker-inl.h"
@@ -78,9 +79,11 @@
EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_32,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(PointerSize::k32).
+ Int32Value());
EXPECT_EQ(ART_METHOD_QUICK_CODE_OFFSET_64,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(PointerSize::k64).
+ Int32Value());
}
TEST_F(ObjectTest, IsInSamePackage) {
@@ -306,7 +309,7 @@
// pretend we are trying to call 'new char[3]' from String.toCharArray
ScopedObjectAccess soa(Thread::Current());
Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
- ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", sizeof(void*));
+ ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V", kRuntimePointerSize);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId("[I");
ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
@@ -363,7 +366,7 @@
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
- ArtMethod* clinit = klass->FindClassInitializer(sizeof(void*));
+ ArtMethod* clinit = klass->FindClassInitializer(kRuntimePointerSize);
const DexFile::TypeId* klass_type_id = dex_file->FindTypeId("LStaticsFromCode;");
ASSERT_TRUE(klass_type_id != nullptr);
@@ -499,22 +502,22 @@
Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2);
ASSERT_TRUE(klass2 != nullptr);
- ArtMethod* m1_1 = klass1->GetVirtualMethod(0, sizeof(void*));
+ ArtMethod* m1_1 = klass1->GetVirtualMethod(0, kRuntimePointerSize);
EXPECT_STREQ(m1_1->GetName(), "m1");
- ArtMethod* m2_1 = klass1->GetVirtualMethod(1, sizeof(void*));
+ ArtMethod* m2_1 = klass1->GetVirtualMethod(1, kRuntimePointerSize);
EXPECT_STREQ(m2_1->GetName(), "m2");
- ArtMethod* m3_1 = klass1->GetVirtualMethod(2, sizeof(void*));
+ ArtMethod* m3_1 = klass1->GetVirtualMethod(2, kRuntimePointerSize);
EXPECT_STREQ(m3_1->GetName(), "m3");
- ArtMethod* m4_1 = klass1->GetVirtualMethod(3, sizeof(void*));
+ ArtMethod* m4_1 = klass1->GetVirtualMethod(3, kRuntimePointerSize);
EXPECT_STREQ(m4_1->GetName(), "m4");
- ArtMethod* m1_2 = klass2->GetVirtualMethod(0, sizeof(void*));
+ ArtMethod* m1_2 = klass2->GetVirtualMethod(0, kRuntimePointerSize);
EXPECT_STREQ(m1_2->GetName(), "m1");
- ArtMethod* m2_2 = klass2->GetVirtualMethod(1, sizeof(void*));
+ ArtMethod* m2_2 = klass2->GetVirtualMethod(1, kRuntimePointerSize);
EXPECT_STREQ(m2_2->GetName(), "m2");
- ArtMethod* m3_2 = klass2->GetVirtualMethod(2, sizeof(void*));
+ ArtMethod* m3_2 = klass2->GetVirtualMethod(2, kRuntimePointerSize);
EXPECT_STREQ(m3_2->GetName(), "m3");
- ArtMethod* m4_2 = klass2->GetVirtualMethod(3, sizeof(void*));
+ ArtMethod* m4_2 = klass2->GetVirtualMethod(3, kRuntimePointerSize);
EXPECT_STREQ(m4_2->GetName(), "m4");
}
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 12bfe38..039989b 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -22,7 +22,7 @@
namespace art {
namespace mirror {
-inline uint32_t Reference::ClassSize(size_t pointer_size) {
+inline uint32_t Reference::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 4;
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 3baa12e..38c6616 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
#define ART_RUNTIME_MIRROR_REFERENCE_H_
+#include "base/enums.h"
#include "class.h"
#include "gc_root.h"
#include "object.h"
@@ -43,7 +44,7 @@
class MANAGED Reference : public Object {
public:
// Size of java.lang.ref.Reference.class.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.ref.Reference.
static constexpr uint32_t InstanceSize() {
@@ -76,8 +77,9 @@
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
+ template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldObject<Reference>(PendingNextOffset());
+ return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
}
void SetPendingNext(Reference* pending_next)
@@ -102,7 +104,7 @@
// removed from the list after having determined the reference is not ready
// to be enqueued on a java ReferenceQueue.
bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetPendingNext() == nullptr;
+ return GetPendingNext<kWithoutReadBarrier>() == nullptr;
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
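
GetPendingNext gaining a defaulted ReadBarrierOption parameter is the standard policy-template idiom: external callers keep the safe default, while GC-internal code such as IsUnprocessed opts out explicitly, presumably because a bare null comparison never needs the to-space pointer. A toy rendering of the idiom (Node is a stand-in for Reference, and the barrier body is elided):

    enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };

    struct Node {
      Node* next = nullptr;

      // Defaulted policy parameter: ordinary callers get the barrier; callers
      // that can prove they do not need it opt out explicitly.
      template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
      Node* GetNext() {
        if (kReadBarrierOption == kWithReadBarrier) {
          // A concurrent-copying runtime would run its read-barrier path here.
        }
        return next;
      }

      // Mirrors Reference::IsUnprocessed: a pure null-check never dereferences
      // the loaded reference, so skipping the barrier is safe.
      bool IsUnprocessed() { return GetNext<kWithoutReadBarrier>() == nullptr; }
    };

    int main() {
      Node n;
      return n.IsUnprocessed() ? 0 : 1;
    }
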
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 96f2098..d3660e5 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -33,7 +33,7 @@
namespace art {
namespace mirror {
-inline uint32_t String::ClassSize(size_t pointer_size) {
+inline uint32_t String::ClassSize(PointerSize pointer_size) {
uint32_t vtable_entries = Object::kVTableLength + 57;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 2, pointer_size);
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index e2cfb8d..d492ba3 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -35,7 +35,7 @@
class MANAGED String FINAL : public Object {
public:
// Size of java.lang.String.class.
- static uint32_t ClassSize(size_t pointer_size);
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of java.lang.String not including its value array.
static constexpr uint32_t InstanceSize() {
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index f068b3e..0bccc8b 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -17,6 +17,7 @@
#include "throwable.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
@@ -106,7 +107,7 @@
if (depth == 0) {
result += "(Throwable with empty stack trace)";
} else {
- const size_t ptr_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ const PointerSize ptr_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (int32_t i = 0; i < depth; ++i) {
ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, ptr_size);
uintptr_t dex_pc = method_trace->GetElementPtrSize<uintptr_t>(i + depth, ptr_size);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 396c946..e863ea9 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -155,7 +155,7 @@
return false;
}
}
- LockWord fat(this, lw.ReadBarrierState());
+ LockWord fat(this, lw.GCState());
// Publish the updated lock word, which may race with other threads.
bool success = GetObject()->CasLockWordWeakSequentiallyConsistent(lw, fat);
// Lock profiling.
@@ -314,21 +314,34 @@
return oss.str();
}
+bool Monitor::TryLockLocked(Thread* self) {
+ if (owner_ == nullptr) { // Unowned.
+ owner_ = self;
+ CHECK_EQ(lock_count_, 0);
+ // When debugging, save the current monitor holder for future
+ // acquisition failures to use in sampled logging.
+ if (lock_profiling_threshold_ != 0) {
+ locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
+ }
+ } else if (owner_ == self) { // Recursive.
+ lock_count_++;
+ } else {
+ return false;
+ }
+ AtraceMonitorLock(self, GetObject(), false /* is_wait */);
+ return true;
+}
+
+bool Monitor::TryLock(Thread* self) {
+ MutexLock mu(self, monitor_lock_);
+ return TryLockLocked(self);
+}
+
void Monitor::Lock(Thread* self) {
MutexLock mu(self, monitor_lock_);
while (true) {
- if (owner_ == nullptr) { // Unowned.
- owner_ = self;
- CHECK_EQ(lock_count_, 0);
- // When debugging, save the current monitor holder for future
- // acquisition failures to use in sampled logging.
- if (lock_profiling_threshold_ != 0) {
- locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
- }
- break;
- } else if (owner_ == self) { // Recursive.
- lock_count_++;
- break;
+ if (TryLockLocked(self)) {
+ return;
}
// Contended.
const bool log_contention = (lock_profiling_threshold_ != 0);
@@ -430,8 +443,6 @@
monitor_lock_.Lock(self); // Reacquire locks in order.
--num_waiters_;
}
-
- AtraceMonitorLock(self, GetObject(), false /* is_wait */);
}
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
@@ -763,20 +774,21 @@
return false;
}
// Deflate to a thin lock.
- LockWord new_lw = LockWord::FromThinLockId(owner->GetThreadId(), monitor->lock_count_,
- lw.ReadBarrierState());
+ LockWord new_lw = LockWord::FromThinLockId(owner->GetThreadId(),
+ monitor->lock_count_,
+ lw.GCState());
// Assume no concurrent read barrier state changes as mutators are suspended.
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / "
<< monitor->lock_count_;
} else if (monitor->HasHashCode()) {
- LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.ReadBarrierState());
+ LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.GCState());
// Assume no concurrent read barrier state changes as mutators are suspended.
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode();
} else {
// No lock and no hash, just put an empty lock word inside the object.
- LockWord new_lw = LockWord::FromDefault(lw.ReadBarrierState());
+ LockWord new_lw = LockWord::FromDefault(lw.GCState());
// Assume no concurrent read barrier state changes as mutators are suspended.
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated" << obj << " to empty lock word";
@@ -852,7 +864,7 @@
return obj;
}
-mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
+mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool trylock) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
self->AssertThreadSuspensionIsAllowable();
@@ -865,7 +877,7 @@
LockWord lock_word = h_obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kUnlocked: {
- LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.ReadBarrierState()));
+ LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
// CasLockWord enforces more than the acquire ordering we need here.
@@ -879,8 +891,9 @@
// We own the lock, increase the recursion count.
uint32_t new_count = lock_word.ThinLockCount() + 1;
if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
- LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count,
- lock_word.ReadBarrierState()));
+ LockWord thin_locked(LockWord::FromThinLockId(thread_id,
+ new_count,
+ lock_word.GCState()));
if (!kUseReadBarrier) {
h_obj->SetLockWord(thin_locked, true);
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
@@ -898,6 +911,9 @@
InflateThinLocked(self, h_obj, lock_word, 0);
}
} else {
+ if (trylock) {
+ return nullptr;
+ }
// Contention.
contention_count++;
Runtime* runtime = Runtime::Current();
@@ -916,8 +932,12 @@
}
case LockWord::kFatLocked: {
Monitor* mon = lock_word.FatLockMonitor();
- mon->Lock(self);
- return h_obj.Get(); // Success!
+ if (trylock) {
+ return mon->TryLock(self) ? h_obj.Get() : nullptr;
+ } else {
+ mon->Lock(self);
+ return h_obj.Get(); // Success!
+ }
}
case LockWord::kHashCode:
// Inflate with the existing hashcode.
@@ -957,9 +977,9 @@
LockWord new_lw = LockWord::Default();
if (lock_word.ThinLockCount() != 0) {
uint32_t new_count = lock_word.ThinLockCount() - 1;
- new_lw = LockWord::FromThinLockId(thread_id, new_count, lock_word.ReadBarrierState());
+ new_lw = LockWord::FromThinLockId(thread_id, new_count, lock_word.GCState());
} else {
- new_lw = LockWord::FromDefault(lock_word.ReadBarrierState());
+ new_lw = LockWord::FromDefault(lock_word.GCState());
}
if (!kUseReadBarrier) {
DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
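
The monitor.cc changes do two intertwined things: Lock's acquisition attempt is factored into TryLockLocked so Lock and TryLock share one code path (with AtraceMonitorLock moving inside it so it fires exactly once per acquisition), and MonitorEnter grows a trylock flag that turns every would-block point into a nullptr return. A compressed, compilable sketch of those decision points over a toy lock word (real ART packs state, owner, count and GC bits into one word; this stores just an owner id):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Toy lock word: 0 means unlocked, anything else is the owner's thread id.
    struct ToyObject {
      std::atomic<std::uint32_t> lock_word{0};
    };

    // Mirrors the contract of Monitor::MonitorEnter(self, obj, trylock):
    // returns the object on success, and nullptr only when trylock is set and
    // the lock is contended.
    ToyObject* MonitorEnter(ToyObject* obj, std::uint32_t thread_id, bool trylock) {
      while (true) {
        std::uint32_t expected = 0;
        // Strong CAS: a spurious failure must not make a trylock report contention.
        if (obj->lock_word.compare_exchange_strong(expected, thread_id,
                                                   std::memory_order_acquire)) {
          return obj;      // Fast path: was unlocked, now ours.
        }
        if (expected == thread_id) {
          return obj;      // Recursive acquire (the lock count is elided here).
        }
        if (trylock) {
          return nullptr;  // Contended: a trylock never blocks or inflates.
        }
        // Blocking path: the real monitor inflates to a fat Monitor and parks
        // the thread; this sketch simply retries.
      }
    }

    int main() {
      ToyObject obj;
      assert(MonitorEnter(&obj, 1, /*trylock=*/true) == &obj);     // uncontended
      assert(MonitorEnter(&obj, 2, /*trylock=*/true) == nullptr);  // contended
    }
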
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 7b4b8f9..1d829e1 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -62,7 +62,7 @@
NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy.
// NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
- static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
+ static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj, bool trylock)
EXCLUSIVE_LOCK_FUNCTION(obj)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
@@ -193,6 +193,15 @@
!monitor_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Try to lock without blocking; returns true if we acquired the lock.
+ bool TryLock(Thread* self)
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // Variant for callers that already hold the monitor lock.
+ bool TryLockLocked(Thread* self)
+ REQUIRES(monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void Lock(Thread* self)
REQUIRES(!monitor_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 83e0c0d..48d256c 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -26,6 +26,7 @@
#include "handle_scope-inl.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h" // Strings are easiest to allocate
+#include "object_lock.h"
#include "scoped_thread_state_change.h"
#include "thread_pool.h"
@@ -374,4 +375,60 @@
"Monitor test thread pool 3");
}
+class TryLockTask : public Task {
+ public:
+ explicit TryLockTask(Handle<mirror::Object> obj) : obj_(obj) {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ // Lock is held by the other thread; the trylock should fail.
+ ObjectTryLock<mirror::Object> lock(self, obj_);
+ EXPECT_FALSE(lock.Acquired());
+ }
+
+ void Finalize() {
+ delete this;
+ }
+
+ private:
+ Handle<mirror::Object> obj_;
+};
+
+// Test trylock in deadlock scenarios.
+TEST_F(MonitorTest, TestTryLock) {
+ ScopedLogSeverity sls(LogSeverity::FATAL);
+
+ Thread* const self = Thread::Current();
+ ThreadPool thread_pool("the pool", 2);
+ ScopedObjectAccess soa(self);
+ StackHandleScope<3> hs(self);
+ Handle<mirror::Object> obj1(
+ hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
+ Handle<mirror::Object> obj2(
+ hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
+ {
+ ObjectLock<mirror::Object> lock1(self, obj1);
+ ObjectLock<mirror::Object> lock2(self, obj2);
+ {
+ ObjectTryLock<mirror::Object> trylock(self, obj1);
+ EXPECT_TRUE(trylock.Acquired());
+ }
+ // Test failure case.
+ thread_pool.AddTask(self, new TryLockTask(obj1));
+ thread_pool.StartWorkers(self);
+ ScopedThreadSuspension sts(self, kSuspended);
+ thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false);
+ }
+ // Test that the trylock actually locks the object.
+ {
+ ObjectTryLock<mirror::Object> trylock(self, obj1);
+ EXPECT_TRUE(trylock.Acquired());
+ obj1->Notify(self);
+ // Since we hold the lock there should be no monitor state exception.
+ self->AssertNoPendingException();
+ }
+ thread_pool.StopWorkers(self);
+}
+
+
} // namespace art
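
The test leans on ObjectTryLock, an RAII guard whose constructor attempts the acquisition and whose Acquired() reports the outcome. The same shape over std::mutex (a generic sketch; note ART monitors are recursive, so the in-test same-thread retry succeeds there, whereas std::mutex forces the contended case onto a second thread, just as TryLockTask does):

    #include <cassert>
    #include <mutex>
    #include <thread>

    // RAII try-lock guard in the style of ObjectTryLock: acquire in the
    // constructor, query with Acquired(), and release in the destructor only
    // if the acquisition succeeded.
    class TryLockGuard {
     public:
      explicit TryLockGuard(std::mutex& mu) : mu_(mu), acquired_(mu.try_lock()) {}
      ~TryLockGuard() { if (acquired_) mu_.unlock(); }
      bool Acquired() const { return acquired_; }
      TryLockGuard(const TryLockGuard&) = delete;
      TryLockGuard& operator=(const TryLockGuard&) = delete;

     private:
      std::mutex& mu_;
      const bool acquired_;
    };

    int main() {
      std::mutex mu;
      TryLockGuard outer(mu);
      assert(outer.Acquired());    // Uncontended: succeeds immediately.
      std::thread other([&mu] {
        TryLockGuard inner(mu);    // Held by the main thread: must fail.
        assert(!inner.Acquired());
      });
      other.join();
    }
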
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index d987f65..45e49e2 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -29,6 +29,7 @@
#include "art_method-inl.h"
#include "arch/instruction_set.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "debugger.h"
@@ -329,7 +330,7 @@
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
bool is_static)
SHARED_REQUIRES(Locks::mutator_lock_) {
- ArtField* field = dex_cache->GetResolvedField(field_idx, sizeof(void*));
+ ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
if (field != nullptr) {
return;
}
@@ -350,14 +351,14 @@
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved field " << PrettyField(field);
- dex_cache->SetResolvedField(field_idx, field, sizeof(void*));
+ dex_cache->SetResolvedField(field_idx, field, kRuntimePointerSize);
}
// Based on ClassLinker::ResolveMethod.
static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
InvokeType invoke_type)
SHARED_REQUIRES(Locks::mutator_lock_) {
- ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*));
+ ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (method != nullptr) {
return;
}
@@ -370,14 +371,14 @@
switch (invoke_type) {
case kDirect:
case kStatic:
- method = klass->FindDirectMethod(dex_cache.Get(), method_idx, sizeof(void*));
+ method = klass->FindDirectMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
break;
case kInterface:
- method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, sizeof(void*));
+ method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
break;
case kSuper:
case kVirtual:
- method = klass->FindVirtualMethod(dex_cache.Get(), method_idx, sizeof(void*));
+ method = klass->FindVirtualMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
@@ -387,7 +388,7 @@
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method);
- dex_cache->SetResolvedMethod(method_idx, method, sizeof(void*));
+ dex_cache->SetResolvedMethod(method_idx, method, kRuntimePointerSize);
}
struct DexCacheStats {
@@ -462,7 +463,7 @@
}
}
for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
- ArtMethod* method = dex_cache->GetResolvedMethod(j, sizeof(void*));
+ ArtMethod* method = dex_cache->GetResolvedMethod(j, kRuntimePointerSize);
if (method != nullptr) {
filled->num_methods++;
}
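
The sizeof(void*) → kRuntimePointerSize rewrites in this file (and in the files that follow) lean on the new base/enums.h header. Its exact contents are not shown in this patch; a plausible shape, for orientation:

    // Assumed shape of base/enums.h: a strongly typed pointer width.
    enum class PointerSize : size_t {
      k32 = 4,
      k64 = 8,
    };

    // The pointer size of the running runtime itself.
    static constexpr PointerSize kRuntimePointerSize =
        sizeof(void*) == 8U ? PointerSize::k64 : PointerSize::k32;

Because PointerSize no longer converts implicitly to size_t, call sites that mean the target's pointer width (e.g. when cross-compiling) can no longer silently pick up the host's.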
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 0624da3..6d5e7c7 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -19,6 +19,7 @@
#include <iostream>
#include "art_field-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -136,7 +137,9 @@
}
for (ArtField& field : ifields) {
if (!public_only || field.IsPublic()) {
- auto* reflect_field = mirror::Field::CreateFromArtField(self, &field, force_resolve);
+ auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
+ &field,
+ force_resolve);
if (reflect_field == nullptr) {
if (kIsDebugBuild) {
self->AssertPendingException();
@@ -149,7 +152,9 @@
}
for (ArtField& field : sfields) {
if (!public_only || field.IsPublic()) {
- auto* reflect_field = mirror::Field::CreateFromArtField(self, &field, force_resolve);
+ auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
+ &field,
+ force_resolve);
if (reflect_field == nullptr) {
if (kIsDebugBuild) {
self->AssertPendingException();
@@ -222,11 +227,11 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* art_field = FindFieldByName(self, name, c->GetIFieldsPtr());
if (art_field != nullptr) {
- return mirror::Field::CreateFromArtField(self, art_field, true);
+ return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
}
art_field = FindFieldByName(self, name, c->GetSFieldsPtr());
if (art_field != nullptr) {
- return mirror::Field::CreateFromArtField(self, art_field, true);
+ return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
}
return nullptr;
}
@@ -323,7 +328,10 @@
static jobject Class_getDeclaredConstructorInternal(
JNIEnv* env, jobject javaThis, jobjectArray args) {
ScopedFastNativeObjectAccess soa(env);
- mirror::Constructor* result = mirror::Class::GetDeclaredConstructorInternal(
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ mirror::Constructor* result = mirror::Class::GetDeclaredConstructorInternal<kRuntimePointerSize,
+ false>(
soa.Self(),
DecodeClass(soa, javaThis),
soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
@@ -343,7 +351,7 @@
Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t constructor_count = 0;
// Two pass approach for speed.
- for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
constructor_count += MethodMatchesConstructor(&m, publicOnly != JNI_FALSE) ? 1u : 0u;
}
auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
@@ -353,9 +361,12 @@
return nullptr;
}
constructor_count = 0;
- for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
+ for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
if (MethodMatchesConstructor(&m, publicOnly != JNI_FALSE)) {
- auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), &m);
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ auto* constructor = mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(
+ soa.Self(), &m);
if (UNLIKELY(constructor == nullptr)) {
soa.Self()->AssertPendingOOMException();
return nullptr;
@@ -369,7 +380,9 @@
static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
jobject name, jobjectArray args) {
ScopedFastNativeObjectAccess soa(env);
- mirror::Method* result = mirror::Class::GetDeclaredMethodInternal(
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ mirror::Method* result = mirror::Class::GetDeclaredMethodInternal<kRuntimePointerSize, false>(
soa.Self(),
DecodeClass(soa, javaThis),
soa.Decode<mirror::String*>(name),
@@ -383,7 +396,7 @@
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t num_methods = 0;
- for (auto& m : klass->GetDeclaredMethods(sizeof(void*))) {
+ for (auto& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
auto modifiers = m.GetAccessFlags();
// Add non-constructor declared methods.
if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
@@ -394,11 +407,14 @@
auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc(
soa.Self(), mirror::Method::ArrayClass(), num_methods));
num_methods = 0;
- for (auto& m : klass->GetDeclaredMethods(sizeof(void*))) {
+ for (auto& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
auto modifiers = m.GetAccessFlags();
if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
(modifiers & kAccConstructor) == 0) {
- auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), &m);
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ auto* method =
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), &m);
if (method == nullptr) {
soa.Self()->AssertPendingException();
return nullptr;
@@ -610,7 +626,7 @@
auto* constructor = klass->GetDeclaredConstructor(
soa.Self(),
ScopedNullHandle<mirror::ObjectArray<mirror::Class>>(),
- sizeof(void*));
+ kRuntimePointerSize);
if (UNLIKELY(constructor == nullptr)) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
"%s has no zero argument constructor",
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 54b8afd..dd46233 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -17,6 +17,7 @@
#include "java_lang_reflect_Constructor.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "jni_internal.h"
@@ -65,7 +66,7 @@
static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
- ->GetInterfaceMethodIfProxy(sizeof(void*));
+ ->GetInterfaceMethodIfProxy(kRuntimePointerSize);
mirror::ObjectArray<mirror::Class>* result_array =
method->GetDexFile()->GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 78999c2..c3f2a27 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -17,6 +17,7 @@
#include "java_lang_reflect_Method.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "jni_internal.h"
@@ -57,7 +58,7 @@
mirror::Class* klass = method->GetDeclaringClass();
int throws_index = -1;
size_t i = 0;
- for (const auto& m : klass->GetDeclaredVirtualMethods(sizeof(void*))) {
+ for (const auto& m : klass->GetDeclaredVirtualMethods(kRuntimePointerSize)) {
if (&m == method) {
throws_index = i;
break;
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 61a1085..155c008 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -21,6 +21,7 @@
#include "nativebridge/native_bridge.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "dex_file-inl.h"
@@ -45,7 +46,7 @@
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t native_method_count = 0;
- for (auto& m : c->GetMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(kRuntimePointerSize)) {
native_method_count += m.IsNative() ? 1u : 0u;
}
return native_method_count;
@@ -60,7 +61,7 @@
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t count = 0;
- for (auto& m : c->GetMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(kRuntimePointerSize)) {
if (m.IsNative()) {
if (count < method_count) {
methods[count].name = m.GetName();
diff --git a/runtime/oat.h b/runtime/oat.h
index 9b8f545..7c84fe9 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '8', '4', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '8', '6', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 61dc287..68610a7 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -34,6 +34,7 @@
#include "art_method-inl.h"
#include "base/bit_vector.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
@@ -260,7 +261,7 @@
return false;
}
- size_t pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
+ PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
uint8_t* dex_cache_arrays = bss_begin_;
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 8700a90..fd58907 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -16,18 +16,7 @@
#include "oat_file_assistant.h"
-#include <fcntl.h>
-#ifdef __linux__
-#include <sys/sendfile.h>
-#else
-#include <sys/socket.h>
-#endif
-#include <sys/types.h>
#include <sys/stat.h>
-#include <unistd.h>
-
-#include <set>
-
#include "base/logging.h"
#include "base/stringprintf.h"
#include "compiler_filter.h"
@@ -71,7 +60,7 @@
const char* oat_location,
const InstructionSet isa,
bool load_executable)
- : isa_(isa), load_executable_(load_executable) {
+ : isa_(isa), load_executable_(load_executable), odex_(this), oat_(this) {
CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
dex_location_.assign(dex_location);
@@ -81,15 +70,23 @@
load_executable_ = false;
}
+ // Get the odex filename.
std::string error_msg;
- if (!DexLocationToOdexFilename(dex_location_, isa_, &odex_file_name_, &error_msg)) {
+ std::string odex_file_name;
+ if (DexLocationToOdexFilename(dex_location_, isa_, &odex_file_name, &error_msg)) {
+ odex_.Reset(odex_file_name);
+ } else {
LOG(WARNING) << "Failed to determine odex file name: " << error_msg;
}
+ // Get the oat filename.
if (oat_location != nullptr) {
- oat_file_name_ = std::string(oat_location);
+ oat_.Reset(oat_location);
} else {
- if (!DexLocationToOatFilename(dex_location_, isa_, &oat_file_name_, &error_msg)) {
+ std::string oat_file_name;
+ if (DexLocationToOatFilename(dex_location_, isa_, &oat_file_name, &error_msg)) {
+ oat_.Reset(oat_file_name);
+ } else {
LOG(WARNING) << "Failed to determine oat file name for dex location "
<< dex_location_ << ": " << error_msg;
}
@@ -124,11 +121,12 @@
CHECK(error_msg != nullptr);
CHECK(!flock_.HasFile()) << "OatFileAssistant::Lock already acquired";
- if (OatFileName() == nullptr) {
+ const std::string* oat_file_name = oat_.Filename();
+ if (oat_file_name == nullptr) {
*error_msg = "Failed to determine lock file";
return false;
}
- std::string lock_file_name = *OatFileName() + ".flock";
+ std::string lock_file_name = *oat_file_name + ".flock";
if (!flock_.Init(lock_file_name.c_str(), error_msg)) {
unlink(lock_file_name.c_str());
@@ -137,64 +135,33 @@
return true;
}
-static bool GivenOatFileCompilerFilterIsOkay(const OatFile& oat_file,
- CompilerFilter::Filter target,
- bool profile_changed) {
- CompilerFilter::Filter current = oat_file.GetCompilerFilter();
-
- if (profile_changed && CompilerFilter::DependsOnProfile(current)) {
- VLOG(oat) << "Compiler filter not okay because Profile changed";
- return false;
- }
- return CompilerFilter::IsAsGoodAs(current, target);
-}
-
-bool OatFileAssistant::OatFileCompilerFilterIsOkay(CompilerFilter::Filter target,
- bool profile_changed) {
- const OatFile* oat_file = GetOatFile();
- if (oat_file != nullptr) {
- return GivenOatFileCompilerFilterIsOkay(*oat_file, target, profile_changed);
- }
- return false;
-}
-
-bool OatFileAssistant::OdexFileCompilerFilterIsOkay(CompilerFilter::Filter target,
- bool profile_changed) {
- const OatFile* odex_file = GetOdexFile();
- if (odex_file != nullptr) {
- return GivenOatFileCompilerFilterIsOkay(*odex_file, target, profile_changed);
- }
- return false;
-}
-
OatFileAssistant::DexOptNeeded
-OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target,
- bool profile_changed) {
+OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target, bool profile_changed) {
bool compilation_desired = CompilerFilter::IsBytecodeCompilationEnabled(target);
// See if the oat file is in good shape as is.
- bool oat_okay = OatFileCompilerFilterIsOkay(target, profile_changed);
+ bool oat_okay = oat_.CompilerFilterIsOkay(target, profile_changed);
if (oat_okay) {
if (compilation_desired) {
- if (OatFileIsUpToDate()) {
+ if (oat_.IsUpToDate()) {
return kNoDexOptNeeded;
}
} else {
- if (!OatFileIsOutOfDate()) {
+ if (!oat_.IsOutOfDate()) {
return kNoDexOptNeeded;
}
}
}
// See if the odex file is in good shape as is.
- bool odex_okay = OdexFileCompilerFilterIsOkay(target, profile_changed);
+ bool odex_okay = odex_.CompilerFilterIsOkay(target, profile_changed);
if (odex_okay) {
if (compilation_desired) {
- if (OdexFileIsUpToDate()) {
+ if (odex_.IsUpToDate()) {
return kNoDexOptNeeded;
}
} else {
- if (!OdexFileIsOutOfDate()) {
+ if (!odex_.IsOutOfDate()) {
return kNoDexOptNeeded;
}
}
@@ -202,11 +169,11 @@
// See if we can get an up-to-date file by running patchoat.
if (compilation_desired) {
- if (odex_okay && OdexFileNeedsRelocation() && OdexFileHasPatchInfo()) {
+ if (odex_okay && odex_.NeedsRelocation() && odex_.HasPatchInfo()) {
return kPatchOatNeeded;
}
- if (oat_okay && OatFileNeedsRelocation() && OatFileHasPatchInfo()) {
+ if (oat_okay && oat_.NeedsRelocation() && oat_.HasPatchInfo()) {
return kSelfPatchOatNeeded;
}
}
@@ -251,8 +218,8 @@
switch (GetDexOptNeeded(target, profile_changed)) {
case kNoDexOptNeeded: return kUpdateSucceeded;
case kDex2OatNeeded: return GenerateOatFile(error_msg);
- case kPatchOatNeeded: return RelocateOatFile(OdexFileName(), error_msg);
- case kSelfPatchOatNeeded: return RelocateOatFile(OatFileName(), error_msg);
+ case kPatchOatNeeded: return RelocateOatFile(odex_.Filename(), error_msg);
+ case kSelfPatchOatNeeded: return RelocateOatFile(oat_.Filename(), error_msg);
}
UNREACHABLE();
}
@@ -263,46 +230,40 @@
// 2. Not out-of-date files that are already opened non-executable.
// 3. Not out-of-date files that we must reopen non-executable.
- if (OatFileIsUpToDate()) {
- oat_file_released_ = true;
- return std::move(cached_oat_file_);
+ if (oat_.IsUpToDate()) {
+ return oat_.ReleaseFile();
}
- if (OdexFileIsUpToDate()) {
- oat_file_released_ = true;
- return std::move(cached_odex_file_);
+ if (odex_.IsUpToDate()) {
+ return odex_.ReleaseFile();
}
VLOG(oat) << "Oat File Assistant: No relocated oat file found,"
<< " attempting to fall back to interpreting oat file instead.";
- if (!OatFileIsOutOfDate() && !OatFileIsExecutable()) {
- oat_file_released_ = true;
- return std::move(cached_oat_file_);
+ if (!oat_.IsOutOfDate() && !oat_.IsExecutable()) {
+ return oat_.ReleaseFile();
}
- if (!OdexFileIsOutOfDate() && !OdexFileIsExecutable()) {
- oat_file_released_ = true;
- return std::move(cached_odex_file_);
+ if (!odex_.IsOutOfDate() && !odex_.IsExecutable()) {
+ return odex_.ReleaseFile();
}
- if (!OatFileIsOutOfDate()) {
+ if (!oat_.IsOutOfDate()) {
load_executable_ = false;
- ClearOatFileCache();
- if (!OatFileIsOutOfDate()) {
- CHECK(!OatFileIsExecutable());
- oat_file_released_ = true;
- return std::move(cached_oat_file_);
+ oat_.Reset();
+ if (!oat_.IsOutOfDate()) {
+ CHECK(!oat_.IsExecutable());
+ return oat_.ReleaseFile();
}
}
- if (!OdexFileIsOutOfDate()) {
+ if (!odex_.IsOutOfDate()) {
load_executable_ = false;
- ClearOdexFileCache();
- if (!OdexFileIsOutOfDate()) {
- CHECK(!OdexFileIsExecutable());
- oat_file_released_ = true;
- return std::move(cached_odex_file_);
+ odex_.Reset();
+ if (!odex_.IsOutOfDate()) {
+ CHECK(!odex_.IsExecutable());
+ return odex_.ReleaseFile();
}
}
@@ -358,43 +319,31 @@
}
const std::string* OatFileAssistant::OdexFileName() {
- return odex_file_name_.empty() ? nullptr : &odex_file_name_;
+ return odex_.Filename();
}
bool OatFileAssistant::OdexFileExists() {
- return GetOdexFile() != nullptr;
+ return odex_.Exists();
}
OatFileAssistant::OatStatus OatFileAssistant::OdexFileStatus() {
- if (!odex_file_status_attempted_) {
- odex_file_status_attempted_ = true;
- const OatFile* odex_file = GetOdexFile();
- if (odex_file == nullptr) {
- cached_odex_file_status_ = kOatOutOfDate;
- } else {
- cached_odex_file_status_ = GivenOatFileStatus(*odex_file);
- }
- }
- return cached_odex_file_status_;
+ return odex_.Status();
}
bool OatFileAssistant::OdexFileIsOutOfDate() {
- return OdexFileStatus() == kOatOutOfDate;
+ return odex_.IsOutOfDate();
}
bool OatFileAssistant::OdexFileNeedsRelocation() {
- return OdexFileStatus() == kOatNeedsRelocation;
+ return odex_.NeedsRelocation();
}
bool OatFileAssistant::OdexFileIsUpToDate() {
- return OdexFileStatus() == kOatUpToDate;
+ return odex_.IsUpToDate();
}
CompilerFilter::Filter OatFileAssistant::OdexFileCompilerFilter() {
- const OatFile* odex_file = GetOdexFile();
- CHECK(odex_file != nullptr);
-
- return odex_file->GetCompilerFilter();
+ return odex_.CompilerFilter();
}
static std::string ArtFileName(const OatFile* oat_file) {
@@ -409,43 +358,31 @@
}
const std::string* OatFileAssistant::OatFileName() {
- return oat_file_name_.empty() ? nullptr : &oat_file_name_;
+ return oat_.Filename();
}
bool OatFileAssistant::OatFileExists() {
- return GetOatFile() != nullptr;
+ return oat_.Exists();
}
OatFileAssistant::OatStatus OatFileAssistant::OatFileStatus() {
- if (!oat_file_status_attempted_) {
- oat_file_status_attempted_ = true;
- const OatFile* oat_file = GetOatFile();
- if (oat_file == nullptr) {
- cached_oat_file_status_ = kOatOutOfDate;
- } else {
- cached_oat_file_status_ = GivenOatFileStatus(*oat_file);
- }
- }
- return cached_oat_file_status_;
+ return oat_.Status();
}
bool OatFileAssistant::OatFileIsOutOfDate() {
- return OatFileStatus() == kOatOutOfDate;
+ return oat_.IsOutOfDate();
}
bool OatFileAssistant::OatFileNeedsRelocation() {
- return OatFileStatus() == kOatNeedsRelocation;
+ return oat_.NeedsRelocation();
}
bool OatFileAssistant::OatFileIsUpToDate() {
- return OatFileStatus() == kOatUpToDate;
+ return oat_.IsUpToDate();
}
CompilerFilter::Filter OatFileAssistant::OatFileCompilerFilter() {
- const OatFile* oat_file = GetOatFile();
- CHECK(oat_file != nullptr);
-
- return oat_file->GetCompilerFilter();
+ return oat_.CompilerFilter();
}
OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& file) {
@@ -572,12 +509,12 @@
}
const std::string& input_file_name = *input_file;
- if (OatFileName() == nullptr) {
+ if (oat_.Filename() == nullptr) {
*error_msg = "Patching of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
return kUpdateNotAttempted;
}
- const std::string& oat_file_name = *OatFileName();
+ const std::string& oat_file_name = *oat_.Filename();
const ImageInfo* image_info = GetImageInfo();
Runtime* runtime = Runtime::Current();
@@ -609,7 +546,7 @@
}
// Mark that the oat file has changed and we should try to reload.
- ClearOatFileCache();
+ oat_.Reset();
return kUpdateSucceeded;
}
@@ -624,12 +561,12 @@
return kUpdateNotAttempted;
}
- if (OatFileName() == nullptr) {
+ if (oat_.Filename() == nullptr) {
*error_msg = "Generation of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
return kUpdateNotAttempted;
}
- const std::string& oat_file_name = *OatFileName();
+ const std::string& oat_file_name = *oat_.Filename();
// dex2oat ignores missing dex files and doesn't report an error.
// Check explicitly here so we can detect the error properly.
@@ -674,7 +611,7 @@
}
// Mark that the oat file has changed and we should try to reload.
- ClearOatFileCache();
+ oat_.Reset();
return kUpdateSucceeded;
}
@@ -821,7 +758,7 @@
has_original_dex_files_ = false;
// Get the checksum from the odex if we can.
- const OatFile* odex_file = GetOdexFile();
+ const OatFile* odex_file = odex_.GetFile();
if (odex_file != nullptr) {
const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(
dex_location_.c_str(), nullptr, false);
@@ -835,86 +772,6 @@
return required_dex_checksum_found_ ? &cached_required_dex_checksum_ : nullptr;
}
-const OatFile* OatFileAssistant::GetOdexFile() {
- CHECK(!oat_file_released_) << "OdexFile called after oat file released.";
- if (!odex_file_load_attempted_) {
- odex_file_load_attempted_ = true;
- if (OdexFileName() != nullptr) {
- const std::string& odex_file_name = *OdexFileName();
- std::string error_msg;
- cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
- odex_file_name.c_str(),
- nullptr,
- nullptr,
- load_executable_,
- /*low_4gb*/false,
- dex_location_.c_str(),
- &error_msg));
- if (cached_odex_file_.get() == nullptr) {
- VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
- << odex_file_name << ": " << error_msg;
- }
- }
- }
- return cached_odex_file_.get();
-}
-
-bool OatFileAssistant::OdexFileIsExecutable() {
- const OatFile* odex_file = GetOdexFile();
- return (odex_file != nullptr && odex_file->IsExecutable());
-}
-
-bool OatFileAssistant::OdexFileHasPatchInfo() {
- const OatFile* odex_file = GetOdexFile();
- return (odex_file != nullptr && odex_file->HasPatchInfo());
-}
-
-void OatFileAssistant::ClearOdexFileCache() {
- odex_file_load_attempted_ = false;
- cached_odex_file_.reset();
- odex_file_status_attempted_ = false;
-}
-
-const OatFile* OatFileAssistant::GetOatFile() {
- CHECK(!oat_file_released_) << "OatFile called after oat file released.";
- if (!oat_file_load_attempted_) {
- oat_file_load_attempted_ = true;
- if (OatFileName() != nullptr) {
- const std::string& oat_file_name = *OatFileName();
- std::string error_msg;
- cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
- oat_file_name.c_str(),
- nullptr,
- nullptr,
- load_executable_,
- /*low_4gb*/false,
- dex_location_.c_str(),
- &error_msg));
- if (cached_oat_file_.get() == nullptr) {
- VLOG(oat) << "OatFileAssistant test for existing oat file "
- << oat_file_name << ": " << error_msg;
- }
- }
- }
- return cached_oat_file_.get();
-}
-
-bool OatFileAssistant::OatFileIsExecutable() {
- const OatFile* oat_file = GetOatFile();
- return (oat_file != nullptr && oat_file->IsExecutable());
-}
-
-bool OatFileAssistant::OatFileHasPatchInfo() {
- const OatFile* oat_file = GetOatFile();
- return (oat_file != nullptr && oat_file->HasPatchInfo());
-}
-
-void OatFileAssistant::ClearOatFileCache() {
- oat_file_load_attempted_ = false;
- cached_oat_file_.reset();
- oat_file_status_attempted_ = false;
-}
-
const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
if (!image_info_load_attempted_) {
image_info_load_attempted_ = true;
@@ -990,5 +847,113 @@
return ret;
}
+OatFileAssistant::OatFileInfo::OatFileInfo(OatFileAssistant* oat_file_assistant)
+ : oat_file_assistant_(oat_file_assistant)
+{}
+
+const std::string* OatFileAssistant::OatFileInfo::Filename() {
+ return filename_provided_ ? &filename_ : nullptr;
+}
+
+bool OatFileAssistant::OatFileInfo::Exists() {
+ return GetFile() != nullptr;
+}
+
+OatFileAssistant::OatStatus OatFileAssistant::OatFileInfo::Status() {
+ if (!status_attempted_) {
+ status_attempted_ = true;
+ const OatFile* file = GetFile();
+ if (file == nullptr) {
+ status_ = kOatOutOfDate;
+ } else {
+ status_ = oat_file_assistant_->GivenOatFileStatus(*file);
+ }
+ }
+ return status_;
+}
+
+bool OatFileAssistant::OatFileInfo::IsOutOfDate() {
+ return Status() == kOatOutOfDate;
+}
+
+bool OatFileAssistant::OatFileInfo::NeedsRelocation() {
+ return Status() == kOatNeedsRelocation;
+}
+
+bool OatFileAssistant::OatFileInfo::IsUpToDate() {
+ return Status() == kOatUpToDate;
+}
+
+CompilerFilter::Filter OatFileAssistant::OatFileInfo::CompilerFilter() {
+ const OatFile* file = GetFile();
+ CHECK(file != nullptr);
+ return file->GetCompilerFilter();
+}
+
+const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
+ CHECK(!file_released_) << "GetFile called after oat file released.";
+ if (!load_attempted_) {
+ load_attempted_ = true;
+ if (filename_provided_) {
+ std::string error_msg;
+ file_.reset(OatFile::Open(filename_.c_str(),
+ filename_.c_str(),
+ nullptr,
+ nullptr,
+ oat_file_assistant_->load_executable_,
+ /*low_4gb*/false,
+ oat_file_assistant_->dex_location_.c_str(),
+ &error_msg));
+ if (file_.get() == nullptr) {
+ VLOG(oat) << "OatFileAssistant test for existing oat file "
+ << filename_ << ": " << error_msg;
+ }
+ }
+ }
+ return file_.get();
+}
+
+bool OatFileAssistant::OatFileInfo::CompilerFilterIsOkay(
+ CompilerFilter::Filter target, bool profile_changed) {
+ const OatFile* file = GetFile();
+ if (file == nullptr) {
+ return false;
+ }
+
+ CompilerFilter::Filter current = file->GetCompilerFilter();
+ if (profile_changed && CompilerFilter::DependsOnProfile(current)) {
+ VLOG(oat) << "Compiler filter not okay because Profile changed";
+ return false;
+ }
+ return CompilerFilter::IsAsGoodAs(current, target);
+}
+
+bool OatFileAssistant::OatFileInfo::IsExecutable() {
+ const OatFile* file = GetFile();
+ return (file != nullptr && file->IsExecutable());
+}
+
+bool OatFileAssistant::OatFileInfo::HasPatchInfo() {
+ const OatFile* file = GetFile();
+ return (file != nullptr && file->HasPatchInfo());
+}
+
+void OatFileAssistant::OatFileInfo::Reset() {
+ load_attempted_ = false;
+ file_.reset();
+ status_attempted_ = false;
+}
+
+void OatFileAssistant::OatFileInfo::Reset(const std::string& filename) {
+ filename_provided_ = true;
+ filename_ = filename;
+ Reset();
+}
+
+std::unique_ptr<OatFile> OatFileAssistant::OatFileInfo::ReleaseFile() {
+ file_released_ = true;
+ return std::move(file_);
+}
+
} // namespace art
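
All the OatFileInfo accessors above funnel through one memoize-then-Reset idiom that previously existed twice, once for the oat file and once for the odex file. The idiom in isolation, with hypothetical names:

    // Hypothetical distillation of the caching pattern OatFileInfo centralizes.
    class LazyProbe {
     public:
      int Get() {
        if (!attempted_) {
          attempted_ = true;        // Remember the probe even if it fails,
          cached_ = ComputeSlow();  // so the expensive work runs at most once.
        }
        return cached_;
      }
      void Reset() { attempted_ = false; }  // Invalidate when the file changes on disk.

     private:
      int ComputeSlow();
      bool attempted_ = false;
      int cached_ = 0;
    };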
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 04bd20c..effeabb 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -306,6 +306,73 @@
std::string location;
};
+ class OatFileInfo {
+ public:
+ // Initially the info is for no file in particular. It treats the file
+ // as out of date until Reset is called with a real filename for it to
+ // track.
+ explicit OatFileInfo(OatFileAssistant* oat_file_assistant);
+
+ const std::string* Filename();
+ bool Exists();
+ OatStatus Status();
+ bool IsOutOfDate();
+ bool NeedsRelocation();
+ bool IsUpToDate();
+ // Must only be called if the associated file exists, i.e., if
+ // |Exists() == true|.
+ CompilerFilter::Filter CompilerFilter();
+
+ // Returns the loaded file.
+ // Loads the file if needed. Returns null if the file failed to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ const OatFile* GetFile();
+
+ // Returns true if the compiler filter used to generate the file is at
+ // least as good as the given target filter. profile_changed should be
+ // true to indicate the profile has recently changed for this dex
+ // location.
+ bool CompilerFilterIsOkay(CompilerFilter::Filter target, bool profile_changed);
+
+ // Returns true if the file is opened executable.
+ bool IsExecutable();
+
+ // Returns true if the file has patch info required to run patchoat.
+ bool HasPatchInfo();
+
+ // Clear any cached information about the file that depends on the
+ // contents of the file. This does not reset the provided filename.
+ void Reset();
+
+ // Clear any cached information and switch to getting info about the oat
+ // file with the given filename.
+ void Reset(const std::string& filename);
+
+ // Release the loaded oat file.
+ // Returns null if the oat file hasn't been loaded.
+ //
+ // After this call, no other methods of the OatFileInfo should be
+ // called, because access to the loaded oat file has been taken away from
+ // the OatFileInfo object.
+ std::unique_ptr<OatFile> ReleaseFile();
+
+ private:
+ OatFileAssistant* oat_file_assistant_;
+
+ bool filename_provided_ = false;
+ std::string filename_;
+
+ bool load_attempted_ = false;
+ std::unique_ptr<OatFile> file_;
+
+ bool status_attempted_ = false;
+ OatStatus status_;
+
+ // For debugging only.
+ // If this flag is set, the file has been released to the user and the
+ // OatFileInfo object is in a bad state and should no longer be used.
+ bool file_released_ = false;
+ };
// Returns the current image location.
// Returns an empty string if the image location could not be retrieved.
@@ -322,46 +389,6 @@
// found for the dex_location_ dex file.
const uint32_t* GetRequiredDexChecksum();
- // Returns the loaded odex file.
- // Loads the file if needed. Returns null if the file failed to load.
- // The caller shouldn't clean up or free the returned pointer.
- const OatFile* GetOdexFile();
-
- // Returns true if the compiler filter used to generate the odex file is at
- // least as good as the given target filter. profile_changed should be true
- // to indicate the profile has recently changed for this dex location.
- bool OdexFileCompilerFilterIsOkay(CompilerFilter::Filter target, bool profile_changed);
-
- // Returns true if the odex file is opened executable.
- bool OdexFileIsExecutable();
-
- // Returns true if the odex file has patch info required to run patchoat.
- bool OdexFileHasPatchInfo();
-
- // Clear any cached information about the odex file that depends on the
- // contents of the file.
- void ClearOdexFileCache();
-
- // Returns the loaded oat file.
- // Loads the file if needed. Returns null if the file failed to load.
- // The caller shouldn't clean up or free the returned pointer.
- const OatFile* GetOatFile();
-
- // Returns true if the compiler filter used to generate the oat file is at
- // least as good as the given target filter. profile_changed should be true
- // to indicate the profile has recently changed for this dex location.
- bool OatFileCompilerFilterIsOkay(CompilerFilter::Filter target, bool profile_changed);
-
- // Returns true if the oat file is opened executable.
- bool OatFileIsExecutable();
-
- // Returns true if the oat file has patch info required to run patchoat.
- bool OatFileHasPatchInfo();
-
- // Clear any cached information about the oat file that depends on the
- // contents of the file.
- void ClearOatFileCache();
-
// Returns the loaded image info.
// Loads the image info if needed. Returns null if the image info failed
// to load.
@@ -391,33 +418,8 @@
bool required_dex_checksum_found_;
bool has_original_dex_files_;
- // The sentinel value "" is used if the odex file name could not be
- // determined.
- std::string odex_file_name_;
-
- // Cached value of the loaded odex file.
- // Use the GetOdexFile method rather than accessing this directly, unless you
- // know the odex file isn't out of date.
- bool odex_file_load_attempted_ = false;
- std::unique_ptr<OatFile> cached_odex_file_;
-
- // Cached results for OdexFileStatus
- bool odex_file_status_attempted_ = false;
- OatStatus cached_odex_file_status_;
-
- // The sentinel value "" is used if the oat file name could not be
- // determined.
- std::string oat_file_name_;
-
- // Cached value of the loaded oat file.
- // Use the GetOatFile method rather than accessing this directly, unless you
- // know the oat file isn't out of date.
- bool oat_file_load_attempted_ = false;
- std::unique_ptr<OatFile> cached_oat_file_;
-
- // Cached results for OatFileStatus
- bool oat_file_status_attempted_ = false;
- OatStatus cached_oat_file_status_;
+ OatFileInfo odex_;
+ OatFileInfo oat_;
// Cached value of the image info.
// Use the GetImageInfo method rather than accessing these directly.
@@ -428,12 +430,6 @@
ImageInfo cached_image_info_;
uint32_t combined_image_checksum_ = 0;
- // For debugging only.
- // If this flag is set, the oat or odex file has been released to the user
- // of the OatFileAssistant object and the OatFileAssistant object is in a
- // bad state and should no longer be used.
- bool oat_file_released_ = false;
-
DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
};
diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc
index f7accc0..b8754a4 100644
--- a/runtime/object_lock.cc
+++ b/runtime/object_lock.cc
@@ -47,7 +47,22 @@
obj_->NotifyAll(self_);
}
+template <typename T>
+ObjectTryLock<T>::ObjectTryLock(Thread* self, Handle<T> object) : self_(self), obj_(object) {
+ CHECK(object.Get() != nullptr);
+ acquired_ = obj_->MonitorTryEnter(self_) != nullptr;
+}
+
+template <typename T>
+ObjectTryLock<T>::~ObjectTryLock() {
+ if (acquired_) {
+ obj_->MonitorExit(self_);
+ }
+}
+
template class ObjectLock<mirror::Class>;
template class ObjectLock<mirror::Object>;
+template class ObjectTryLock<mirror::Class>;
+template class ObjectTryLock<mirror::Object>;
} // namespace art
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index eb7cbd8..7f02b37 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -45,6 +45,27 @@
DISALLOW_COPY_AND_ASSIGN(ObjectLock);
};
+template <typename T>
+class ObjectTryLock {
+ public:
+ ObjectTryLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ~ObjectTryLock() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ bool Acquired() const {
+ return acquired_;
+ }
+
+ private:
+ Thread* const self_;
+ Handle<T> const obj_;
+ bool acquired_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectTryLock);
+};
+
} // namespace art
#endif // ART_RUNTIME_OBJECT_LOCK_H_
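
A minimal usage sketch of the new scoped trylock, mirroring the monitor_test.cc additions above (self and obj stand in for the current thread and a live handle):

    // The destructor calls MonitorExit only if the acquire succeeded, so the
    // guard is safe to construct on objects locked by other threads.
    ObjectTryLock<mirror::Object> try_lock(self, obj);
    if (try_lock.Acquired()) {
      obj->Notify(self);  // Monitor operations are legal while the lock is held.
    }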
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 9d5063f..aaf5c0c 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -19,6 +19,7 @@
#include <ostream>
+#include "base/enums.h"
#include "globals.h"
namespace art {
@@ -51,12 +52,15 @@
};
// Offsets relative to the current running thread.
-template<size_t pointer_size>
+template<PointerSize pointer_size>
class ThreadOffset : public Offset {
public:
explicit ThreadOffset(size_t val) : Offset(val) {}
};
+using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
+using ThreadOffset64 = ThreadOffset<PointerSize::k64>;
+
// Offsets relative to an object.
class MemberOffset : public Offset {
public:
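
Templating ThreadOffset on PointerSize makes 32- and 64-bit thread offsets distinct types rather than interchangeable integers. An illustrative consequence (not from the patch):

    ThreadOffset32 off32(16u);
    ThreadOffset64 off64(16u);
    // off32 = off64;  // Would no longer compile: the widths cannot be mixed up.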
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 4d9ca6d..82e57b4 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -18,6 +18,7 @@
#include <vector>
#include "art_field-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_compiler_test.h"
#include "mirror/field-inl.h"
@@ -60,29 +61,31 @@
jsize array_index = 0;
// Fill the method array
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod(
- "equals", "(Ljava/lang/Object;)Z", sizeof(void*));
+ "equals", "(Ljava/lang/Object;)Z", kRuntimePointerSize);
+ CHECK(method != nullptr);
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
+ method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I", kRuntimePointerSize);
CHECK(method != nullptr);
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod(soa.Self(), method)));
- method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I", sizeof(void*));
- CHECK(method != nullptr);
- soa.Env()->SetObjectArrayElement(
- proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod(soa.Self(), method)));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
method = javaLangObject->FindDeclaredVirtualMethod(
- "toString", "()Ljava/lang/String;", sizeof(void*));
+ "toString", "()Ljava/lang/String;", kRuntimePointerSize);
CHECK(method != nullptr);
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod(soa.Self(), method)));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
// Now add all the interfaces' virtual methods.
for (mirror::Class* interface : interfaces) {
- for (auto& m : interface->GetDeclaredVirtualMethods(sizeof(void*))) {
+ for (auto& m : interface->GetDeclaredVirtualMethods(kRuntimePointerSize)) {
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
- mirror::Method::CreateFromArtMethod(soa.Self(), &m)));
+ mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), &m)));
}
}
CHECK_EQ(array_index, methods_count);
@@ -226,14 +229,20 @@
EXPECT_EQ(static_fields1->At(0).GetDeclaringClass(), proxyClass1.Get());
EXPECT_EQ(static_fields1->At(1).GetDeclaringClass(), proxyClass1.Get());
+ ASSERT_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
+ ASSERT_FALSE(Runtime::Current()->IsActiveTransaction());
Handle<mirror::Field> field00 =
- hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(0), true));
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
+ soa.Self(), &static_fields0->At(0), true));
Handle<mirror::Field> field01 =
- hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(1), true));
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
+ soa.Self(), &static_fields0->At(1), true));
Handle<mirror::Field> field10 =
- hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(0), true));
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
+ soa.Self(), &static_fields1->At(0), true));
Handle<mirror::Field> field11 =
- hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(1), true));
+ hs.NewHandle(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>(
+ soa.Self(), &static_fields1->At(1), true));
EXPECT_EQ(field00->GetArtField(), &static_fields0->At(0));
EXPECT_EQ(field01->GetArtField(), &static_fields0->At(1));
EXPECT_EQ(field10->GetArtField(), &static_fields1->At(0));
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 1dea562..a6e3693 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
@@ -145,7 +146,7 @@
DCHECK_EQ(invoke_direct->VRegC_35c(),
method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_);
uint32_t method_index = invoke_direct->VRegB_35c();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ArtMethod* target_method =
method->GetDexCache()->GetResolvedMethod(method_index, pointer_size);
if (kIsDebugBuild && target_method != nullptr) {
@@ -214,7 +215,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::DexCache* dex_cache = method->GetDexCache();
ArtField* field = dex_cache->GetResolvedField(field_index, pointer_size);
if (UNLIKELY(field == nullptr)) {
@@ -732,7 +733,7 @@
return false;
}
mirror::DexCache* dex_cache = method->GetDexCache();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
ArtField* field = dex_cache->GetResolvedField(field_idx, pointer_size);
if (field == nullptr || field->IsStatic()) {
return false;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index e9dd7aa..46d9e7f 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -18,6 +18,7 @@
#include "arch/context.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -509,11 +510,7 @@
} else {
// PC needs to be of the quick-to-interpreter bridge.
int32_t offset;
- #ifdef __LP64__
- offset = GetThreadOffset<8>(kQuickQuickToInterpreterBridge).Int32Value();
- #else
- offset = GetThreadOffset<4>(kQuickQuickToInterpreterBridge).Int32Value();
- #endif
+ offset = GetThreadOffset<kRuntimePointerSize>(kQuickQuickToInterpreterBridge).Int32Value();
handler_quick_frame_pc_ = *reinterpret_cast<uintptr_t*>(
reinterpret_cast<uint8_t*>(self_) + offset);
}
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 42e959c..5d32c09 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -99,8 +99,9 @@
// Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
static constexpr uintptr_t white_ptr_ = 0x0; // Not marked.
static constexpr uintptr_t gray_ptr_ = 0x1; // Marked, but not marked through. On mark stack.
+ // TODO: black_ptr_ is unused, we should remove it.
static constexpr uintptr_t black_ptr_ = 0x2; // Marked through. Used for non-moving objects.
- static constexpr uintptr_t rb_ptr_mask_ = 0x3; // The low 2 bits for white|gray|black.
+ static constexpr uintptr_t rb_ptr_mask_ = 0x1; // The low bit for white|gray.
};
} // namespace art
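
With black_ptr_ retired from the encoding, only the low bit distinguishes white from gray, which is what the narrowed mask expresses. A hedged sketch of a state test under the new mask (extraction shown on a raw word purely for illustration; the real accessors live elsewhere):

    uintptr_t rb_bits = raw_word & 0x1;  // rb_ptr_mask_: one bit now.
    bool is_gray  = (rb_bits == 0x1);    // gray_ptr_: marked, still on the mark stack.
    bool is_white = (rb_bits == 0x0);    // white_ptr_: not marked.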
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 28c27cd..8a531d9 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -222,7 +223,7 @@
for (size_t i = 1, args_offset = 0; i < shorty_len_; ++i, ++args_offset) {
mirror::Object* arg = args->Get(args_offset);
if (((shorty_[i] == 'L') && (arg != nullptr)) || ((arg == nullptr && shorty_[i] != 'L'))) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* dst_class =
m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_,
true /* resolve */,
@@ -358,7 +359,7 @@
}
// TODO: If args contain object references, it may cause problems.
Thread* const self = Thread::Current();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (uint32_t i = 0; i < num_params; i++) {
uint16_t type_idx = params->GetTypeItem(i).type_idx_;
mirror::Class* param_type = m->GetClassFromTypeIndex(type_idx,
@@ -424,7 +425,7 @@
static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) {
- return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*));
+ return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, kRuntimePointerSize);
}
@@ -434,7 +435,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
- CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args);
+ CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(kRuntimePointerSize), args);
}
method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}
@@ -458,7 +459,8 @@
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
@@ -488,7 +490,8 @@
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
@@ -519,7 +522,8 @@
receiver = nullptr;
}
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
@@ -550,7 +554,8 @@
receiver = nullptr;
}
uint32_t shorty_len = 0;
- const char* shorty = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(&shorty_len);
+ const char* shorty =
+ method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
@@ -602,13 +607,13 @@
}
// Find the actual implementation of the virtual method.
- m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, sizeof(void*));
+ m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, kRuntimePointerSize);
}
}
// Get our arrays of arguments and their types, and check they're the same size.
auto* objects = soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
- auto* np_method = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ auto* np_method = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
const DexFile::TypeList* classes = np_method->GetParameterTypeList();
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
@@ -775,8 +780,9 @@
UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
} else {
- ThrowNullPointerException(StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
- PrettyDescriptor(dst_class).c_str()).c_str());
+ ThrowNullPointerException(
+ StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
+ PrettyDescriptor(dst_class).c_str()).c_str());
}
return false;
}
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index a098bf0..016f3c7 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -21,6 +21,7 @@
#include "ScopedLocalRef.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "common_compiler_test.h"
#include "scoped_thread_state_change.h"
@@ -107,8 +108,8 @@
class_loader);
CHECK(c != nullptr);
- *method = is_static ? c->FindDirectMethod(method_name, method_signature, sizeof(void*))
- : c->FindVirtualMethod(method_name, method_signature, sizeof(void*));
+ *method = is_static ? c->FindDirectMethod(method_name, method_signature, kRuntimePointerSize)
+ : c->FindVirtualMethod(method_name, method_signature, kRuntimePointerSize);
CHECK(method != nullptr);
if (is_static) {
@@ -517,7 +518,9 @@
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
ASSERT_TRUE(klass != nullptr);
- ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V", sizeof(void*));
+ ArtMethod* method = klass->FindDirectMethod("main",
+ "([Ljava/lang/String;)V",
+ kRuntimePointerSize);
ASSERT_TRUE(method != nullptr);
// Start runtime.
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index bfa8c54..265587d 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -45,9 +45,11 @@
return GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
} else if (method == GetCalleeSaveMethodUnchecked(Runtime::kSaveAll)) {
return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAll);
- } else {
- DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly));
+ } else if (method == GetCalleeSaveMethodUnchecked(Runtime::kRefsOnly)) {
return GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly);
+ } else {
+ DCHECK_EQ(method, GetCalleeSaveMethodUnchecked(Runtime::kSaveEverything));
+ return GetCalleeSaveMethodFrameInfo(Runtime::kSaveEverything);
}
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 079c079..9f0ef7c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -55,6 +55,7 @@
#include "atomic.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
@@ -1508,7 +1509,7 @@
// Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
// null.
BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
- const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
if (HasResolutionMethod()) {
resolution_method_->VisitRoots(buffered_visitor, pointer_size);
}
@@ -1592,7 +1593,7 @@
ClassLinker* const class_linker = GetClassLinker();
ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc);
// When compiling, the code pointer will get set later when the image is loaded.
- const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
+ const PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
if (IsAotCompiler()) {
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1614,7 +1615,7 @@
auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
// When compiling, the code pointer will get set later when the image is loaded.
if (IsAotCompiler()) {
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
@@ -1624,7 +1625,7 @@
ArtMethod* Runtime::CreateCalleeSaveMethod() {
auto* method = GetClassLinker()->CreateRuntimeMethod(GetLinearAlloc());
- size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
DCHECK_NE(instruction_set_, kNone);
DCHECK(method->IsRuntimeMethod());
@@ -1919,7 +1920,7 @@
void Runtime::FixupConflictTables() {
// We can only do this after the class linker is created.
- const size_t pointer_size = GetClassLinker()->GetImagePointerSize();
+ const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
imt_unimplemented_method_->SetImtConflictTable(
ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
diff --git a/runtime/runtime.h b/runtime/runtime.h
index afa8e48..7e269af 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -387,9 +387,10 @@
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
- kSaveAll,
+ kSaveAll, // All callee-save registers.
kRefsOnly,
kRefsAndArgs,
+ kSaveEverything, // Even caller-save registers.
kLastCalleeSaveType // Value used for iteration
};
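
Because kSaveEverything is added before kLastCalleeSaveType, code that iterates the enum picks the new type up automatically. Illustrative loop:

    // Illustrative: visit every callee-save type, including kSaveEverything.
    for (int i = 0; i < Runtime::kLastCalleeSaveType; ++i) {
      auto type = static_cast<Runtime::CalleeSaveType>(i);
      // ... e.g. create or inspect the callee-save method for |type| ...
    }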
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 1d913f2..dc5cada 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -18,6 +18,7 @@
#include "arch/context.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -167,7 +168,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* StackVisitor::GetThisObject() const {
- DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
@@ -748,7 +749,8 @@
// The only remaining case is if the method is native and uses the generic JNI stub.
DCHECK(method->IsNative());
ClassLinker* class_linker = runtime->GetClassLinker();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*));
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
+ kRuntimePointerSize);
DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method);
// Generic JNI frame.
uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
@@ -908,7 +910,7 @@
int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills,
size_t frame_size, int reg, InstructionSet isa) {
- size_t pointer_size = InstructionSetPointerSize(isa);
+ PointerSize pointer_size = InstructionSetPointerSize(isa);
if (kIsDebugBuild) {
auto* runtime = Runtime::Current();
if (runtime != nullptr) {
@@ -931,7 +933,8 @@
* Special temporaries may have custom locations and the logic above deals with that.
* However, non-special temporaries are placed relative to the outs.
*/
- int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
+ int temps_start = code_item->outs_size_ * sizeof(uint32_t)
+ + static_cast<size_t>(pointer_size) /* art method */;
int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
return temps_start + relative_offset;
} else if (reg < num_regs) {
@@ -939,7 +942,8 @@
return locals_start + (reg * sizeof(uint32_t));
} else {
// Handle ins.
- return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
+ return frame_size + ((reg - num_regs) * sizeof(uint32_t))
+ + static_cast<size_t>(pointer_size) /* art method */;
}
}
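The offsets above place non-special temps just past the outs plus the ArtMethod* slot, and ins past the whole frame plus the caller's ArtMethod* slot. A toy recomputation under invented sizes (the layout rules follow the code above; the concrete numbers are made up):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t pointer_size = 8;   // static_cast<size_t>(PointerSize::k64)
      const size_t outs_size = 3;      // out vregs, 4 bytes each
      const size_t frame_size = 96;
      const size_t num_regs = 10;
      const size_t reg = 12;           // an "in", i.e. reg >= num_regs

      size_t temps_start = outs_size * sizeof(uint32_t) + pointer_size;  // art method
      size_t in_offset = frame_size + (reg - num_regs) * sizeof(uint32_t)
                         + pointer_size;                                 // art method
      std::printf("temps_start=%zu in_offset=%zu\n", temps_start, in_offset);
      return 0;
    }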
diff --git a/runtime/stack.h b/runtime/stack.h
index c594ec6..cf33ae1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -734,7 +734,7 @@
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
// According to stack model, the first out is above the Method reference.
- return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t);
+ return static_cast<size_t>(InstructionSetPointerSize(isa)) + out_num * sizeof(uint32_t);
}
bool IsInInlinedFrame() const {
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 3fd66a7..3aa1fc2 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -93,6 +93,18 @@
return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}
+inline bool Thread::IsThreadSuspensionAllowable() const {
+ if (tls32_.no_thread_suspension != 0) {
+ return false;
+ }
+ for (int i = kLockLevelCount - 1; i >= 0; --i) {
+ if (i != kMutatorLock && GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
+ return false;
+ }
+ }
+ return true;
+}
+
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
if (kIsDebugBuild) {
if (gAborting == 0) {
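The new predicate mirrors the checks AssertThreadSuspensionIsAllowable performs in debug builds: suspension is allowable only when no ScopedAssertNoThreadSuspension-style region is active and no lock other than the mutator lock is held. A simplified, self-contained sketch (the lock-level count and mutator index are illustrative, not ART's real values):

    #include <array>

    constexpr int kLockLevelCount = 4;  // illustrative only
    constexpr int kMutatorLock = 3;     // illustrative only

    bool IsThreadSuspensionAllowable(
        int no_thread_suspension,
        const std::array<const void*, kLockLevelCount>& held_mutexes) {
      if (no_thread_suspension != 0) {
        return false;  // Inside a no-suspension region.
      }
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        if (i != kMutatorLock && held_mutexes[i] != nullptr) {
          return false;  // Holding a lock other than the mutator lock.
        }
      }
      return true;
    }

    int main() {
      std::array<const void*, kLockLevelCount> none{};
      return IsThreadSuspensionAllowable(0, none) ? 0 : 1;
    }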
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 50f76da..76f3161 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1383,7 +1383,7 @@
if (m->IsRuntimeMethod()) {
return true;
}
- m = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
const int kMaxRepetition = 3;
mirror::Class* c = m->GetDeclaringClass();
mirror::DexCache* dex_cache = c->GetDexCache();
@@ -2111,7 +2111,7 @@
// the i'th frame.
mirror::ObjectArray<mirror::Object>* trace_;
// For cross compilation.
- const size_t pointer_size_;
+ const PointerSize pointer_size_;
DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
};
@@ -2198,9 +2198,9 @@
mirror::PointerArray* const method_trace =
down_cast<mirror::PointerArray*>(decoded_traces->Get(0));
// Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
- ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
+ ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
- i + method_trace->GetLength() / 2, sizeof(void*));
+ i + method_trace->GetLength() / 2, kRuntimePointerSize);
int32_t line_number;
StackHandleScope<3> hs(soa.Self());
auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
@@ -2231,7 +2231,7 @@
}
}
}
- const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
+ const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
CHECK(method_name != nullptr);
Handle<mirror::String> method_name_object(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
@@ -2408,10 +2408,12 @@
}
// Explicitly instantiate 32 and 64bit thread offset dumping support.
-template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
-template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
+template
+void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset);
+template
+void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset);
-template<size_t ptr_size>
+template<PointerSize ptr_size>
void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
#define DO_THREAD_OFFSET(x, y) \
if (offset == (x).Uint32Value()) { \
@@ -2604,8 +2606,6 @@
QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27)
QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28)
QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29)
- QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg30)
- QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg31)
QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)
#undef QUICK_ENTRY_POINT_INFO
diff --git a/runtime/thread.h b/runtime/thread.h
index a3a4005..840b781 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -28,6 +28,7 @@
#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/jni/jni_entrypoints.h"
@@ -179,7 +180,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Translates 172 to pAllocArrayFromCode and so on.
- template<size_t size_of_pointers>
+ template<PointerSize size_of_pointers>
static void DumpThreadOffset(std::ostream& os, uint32_t offset);
// Dumps a one-line summary of thread state (used for operator<<).
@@ -287,6 +288,9 @@
void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
+ // Return true if thread suspension is allowable.
+ bool IsThreadSuspensionAllowable() const;
+
bool IsDaemon() const {
return tls32_.daemon;
}
@@ -529,21 +533,21 @@
// Offsets of various members of native Thread class, used by compiled code.
//
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThinLockIdOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadFlagsOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> IsGcMarkingOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
@@ -554,121 +558,125 @@
void DeoptimizeWithDeoptimizationException(JValue* result) SHARED_REQUIRES(Locks::mutator_lock_);
private:
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
size_t scale;
size_t shrink;
- if (pointer_size == sizeof(void*)) {
+ if (pointer_size == kRuntimePointerSize) {
scale = 1;
shrink = 1;
- } else if (pointer_size > sizeof(void*)) {
- scale = pointer_size / sizeof(void*);
+ } else if (pointer_size > kRuntimePointerSize) {
+ scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
shrink = 1;
} else {
- DCHECK_GT(sizeof(void*), pointer_size);
+ DCHECK_GT(kRuntimePointerSize, pointer_size);
scale = 1;
- shrink = sizeof(void*) / pointer_size;
+ shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
}
return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
}
public:
static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
- size_t pointer_size) {
- DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
- if (pointer_size == 4) {
- return QuickEntryPointOffset<4>(quick_entrypoint_offset).Uint32Value();
+ PointerSize pointer_size) {
+ if (pointer_size == PointerSize::k32) {
+ return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
+ Uint32Value();
} else {
- return QuickEntryPointOffset<8>(quick_entrypoint_offset).Uint32Value();
+ return QuickEntryPointOffset<PointerSize::k64>(quick_entrypoint_offset).
+ Uint32Value();
}
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> SelfOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ExceptionOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> PeerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> CardTableOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_pos));
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_pos));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_end));
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_end));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_objects));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> RosAllocRunsOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
rosalloc_runs));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_top));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_end));
@@ -710,19 +718,19 @@
return tlsPtr_.stack_end == tlsPtr_.stack_begin;
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> StackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> JniEnvOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
@@ -749,7 +757,7 @@
return tlsPtr_.managed_stack.PopShadowFrame();
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> TopShadowFrameOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
@@ -791,7 +799,7 @@
return handle_scope;
}
- template<size_t pointer_size>
+ template<PointerSize pointer_size>
static ThreadOffset<pointer_size> TopHandleScopeOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
top_handle_scope));
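The scale/shrink arithmetic in ThreadOffsetFromTlsPtr adjusts a TLS offset laid out with the runtime's own pointer width to the target's pointer width. A worked sketch, assuming a 64-bit runtime:

    #include <cassert>
    #include <cstddef>

    enum class PointerSize : size_t { k32 = 4, k64 = 8 };
    constexpr PointerSize kRuntimePointerSize = PointerSize::k64;  // assumed host

    size_t ScaledTlsOffset(PointerSize target, size_t tls_ptr_offset) {
      size_t t = static_cast<size_t>(target);
      size_t r = static_cast<size_t>(kRuntimePointerSize);
      size_t scale = (t > r) ? t / r : 1;   // target pointers wider than ours
      size_t shrink = (t < r) ? r / t : 1;  // target pointers narrower than ours
      return (tls_ptr_offset * scale) / shrink;
    }

    int main() {
      // A slot 16 bytes into the 64-bit TLS block lies 8 bytes in when
      // the same fields are laid out with 32-bit pointers.
      assert(ScaledTlsOffset(PointerSize::k32, 16) == 8);
      assert(ScaledTlsOffset(PointerSize::k64, 16) == 16);
      return 0;
    }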
diff --git a/runtime/trace.cc b/runtime/trace.cc
index e77a11e..56a26de 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -21,6 +21,7 @@
#include "art_method-inl.h"
#include "base/casts.h"
+#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
@@ -645,31 +646,11 @@
}
}
-static void GetVisitedMethodsFromBitSets(
- const std::map<const DexFile*, DexIndexBitSet*>& seen_methods,
- std::set<ArtMethod*>* visited_methods) SHARED_REQUIRES(Locks::mutator_lock_) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* const self = Thread::Current();
- for (auto& e : seen_methods) {
- DexIndexBitSet* bit_set = e.second;
- // TODO: Visit trace methods as roots.
- mirror::DexCache* dex_cache = class_linker->FindDexCache(self, *e.first, false);
- for (uint32_t i = 0; i < bit_set->size(); ++i) {
- if ((*bit_set)[i]) {
- visited_methods->insert(dex_cache->GetResolvedMethod(i, sizeof(void*)));
- }
- }
- }
-}
-
void Trace::FinishTracing() {
size_t final_offset = 0;
std::set<ArtMethod*> visited_methods;
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
- // Write the secondary file with all the method names.
- GetVisitedMethodsFromBitSets(seen_methods_, &visited_methods);
-
// Clean up.
STLDeleteValues(&seen_methods_);
} else {
@@ -850,11 +831,6 @@
bool Trace::RegisterMethod(ArtMethod* method) {
mirror::DexCache* dex_cache = method->GetDexCache();
const DexFile* dex_file = dex_cache->GetDexFile();
- auto* resolved_method = dex_cache->GetResolvedMethod(method->GetDexMethodIndex(), sizeof(void*));
- if (resolved_method != method) {
- DCHECK(resolved_method == nullptr);
- dex_cache->SetResolvedMethod(method->GetDexMethodIndex(), method, sizeof(void*));
- }
if (seen_methods_.find(dex_file) == seen_methods_.end()) {
seen_methods_.insert(std::make_pair(dex_file, new DexIndexBitSet()));
}
@@ -879,9 +855,8 @@
}
std::string Trace::GetMethodLine(ArtMethod* method) {
- method = method->GetInterfaceMethodIfProxy(sizeof(void*));
- return StringPrintf("%p\t%s\t%s\t%s\t%s\n",
- reinterpret_cast<void*>((EncodeTraceMethod(method) << TraceActionBits)),
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ return StringPrintf("%#x\t%s\t%s\t%s\t%s\n", (EncodeTraceMethod(method) << TraceActionBits),
PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
}
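GetMethodLine now prints a compact encoded method id (%#x) instead of a raw pointer, matching the trace format in which the low bits of each record word carry the trace action. A sketch of the framing, assuming a two-bit action field:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kTraceActionBits = 2;  // assumed width of the action field

    int main() {
      uint32_t method_id = 1u;
      // The id is shifted past the action bits; a reader masks them back out.
      uint32_t word = method_id << kTraceActionBits;
      std::printf("%#x\t%s\t%s\t%s\t%s\n", word,
                  "Ljava/lang/String;", "charAt", "(I)C", "String.java");
      return 0;
    }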
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 6922564..7733a51 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -27,7 +27,7 @@
namespace art {
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size,
+inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size,
const DexFile::Header& header)
: pointer_size_(pointer_size),
/* types_offset_ is always 0u, so it's constexpr */
@@ -39,10 +39,9 @@
RoundUp(strings_offset_ + StringsSize(header.string_ids_size_), FieldsAlignment())),
size_(
RoundUp(fields_offset_ + FieldsSize(header.field_ids_size_), Alignment())) {
- DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, const DexFile* dex_file)
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
}
@@ -50,19 +49,24 @@
// GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
static_assert(alignof(GcRoot<mirror::String>) == 4, "Expecting alignof(GcRoot<>) == 4");
- DCHECK(pointer_size_ == 4u || pointer_size_ == 8u);
// Pointer alignment is the same as pointer size.
- return pointer_size_;
+ return static_cast<size_t>(pointer_size_);
+}
+
+template <typename T>
+static constexpr PointerSize GcRootAsPointerSize() {
+ return ConvertToPointerSize(sizeof(GcRoot<T>));
}
inline size_t DexCacheArraysLayout::TypeOffset(uint32_t type_idx) const {
- return types_offset_ + ElementOffset(sizeof(GcRoot<mirror::Class>), type_idx);
+ return types_offset_ + ElementOffset(GcRootAsPointerSize<mirror::Class>(), type_idx);
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
// App image patching relies on having enough room for a forwarding pointer in the types array.
// See FixupArtMethodArrayVisitor and ClassLinker::AddImageSpace.
- return std::max(ArraySize(sizeof(GcRoot<mirror::Class>), num_elements), pointer_size_);
+ return std::max(ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements),
+ static_cast<size_t>(pointer_size_));
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
@@ -75,19 +79,19 @@
inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
// App image patching relies on having enough room for a forwarding pointer in the methods array.
- return std::max(ArraySize(pointer_size_, num_elements), pointer_size_);
+ return std::max(ArraySize(pointer_size_, num_elements), static_cast<size_t>(pointer_size_));
}
inline size_t DexCacheArraysLayout::MethodsAlignment() const {
- return pointer_size_;
+ return static_cast<size_t>(pointer_size_);
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
- return strings_offset_ + ElementOffset(sizeof(GcRoot<mirror::String>), string_idx);
+ return strings_offset_ + ElementOffset(GcRootAsPointerSize<mirror::String>(), string_idx);
}
inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const {
- return ArraySize(sizeof(GcRoot<mirror::String>), num_elements);
+ return ArraySize(GcRootAsPointerSize<mirror::String>(), num_elements);
}
inline size_t DexCacheArraysLayout::StringsAlignment() const {
@@ -103,15 +107,15 @@
}
inline size_t DexCacheArraysLayout::FieldsAlignment() const {
- return pointer_size_;
+ return static_cast<size_t>(pointer_size_);
}
-inline size_t DexCacheArraysLayout::ElementOffset(size_t element_size, uint32_t idx) {
- return element_size * idx;
+inline size_t DexCacheArraysLayout::ElementOffset(PointerSize element_size, uint32_t idx) {
+ return static_cast<size_t>(element_size) * idx;
}
-inline size_t DexCacheArraysLayout::ArraySize(size_t element_size, uint32_t num_elements) {
- return element_size * num_elements;
+inline size_t DexCacheArraysLayout::ArraySize(PointerSize element_size, uint32_t num_elements) {
+ return static_cast<size_t>(element_size) * num_elements;
}
} // namespace art
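The layout packs the types, methods, strings, and fields arrays back-to-back with RoundUp alignment between regions. A toy recomputation under invented element counts (the ordering and alignment rules follow the constructor above; the numbers are made up):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t RoundUp(size_t x, size_t align) {
      return (x + align - 1) & ~(align - 1);  // align must be a power of two
    }

    int main() {
      const size_t pointer_size = 8;  // methods/fields are pointer-sized
      const size_t gc_root_size = 4;  // types/strings are GcRoot<> (4 bytes)
      const size_t num_types = 10, num_methods = 20, num_strings = 30, num_fields = 5;

      size_t types_offset = 0;
      size_t methods_offset =
          RoundUp(types_offset + num_types * gc_root_size, pointer_size);
      size_t strings_offset =
          RoundUp(methods_offset + num_methods * pointer_size, gc_root_size);
      size_t fields_offset =
          RoundUp(strings_offset + num_strings * gc_root_size, pointer_size);
      size_t size = RoundUp(fields_offset + num_fields * pointer_size, pointer_size);
      std::printf("methods=%zu strings=%zu fields=%zu total=%zu\n",
                  methods_offset, strings_offset, fields_offset, size);
      return 0;
    }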
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index cd84460..f2437fa 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -31,7 +31,7 @@
// Construct an invalid layout.
DexCacheArraysLayout()
: /* types_offset_ is always 0u */
- pointer_size_(0u),
+ pointer_size_(kRuntimePointerSize),
methods_offset_(0u),
strings_offset_(0u),
fields_offset_(0u),
@@ -39,10 +39,10 @@
}
// Construct a layout for a particular dex file header.
- DexCacheArraysLayout(size_t pointer_size, const DexFile::Header& header);
+ DexCacheArraysLayout(PointerSize pointer_size, const DexFile::Header& header);
// Construct a layout for a particular dex file.
- DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file);
+ DexCacheArraysLayout(PointerSize pointer_size, const DexFile* dex_file);
bool Valid() const {
return Size() != 0u;
@@ -96,17 +96,17 @@
private:
static constexpr size_t types_offset_ = 0u;
- const size_t pointer_size_; // Must be first for construction initialization order.
+ const PointerSize pointer_size_; // Must be first for construction initialization order.
const size_t methods_offset_;
const size_t strings_offset_;
const size_t fields_offset_;
const size_t size_;
- static size_t Alignment(size_t pointer_size);
+ static size_t Alignment(PointerSize pointer_size);
- static size_t ElementOffset(size_t element_size, uint32_t idx);
+ static size_t ElementOffset(PointerSize element_size, uint32_t idx);
- static size_t ArraySize(size_t element_size, uint32_t num_elements);
+ static size_t ArraySize(PointerSize element_size, uint32_t num_elements);
};
} // namespace art
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index f00edff..55b6e01 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -16,6 +16,7 @@
#include "utils.h"
+#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "mirror/array.h"
@@ -187,17 +188,17 @@
ASSERT_TRUE(c != nullptr);
ArtMethod* m;
- m = c->FindVirtualMethod("charAt", "(I)C", sizeof(void*));
+ m = c->FindVirtualMethod("charAt", "(I)C", kRuntimePointerSize);
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m));
- m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I", sizeof(void*));
+ m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I", kRuntimePointerSize);
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m));
- m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;", sizeof(void*));
+ m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;", kRuntimePointerSize);
ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f2ae85a..03de399 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -20,6 +20,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
@@ -1257,6 +1258,10 @@
if (descriptor[0] != 'L') {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't call new-instance on type '" << descriptor << "'";
return false;
+ } else if (strcmp(descriptor, "Ljava/lang/Class;") == 0) {
+ // Creating a new instance of java.lang.Class is not allowed (an unlikely case); fall back to
+ // the interpreter so an exception is thrown when this executes (compiled code would not).
+ Fail(VERIFY_ERROR_INSTANTIATION);
}
return true;
}
@@ -2839,7 +2844,7 @@
ArtMethod* called_method = VerifyInvocationArgs(inst, type, is_range);
const RegType* return_type = nullptr;
if (called_method != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_,
pointer_size);
if (return_type_class != nullptr) {
@@ -2882,7 +2887,7 @@
} else {
is_constructor = called_method->IsConstructor();
return_type_descriptor = called_method->GetReturnTypeDescriptor();
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* return_type_class = called_method->GetReturnType(can_load_classes_,
pointer_size);
if (return_type_class != nullptr) {
@@ -4558,10 +4563,10 @@
<< "non-reference type " << obj_type;
return nullptr;
} else {
+ std::string temp;
mirror::Class* klass = field->GetDeclaringClass();
const RegType& field_klass =
- FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
- klass, klass->CannotBeAssignedFromOtherTypes());
+ FromClass(klass->GetDescriptor(&temp), klass, klass->CannotBeAssignedFromOtherTypes());
if (obj_type.IsUninitializedTypes()) {
// Field accesses through uninitialized references are only allowable for constructors where
// the field is declared in this class.
@@ -4969,7 +4974,7 @@
const RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
if (mirror_method_ != nullptr) {
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
mirror::Class* return_type_class = mirror_method_->GetReturnType(can_load_classes_,
pointer_size);
if (return_type_class != nullptr) {
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 308c2aa..85daba9 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -773,6 +773,8 @@
}
if (!klass_.IsNull()) {
CHECK(!descriptor_.empty()) << *this;
+ std::string temp;
+ CHECK_EQ(descriptor_.ToString(), klass_.Read()->GetDescriptor(&temp)) << *this;
}
}
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
index b9e37a1..e1120e4 100644
--- a/sigchainlib/Android.mk
+++ b/sigchainlib/Android.mk
@@ -24,7 +24,7 @@
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
LOCAL_SRC_FILES := sigchain_dummy.cc
-LOCAL_CLANG = $(ART_TARGET_CLANG)
+LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
LOCAL_SHARED_LIBRARIES := liblog
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
@@ -39,7 +39,7 @@
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
LOCAL_SRC_FILES := sigchain.cc
-LOCAL_CLANG = $(ART_TARGET_CLANG)
+LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
LOCAL_SHARED_LIBRARIES := liblog
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
@@ -54,11 +54,11 @@
LOCAL_IS_HOST_MODULE := true
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
-LOCAL_CLANG = $(ART_HOST_CLANG)
+LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_MODULE:= libsigchain
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
-LOCAL_LDLIBS = -ldl
+LOCAL_LDLIBS := -ldl
LOCAL_MULTILIB := both
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
include $(BUILD_HOST_SHARED_LIBRARY)
@@ -69,11 +69,11 @@
LOCAL_IS_HOST_MODULE := true
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
-LOCAL_CLANG = $(ART_HOST_CLANG)
+LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_SRC_FILES := sigchain.cc
LOCAL_MODULE:= libsigchain
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
-LOCAL_LDLIBS = -ldl
+LOCAL_LDLIBS := -ldl
LOCAL_MULTILIB := both
include $(BUILD_HOST_STATIC_LIBRARY)
@@ -85,10 +85,10 @@
LOCAL_MODULE_TAGS := optional
LOCAL_IS_HOST_MODULE := true
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
-LOCAL_CLANG = $(ART_HOST_CLANG)
+LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_MODULE:= libsigchain_dummy
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
-LOCAL_LDLIBS = -ldl
+LOCAL_LDLIBS := -ldl
LOCAL_MULTILIB := both
include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index f29301d..c1efecd 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -233,7 +233,7 @@
return linked_sigaction(signal, new_action, old_action);
}
-extern "C" sighandler_t signal(int signal, sighandler_t handler) {
+static sighandler_t signal_impl(int signal, sighandler_t handler) {
struct sigaction sa;
sigemptyset(&sa.sa_mask);
sa.sa_handler = handler;
@@ -272,6 +272,16 @@
return reinterpret_cast<sighandler_t>(sa.sa_handler);
}
+extern "C" sighandler_t signal(int signal, sighandler_t handler) {
+ return signal_impl(signal, handler);
+}
+
+#if !defined(__LP64__)
+extern "C" sighandler_t bsd_signal(int signal, sighandler_t handler) {
+ return signal_impl(signal, handler);
+}
+#endif
+
extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
const sigset_t* new_set_ptr = bionic_new_set;
sigset_t tmpset;
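Factoring the body into signal_impl lets the library export both signal and, on 32-bit targets, the legacy bsd_signal entry point (newly listed in the version-script32.txt added below) without duplicating the sigaction setup. A stand-alone sketch of the pattern, with hypothetical art_-prefixed names to avoid interposing the real libc symbols:

    #include <csignal>

    using handler_fn = void (*)(int);

    static handler_fn signal_impl(int signo, handler_fn handler) {
      struct sigaction sa = {};
      sigemptyset(&sa.sa_mask);
      sa.sa_handler = handler;
      sa.sa_flags = SA_RESTART;  // bsd_signal semantics: restartable syscalls
      struct sigaction old_sa = {};
      if (sigaction(signo, &sa, &old_sa) == -1) {
        return SIG_ERR;
      }
      return old_sa.sa_handler;
    }

    extern "C" handler_fn art_signal(int signo, handler_fn handler) {
      return signal_impl(signo, handler);
    }

    #if !defined(__LP64__)
    // bsd_signal only exists in 32-bit bionic; 64-bit does not need the alias.
    extern "C" handler_fn art_bsd_signal(int signo, handler_fn handler) {
      return signal_impl(signo, handler);
    }
    #endif

    int main() {
      art_signal(SIGINT, SIG_DFL);  // exercise the wrapper once
      return 0;
    }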
diff --git a/sigchainlib/version-script32.txt b/sigchainlib/version-script32.txt
new file mode 100644
index 0000000..eec9103
--- /dev/null
+++ b/sigchainlib/version-script32.txt
@@ -0,0 +1,15 @@
+{
+global:
+ ClaimSignalChain;
+ UnclaimSignalChain;
+ InvokeUserSignalHandler;
+ InitializeSignalChain;
+ EnsureFrontOfChain;
+ SetSpecialSignalHandlerFn;
+ bsd_signal;
+ sigaction;
+ signal;
+ sigprocmask;
+local:
+ *;
+};
diff --git a/sigchainlib/version-script.txt b/sigchainlib/version-script64.txt
similarity index 100%
rename from sigchainlib/version-script.txt
rename to sigchainlib/version-script64.txt
diff --git a/test/496-checker-inlining-and-class-loader/src/Main.java b/test/496-checker-inlining-and-class-loader/src/Main.java
index 8de6318..78e8a40 100644
--- a/test/496-checker-inlining-and-class-loader/src/Main.java
+++ b/test/496-checker-inlining-and-class-loader/src/Main.java
@@ -107,7 +107,8 @@
/* Load and initialize FirstSeenByMyClassLoader */
/// CHECK: LoadClass gen_clinit_check:true
/* Load and initialize System */
- /// CHECK-NEXT: LoadClass gen_clinit_check:true
+ // There may be MipsComputeBaseMethodAddress here.
+ /// CHECK: LoadClass gen_clinit_check:true
/// CHECK-NEXT: StaticFieldGet
// There may be HArmDexCacheArraysBase or HX86ComputeBaseMethodAddress here.
/// CHECK: LoadString
diff --git a/test/497-inlining-and-class-loader/clear_dex_cache.cc b/test/497-inlining-and-class-loader/clear_dex_cache.cc
index 50d1a63..1597c4a 100644
--- a/test/497-inlining-and-class-loader/clear_dex_cache.cc
+++ b/test/497-inlining-and-class-loader/clear_dex_cache.cc
@@ -15,6 +15,7 @@
*/
#include "art_method-inl.h"
+#include "base/enums.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
@@ -44,8 +45,8 @@
CHECK(array != nullptr);
mirror::PointerArray* pointer_array = soa.Decode<mirror::PointerArray*>(array);
for (size_t i = 0; i != num_methods; ++i) {
- ArtMethod* method = mirror::DexCache::GetElementPtrSize(methods, i, sizeof(void*));
- pointer_array->SetElementPtrSize(i, method, sizeof(void*));
+ ArtMethod* method = mirror::DexCache::GetElementPtrSize(methods, i, kRuntimePointerSize);
+ pointer_array->SetElementPtrSize(i, method, kRuntimePointerSize);
}
return array;
}
@@ -61,8 +62,8 @@
CHECK_EQ(methods != nullptr, old != nullptr);
CHECK_EQ(num_methods, static_cast<size_t>(old->GetLength()));
for (size_t i = 0; i != num_methods; ++i) {
- ArtMethod* method = old->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
- mirror::DexCache::SetElementPtrSize(methods, i, method, sizeof(void*));
+ ArtMethod* method = old->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
+ mirror::DexCache::SetElementPtrSize(methods, i, method, kRuntimePointerSize);
}
}
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 09a77ed..2232ff4 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -51,6 +51,10 @@
/// CHECK-START-ARM64: int Main.testSimple(int) sharpening (after)
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK-START-MIPS: int Main.testSimple(int) sharpening (after)
+ /// CHECK-NOT: MipsDexCacheArraysBase
+ /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+
/// CHECK-START-X86: int Main.testSimple(int) sharpening (after)
/// CHECK-NOT: X86ComputeBaseMethodAddress
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
@@ -62,6 +66,10 @@
/// CHECK: ArmDexCacheArraysBase
/// CHECK-NOT: ArmDexCacheArraysBase
+ /// CHECK-START-MIPS: int Main.testSimple(int) dex_cache_array_fixups_mips (after)
+ /// CHECK: MipsDexCacheArraysBase
+ /// CHECK-NOT: MipsDexCacheArraysBase
+
/// CHECK-START-X86: int Main.testSimple(int) pc_relative_fixups_x86 (after)
/// CHECK: X86ComputeBaseMethodAddress
/// CHECK-NOT: X86ComputeBaseMethodAddress
@@ -83,6 +91,11 @@
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) sharpening (after)
+ /// CHECK-NOT: MipsDexCacheArraysBase
+ /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+
/// CHECK-START-X86: int Main.testDiamond(boolean, int) sharpening (after)
/// CHECK-NOT: X86ComputeBaseMethodAddress
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
@@ -100,6 +113,14 @@
/// CHECK: ArmDexCacheArraysBase
/// CHECK-NEXT: If
+ /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) dex_cache_array_fixups_mips (after)
+ /// CHECK: MipsDexCacheArraysBase
+ /// CHECK-NOT: MipsDexCacheArraysBase
+
+ /// CHECK-START-MIPS: int Main.testDiamond(boolean, int) dex_cache_array_fixups_mips (after)
+ /// CHECK: MipsDexCacheArraysBase
+ /// CHECK-NEXT: If
+
/// CHECK-START-X86: int Main.testDiamond(boolean, int) pc_relative_fixups_x86 (after)
/// CHECK: X86ComputeBaseMethodAddress
/// CHECK-NOT: X86ComputeBaseMethodAddress
@@ -110,7 +131,7 @@
public static int testDiamond(boolean negate, int x) {
// These calls should use PC-relative dex cache array loads to retrieve the target method.
- // PC-relative bases used by X86 and ARM should be pulled before the If.
+ // PC-relative bases used by ARM, MIPS and X86 should be pulled before the If.
if (negate) {
return $noinline$foo(-x);
} else {
@@ -154,8 +175,26 @@
/// CHECK: begin_block
/// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (before)
+ /// CHECK-NOT: MipsDexCacheArraysBase
+
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
+ /// CHECK: MipsDexCacheArraysBase
+ /// CHECK-NOT: MipsDexCacheArraysBase
+
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ /// CHECK-START-MIPS: int Main.testLoop(int[], int) dex_cache_array_fixups_mips (after)
+ /// CHECK: ArrayLength
+ /// CHECK-NEXT: MipsDexCacheArraysBase
+ /// CHECK-NEXT: Goto
+ /// CHECK: begin_block
+ /// CHECK: InvokeStaticOrDirect method_load_kind:dex_cache_pc_relative
+
public static int testLoop(int[] array, int x) {
- // PC-relative bases used by X86 and ARM should be pulled before the loop.
+ // PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop.
for (int i : array) {
x += $noinline$foo(i);
}
@@ -182,8 +221,18 @@
/// CHECK-NEXT: ArmDexCacheArraysBase
/// CHECK-NEXT: Goto
+ /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_mips (before)
+ /// CHECK-NOT: MipsDexCacheArraysBase
+
+ /// CHECK-START-MIPS: int Main.testLoopWithDiamond(int[], boolean, int) dex_cache_array_fixups_mips (after)
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: ArrayLength
+ /// CHECK-NEXT: MipsDexCacheArraysBase
+ /// CHECK-NEXT: Goto
+
public static int testLoopWithDiamond(int[] array, boolean negate, int x) {
- // PC-relative bases used by X86 and ARM should be pulled before the loop
+ // PC-relative bases used by ARM, MIPS and X86 should be pulled before the loop
// but not outside the if.
if (array != null) {
for (int i : array) {
@@ -220,6 +269,11 @@
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
/// CHECK: LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}}
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getBootImageString() sharpening (after)
+ // Note: load kind depends on PIC/non-PIC
+ // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
+ /// CHECK: LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}}
+
public static String $noinline$getBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
if (doThrow) { throw new Error(); }
@@ -250,6 +304,13 @@
/// CHECK-START-ARM64: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
/// CHECK: LoadString load_kind:DexCachePcRelative
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() sharpening (after)
+ /// CHECK: LoadString load_kind:DexCachePcRelative
+
+ /// CHECK-START-MIPS: java.lang.String Main.$noinline$getNonBootImageString() dex_cache_array_fixups_mips (after)
+ /// CHECK-DAG: MipsDexCacheArraysBase
+ /// CHECK-DAG: LoadString load_kind:DexCachePcRelative
+
public static String $noinline$getNonBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
if (doThrow) { throw new Error(); }
@@ -280,6 +341,11 @@
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
/// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK-START-MIPS: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
+ // Note: load kind depends on PIC/non-PIC
+ // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+
public static Class<?> $noinline$getStringClass() {
// Prevent inlining to avoid the string comparison being optimized away.
if (doThrow) { throw new Error(); }
@@ -310,6 +376,13 @@
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
/// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
+ /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+
+ /// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_mips (after)
+ /// CHECK-DAG: MipsDexCacheArraysBase
+ /// CHECK-DAG: LoadClass load_kind:DexCachePcRelative class_name:Other
+
public static Class<?> $noinline$getOtherClass() {
// Prevent inlining to avoid the string comparison being optimized away.
if (doThrow) { throw new Error(); }
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 9f4c6c9..89293cc 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -15,6 +15,7 @@
*/
#include "art_method.h"
+#include "base/enums.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profiling_info.h"
@@ -29,7 +30,7 @@
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
jit::Jit* jit = Runtime::Current()->GetJit();
jit::JitCodeCache* code_cache = jit->GetCodeCache();
- ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, sizeof(void*));
+ ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, kRuntimePointerSize);
OatQuickMethodHeader* header = nullptr;
// Infinite loop... Test harness will have its own timeout.
@@ -53,7 +54,7 @@
static void allocate_profiling_info(jclass cls, const char* method_name) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
- ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, sizeof(void*));
+ ArtMethod* method = klass->FindDeclaredDirectMethodByName(method_name, kRuntimePointerSize);
ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
}
diff --git a/test/580-checker-round/src/Main.java b/test/580-checker-round/src/Main.java
index 9e248ef..83bc55c 100644
--- a/test/580-checker-round/src/Main.java
+++ b/test/580-checker-round/src/Main.java
@@ -36,7 +36,8 @@
expectEquals32(-2, round32(-1.51f));
expectEquals32(-1, round32(-1.2f));
expectEquals32(-1, round32(-1.0f));
- expectEquals32(-1, round32(-0.51f));
+ expectEquals32(-1, round32(-0.5000001f));
+ expectEquals32(0, round32(-0.5f));
expectEquals32(0, round32(-0.2f));
expectEquals32(0, round32(-0.0f));
expectEquals32(0, round32(+0.0f));
@@ -47,11 +48,23 @@
expectEquals32(2, round32(+1.5f));
expectEquals32(2147483647, round32(Float.POSITIVE_INFINITY));
+ // Near minint.
+ expectEquals32(-2147483648, round32(Math.nextAfter(-2147483648.0f, Float.NEGATIVE_INFINITY)));
+ expectEquals32(-2147483648, round32(-2147483648.0f));
+ expectEquals32(-2147483520, round32(Math.nextAfter(-2147483648.0f, Float.POSITIVE_INFINITY)));
+
+ // Near maxint.
+ expectEquals32(2147483520, round32(Math.nextAfter(2147483648.0f, Float.NEGATIVE_INFINITY)));
+ expectEquals32(2147483647, round32(2147483648.0f));
+ expectEquals32(2147483647, round32(Math.nextAfter(2147483648.0f, Float.POSITIVE_INFINITY)));
+
// Some others.
for (int i = -100; i <= 100; ++i) {
expectEquals32(i - 1, round32((float) i - 0.51f));
+ expectEquals32(i, round32((float) i - 0.5f));
expectEquals32(i, round32((float) i));
expectEquals32(i + 1, round32((float) i + 0.5f));
+ expectEquals32(i + 1, round32((float) i + 0.51f));
}
for (float f = -1.5f; f <= -1.499f; f = Math.nextAfter(f, Float.POSITIVE_INFINITY)) {
expectEquals32(-1, round32(f));
@@ -61,8 +74,10 @@
float[] fvals = {
-16777215.5f,
-16777215.0f,
- -0.4999f,
- 0.4999f,
+ -0.49999998f,
+ -0.4999999701976776123046875f,
+ 0.4999999701976776123046875f,
+ 0.49999998f,
16777215.0f,
16777215.5f
};
@@ -71,6 +86,8 @@
-16777215,
0,
0,
+ 0,
+ 0,
16777215,
16777216
};
@@ -98,7 +115,8 @@
expectEquals64(-2L, round64(-1.51d));
expectEquals64(-1L, round64(-1.2d));
expectEquals64(-1L, round64(-1.0d));
- expectEquals64(-1L, round64(-0.51d));
+ expectEquals64(-1L, round64(-0.5000001d));
+ expectEquals64(0L, round64(-0.5d));
expectEquals64(0L, round64(-0.2d));
expectEquals64(0L, round64(-0.0d));
expectEquals64(0L, round64(+0.0d));
@@ -109,11 +127,27 @@
expectEquals64(2L, round64(+1.5d));
expectEquals64(9223372036854775807L, round64(Double.POSITIVE_INFINITY));
+ // Near minlong.
+ expectEquals64(-9223372036854775808L,
+ round64(Math.nextAfter(-9223372036854775808.0, Double.NEGATIVE_INFINITY)));
+ expectEquals64(-9223372036854775808L, round64(-9223372036854775808.0));
+ expectEquals64(-9223372036854774784L,
+ round64(Math.nextAfter(-9223372036854775808.0, Double.POSITIVE_INFINITY)));
+
+ // Near maxlong.
+ expectEquals64(9223372036854774784L,
+ round64(Math.nextAfter(9223372036854775808.0, Double.NEGATIVE_INFINITY)));
+ expectEquals64(9223372036854775807L, round64(9223372036854775808.0));
+ expectEquals64(9223372036854775807L,
+ round64(Math.nextAfter(9223372036854775808.0, Double.POSITIVE_INFINITY)));
+
// Some others.
for (long l = -100; l <= 100; ++l) {
expectEquals64(l - 1, round64((double) l - 0.51d));
+ expectEquals64(l, round64((double) l - 0.5d));
+ expectEquals64(l, round64((double) l));
expectEquals64(l + 1, round64((double) l + 0.5d));
- expectEquals64(l + 1, round64((double) l + 0.5d));
+ expectEquals64(l + 1, round64((double) l + 0.51d));
}
for (double d = -1.5d; d <= -1.49999999999d; d = Math.nextAfter(d, Double.POSITIVE_INFINITY)) {
expectEquals64(-1L, round64(d));
@@ -123,8 +157,10 @@
double[] dvals = {
-9007199254740991.5d,
-9007199254740991.0d,
+ -0.49999999999999997d,
-0.49999999999999994d,
0.49999999999999994d,
+ 0.49999999999999997d,
9007199254740991.0d,
9007199254740991.5d
};
@@ -133,6 +169,8 @@
-9007199254740991L,
0L,
0L,
+ 0L,
+ 0L,
9007199254740991L,
9007199254740992L
};
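The new -0.5 and 0.49999999999999994 cases pin the exact rounding boundary: Math.round rounds half up (ties toward positive infinity), but the naive floor(x + 0.5) implementation gets the largest double below 0.5 wrong, because x + 0.5 rounds up to exactly 1.0 in double arithmetic. A small demonstration of the pitfall the intrinsic must avoid:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 0.49999999999999994;  // largest double < 0.5
      // The exact sum is not representable; it rounds up to exactly 1.0.
      std::printf("x + 0.5        = %.17g\n", x + 0.5);
      std::printf("floor(x + 0.5) = %.0f\n", std::floor(x + 0.5));  // 1, but
      // Math.round(0.49999999999999994) must be 0, per the expected values above.
      return 0;
    }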
diff --git a/test/600-verifier-fails/expected.txt b/test/600-verifier-fails/expected.txt
index eaa0c93..974b995 100644
--- a/test/600-verifier-fails/expected.txt
+++ b/test/600-verifier-fails/expected.txt
@@ -3,3 +3,4 @@
passed C
passed D
passed E
+passed F
diff --git a/test/600-verifier-fails/info.txt b/test/600-verifier-fails/info.txt
index df2396e..23f3ebc 100644
--- a/test/600-verifier-fails/info.txt
+++ b/test/600-verifier-fails/info.txt
@@ -17,4 +17,7 @@
later on
(E) b/29068831:
access validation on method should occur prior to null reference check
+(F) b/29758098:
+ new-instance of java.lang.Class should throw an IllegalAccessError to
+ avoid an interpreter crash on a zero-size object later
diff --git a/test/601-verifier-fails/smali/iget.smali b/test/600-verifier-fails/smali/class.smali
similarity index 88%
rename from test/601-verifier-fails/smali/iget.smali
rename to test/600-verifier-fails/smali/class.smali
index 5c045e6..b2eb254 100644
--- a/test/601-verifier-fails/smali/iget.smali
+++ b/test/600-verifier-fails/smali/class.smali
@@ -13,13 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-.class public LD;
+.class public LF;
.super Ljava/lang/Object;
.method public constructor <init>()V
- .registers 2
+.registers 2
invoke-direct {v1}, Ljava/lang/Object;-><init>()V
- const v0, 2
- iget v1, v0, LMain;->privateField:I
+ new-instance v0, Ljava/lang/Class;
return-void
.end method
diff --git a/test/600-verifier-fails/src/Main.java b/test/600-verifier-fails/src/Main.java
index fa25d58..1726bc4 100644
--- a/test/600-verifier-fails/src/Main.java
+++ b/test/600-verifier-fails/src/Main.java
@@ -39,5 +39,6 @@
test("C");
test("D");
test("E");
+ test("F");
}
}
diff --git a/test/601-verifier-fails/expected.txt b/test/601-verifier-fails/expected.txt
deleted file mode 100644
index 8399969..0000000
--- a/test/601-verifier-fails/expected.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-passed A
-passed B
-passed C
-passed D
diff --git a/test/601-verifier-fails/info.txt b/test/601-verifier-fails/info.txt
deleted file mode 100644
index f77de05..0000000
--- a/test/601-verifier-fails/info.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-The situations in these tests were discovered by running the mutating
-dexfuzz on the DEX files of fuzzingly random generated Java test.
-
-(A) b/28908555:
- soft verification failure (on the final field modification) should
- not hide the hard verification failure (on the type mismatch) to
- avoid compiler crash later on
-(B) b/29070461:
- hard verification failure (not calling super in constructor) should
- bail immediately and not allow soft verification failures to pile up
- behind it to avoid fatal message later on
-(C) b/29068831:
- access validation should occur prior to null reference check
-(D) b/29126870:
- soft verification failure (cannot access) should not hide the hard
- verification failure (non-reference type) to avoid a compiler crash
- later on
-
diff --git a/test/601-verifier-fails/smali/construct.smali b/test/601-verifier-fails/smali/construct.smali
deleted file mode 100644
index 417ced9..0000000
--- a/test/601-verifier-fails/smali/construct.smali
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-.class public LB;
-.super Ljava/lang/Object;
-
-.method public constructor <init>()V
- .registers 1
- if-eqz v0, :bail
- invoke-direct {v0}, LB;->append(Ljava/lang/String;)V
-:bail
- return-void
-.end method
diff --git a/test/601-verifier-fails/src/Main.java b/test/601-verifier-fails/src/Main.java
deleted file mode 100644
index a6a07fd..0000000
--- a/test/601-verifier-fails/src/Main.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
-
- public static final String staticFinalField = null;
-
- private static String staticPrivateField = null;
-
- private int privateField = 0;
-
- private static void test(String name) throws Exception {
- try {
- Class<?> a = Class.forName(name);
- a.newInstance();
- } catch (java.lang.LinkageError e) {
- System.out.println("passed " + name);
- }
- }
-
- public static void main(String[] args) throws Exception {
- test("A");
- test("B");
- test("C");
- test("D");
- }
-}
diff --git a/test/613-inlining-dex-cache/expected.txt b/test/613-inlining-dex-cache/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/613-inlining-dex-cache/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/613-inlining-dex-cache/info.txt b/test/613-inlining-dex-cache/info.txt
new file mode 100644
index 0000000..e80f642
--- /dev/null
+++ b/test/613-inlining-dex-cache/info.txt
@@ -0,0 +1,2 @@
+Regression test for the JIT compiler, which used to
+wrongly update the dex cache of a class loader.
diff --git a/test/601-verifier-fails/smali/sput.smali b/test/613-inlining-dex-cache/run
similarity index 75%
rename from test/601-verifier-fails/smali/sput.smali
rename to test/613-inlining-dex-cache/run
index e8e56ac..9c1e7aa 100644
--- a/test/601-verifier-fails/smali/sput.smali
+++ b/test/613-inlining-dex-cache/run
@@ -1,3 +1,4 @@
+#!/bin/bash
#
# Copyright (C) 2016 The Android Open Source Project
#
@@ -13,11 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-.class public LA;
-.super Ljava/lang/Object;
-
-.method public foo(I)V
-.registers 2
- sput v1, LMain;->staticFinalField:Ljava/lang/String;
- return-void
-.end method
+flags="$@"
+# We need the dex files pre-verified to avoid running the verifier
+# at runtime, which would update the dex cache.
+exec ${RUN} ${flags/verify-at-runtime/interpret-only}
diff --git a/test/613-inlining-dex-cache/src-ex/B.java b/test/613-inlining-dex-cache/src-ex/B.java
new file mode 100644
index 0000000..4da9a1d
--- /dev/null
+++ b/test/613-inlining-dex-cache/src-ex/B.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class B {
+}
diff --git a/test/613-inlining-dex-cache/src-ex/LoadedByAppClassLoader.java b/test/613-inlining-dex-cache/src-ex/LoadedByAppClassLoader.java
new file mode 100644
index 0000000..f4e0f10
--- /dev/null
+++ b/test/613-inlining-dex-cache/src-ex/LoadedByAppClassLoader.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class LoadedByAppClassLoader {
+ public static void letMeInlineYou() {
+ // We used to pass the wrong class loader when trying to inline 'Main.foo'.
+ Main.foo(null);
+ }
+}
diff --git a/test/613-inlining-dex-cache/src/B.java b/test/613-inlining-dex-cache/src/B.java
new file mode 100644
index 0000000..6e7e55d
--- /dev/null
+++ b/test/613-inlining-dex-cache/src/B.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class B {
+ public void foo() {
+ }
+}
diff --git a/test/613-inlining-dex-cache/src/Main.java b/test/613-inlining-dex-cache/src/Main.java
new file mode 100644
index 0000000..31ab1d2
--- /dev/null
+++ b/test/613-inlining-dex-cache/src/Main.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+
+import dalvik.system.PathClassLoader;
+
+// A ClassLoader that does not delegate to its parent for non-java. packages.
+class DelegateLastPathClassLoader extends PathClassLoader {
+
+ public DelegateLastPathClassLoader(String dexPath, ClassLoader parent) {
+ super(dexPath, parent);
+ }
+
+ @Override
+ protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
+ if (!name.startsWith("java.")) {
+ try {
+ return findClass(name);
+ } catch (ClassNotFoundException ignore) {
+ // Ignore and fall through to parent class loader.
+ }
+ }
+ return super.loadClass(name, resolve);
+ }
+}
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ final String DEX_FILE = System.getenv("DEX_LOCATION") + "/613-inlining-dex-cache-ex.jar";
+ ClassLoader loader = new DelegateLastPathClassLoader(DEX_FILE, Main.class.getClassLoader());
+ Class cls = loader.loadClass("LoadedByAppClassLoader");
+ Method m = cls.getDeclaredMethod("letMeInlineYou");
+ // Invoke the method enough times to get JITted.
+ for (int i = 0; i < 10000; ++i) {
+ m.invoke(null);
+ }
+ ensureJitCompiled(cls, "letMeInlineYou");
+ ClassLoader bLoader = areYouB();
+ if (bLoader != Main.class.getClassLoader()) {
+ throw new Error("Wrong class loader");
+ }
+ }
+
+ public static void foo(Main o) {
+ // LoadedByAppClassLoader.letMeInlineYou will try to inline this
+ // method but used to pass the wrong class loader. As a result,
+ // the lookup of B.foo was updating the dex cache with the other
+ // class loader's B class.
+ if (o != null) {
+ o.myField.foo();
+ }
+ }
+
+ public B myField;
+
+ public static ClassLoader areYouB() {
+ return OtherClass.getB().getClassLoader();
+ }
+
+ public static native void ensureJitCompiled(Class cls, String method_name);
+}
+
+class OtherClass {
+ public static Class getB() {
+ // This used to return the B class of another class loader.
+ return B.class;
+ }
+}
diff --git a/test/614-checker-dump-constant-location/expected.txt b/test/614-checker-dump-constant-location/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/614-checker-dump-constant-location/expected.txt
diff --git a/test/614-checker-dump-constant-location/info.txt b/test/614-checker-dump-constant-location/info.txt
new file mode 100644
index 0000000..4a94ffa
--- /dev/null
+++ b/test/614-checker-dump-constant-location/info.txt
@@ -0,0 +1,2 @@
+Test that the graph visualizer outputs useful information for constant
+locations in parallel moves.
diff --git a/test/614-checker-dump-constant-location/src/Main.java b/test/614-checker-dump-constant-location/src/Main.java
new file mode 100644
index 0000000..f6bc063
--- /dev/null
+++ b/test/614-checker-dump-constant-location/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static int array_int[] = { 0 };
+ public static long array_long[] = { 0 };
+ public static float array_float[] = { 0.0f };
+ public static double array_double[] = { 0.0 };
+
+ // The code that prints constant locations in parallel moves is architecture
+ // independent. We test only on ARM and ARM64 because checking is easy there:
+ // 'store' instructions take only registers as a source.
+
+ /// CHECK-START-ARM: void Main.store_to_arrays() register (after)
+ /// CHECK: ParallelMove {{.*#1->.*#2->.*#3\.3->.*#4\.4->.*}}
+
+ /// CHECK-START-ARM64: void Main.store_to_arrays() register (after)
+ /// CHECK: ParallelMove {{.*#1->.*#2->.*#3\.3->.*#4\.4->.*}}
+
+ public void store_to_arrays() {
+ array_int[0] = 1;
+ array_long[0] = 2;
+ array_float[0] = 3.3f;
+ array_double[0] = 4.4;
+ }
+
+ public static void main(String args[]) {}
+}
diff --git a/test/615-checker-arm64-zr-parallel-move/expected.txt b/test/615-checker-arm64-zr-parallel-move/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/615-checker-arm64-zr-parallel-move/expected.txt
diff --git a/test/615-checker-arm64-zr-parallel-move/info.txt b/test/615-checker-arm64-zr-parallel-move/info.txt
new file mode 100644
index 0000000..199755d
--- /dev/null
+++ b/test/615-checker-arm64-zr-parallel-move/info.txt
@@ -0,0 +1 @@
+Checker test to verify we correctly use wzr and xzr to synthesize zero constants.
diff --git a/test/615-checker-arm64-zr-parallel-move/src/Main.java b/test/615-checker-arm64-zr-parallel-move/src/Main.java
new file mode 100644
index 0000000..5024f28
--- /dev/null
+++ b/test/615-checker-arm64-zr-parallel-move/src/Main.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static boolean doThrow = false;
+
+ public void $noinline$foo(int in_w1,
+ int in_w2,
+ int in_w3,
+ int in_w4,
+ int in_w5,
+ int in_w6,
+ int in_w7,
+ int on_stack_int,
+ long on_stack_long,
+ float in_s0,
+ float in_s1,
+ float in_s2,
+ float in_s3,
+ float in_s4,
+ float in_s5,
+ float in_s6,
+ float in_s7,
+ float on_stack_float,
+ double on_stack_double) {
+ if (doThrow) throw new Error();
+ }
+
+ // We expect a parallel move that moves the zero constant to four stack locations.
+ /// CHECK-START-ARM64: void Main.bar() register (after)
+ /// CHECK: ParallelMove {{.*#0->[0-9x]+\(sp\).*#0->[0-9x]+\(sp\).*#0->[0-9x]+\(sp\).*#0->[0-9x]+\(sp\).*}}
+
+ // Those four moves should generate four 'store' instructions that use the zero register directly.
+ /// CHECK-START-ARM64: void Main.bar() disassembly (after)
+ /// CHECK-DAG: {{(str|stur)}} wzr, [sp, #{{[0-9]+}}]
+ /// CHECK-DAG: {{(str|stur)}} xzr, [sp, #{{[0-9]+}}]
+ /// CHECK-DAG: {{(str|stur)}} wzr, [sp, #{{[0-9]+}}]
+ /// CHECK-DAG: {{(str|stur)}} xzr, [sp, #{{[0-9]+}}]
+
+ public void bar() {
+ $noinline$foo(1, 2, 3, 4, 5, 6, 7, // Integral values in registers.
+ 0, 0L, // Integral values on the stack.
+ 1, 2, 3, 4, 5, 6, 7, 8, // Floating-point values in registers.
+ 0.0f, 0.0); // Floating-point values on the stack.
+ }
+
+ public static void main(String args[]) {}
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 8473e06..3bb3725 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -69,4 +69,5 @@
b/28187158
b/29778499 (1)
b/29778499 (2)
+b/30458218
Done!
diff --git a/test/601-verifier-fails/smali/iput.smali b/test/800-smali/smali/B30458218.smali
similarity index 61%
rename from test/601-verifier-fails/smali/iput.smali
rename to test/800-smali/smali/B30458218.smali
index bd8b928..67b882a 100644
--- a/test/601-verifier-fails/smali/iput.smali
+++ b/test/800-smali/smali/B30458218.smali
@@ -1,11 +1,10 @@
-#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,13 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-.class public LC;
-.super Ljava/lang/Object;
+.class public LB30458218;
+.super Ljava/io/InterruptedIOException;
-.method public constructor <init>()V
+.method public static run()V
.registers 2
- invoke-direct {v1}, Ljava/lang/Object;-><init>()V
- const v0, 0
- iput-object v0, v0, LMain;->staticPrivateField:Ljava/lang/String;
+ new-instance v0, LB30458218;
+ invoke-direct {v0}, LB30458218;-><init>()V
+
+ # IGET used to wrongly cache 'InterruptedIOException' class under the key 'LB30458218;'
+ iget v1, v0, LB30458218;->bytesTransferred:I
+
return-void
.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index bf50879..34f2580 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -180,6 +180,7 @@
new IncompatibleClassChangeError(), null));
testCases.add(new TestCase("b/29778499 (2)", "B29778499_2", "run", null,
new IncompatibleClassChangeError(), null));
+ testCases.add(new TestCase("b/30458218", "B30458218", "run", null, null, null));
}
public void runTests() {
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 7813d16..ec5b7d2 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -86,7 +86,7 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libarttest.mk
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
ifeq ($$(suffix),d)
$(call set-target-local-cflags-vars,debug)
else
@@ -109,7 +109,7 @@
LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
LOCAL_ASFLAGS += $(ART_HOST_NON_DEBUG_ASFLAGS)
endif
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
+ LOCAL_LDLIBS := -ldl -lpthread
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
diff --git a/test/Android.libnativebridgetest.mk b/test/Android.libnativebridgetest.mk
index 992332e..5c97e4d 100644
--- a/test/Android.libnativebridgetest.mk
+++ b/test/Android.libnativebridgetest.mk
@@ -48,7 +48,7 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libnativebridgetest.mk
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
+ LOCAL_CLANG := $(ART_TARGET_CLANG)
$(call set-target-local-cflags-vars,debug)
LOCAL_SHARED_LIBRARIES += libdl
LOCAL_STATIC_LIBRARIES := libgtest
@@ -62,7 +62,7 @@
LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) $(ART_HOST_DEBUG_ASFLAGS)
LOCAL_SHARED_LIBRARIES := libcutils
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
+ LOCAL_LDLIBS := -ldl -lpthread
ifeq ($(HOST_OS),linux)
LOCAL_LDLIBS += -lrt
endif
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 8f8b667..8d7d70d 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -26,7 +26,8 @@
# The path where build only targets will be output, e.g.
# out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA
-art_run_tests_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
+art_run_tests_build_dir := $(call intermediates-dir-for,JAVA_LIBRARIES,art-run-tests)/DATA
+art_run_tests_install_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
# A generated list of prerequisites that call 'run-test --build-only', the actual prerequisite is
# an empty file touched in the intermediate directory.
@@ -49,7 +50,8 @@
# Helper to create individual build targets for tests. Must be called with $(eval).
# $(1): the test number
define define-build-art-run-test
- dmart_target := $(art_run_tests_dir)/art-run-tests/$(1)/touch
+ dmart_target := $(art_run_tests_build_dir)/art-run-tests/$(1)/touch
+ dmart_install_target := $(art_run_tests_install_dir)/art-run-tests/$(1)/touch
run_test_options = --build-only
ifeq ($(ART_TEST_QUIET),true)
run_test_options += --quiet
@@ -67,8 +69,13 @@
$(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1)
$(hide) touch $$@
- TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_target)
+$$(dmart_install_target): $$(dmart_target)
+ $(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
+ $(hide) cp $$(dir $$<)/* $$(dir $$@)/
+
+ TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_install_target)
dmart_target :=
+ dmart_install_target :=
run_test_options :=
endef
$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-build-art-run-test,$(test))))
@@ -78,12 +85,13 @@
LOCAL_MODULE := art-run-tests
LOCAL_ADDITIONAL_DEPENDENCIES := $(TEST_ART_RUN_TEST_BUILD_RULES)
# The build system use this flag to pick up files generated by declare-make-art-run-test.
-LOCAL_PICKUP_FILES := $(art_run_tests_dir)
+LOCAL_PICKUP_FILES := $(art_run_tests_install_dir)
include $(BUILD_PHONY_PACKAGE)
# Clear temp vars.
-art_run_tests_dir :=
+art_run_tests_build_dir :=
+art_run_tests_install_dir :=
define-build-art-run-test :=
TEST_ART_RUN_TEST_BUILD_RULES :=
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index e70a95c..ee2ee1a 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -16,6 +16,7 @@
#include "jni.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "dex_file-inl.h"
#include "jit/jit.h"
@@ -129,18 +130,18 @@
return;
}
- ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* method = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
-
- mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
- ArtMethod* method = klass->FindDeclaredDirectMethodByName(chars.c_str(), sizeof(void*));
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ method = soa.Decode<mirror::Class*>(cls)->FindDeclaredDirectMethodByName(
+ chars.c_str(), kRuntimePointerSize);
+ }
jit::JitCodeCache* code_cache = jit->GetCodeCache();
OatQuickMethodHeader* header = nullptr;
- // Make sure there is a profiling info, required by the compiler.
- ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
while (true) {
header = OatQuickMethodHeader::FromEntryPoint(method->GetEntryPointFromQuickCompiledCode());
if (code_cache->ContainsPc(header->GetCode())) {
@@ -148,6 +149,9 @@
} else {
// Sleep to yield to the compiler thread.
usleep(1000);
+ ScopedObjectAccess soa(Thread::Current());
+ // Make sure there is a profiling info, required by the compiler.
+ ProfilingInfo::Create(soa.Self(), method, /* retry_allocation */ true);
// Will either ensure it's compiled or do the compilation itself.
jit->CompileMethod(method, soa.Self(), /* osr */ false);
}
diff --git a/test/etc/default-build b/test/etc/default-build
index 962ae38..37ce0f2 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -64,6 +64,9 @@
SKIP_DX_MERGER="false"
EXPERIMENTAL=""
+# The key for default arguments when no experimental features are enabled.
+DEFAULT_EXPERIMENT="no-experiment"
+
# Setup experimental flag mappings in a bash associative array.
declare -A JACK_EXPERIMENTAL_ARGS
JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
@@ -72,6 +75,11 @@
declare -A SMALI_EXPERIMENTAL_ARGS
SMALI_EXPERIMENTAL_ARGS["default-methods"]="--api-level 24"
+declare -A JAVAC_EXPERIMENTAL_ARGS
+JAVAC_EXPERIMENTAL_ARGS["default-methods"]="-source 1.8 -target 1.8"
+JAVAC_EXPERIMENTAL_ARGS["lambdas"]="-source 1.8 -target 1.8"
+JAVAC_EXPERIMENTAL_ARGS[${DEFAULT_EXPERIMENT}]="-source 1.7 -target 1.7"
+
while true; do
if [ "x$1" = "x--dx-option" ]; then
shift
@@ -100,6 +108,8 @@
shift
elif [ "x$1" = "x--experimental" ]; then
shift
+ # We have a specific experimental configuration so don't use the default.
+ DEFAULT_EXPERIMENT=""
EXPERIMENTAL="${EXPERIMENTAL} $1"
shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
@@ -110,10 +120,14 @@
fi
done
+# Be sure to get any default arguments if not doing any experiments.
+EXPERIMENTAL="${EXPERIMENTAL} ${DEFAULT_EXPERIMENT}"
+
# Add args from the experimental mappings.
for experiment in ${EXPERIMENTAL}; do
JACK_ARGS="${JACK_ARGS} ${JACK_EXPERIMENTAL_ARGS[${experiment}]}"
SMALI_ARGS="${SMALI_ARGS} ${SMALI_EXPERIMENTAL_ARGS[${experiment}]}"
+ JAVAC_ARGS="${JAVAC_ARGS} ${JAVAC_EXPERIMENTAL_ARGS[${experiment}]}"
done
if [ -e classes.dex ]; then
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 64bf4f3..c6c9380 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -553,12 +553,10 @@
if [ "$TIME_OUT" = "timeout" ]; then
# Add timeout command if time out is desired.
#
- # Note: We use nested timeouts. The inner timeout sends SIGRTMIN+2 (usually 36) to ART, which
- # will induce a full thread dump before abort. However, dumping threads might deadlock,
- # so the outer timeout sends the regular SIGTERM after an additional minute to ensure
- # termination (without dumping all threads).
- TIME_PLUS_ONE=$(($TIME_OUT_VALUE + 60))
- cmdline="timeout ${TIME_PLUS_ONE}s timeout -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
+ # Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
+ # before abort. However, dumping threads might deadlock, so we also use the "-k"
+ # option to definitely kill the child.
+ cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
fi
if [ "$DEV_MODE" = "y" ]; then
diff --git a/test/run-test b/test/run-test
index 1ef5428..edee4ae 100755
--- a/test/run-test
+++ b/test/run-test
@@ -41,7 +41,7 @@
fi
checker="${progdir}/../tools/checker/checker.py"
export JAVA="java"
-export JAVAC="javac -g -source 1.7 -target 1.7 -Xlint:-options"
+export JAVAC="javac -g -Xlint:-options"
export RUN="${progdir}/etc/run-test-jar"
export DEX_LOCATION=/data/run-test/${test_dir}
export NEED_DEX="true"
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index 3cdb40c..8769d11 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -95,9 +95,7 @@
return null;
}
- // TODO: When perflib provides a better way to get the length of the
- // array, we should use that here.
- int numChars = chars.getValues().length;
+ int numChars = chars.getLength();
int count = getIntField(inst, "count", numChars);
if (count == 0) {
return "";
diff --git a/tools/ahat/src/Main.java b/tools/ahat/src/Main.java
index d784599..fdc5a86 100644
--- a/tools/ahat/src/Main.java
+++ b/tools/ahat/src/Main.java
@@ -67,12 +67,14 @@
return;
}
- System.out.println("Processing hprof file...");
- AhatSnapshot ahat = AhatSnapshot.fromHprof(hprof);
-
+ // Launch the server before parsing the hprof file so we get
+ // BindExceptions quickly.
InetAddress loopback = InetAddress.getLoopbackAddress();
InetSocketAddress addr = new InetSocketAddress(loopback, port);
HttpServer server = HttpServer.create(addr, 0);
+
+ System.out.println("Processing hprof file...");
+ AhatSnapshot ahat = AhatSnapshot.fromHprof(hprof);
server.createContext("/", new AhatHttpHandler(new OverviewHandler(ahat, hprof)));
server.createContext("/rooted", new AhatHttpHandler(new RootedHandler(ahat)));
server.createContext("/object", new AhatHttpHandler(new ObjectHandler(ahat)));
diff --git a/tools/cpp-define-generator/Android.mk b/tools/cpp-define-generator/Android.mk
new file mode 100644
index 0000000..6ba643c
--- /dev/null
+++ b/tools/cpp-define-generator/Android.mk
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include art/build/Android.executable.mk
+
+CPP_DEFINE_GENERATOR_SRC_FILES := \
+ main.cc
+
+CPP_DEFINE_GENERATOR_EXTRA_SHARED_LIBRARIES :=
+CPP_DEFINE_GENERATOR_EXTRA_INCLUDE :=
+CPP_DEFINE_GENERATOR_MULTILIB :=
+
+# Build a "data" binary which will hold all the symbol values that will be parsed by the other scripts.
+#
+# Builds are for host only, target-specific define generation is possibly but is trickier and would need extra tooling.
+#
+# In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
+$(eval $(call build-art-executable,cpp-define-generator-data,$(CPP_DEFINE_GENERATOR_SRC_FILES),$(CPP_DEFINE_GENERATOR_EXTRA_SHARED_LIBRARIES),$(CPP_DEFINE_GENERATOR_EXTRA_INCLUDE),host,debug,$(CPP_DEFINE_GENERATOR_MULTILIB),shared))
+
diff --git a/tools/cpp-define-generator/common.def b/tools/cpp-define-generator/common.def
new file mode 100644
index 0000000..76c64c9
--- /dev/null
+++ b/tools/cpp-define-generator/common.def
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Convenience macro to define an offset expression.
+
+#ifndef DEFINE_OFFSET_EXPR
+#define DEFINE_OFFSET_EXPR(holder_type, field_name, field_type, expr) \
+ DEFINE_EXPR(holder_type ## _ ## field_name ## _OFFSET, field_type, expr)
+#define DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
+#endif
+
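For readers unfamiliar with the pattern: a minimal standalone sketch of the token-pasting scheme common.def sets up. The Widget/count names below are hypothetical; real consumers supply their own DEFINE_EXPR before expanding the offset macros.

// Sketch of the DEFINE_OFFSET_EXPR token-pasting pattern (illustrative names).
#include <iostream>

// A consumer defines DEFINE_EXPR before expanding the offset macros; here it
// simply prints the pasted macro name and the value of the expression.
#define DEFINE_EXPR(macro_name, field_type, expr) \
  std::cout << #macro_name << " = " << (expr) << "\n";

#define DEFINE_OFFSET_EXPR(holder_type, field_name, field_type, expr) \
  DEFINE_EXPR(holder_type ## _ ## field_name ## _OFFSET, field_type, expr)

int main() {
  // Expands to DEFINE_EXPR(Widget_count_OFFSET, int32_t, 8) and prints
  // "Widget_count_OFFSET = 8".
  DEFINE_OFFSET_EXPR(Widget, count, int32_t, 8)
  return 0;
}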
diff --git a/tools/cpp-define-generator/common_undef.def b/tools/cpp-define-generator/common_undef.def
new file mode 100644
index 0000000..c44aba7
--- /dev/null
+++ b/tools/cpp-define-generator/common_undef.def
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
+#undef DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
+#undef DEFINE_OFFSET_EXPR
+#endif
diff --git a/tools/cpp-define-generator/constant_class.def b/tools/cpp-define-generator/constant_class.def
new file mode 100644
index 0000000..58372f9
--- /dev/null
+++ b/tools/cpp-define-generator/constant_class.def
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "mirror/class.h" // kStatusInitialized
+#include "modifiers.h" // kAccClassIsFinalizable
+#include "base/bit_utils.h" // MostSignificantBit
+#endif
+
+#define DEFINE_FLAG_OFFSET(type_name, field_name, expr) \
+ DEFINE_EXPR(type_name ## _ ## field_name, uint32_t, (expr))
+
+DEFINE_FLAG_OFFSET(MIRROR_CLASS, STATUS_INITIALIZED, art::mirror::Class::kStatusInitialized)
+DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE, art::kAccClassIsFinalizable)
+// TODO: We should really have a BitPosition which also checks it's a power of 2.
+DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE_BIT, art::MostSignificantBit(art::kAccClassIsFinalizable))
+
+#undef DEFINE_FLAG_OFFSET
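A short sketch of what MostSignificantBit buys here: turning a single-bit flag mask into a bit position, so assembly can use a bit-test instead of a full mask compare. The reimplementation below is illustrative (the real helper lives in base/bit_utils.h), the 0x80000000 flag value is an assumed example, and __builtin_clz assumes GCC/Clang.

// Illustrative: derive a bit position from a one-bit mask at compile time.
#include <cstdint>

constexpr int MostSignificantBit(uint32_t value) {
  // __builtin_clz counts leading zeros; bit 31 has zero leading zeros.
  return (value == 0) ? -1 : (31 - __builtin_clz(value));
}

constexpr uint32_t kExampleFinalizableFlag = 0x80000000u;  // assumed value

static_assert(MostSignificantBit(kExampleFinalizableFlag) == 31, "bit position");
static_assert(MostSignificantBit(0x1u) == 0, "lowest bit");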
diff --git a/tools/cpp-define-generator/constant_globals.def b/tools/cpp-define-generator/constant_globals.def
new file mode 100644
index 0000000..1e24d64
--- /dev/null
+++ b/tools/cpp-define-generator/constant_globals.def
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Export global values.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "globals.h" // art::kObjectAlignment
+#endif
+
+#define DEFINE_OBJECT_EXPR(macro_name, type, constant_field_name) \
+ DEFINE_EXPR(OBJECT_ ## macro_name, type, constant_field_name)
+
+DEFINE_OBJECT_EXPR(ALIGNMENT_MASK, size_t, art::kObjectAlignment - 1)
+DEFINE_OBJECT_EXPR(ALIGNMENT_MASK_TOGGLED, uint32_t, ~static_cast<uint32_t>(art::kObjectAlignment - 1))
+
+#undef DEFINE_OBJECT_EXPR
+
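A worked example of the alignment-mask arithmetic exported above, assuming an object alignment of 8 bytes (the actual value comes from globals.h and may differ):

// Illustrative: mask and toggled-mask arithmetic for an assumed 8-byte alignment.
#include <cstddef>
#include <cstdint>

constexpr size_t kAssumedObjectAlignment = 8;
constexpr size_t kAlignmentMask = kAssumedObjectAlignment - 1;          // 0x7
constexpr uint32_t kAlignmentMaskToggled =
    ~static_cast<uint32_t>(kAssumedObjectAlignment - 1);                // 0xFFFFFFF8

// An address is aligned iff (addr & mask) == 0; rounding down is addr & toggled.
static_assert(kAlignmentMask == 0x7, "mask");
static_assert(kAlignmentMaskToggled == 0xFFFFFFF8u, "toggled mask");
static_assert((0x1234u & kAlignmentMaskToggled) == 0x1230u, "round down");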
diff --git a/tools/cpp-define-generator/constant_jit.def b/tools/cpp-define-generator/constant_jit.def
new file mode 100644
index 0000000..5fa5194
--- /dev/null
+++ b/tools/cpp-define-generator/constant_jit.def
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Constants within jit.h.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "jit/jit.h" // art::kSuspendRequest, etc.
+#endif
+
+#define DEFINE_JIT_CONSTANT(macro_name, type, expr) \
+ DEFINE_EXPR(JIT_ ## macro_name, type, (expr))
+
+DEFINE_JIT_CONSTANT(CHECK_OSR, int16_t, art::jit::kJitCheckForOSR)
+DEFINE_JIT_CONSTANT(HOTNESS_DISABLE, int16_t, art::jit::kJitHotnessDisabled)
+
+#undef DEFINE_JIT_CONSTANT
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
new file mode 100644
index 0000000..67ed5b5
--- /dev/null
+++ b/tools/cpp-define-generator/constant_lockword.def
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Export lockword values.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "lock_word.h" // art::LockWord
+#endif
+
+#define DEFINE_LOCK_WORD_EXPR(macro_name, type, constant_field_name) \
+ DEFINE_EXPR(LOCK_WORD_ ## macro_name, type, art::LockWord::constant_field_name)
+
+DEFINE_LOCK_WORD_EXPR(STATE_SHIFT, int32_t, kStateShift)
+DEFINE_LOCK_WORD_EXPR(STATE_MASK, uint32_t, kStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_SHIFT, int32_t, kReadBarrierStateShift)
+DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK, uint32_t, kReadBarrierStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE, int32_t, kThinLockCountOne)
+
+DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED, uint32_t, kGCStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT, int32_t, kGCStateShift)
+
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT, int32_t, kMarkBitStateShift)
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED, uint32_t, kMarkBitStateMaskShifted)
+
+#undef DEFINE_LOCK_WORD_EXPR
+
diff --git a/tools/cpp-define-generator/constant_reference.def b/tools/cpp-define-generator/constant_reference.def
new file mode 100644
index 0000000..d312f76
--- /dev/null
+++ b/tools/cpp-define-generator/constant_reference.def
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "mirror/object.h" // mirror::Object
+#include "stack.h" // StackReference
+#include "mirror/object_reference.h" // mirror::CompressedReference
+#include "base/bit_utils.h" // WhichPowerOf2
+#endif
+
+// Size of references to the heap on the stack.
+DEFINE_EXPR(STACK_REFERENCE_SIZE, size_t, sizeof(art::StackReference<art::mirror::Object>))
+// Size of heap references
+DEFINE_EXPR(COMPRESSED_REFERENCE_SIZE, size_t, sizeof(art::mirror::CompressedReference<art::mirror::Object>))
+DEFINE_EXPR(COMPRESSED_REFERENCE_SIZE_SHIFT, size_t, art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))
+
+#undef DEFINE_REFERENCE_OFFSET
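The size/shift pairing above lets assembly scale an array index by a shift instead of a multiply. A sketch of the relationship, assuming 32-bit compressed references (sizeof == 4); the local WhichPowerOf2 is illustrative, not the bit_utils.h implementation:

// Illustrative: log2 of a power-of-two size, usable as a scaling shift.
#include <cstddef>

constexpr size_t WhichPowerOf2(size_t value) {
  return (value <= 1) ? 0 : 1 + WhichPowerOf2(value / 2);
}

constexpr size_t kAssumedCompressedReferenceSize = 4;
// Element byte offset can then be computed as: offset = index << shift.
static_assert(WhichPowerOf2(kAssumedCompressedReferenceSize) == 2, "log2(4) == 2");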
diff --git a/tools/cpp-define-generator/constant_rosalloc.def b/tools/cpp-define-generator/constant_rosalloc.def
new file mode 100644
index 0000000..2007cef
--- /dev/null
+++ b/tools/cpp-define-generator/constant_rosalloc.def
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Constants within RosAlloc.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "gc/allocator/rosalloc.h" // art::gc::allocator::RosAlloc
+#endif
+
+#define DEFINE_ROSALLOC_CONSTANT(macro_name, type, expr) \
+ DEFINE_EXPR(ROSALLOC_ ## macro_name, type, (expr))
+
+DEFINE_ROSALLOC_CONSTANT(MAX_THREAD_LOCAL_BRACKET_SIZE, int32_t, art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize)
+DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_SHIFT, int32_t, art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift)
+// TODO: This should be a BitUtils helper, e.g. BitMaskFromSize or something like that.
+DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK, int32_t, static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
+DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK_TOGGLED32,\
+ uint32_t, ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
+DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK_TOGGLED64,\
+ uint64_t, ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
+DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunFreeListOffset())
+DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_HEAD_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunFreeListHeadOffset())
+DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_SIZE_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunFreeListSizeOffset())
+DEFINE_ROSALLOC_CONSTANT(SLOT_NEXT_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunSlotNextOffset())
+
+
+#undef DEFINE_ROSALLOC_CONSTANT
diff --git a/tools/cpp-define-generator/constant_thread.def b/tools/cpp-define-generator/constant_thread.def
new file mode 100644
index 0000000..af5ca21
--- /dev/null
+++ b/tools/cpp-define-generator/constant_thread.def
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Constants within thread.h.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "thread.h" // art::kSuspendRequest, etc.
+#endif
+
+#define DEFINE_THREAD_CONSTANT(macro_name, type, expr) \
+ DEFINE_EXPR(THREAD_ ## macro_name, type, (expr))
+
+DEFINE_THREAD_CONSTANT(SUSPEND_REQUEST, int32_t, art::kSuspendRequest)
+DEFINE_THREAD_CONSTANT(CHECKPOINT_REQUEST, int32_t, art::kCheckpointRequest)
+
+#undef DEFINE_THREAD_CONSTANT
diff --git a/tools/cpp-define-generator/generate-asm-support b/tools/cpp-define-generator/generate-asm-support
new file mode 100755
index 0000000..f95648b
--- /dev/null
+++ b/tools/cpp-define-generator/generate-asm-support
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# Generates asm_support_gen.h
+# - This must be run after a build since it uses cpp-define-generator-data
+
+[[ -z ${ANDROID_BUILD_TOP+x} ]] && { echo "Run source build/envsetup.sh first" >&2; exit 1; }
+
+cpp-define-generator-datad > ${ANDROID_BUILD_TOP}/art/runtime/generated/asm_support_gen.h
diff --git a/tools/cpp-define-generator/main.cc b/tools/cpp-define-generator/main.cc
new file mode 100644
index 0000000..a1b463a
--- /dev/null
+++ b/tools/cpp-define-generator/main.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <sstream>
+#include <type_traits>
+#include <ios>
+#include <algorithm>
+#include <string>
+
+// ART offset file dependencies.
+#define DEFINE_INCLUDE_DEPENDENCIES
+#include "offsets_all.def"
+
+std::string to_upper(std::string input) {
+ std::transform(input.begin(), input.end(), input.begin(), ::toupper);
+ return input;
+}
+
+template <typename T, typename = void>
+typename std::enable_if<!std::is_signed<T>::value, std::string>::type
+pretty_format(T value) {
+ // Print most values as hex.
+ std::stringstream ss;
+ ss << std::showbase << std::hex << value;
+ return ss.str();
+}
+
+template <typename T, typename = void>
+typename std::enable_if<std::is_signed<T>::value, std::string>::type
+pretty_format(T value) {
+ // Print "signed" values as decimal so that the negativity doesn't get lost.
+ std::stringstream ss;
+
+ // For negative values add a (). Omit it from positive values for conciseness.
+ if (value < 0) {
+ ss << "(";
+ }
+
+ ss << value;
+
+ if (value < 0) {
+ ss << ")";
+ }
+ return ss.str();
+}
+
+template <typename T>
+void cpp_define(std::string name, T value) {
+ std::cout << "#define " << name << " " << pretty_format(value) << std::endl;
+}
+
+template <typename T>
+void emit_check_eq(T value, std::string expr) {
+ std::cout << "DEFINE_CHECK_EQ(" << value << ", (" << expr << "))" << std::endl;
+}
+
+const char *kFileHeader = /* // NOLINT [readability/multiline_string] [5] */ R"L1C3NS3(
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
+#define ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
+
+// This file has been auto-generated by cpp-define-generator; do not edit directly.
+)L1C3NS3"; // NOLINT [readability/multiline_string] [5]
+
+const char *kFileFooter = /* // NOLINT [readability/multiline_string] [5] */ R"F00T3R(
+#endif // ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
+)F00T3R"; // NOLINT [readability/multiline_string] [5]
+
+#define MACROIZE(holder_type, field_name) to_upper(#holder_type "_" #field_name "_OFFSET")
+
+int main() {
+ std::cout << kFileHeader << std::endl;
+
+ std::string z = "";
+
+ // Print every constant expression to stdout as a #define or a CHECK_EQ
+#define DEFINE_EXPR(macro_name, field_type, expr) \
+ cpp_define(to_upper(#macro_name), static_cast<field_type>(expr)); \
+ emit_check_eq(z + "static_cast<" #field_type ">(" + to_upper(#macro_name) + ")", \
+ "static_cast<" #field_type ">(" #expr ")");
+#include "offsets_all.def"
+
+ std::cout << kFileFooter << std::endl;
+ return 0;
+}
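Each DEFINE_EXPR processed by main() emits a #define plus a DEFINE_CHECK_EQ guard into asm_support_gen.h; unsigned values come out as hex via pretty_format, signed ones as decimal. A hypothetical excerpt of the generated output (the 0x4 value assumes 32-bit compressed heap references):

// Hypothetical excerpt of asm_support_gen.h output (values are illustrative):
#define STACK_REFERENCE_SIZE 0x4
DEFINE_CHECK_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::StackReference<art::mirror::Object>))))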
diff --git a/tools/cpp-define-generator/offset_codeitem.def b/tools/cpp-define-generator/offset_codeitem.def
new file mode 100644
index 0000000..e5acd1d
--- /dev/null
+++ b/tools/cpp-define-generator/offset_codeitem.def
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Offsets within CodeItem.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include <cstddef> // offsetof
+#include "dex_file.h" // art::DexFile
+#endif
+
+#include "common.def" // DEFINE_OFFSET_EXPR
+
+#define DEFINE_CODEITEM_OFFSET(field_name) \
+ DEFINE_OFFSET_EXPR(CodeItem, field_name, int32_t, offsetof(art::DexFile::CodeItem, field_name ## _))
+
+// Field Name
+DEFINE_CODEITEM_OFFSET(insns)
+
+#undef DEFINE_CODEITEM_OFFSET
+#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_dexcache.def b/tools/cpp-define-generator/offset_dexcache.def
new file mode 100644
index 0000000..3b26518
--- /dev/null
+++ b/tools/cpp-define-generator/offset_dexcache.def
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Offsets within art::ArtMethod.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "art_method.h" // art::ArtMethod
+#include "base/enums.h" // PointerSize
+#endif
+
+#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
+ DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_32, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k32).Int32Value()) \
+ DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_64, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k64).Int32Value())
+
+// New macro suffix Method Name (of the Offset method)
+DEFINE_ART_METHOD_OFFSET(DEX_CACHE_METHODS, DexCacheResolvedMethods)
+DEFINE_ART_METHOD_OFFSET(DEX_CACHE_TYPES, DexCacheResolvedTypes)
+DEFINE_ART_METHOD_OFFSET(JNI, EntryPointFromJni)
+DEFINE_ART_METHOD_OFFSET(QUICK_CODE, EntryPointFromQuickCompiledCode)
+
+#undef DEFINE_ART_METHOD_OFFSET
diff --git a/tools/cpp-define-generator/offset_mirror_object.def b/tools/cpp-define-generator/offset_mirror_object.def
new file mode 100644
index 0000000..9b99634
--- /dev/null
+++ b/tools/cpp-define-generator/offset_mirror_object.def
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Offsets within java.lang.Object (mirror::Object).
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "mirror/object.h" // art::mirror::Object
+#endif
+
+#include "common.def" // DEFINE_OFFSET_EXPR
+
+#define DEFINE_MIRROR_OBJECT_OFFSET(field_name, method_name) \
+ DEFINE_OFFSET_EXPR(MIRROR_OBJECT, field_name, int32_t, art::mirror::Object::method_name##Offset().Int32Value())
+
+// New macro suffix Method Name (of the Offset method)
+DEFINE_MIRROR_OBJECT_OFFSET(CLASS, Class)
+DEFINE_MIRROR_OBJECT_OFFSET(LOCK_WORD, Monitor)
+
+#undef DEFINE_MIRROR_OBJECT_OFFSET
+#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_runtime.def b/tools/cpp-define-generator/offset_runtime.def
new file mode 100644
index 0000000..123992f
--- /dev/null
+++ b/tools/cpp-define-generator/offset_runtime.def
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Offsets within art::Runtime.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "runtime.h" // art::Runtime
+#endif
+
+#include "common.def" // DEFINE_OFFSET_EXPR
+
+// Note: these callee save method loads require read barriers.
+
+#define DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(field_name, constant_name) \
+ DEFINE_OFFSET_EXPR(Runtime, field_name ## _CALLEE_SAVE_FRAME, size_t, art::Runtime::GetCalleeSaveMethodOffset(art::Runtime:: constant_name))
+
+ // Macro substring Constant name
+// Offset of field Runtime::callee_save_methods_[kSaveAll]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_ALL, kSaveAll)
+// Offset of field Runtime::callee_save_methods_[kRefsOnly]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(REFS_ONLY, kRefsOnly)
+// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(REFS_AND_ARGS, kRefsAndArgs)
+// Offset of field Runtime::callee_save_methods_[kSaveEverything]
+DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING, kSaveEverything)
+
+#undef DEFINE_RUNTIME_CALLEE_SAVE_OFFSET
+#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_shadow_frame.def b/tools/cpp-define-generator/offset_shadow_frame.def
new file mode 100644
index 0000000..b49a340
--- /dev/null
+++ b/tools/cpp-define-generator/offset_shadow_frame.def
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Offsets within ShadowFrame.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "stack.h" // art::ShadowFrame
+#endif
+
+#include "common.def" // DEFINE_OFFSET_EXPR
+
+#define DEFINE_SHADOW_FRAME_OFFSET(field_name, method_name) \
+ DEFINE_OFFSET_EXPR(ShadowFrame, field_name, int32_t, art::ShadowFrame::method_name##Offset())
+
+// New macro suffix Method Name (of the Offset method)
+DEFINE_SHADOW_FRAME_OFFSET(LINK, Link)
+DEFINE_SHADOW_FRAME_OFFSET(METHOD, Method)
+DEFINE_SHADOW_FRAME_OFFSET(RESULT_REGISTER, ResultRegister)
+DEFINE_SHADOW_FRAME_OFFSET(DEX_PC_PTR, DexPCPtr)
+DEFINE_SHADOW_FRAME_OFFSET(CODE_ITEM, CodeItem)
+DEFINE_SHADOW_FRAME_OFFSET(LOCK_COUNT_DATA, LockCountData)
+DEFINE_SHADOW_FRAME_OFFSET(NUMBER_OF_VREGS, NumberOfVRegs)
+DEFINE_SHADOW_FRAME_OFFSET(DEX_PC, DexPC)
+DEFINE_SHADOW_FRAME_OFFSET(CACHED_HOTNESS_COUNTDOWN, CachedHotnessCountdown)
+DEFINE_SHADOW_FRAME_OFFSET(VREGS, VRegs)
+
+#undef DEFINE_SHADOW_FRAME_OFFSET
+#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_thread.def b/tools/cpp-define-generator/offset_thread.def
new file mode 100644
index 0000000..6f94d38
--- /dev/null
+++ b/tools/cpp-define-generator/offset_thread.def
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Offsets within art::Thread.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "base/enums.h" // PointerSize
+#include "stack.h" // art::ShadowFrame
+#endif
+
+#include "common.def" // DEFINE_OFFSET_EXPR
+
+#define DEFINE_THREAD_OFFSET(field_name, method_name) \
+ DEFINE_OFFSET_EXPR(Thread, field_name, int32_t, art::Thread::method_name##Offset<art::kRuntimePointerSize>().Int32Value())
+
+// New macro suffix Method Name (of the Offset method)
+DEFINE_THREAD_OFFSET(FLAGS, ThreadFlags)
+DEFINE_THREAD_OFFSET(ID, ThinLockId)
+DEFINE_THREAD_OFFSET(IS_GC_MARKING, IsGcMarking)
+DEFINE_THREAD_OFFSET(CARD_TABLE, CardTable)
+
+// TODO: The rest of the offsets
+// are dependent on __SIZEOF_POINTER__
+
+#undef DEFINE_THREAD_OFFSET
+
+#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
new file mode 100644
index 0000000..01e4d5b
--- /dev/null
+++ b/tools/cpp-define-generator/offsets_all.def
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Includes every single offset file in art.
+// Useful for processing every single offset together.
+
+// Usage:
+// #define DEFINE_INCLUDE_DEPENDENCIES
+// #include "offsets_all.def"
+// to automatically include each def file's header dependencies.
+//
+// Afterwards,
+// #define DEFINE_EXPR(define_name, field_type, expr) ...
+// #include "offsets_all.def"
+// to process each offset however one wants.
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#define DEFINE_EXPR(define_name, field_type, expr)
+#endif
+
+#if !defined(DEFINE_EXPR)
+#error "Either DEFINE_INCLUDE_DEPENDENCIES or DEFINE_EXPR must be defined"
+#endif
+
+#include "constant_reference.def"
+#include "offset_runtime.def"
+// TODO: rest of THREAD_ offsets (depends on __SIZEOF_POINTER__).
+#include "offset_thread.def"
+// TODO: SHADOW_FRAME depends on __SIZEOF_POINTER__
+// #include "offset_shadow_frame.def"
+#include "offset_codeitem.def"
+// TODO: MIRROR_OBJECT_HEADER_SIZE (depends on #ifdef read barrier)
+// TODO: MIRROR_CLASS offsets (see above)
+#include "offset_mirror_object.def"
+#include "constant_class.def"
+// TODO: MIRROR_*_ARRAY offsets (depends on header size)
+// TODO: MIRROR_STRING offsets (depends on header size)
+#include "offset_dexcache.def"
+#include "constant_lockword.def"
+#include "constant_globals.def"
+#include "constant_rosalloc.def"
+#include "constant_thread.def"
+#include "constant_jit.def"
+
+// TODO: MIRROR_OBJECT_HEADER_SIZE #ifdef depends on read barriers
+// TODO: Array offsets (depends on MIRROR_OBJECT_HEADER_SIZE)
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#undef DEFINE_EXPR
+#undef DEFINE_INCLUDE_DEPENDENCIES
+#endif
+
+
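The "include the same list twice" technique this file relies on is the classic X-macro pattern: one pass with a no-op DEFINE_EXPR to pull in header dependencies, a second pass with a real DEFINE_EXPR to emit output. A self-contained sketch, with DEMO_DEF_BODY standing in for the .def file body and all names hypothetical:

// Illustrative X-macro sketch: expand the same list under two definitions.
#include <iostream>

#define DEMO_DEF_BODY                          \
  DEFINE_EXPR(WIDGET_COUNT_OFFSET, int32_t, 8) \
  DEFINE_EXPR(WIDGET_FLAGS_OFFSET, int32_t, 12)

// Pass 1 (dependency pass): expand the list with a no-op DEFINE_EXPR.
#define DEFINE_EXPR(name, type, expr)
DEMO_DEF_BODY
#undef DEFINE_EXPR

// Pass 2 (emission pass): expand the same list again, now generating output.
int main() {
#define DEFINE_EXPR(name, type, expr) \
  std::cout << "#define " << #name << " " << (expr) << std::endl;
  DEMO_DEF_BODY
#undef DEFINE_EXPR
  return 0;
}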
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 996f2f8..8d87e4f 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -36,6 +36,15 @@
names: ["libcore.io.OsTest#testUnixDomainSockets_in_file_system"]
},
{
+ description: "TCP_USER_TIMEOUT is not defined on host's tcp.h (glibc-2.15-4.8).",
+ result: EXEC_FAILED,
+ modes: [host],
+ names: ["libcore.android.system.OsConstantsTest#testTcpUserTimeoutIsDefined",
+ "libcore.io.OsTest#test_socket_tcpUserTimeout_setAndGet",
+ "libcore.io.OsTest#test_socket_tcpUserTimeout_doesNotWorkOnDatagramSocket"],
+ bug: 30402085
+},
+{
description: "Issue with incorrect device time (1970)",
result: EXEC_FAILED,
modes: [device],
@@ -174,38 +183,7 @@
description: "Failing tests after OpenJDK move.",
result: EXEC_FAILED,
bug: 26326992,
- names: ["libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeDateTimeStringDST",
- "libcore.java.lang.OldSystemTest#test_load",
- "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits",
- "libcore.java.text.NumberFormatTest#test_setCurrency",
- "libcore.java.text.OldNumberFormatTest#test_getIntegerInstanceLjava_util_Locale",
- "libcore.java.util.CalendarTest#testAddOneDayAndOneDayOver30MinuteDstForwardAdds48Hours",
- "libcore.java.util.CalendarTest#testNewCalendarKoreaIsSelfConsistent",
- "libcore.java.util.CalendarTest#testSetTimeInZoneWhereDstIsNoLongerUsed",
- "libcore.java.util.CalendarTest#test_nullLocale",
- "libcore.java.util.FormatterTest#test_numberLocalization",
- "libcore.java.util.FormatterTest#test_uppercaseConversions",
- "libcore.javax.crypto.CipherTest#testCipher_getInstance_WrongType_Failure",
- "libcore.javax.crypto.CipherTest#testDecryptBufferZeroSize_mustDecodeToEmptyString",
- "libcore.javax.security.auth.x500.X500PrincipalTest#testExceptionsForWrongDNs",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getDate",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getExpiration",
- "org.apache.harmony.regex.tests.java.util.regex.PatternSyntaxExceptionTest#testPatternSyntaxException",
- "org.apache.harmony.tests.java.lang.FloatTest#test_parseFloat_LString_Harmony6261",
- "org.apache.harmony.tests.java.lang.ThreadTest#test_isDaemon",
- "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String",
- "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_W_w_dd_MMMM_yyyy_EEEE",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_dayOfYearPatterns",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_m_z",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_2DigitOffsetFromGMT",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_4DigitOffsetFromGMT",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_4DigitOffsetNoGMT",
- "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0",
- "libcore.java.util.CalendarTest#test_clear_45877",
- "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat",
- "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"]
+ names: ["libcore.java.lang.OldSystemTest#test_load"]
},
{
description: "Missing resource in classpath",
@@ -260,5 +238,14 @@
bug: 30107038,
modes: [device],
names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_destroyForcibly"]
+},
+{
+ description: "Flaky failure, native crash in the runtime.
+ Unclear if this relates to the tests running sh as a child process.",
+ result: EXEC_FAILED,
+ bug: 30657148,
+ modes: [device],
+ names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
+ "libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
}
]